From a3c58152142b42bdbd66b1fdaa0a432062ec5094 Mon Sep 17 00:00:00 2001
From: Darren Kelly <107671032+darrenvechain@users.noreply.github.com>
Date: Tue, 29 Oct 2024 08:17:08 +0000
Subject: [PATCH 01/25] Darren/logdb remove leading zeros (#865)
---
logdb/logdb.go | 17 ++++++-
logdb/logdb_bench_test.go | 45 +++++++++---------
logdb/logdb_test.go | 97 ++++++++++++++++++++++++++-------------
logdb/types.go | 2 +-
4 files changed, 102 insertions(+), 59 deletions(-)
diff --git a/logdb/logdb.go b/logdb/logdb.go
index bcd793e94..b1979813f 100644
--- a/logdb/logdb.go
+++ b/logdb/logdb.go
@@ -398,11 +398,23 @@ func (db *LogDB) NewWriterSyncOff() *Writer {
func topicValue(topics []thor.Bytes32, i int) []byte {
if i < len(topics) {
- return topics[i][:]
+ return removeLeadingZeros(topics[i][:])
}
return nil
}
+func removeLeadingZeros(bytes []byte) []byte {
+ i := 0
+ // increase i until it reaches the first non-zero byte
+ for ; i < len(bytes) && bytes[i] == 0; i++ {
+ }
+ // ensure at least 1 byte exists
+ if i == len(bytes) {
+ return []byte{0}
+ }
+ return bytes[i:]
+}
+
// Writer is the transactional log writer.
type Writer struct {
conn *sql.Conn
@@ -481,7 +493,8 @@ func (w *Writer) Write(b *block.Block, receipts tx.Receipts) error {
topicValue(ev.Topics, 1),
topicValue(ev.Topics, 2),
topicValue(ev.Topics, 3),
- topicValue(ev.Topics, 4)); err != nil {
+ topicValue(ev.Topics, 4),
+ ); err != nil {
return err
}
diff --git a/logdb/logdb_bench_test.go b/logdb/logdb_bench_test.go
index e421ffce3..9e667999b 100644
--- a/logdb/logdb_bench_test.go
+++ b/logdb/logdb_bench_test.go
@@ -3,7 +3,7 @@
// Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying
// file LICENSE or <https://www.gnu.org/licenses/lgpl-3.0.html>
-package logdb_test
+package logdb
import (
"context"
@@ -15,7 +15,6 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/vechain/thor/v2/block"
- "github.com/vechain/thor/v2/logdb"
"github.com/vechain/thor/v2/thor"
"github.com/vechain/thor/v2/tx"
)
@@ -39,7 +38,7 @@ func init() {
flag.StringVar(&dbPath, "dbPath", "", "Path to the database file")
}
-// TestLogDB_NewestBlockID performs a series of read/write benchmarks on the NewestBlockID functionality of the LogDB.
+// BenchmarkFakeDB_NewestBlockID performs a series of read/write benchmarks on the NewestBlockID functionality of LogDB.
// It benchmarks the creating, writing, committing a new block, followed by fetching this new block as the NewestBlockID
func BenchmarkFakeDB_NewestBlockID(t *testing.B) {
db, err := createTempDB()
@@ -155,7 +154,7 @@ func BenchmarkTestDB_HasBlockID(b *testing.B) {
defer db.Close()
// find the first 500k blocks with events
- events, err := db.FilterEvents(context.Background(), &logdb.EventFilter{Options: &logdb.Options{Offset: 0, Limit: 500_000}})
+ events, err := db.FilterEvents(context.Background(), &EventFilter{Options: &Options{Offset: 0, Limit: 500_000}})
require.NoError(b, err)
require.GreaterOrEqual(b, len(events), 500_000, "there should be more than 500k events in the db")
@@ -178,12 +177,12 @@ func BenchmarkTestDB_FilterEvents(b *testing.B) {
vthoAddress := thor.MustParseAddress(VTHO_ADDRESS)
topic := thor.MustParseBytes32(VTHO_TOPIC)
- addressFilterCriteria := []*logdb.EventCriteria{
+ addressFilterCriteria := []*EventCriteria{
{
Address: &vthoAddress,
},
}
- topicFilterCriteria := []*logdb.EventCriteria{
+ topicFilterCriteria := []*EventCriteria{
{
Topics: [5]*thor.Bytes32{&topic, nil, nil, nil, nil},
},
@@ -191,14 +190,14 @@ func BenchmarkTestDB_FilterEvents(b *testing.B) {
tests := []struct {
name string
- arg *logdb.EventFilter
+ arg *EventFilter
}{
- {"AddressCriteriaFilter", &logdb.EventFilter{CriteriaSet: addressFilterCriteria, Options: &logdb.Options{Offset: 0, Limit: 500000}}},
- {"TopicCriteriaFilter", &logdb.EventFilter{CriteriaSet: topicFilterCriteria, Options: &logdb.Options{Offset: 0, Limit: 500000}}},
- {"EventLimit", &logdb.EventFilter{Order: logdb.ASC, Options: &logdb.Options{Offset: 0, Limit: 500000}}},
- {"EventLimitDesc", &logdb.EventFilter{Order: logdb.DESC, Options: &logdb.Options{Offset: 0, Limit: 500000}}},
- {"EventRange", &logdb.EventFilter{Range: &logdb.Range{From: 500000, To: 1_000_000}}},
- {"EventRangeDesc", &logdb.EventFilter{Range: &logdb.Range{From: 500000, To: 1_000_000}, Order: logdb.DESC}},
+ {"AddressCriteriaFilter", &EventFilter{CriteriaSet: addressFilterCriteria, Options: &Options{Offset: 0, Limit: 500000}}},
+ {"TopicCriteriaFilter", &EventFilter{CriteriaSet: topicFilterCriteria, Options: &Options{Offset: 0, Limit: 500000}}},
+ {"EventLimit", &EventFilter{Order: ASC, Options: &Options{Offset: 0, Limit: 500000}}},
+ {"EventLimitDesc", &EventFilter{Order: DESC, Options: &Options{Offset: 0, Limit: 500000}}},
+ {"EventRange", &EventFilter{Range: &Range{From: 500000, To: 1_000_000}}},
+ {"EventRangeDesc", &EventFilter{Range: &Range{From: 500000, To: 1_000_000}, Order: DESC}},
}
for _, tt := range tests {
@@ -222,7 +221,7 @@ func BenchmarkTestDB_FilterTransfers(b *testing.B) {
defer db.Close()
txOrigin := thor.MustParseAddress(TEST_ADDRESS)
- transferCriteria := []*logdb.TransferCriteria{
+ transferCriteria := []*TransferCriteria{
{
TxOrigin: &txOrigin,
Sender: nil,
@@ -232,12 +231,12 @@ func BenchmarkTestDB_FilterTransfers(b *testing.B) {
tests := []struct {
name string
- arg *logdb.TransferFilter
+ arg *TransferFilter
}{
- {"TransferCriteria", &logdb.TransferFilter{CriteriaSet: transferCriteria, Options: &logdb.Options{Offset: 0, Limit: 500_000}}},
- {"TransferCriteriaDesc", &logdb.TransferFilter{Order: logdb.DESC, CriteriaSet: transferCriteria, Options: &logdb.Options{Offset: 0, Limit: 500_000}}},
- {"Ranged500K", &logdb.TransferFilter{Range: &logdb.Range{From: 500_000, To: 1_000_000}}},
- {"Ranged500KDesc", &logdb.TransferFilter{Range: &logdb.Range{From: 500_000, To: 1_000_000}, Order: logdb.DESC}},
+ {"TransferCriteria", &TransferFilter{CriteriaSet: transferCriteria, Options: &Options{Offset: 0, Limit: 500_000}}},
+ {"TransferCriteriaDesc", &TransferFilter{Order: DESC, CriteriaSet: transferCriteria, Options: &Options{Offset: 0, Limit: 500_000}}},
+ {"Ranged500K", &TransferFilter{Range: &Range{From: 500_000, To: 1_000_000}}},
+ {"Ranged500KDesc", &TransferFilter{Range: &Range{From: 500_000, To: 1_000_000}, Order: DESC}},
}
for _, tt := range tests {
@@ -253,7 +252,7 @@ func BenchmarkTestDB_FilterTransfers(b *testing.B) {
}
}
-func createTempDB() (*logdb.LogDB, error) {
+func createTempDB() (*LogDB, error) {
dir, err := os.MkdirTemp("", "tempdir-")
if err != nil {
return nil, fmt.Errorf("failed to create temp directory: %w", err)
@@ -268,7 +267,7 @@ func createTempDB() (*logdb.LogDB, error) {
return nil, fmt.Errorf("failed to close temp file: %w", err)
}
- db, err := logdb.New(tmpFile.Name())
+ db, err := New(tmpFile.Name())
if err != nil {
return nil, fmt.Errorf("unable to load logdb: %w", err)
}
@@ -276,10 +275,10 @@ func createTempDB() (*logdb.LogDB, error) {
return db, nil
}
-func loadDBFromDisk(b *testing.B) (*logdb.LogDB, error) {
+func loadDBFromDisk(b *testing.B) (*LogDB, error) {
if dbPath == "" {
b.Fatal("Please provide a dbPath")
}
- return logdb.New(dbPath)
+ return New(dbPath)
}
diff --git a/logdb/logdb_test.go b/logdb/logdb_test.go
index 7ffdd59b1..fc7c6af56 100644
--- a/logdb/logdb_test.go
+++ b/logdb/logdb_test.go
@@ -3,7 +3,7 @@
// Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying
// file LICENSE or <https://www.gnu.org/licenses/lgpl-3.0.html>
-package logdb_test
+package logdb
import (
"context"
@@ -11,10 +11,10 @@ import (
"math/big"
"testing"
+ "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/stretchr/testify/assert"
"github.com/vechain/thor/v2/block"
- logdb "github.com/vechain/thor/v2/logdb"
"github.com/vechain/thor/v2/thor"
"github.com/vechain/thor/v2/tx"
)
@@ -84,9 +84,9 @@ func newTransferOnlyReceipt() *tx.Receipt {
}
}
-type eventLogs []*logdb.Event
+type eventLogs []*Event
-func (logs eventLogs) Filter(f func(ev *logdb.Event) bool) (ret eventLogs) {
+func (logs eventLogs) Filter(f func(ev *Event) bool) (ret eventLogs) {
for _, ev := range logs {
if f(ev) {
ret = append(ret, ev)
@@ -102,9 +102,9 @@ func (logs eventLogs) Reverse() (ret eventLogs) {
return
}
-type transferLogs []*logdb.Transfer
+type transferLogs []*Transfer
-func (logs transferLogs) Filter(f func(tr *logdb.Transfer) bool) (ret transferLogs) {
+func (logs transferLogs) Filter(f func(tr *Transfer) bool) (ret transferLogs) {
for _, tr := range logs {
if f(tr) {
ret = append(ret, tr)
@@ -121,7 +121,7 @@ func (logs transferLogs) Reverse() (ret transferLogs) {
}
func TestEvents(t *testing.T) {
- db, err := logdb.NewMem()
+ db, err := NewMem()
if err != nil {
t.Fatal(err)
}
@@ -144,7 +144,7 @@ func TestEvents(t *testing.T) {
tx := b.Transactions()[j]
receipt := receipts[j]
origin, _ := tx.Origin()
- allEvents = append(allEvents, &logdb.Event{
+ allEvents = append(allEvents, &Event{
BlockNumber: b.Header().Number(),
Index: uint32(j),
BlockID: b.Header().ID(),
@@ -157,7 +157,7 @@ func TestEvents(t *testing.T) {
Data: receipt.Outputs[0].Events[0].Data,
})
- allTransfers = append(allTransfers, &logdb.Transfer{
+ allTransfers = append(allTransfers, &Transfer{
BlockNumber: b.Header().Number(),
Index: uint32(j),
BlockID: b.Header().ID(),
@@ -184,21 +184,21 @@ func TestEvents(t *testing.T) {
{
tests := []struct {
name string
- arg *logdb.EventFilter
+ arg *EventFilter
want eventLogs
}{
- {"query all events", &logdb.EventFilter{}, allEvents},
+ {"query all events", &EventFilter{}, allEvents},
{"query all events with nil option", nil, allEvents},
- {"query all events asc", &logdb.EventFilter{Order: logdb.ASC}, allEvents},
- {"query all events desc", &logdb.EventFilter{Order: logdb.DESC}, allEvents.Reverse()},
- {"query all events limit offset", &logdb.EventFilter{Options: &logdb.Options{Offset: 1, Limit: 10}}, allEvents[1:11]},
- {"query all events range", &logdb.EventFilter{Range: &logdb.Range{From: 10, To: 20}}, allEvents.Filter(func(ev *logdb.Event) bool { return ev.BlockNumber >= 10 && ev.BlockNumber <= 20 })},
- {"query events with range and desc", &logdb.EventFilter{Range: &logdb.Range{From: 10, To: 20}, Order: logdb.DESC}, allEvents.Filter(func(ev *logdb.Event) bool { return ev.BlockNumber >= 10 && ev.BlockNumber <= 20 }).Reverse()},
- {"query events with limit with desc", &logdb.EventFilter{Order: logdb.DESC, Options: &logdb.Options{Limit: 10}}, allEvents.Reverse()[0:10]},
- {"query all events with criteria", &logdb.EventFilter{CriteriaSet: []*logdb.EventCriteria{{Address: &allEvents[1].Address}}}, allEvents.Filter(func(ev *logdb.Event) bool {
+ {"query all events asc", &EventFilter{Order: ASC}, allEvents},
+ {"query all events desc", &EventFilter{Order: DESC}, allEvents.Reverse()},
+ {"query all events limit offset", &EventFilter{Options: &Options{Offset: 1, Limit: 10}}, allEvents[1:11]},
+ {"query all events range", &EventFilter{Range: &Range{From: 10, To: 20}}, allEvents.Filter(func(ev *Event) bool { return ev.BlockNumber >= 10 && ev.BlockNumber <= 20 })},
+ {"query events with range and desc", &EventFilter{Range: &Range{From: 10, To: 20}, Order: DESC}, allEvents.Filter(func(ev *Event) bool { return ev.BlockNumber >= 10 && ev.BlockNumber <= 20 }).Reverse()},
+ {"query events with limit with desc", &EventFilter{Order: DESC, Options: &Options{Limit: 10}}, allEvents.Reverse()[0:10]},
+ {"query all events with criteria", &EventFilter{CriteriaSet: []*EventCriteria{{Address: &allEvents[1].Address}}}, allEvents.Filter(func(ev *Event) bool {
return ev.Address == allEvents[1].Address
})},
- {"query all events with multi-criteria", &logdb.EventFilter{CriteriaSet: []*logdb.EventCriteria{{Address: &allEvents[1].Address}, {Topics: [5]*thor.Bytes32{allEvents[2].Topics[0]}}, {Topics: [5]*thor.Bytes32{allEvents[3].Topics[0]}}}}, allEvents.Filter(func(ev *logdb.Event) bool {
+ {"query all events with multi-criteria", &EventFilter{CriteriaSet: []*EventCriteria{{Address: &allEvents[1].Address}, {Topics: [5]*thor.Bytes32{allEvents[2].Topics[0]}}, {Topics: [5]*thor.Bytes32{allEvents[3].Topics[0]}}}}, allEvents.Filter(func(ev *Event) bool {
return ev.Address == allEvents[1].Address || *ev.Topics[0] == *allEvents[2].Topics[0] || *ev.Topics[0] == *allEvents[3].Topics[0]
})},
}
@@ -215,21 +215,21 @@ func TestEvents(t *testing.T) {
{
tests := []struct {
name string
- arg *logdb.TransferFilter
+ arg *TransferFilter
want transferLogs
}{
- {"query all transfers", &logdb.TransferFilter{}, allTransfers},
+ {"query all transfers", &TransferFilter{}, allTransfers},
{"query all transfers with nil option", nil, allTransfers},
- {"query all transfers asc", &logdb.TransferFilter{Order: logdb.ASC}, allTransfers},
- {"query all transfers desc", &logdb.TransferFilter{Order: logdb.DESC}, allTransfers.Reverse()},
- {"query all transfers limit offset", &logdb.TransferFilter{Options: &logdb.Options{Offset: 1, Limit: 10}}, allTransfers[1:11]},
- {"query all transfers range", &logdb.TransferFilter{Range: &logdb.Range{From: 10, To: 20}}, allTransfers.Filter(func(tr *logdb.Transfer) bool { return tr.BlockNumber >= 10 && tr.BlockNumber <= 20 })},
- {"query transfers with range and desc", &logdb.TransferFilter{Range: &logdb.Range{From: 10, To: 20}, Order: logdb.DESC}, allTransfers.Filter(func(tr *logdb.Transfer) bool { return tr.BlockNumber >= 10 && tr.BlockNumber <= 20 }).Reverse()},
- {"query transfers with limit with desc", &logdb.TransferFilter{Order: logdb.DESC, Options: &logdb.Options{Limit: 10}}, allTransfers.Reverse()[0:10]},
- {"query all transfers with criteria", &logdb.TransferFilter{CriteriaSet: []*logdb.TransferCriteria{{Sender: &allTransfers[1].Sender}}}, allTransfers.Filter(func(tr *logdb.Transfer) bool {
+ {"query all transfers asc", &TransferFilter{Order: ASC}, allTransfers},
+ {"query all transfers desc", &TransferFilter{Order: DESC}, allTransfers.Reverse()},
+ {"query all transfers limit offset", &TransferFilter{Options: &Options{Offset: 1, Limit: 10}}, allTransfers[1:11]},
+ {"query all transfers range", &TransferFilter{Range: &Range{From: 10, To: 20}}, allTransfers.Filter(func(tr *Transfer) bool { return tr.BlockNumber >= 10 && tr.BlockNumber <= 20 })},
+ {"query transfers with range and desc", &TransferFilter{Range: &Range{From: 10, To: 20}, Order: DESC}, allTransfers.Filter(func(tr *Transfer) bool { return tr.BlockNumber >= 10 && tr.BlockNumber <= 20 }).Reverse()},
+ {"query transfers with limit with desc", &TransferFilter{Order: DESC, Options: &Options{Limit: 10}}, allTransfers.Reverse()[0:10]},
+ {"query all transfers with criteria", &TransferFilter{CriteriaSet: []*TransferCriteria{{Sender: &allTransfers[1].Sender}}}, allTransfers.Filter(func(tr *Transfer) bool {
return tr.Sender == allTransfers[1].Sender
})},
- {"query all transfers with multi-criteria", &logdb.TransferFilter{CriteriaSet: []*logdb.TransferCriteria{{Sender: &allTransfers[1].Sender}, {Recipient: &allTransfers[2].Recipient}}}, allTransfers.Filter(func(tr *logdb.Transfer) bool {
+ {"query all transfers with multi-criteria", &TransferFilter{CriteriaSet: []*TransferCriteria{{Sender: &allTransfers[1].Sender}, {Recipient: &allTransfers[2].Recipient}}}, allTransfers.Filter(func(tr *Transfer) bool {
return tr.Sender == allTransfers[1].Sender || tr.Recipient == allTransfers[2].Recipient
})},
}
@@ -244,10 +244,10 @@ func TestEvents(t *testing.T) {
}
}
-// TestLogDB_NewestBlockID performs a series of read/write tests on the NewestBlockID functionality of the LogDB.
+// TestLogDB_NewestBlockID performs a series of read/write tests on the NewestBlockID functionality of the LogDB.
// It validates the correctness of the NewestBlockID method under various scenarios.
func TestLogDB_NewestBlockID(t *testing.T) {
- db, err := logdb.NewMem()
+ db, err := NewMem()
if err != nil {
t.Fatal(err)
}
@@ -368,9 +368,9 @@ func TestLogDB_NewestBlockID(t *testing.T) {
}
}
-// TestLogDB_HasBlockID performs a series of tests on the HasBlockID functionality of the LogDB.
+// TestLogDB_HasBlockID performs a series of tests on the HasBlockID functionality of the LogDB.
func TestLogDB_HasBlockID(t *testing.T) {
- db, err := logdb.NewMem()
+ db, err := NewMem()
if err != nil {
t.Fatal(err)
}
@@ -431,3 +431,34 @@ func TestLogDB_HasBlockID(t *testing.T) {
}
assert.True(t, has)
}
+
+func TestRemoveLeadingZeros(t *testing.T) {
+ tests := []struct {
+ name string
+ input []byte
+ expected []byte
+ }{
+ {
+ "should remove leading zeros",
+ common.Hex2Bytes("0000000000000000000000006d95e6dca01d109882fe1726a2fb9865fa41e7aa"),
+ common.Hex2Bytes("6d95e6dca01d109882fe1726a2fb9865fa41e7aa"),
+ },
+ {
+ "should not remove any bytes",
+ common.Hex2Bytes("ddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef"),
+ common.Hex2Bytes("ddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef"),
+ },
+ {
+ "should have at least 1 byte",
+ common.Hex2Bytes("0000000000000000"),
+ []byte{0},
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ result := removeLeadingZeros(tt.input)
+ assert.Equal(t, tt.expected, result)
+ })
+ }
+}
diff --git a/logdb/types.go b/logdb/types.go
index e4ebb1be4..7aa5ce990 100644
--- a/logdb/types.go
+++ b/logdb/types.go
@@ -71,7 +71,7 @@ func (c *EventCriteria) toWhereCondition() (cond string, args []interface{}) {
for i, topic := range c.Topics {
if topic != nil {
cond += fmt.Sprintf(" AND topic%v = ", i) + refIDQuery
- args = append(args, topic.Bytes())
+ args = append(args, removeLeadingZeros(topic.Bytes()))
}
}
return
From f471d950665743220cd0b309e27e4ee556f1870f Mon Sep 17 00:00:00 2001
From: Paolo Galli
Date: Fri, 8 Nov 2024 16:01:55 +0000
Subject: [PATCH 02/25] Add LogIndex and TxIndex into logs/event response body
(#862)
* Thor client (#818)
* feat: add thorclient
* refactor: remove roundTripper
* refactor: change null check
* clean: remove commented code
* feat: add account revision and pending tx
* fix: add licence headers and fix linter issue
* refactor: rename package
* refactor: change revision type to string
* refactor: rename GetLogs and GetTransfers to FilterEvents and FilterTransfers
* refactor: change FilterEvents and FilterTransactions request type to EventFilter
* Adding common.EventWrapper to handle channel errors
* tweak
* update rawclient + update account tests
* tidy up names
* update tests
* pr comments
* adding raw tx
* Tidy up method names and calls
* options client
* tweaks
* pr comments
* Update thorclient/common/common.go
Co-authored-by: libotony
* pr comments
* Adding Subscriptions
* Pr comments
* adjust func orders
* pr comments
* changing subscribe to use the channel close vs multiple channels
* adding go-doc
* no error after unsubscribe
* pr comments
* checking status code is 2xx
* fix: change FilterTransfers argument
---------
Co-authored-by: otherview
Co-authored-by: libotony
* Show all issues on lint (#869)
* Show all issues on lint
* fix lint
* fix(docker): using AWS docker repo for trivy (#872)
* fix(docker): using AWS docker repo for trivy
* fix(docker): using AWS docker repo for trivy
* Darren/feat/add subscription cache (#866)
* ehancement: create a cache for block based subscriptions
* minor: change function names for subscriptions
* test: add unit test for message cache
* chore: add license headers
* refactor: fix up error handling
* fix: remove bad test
* fix: PR comments
* fix: PR comments - remove block cache
* refactor(subscriptions): store structs in cache, not bytes
* fix(license): add license header
* chore(subscriptions): revert unit test changes
* enhancement: resolve pr comments to use simplelru
* enhancement: resolve pr comments - use id as key
* Add additional block tests (#863)
* enhancement(logging): leverage trace level (#873)
* Add testchain package (#844)
* Refactor thor node
* thorchain allows insertion of blocks
* remove thorNode, added testchain
* clean up + comments
* adding license headers
* adding templating tests for thorclient
* Remove test event hacks
* remove types
* removed chain_builder + added logdb to testchain
* pr comments
* Update test/testchain/chain.go
Co-authored-by: libotony
---------
Co-authored-by: libotony
* chore(docs): update spec for validator nodes (#875)
* chore(docs): update spec for validator nodes
* chore(docs): update cores
* chore(docs): remove public node stuff
* Darren/logdb remove leading zeros (#865)
* feat: add new txIndex column to event meta response
* test: add convert event test
* feat: make txLog and txIndex as optional return params
* chore: update swagger with new event optional data
* feat: save logIndex in sequence
* feat: tweaked bits in sequence
* refactor: rename optional log meta field
* refactor: comments, yaml and txIndex counts
* rebase to master
* fix: remove stale struct
* add txIndex to returned logdb query
* reset to 0 eventCount and transferCount each receipt and write blockId only once
* fix lint
* rephrase logIndex description in yaml file
* refactor: use filter.Option instead of eventFilter.Option
* move includeIndexes to api
---------
Co-authored-by: otherview
Co-authored-by: libotony
Co-authored-by: Darren Kelly <107671032+darrenvechain@users.noreply.github.com>
Co-authored-by: Makis Christou
---
api/doc/thor.yaml | 38 +++++++++++++++++-
api/events/events.go | 9 +++--
api/events/events_test.go | 52 +++++++++++++++++++++++-
api/events/types.go | 63 +++++++++++++----------------
api/events/types_test.go | 71 ++++++++++++++++++++++++++-------
api/transfers/transfers.go | 16 +++++---
api/transfers/transfers_test.go | 53 +++++++++++++++++++++++-
api/transfers/types.go | 15 +++++--
cmd/thor/sync_logdb.go | 12 +++++-
logdb/logdb.go | 33 ++++++++-------
logdb/logdb_test.go | 6 ++-
logdb/sequence.go | 41 ++++++++++++++-----
logdb/sequence_test.go | 36 +++++++++++------
logdb/types.go | 6 ++-
thor/params.go | 5 +++
15 files changed, 348 insertions(+), 108 deletions(-)
diff --git a/api/doc/thor.yaml b/api/doc/thor.yaml
index 732a5f1a3..2a0d30b9e 100644
--- a/api/doc/thor.yaml
+++ b/api/doc/thor.yaml
@@ -1325,6 +1325,16 @@ components:
description: The index of the clause in the transaction, from which the log was generated.
example: 0
nullable: false
+ txIndex:
+ description: The index of the transaction in the block, from which the log was generated.
+ type: integer
+ nullable: true
+ example: 1
+ logIndex:
+ description: The index of the log in the receipt's outputs. This is an overall index among all clauses.
+ type: integer
+ nullable: true
+ example: 1
Block:
title: Block
@@ -1855,6 +1865,11 @@ components:
The limit of records to be included in the output. Use this parameter for pagination.
Default's to all results.
+ includeIndexes:
+ type: boolean
+ example: true
+ nullable: true
+ description: Include both transaction and log index in the response.
description: |
Include these parameters to receive filtered results in a paged format.
@@ -1865,7 +1880,8 @@ components:
{
"options": {
"offset": 0,
- "limit": 10
+ "limit": 10,
+ "includeIndexes": true
}
}
```
@@ -1916,6 +1932,26 @@ components:
}
```
This refers to the range from block 10 to block 1000.
+
+ EventOptionalData:
+ nullable: true
+ type: object
+ title: EventOptionalData
+ properties:
+ txIndex:
+ type: boolean
+ example: true
+ nullable: true
+ description: |
+ Specifies whether to include in the response the event transaction index.
+ logIndex:
+ type: boolean
+ example: true
+ nullable: true
+ description: |
+ Specifies whether to include in the response the event log index.
+ description: |
+ Specifies all the optional data that can be included in the response.
EventCriteria:
type: object
diff --git a/api/events/events.go b/api/events/events.go
index 40dff7b09..b4c93fadc 100644
--- a/api/events/events.go
+++ b/api/events/events.go
@@ -44,7 +44,7 @@ func (e *Events) filter(ctx context.Context, ef *EventFilter) ([]*FilteredEvent,
}
fes := make([]*FilteredEvent, len(events))
for i, e := range events {
- fes[i] = convertEvent(e)
+ fes[i] = convertEvent(e, ef.Options.IncludeIndexes)
}
return fes, nil
}
@@ -60,9 +60,10 @@ func (e *Events) handleFilter(w http.ResponseWriter, req *http.Request) error {
if filter.Options == nil {
// if filter.Options is nil, set to the default limit +1
// to detect whether there are more logs than the default limit
- filter.Options = &logdb.Options{
- Offset: 0,
- Limit: e.limit + 1,
+ filter.Options = &Options{
+ Offset: 0,
+ Limit: e.limit + 1,
+ IncludeIndexes: false,
}
}
diff --git a/api/events/events_test.go b/api/events/events_test.go
index b1268d378..89aafd36f 100644
--- a/api/events/events_test.go
+++ b/api/events/events_test.go
@@ -56,6 +56,56 @@ func TestEvents(t *testing.T) {
testEventWithBlocks(t, blocksToInsert)
}
+func TestOptionalIndexes(t *testing.T) {
+ thorChain := initEventServer(t, defaultLogLimit)
+ defer ts.Close()
+ insertBlocks(t, thorChain.LogDB(), 5)
+ tclient = thorclient.New(ts.URL)
+
+ testCases := []struct {
+ name string
+ includeIndexes bool
+ expected *uint32
+ }{
+ {
+ name: "do not include indexes",
+ includeIndexes: false,
+ expected: nil,
+ },
+ {
+ name: "include indexes",
+ includeIndexes: true,
+ expected: new(uint32),
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ filter := events.EventFilter{
+ CriteriaSet: make([]*events.EventCriteria, 0),
+ Range: nil,
+ Options: &events.Options{Limit: 6, IncludeIndexes: tc.includeIndexes},
+ Order: logdb.DESC,
+ }
+
+ res, statusCode, err := tclient.RawHTTPClient().RawHTTPPost("/logs/event", filter)
+ assert.NoError(t, err)
+ assert.Equal(t, http.StatusOK, statusCode)
+ var tLogs []*events.FilteredEvent
+ if err := json.Unmarshal(res, &tLogs); err != nil {
+ t.Fatal(err)
+ }
+ assert.Equal(t, http.StatusOK, statusCode)
+ assert.Equal(t, 5, len(tLogs))
+
+ for _, tLog := range tLogs {
+ assert.Equal(t, tc.expected, tLog.Meta.TxIndex)
+ assert.Equal(t, tc.expected, tLog.Meta.LogIndex)
+ }
+ })
+ }
+}
+
func TestOption(t *testing.T) {
thorChain := initEventServer(t, 5)
defer ts.Close()
@@ -65,7 +115,7 @@ func TestOption(t *testing.T) {
filter := events.EventFilter{
CriteriaSet: make([]*events.EventCriteria, 0),
Range: nil,
- Options: &logdb.Options{Limit: 6},
+ Options: &events.Options{Limit: 6},
Order: logdb.DESC,
}
diff --git a/api/events/types.go b/api/events/types.go
index 0dce06aa4..575f8d855 100644
--- a/api/events/types.go
+++ b/api/events/types.go
@@ -6,7 +6,6 @@
package events
import (
- "fmt"
"math"
"github.com/ethereum/go-ethereum/common/hexutil"
@@ -23,6 +22,8 @@ type LogMeta struct {
TxID thor.Bytes32 `json:"txID"`
TxOrigin thor.Address `json:"txOrigin"`
ClauseIndex uint32 `json:"clauseIndex"`
+ TxIndex *uint32 `json:"txIndex,omitempty"`
+ LogIndex *uint32 `json:"logIndex,omitempty"`
}
type TopicSet struct {
@@ -42,8 +43,8 @@ type FilteredEvent struct {
}
// convert a logdb.Event into a json format Event
-func convertEvent(event *logdb.Event) *FilteredEvent {
- fe := FilteredEvent{
+func convertEvent(event *logdb.Event, addIndexes bool) *FilteredEvent {
+ fe := &FilteredEvent{
Address: event.Address,
Data: hexutil.Encode(event.Data),
Meta: LogMeta{
@@ -55,38 +56,19 @@ func convertEvent(event *logdb.Event) *FilteredEvent {
ClauseIndex: event.ClauseIndex,
},
}
+
+ if addIndexes {
+ fe.Meta.TxIndex = &event.TxIndex
+ fe.Meta.LogIndex = &event.LogIndex
+ }
+
fe.Topics = make([]*thor.Bytes32, 0)
for i := 0; i < 5; i++ {
if event.Topics[i] != nil {
fe.Topics = append(fe.Topics, event.Topics[i])
}
}
- return &fe
-}
-
-func (e *FilteredEvent) String() string {
- return fmt.Sprintf(`
- Event(
- address: %v,
- topics: %v,
- data: %v,
- meta: (blockID %v,
- blockNumber %v,
- blockTimestamp %v),
- txID %v,
- txOrigin %v,
- clauseIndex %v)
- )`,
- e.Address,
- e.Topics,
- e.Data,
- e.Meta.BlockID,
- e.Meta.BlockNumber,
- e.Meta.BlockTimestamp,
- e.Meta.TxID,
- e.Meta.TxOrigin,
- e.Meta.ClauseIndex,
- )
+ return fe
}
type EventCriteria struct {
@@ -94,11 +76,17 @@ type EventCriteria struct {
TopicSet
}
+type Options struct {
+ Offset uint64
+ Limit uint64
+ IncludeIndexes bool
+}
+
type EventFilter struct {
- CriteriaSet []*EventCriteria `json:"criteriaSet"`
- Range *Range `json:"range"`
- Options *logdb.Options `json:"options"`
- Order logdb.Order `json:"order"`
+ CriteriaSet []*EventCriteria
+ Range *Range
+ Options *Options
+ Order logdb.Order // default asc
}
func convertEventFilter(chain *chain.Chain, filter *EventFilter) (*logdb.EventFilter, error) {
@@ -107,9 +95,12 @@ func convertEventFilter(chain *chain.Chain, filter *EventFilter) (*logdb.EventFi
return nil, err
}
f := &logdb.EventFilter{
- Range: rng,
- Options: filter.Options,
- Order: filter.Order,
+ Range: rng,
+ Options: &logdb.Options{
+ Offset: filter.Options.Offset,
+ Limit: filter.Options.Limit,
+ },
+ Order: filter.Order,
}
if len(filter.CriteriaSet) > 0 {
f.CriteriaSet = make([]*logdb.EventCriteria, len(filter.CriteriaSet))
diff --git a/api/events/types_test.go b/api/events/types_test.go
index a02f441c5..75eafe3a7 100644
--- a/api/events/types_test.go
+++ b/api/events/types_test.go
@@ -3,19 +3,20 @@
// Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying
// file LICENSE or
-package events_test
+package events
import (
"math"
"testing"
+ "github.com/ethereum/go-ethereum/common/hexutil"
"github.com/stretchr/testify/assert"
- "github.com/vechain/thor/v2/api/events"
"github.com/vechain/thor/v2/chain"
"github.com/vechain/thor/v2/genesis"
"github.com/vechain/thor/v2/logdb"
"github.com/vechain/thor/v2/muxdb"
"github.com/vechain/thor/v2/state"
+ "github.com/vechain/thor/v2/thor"
)
func TestEventsTypes(t *testing.T) {
@@ -33,13 +34,13 @@ func TestEventsTypes(t *testing.T) {
}
func testConvertRangeWithBlockRangeType(t *testing.T, chain *chain.Chain) {
- rng := &events.Range{
- Unit: events.BlockRangeType,
+ rng := &Range{
+ Unit: BlockRangeType,
From: 1,
To: 2,
}
- convertedRng, err := events.ConvertRange(chain, rng)
+ convertedRng, err := ConvertRange(chain, rng)
assert.NoError(t, err)
assert.Equal(t, uint32(rng.From), convertedRng.From)
@@ -47,8 +48,8 @@ func testConvertRangeWithBlockRangeType(t *testing.T, chain *chain.Chain) {
}
func testConvertRangeWithTimeRangeTypeLessThenGenesis(t *testing.T, chain *chain.Chain) {
- rng := &events.Range{
- Unit: events.TimeRangeType,
+ rng := &Range{
+ Unit: TimeRangeType,
From: 1,
To: 2,
}
@@ -57,7 +58,7 @@ func testConvertRangeWithTimeRangeTypeLessThenGenesis(t *testing.T, chain *chain
To: math.MaxUint32,
}
- convRng, err := events.ConvertRange(chain, rng)
+ convRng, err := ConvertRange(chain, rng)
assert.NoError(t, err)
assert.Equal(t, expectedEmptyRange, convRng)
@@ -68,8 +69,8 @@ func testConvertRangeWithTimeRangeType(t *testing.T, chain *chain.Chain) {
if err != nil {
t.Fatal(err)
}
- rng := &events.Range{
- Unit: events.TimeRangeType,
+ rng := &Range{
+ Unit: TimeRangeType,
From: 1,
To: genesis.Timestamp(),
}
@@ -78,7 +79,7 @@ func testConvertRangeWithTimeRangeType(t *testing.T, chain *chain.Chain) {
To: 0,
}
- convRng, err := events.ConvertRange(chain, rng)
+ convRng, err := ConvertRange(chain, rng)
assert.NoError(t, err)
assert.Equal(t, expectedZeroRange, convRng)
@@ -89,8 +90,8 @@ func testConvertRangeWithFromGreaterThanGenesis(t *testing.T, chain *chain.Chain
if err != nil {
t.Fatal(err)
}
- rng := &events.Range{
- Unit: events.TimeRangeType,
+ rng := &Range{
+ Unit: TimeRangeType,
From: genesis.Timestamp() + 1_000,
To: genesis.Timestamp() + 10_000,
}
@@ -99,7 +100,7 @@ func testConvertRangeWithFromGreaterThanGenesis(t *testing.T, chain *chain.Chain
To: math.MaxUint32,
}
- convRng, err := events.ConvertRange(chain, rng)
+ convRng, err := ConvertRange(chain, rng)
assert.NoError(t, err)
assert.Equal(t, expectedEmptyRange, convRng)
@@ -123,3 +124,45 @@ func initChain(t *testing.T) *chain.Chain {
return repo.NewBestChain()
}
+
+func TestConvertEvent(t *testing.T) {
+ event := &logdb.Event{
+ Address: thor.Address{0x01},
+ Data: []byte{0x02, 0x03},
+ BlockID: thor.Bytes32{0x04},
+ BlockNumber: 5,
+ BlockTime: 6,
+ TxID: thor.Bytes32{0x07},
+ TxIndex: 8,
+ LogIndex: 9,
+ TxOrigin: thor.Address{0x0A},
+ ClauseIndex: 10,
+ Topics: [5]*thor.Bytes32{
+ {0x0B},
+ {0x0C},
+ nil,
+ nil,
+ nil,
+ },
+ }
+
+ expectedTopics := []*thor.Bytes32{
+ {0x0B},
+ {0x0C},
+ }
+ expectedData := hexutil.Encode(event.Data)
+
+ result := convertEvent(event, true)
+
+ assert.Equal(t, event.Address, result.Address)
+ assert.Equal(t, expectedData, result.Data)
+ assert.Equal(t, event.BlockID, result.Meta.BlockID)
+ assert.Equal(t, event.BlockNumber, result.Meta.BlockNumber)
+ assert.Equal(t, event.BlockTime, result.Meta.BlockTimestamp)
+ assert.Equal(t, event.TxID, result.Meta.TxID)
+ assert.Equal(t, event.TxIndex, *result.Meta.TxIndex)
+ assert.Equal(t, event.LogIndex, *result.Meta.LogIndex)
+ assert.Equal(t, event.TxOrigin, result.Meta.TxOrigin)
+ assert.Equal(t, event.ClauseIndex, result.Meta.ClauseIndex)
+ assert.Equal(t, expectedTopics, result.Topics)
+}
diff --git a/api/transfers/transfers.go b/api/transfers/transfers.go
index cad4ee6b3..2a6cbfb9e 100644
--- a/api/transfers/transfers.go
+++ b/api/transfers/transfers.go
@@ -42,15 +42,18 @@ func (t *Transfers) filter(ctx context.Context, filter *TransferFilter) ([]*Filt
transfers, err := t.db.FilterTransfers(ctx, &logdb.TransferFilter{
CriteriaSet: filter.CriteriaSet,
Range: rng,
- Options: filter.Options,
- Order: filter.Order,
+ Options: &logdb.Options{
+ Offset: filter.Options.Offset,
+ Limit: filter.Options.Limit,
+ },
+ Order: filter.Order,
})
if err != nil {
return nil, err
}
tLogs := make([]*FilteredTransfer, len(transfers))
for i, trans := range transfers {
- tLogs[i] = convertTransfer(trans)
+ tLogs[i] = convertTransfer(trans, filter.Options.IncludeIndexes)
}
return tLogs, nil
}
@@ -66,9 +69,10 @@ func (t *Transfers) handleFilterTransferLogs(w http.ResponseWriter, req *http.Re
if filter.Options == nil {
// if filter.Options is nil, set to the default limit +1
// to detect whether there are more logs than the default limit
- filter.Options = &logdb.Options{
- Offset: 0,
- Limit: t.limit + 1,
+ filter.Options = &events.Options{
+ Offset: 0,
+ Limit: t.limit + 1,
+ IncludeIndexes: false,
}
}
diff --git a/api/transfers/transfers_test.go b/api/transfers/transfers_test.go
index 04a8c7b42..eb028414f 100644
--- a/api/transfers/transfers_test.go
+++ b/api/transfers/transfers_test.go
@@ -65,7 +65,7 @@ func TestOption(t *testing.T) {
filter := transfers.TransferFilter{
CriteriaSet: make([]*logdb.TransferCriteria, 0),
Range: nil,
- Options: &logdb.Options{Limit: 6},
+ Options: &events.Options{Limit: 6},
Order: logdb.DESC,
}
@@ -100,6 +100,57 @@ func TestOption(t *testing.T) {
assert.Equal(t, "the number of filtered logs exceeds the maximum allowed value of 5, please use pagination", strings.Trim(string(res), "\n"))
}
+func TestOptionalData(t *testing.T) {
+ db := createDb(t)
+ initTransferServer(t, db, defaultLogLimit)
+ defer ts.Close()
+ insertBlocks(t, db, 5)
+ tclient = thorclient.New(ts.URL)
+
+ testCases := []struct {
+ name string
+ includeIndexes bool
+ expected *uint32
+ }{
+ {
+ name: "do not include indexes",
+ includeIndexes: false,
+ expected: nil,
+ },
+ {
+ name: "include indexes",
+ includeIndexes: true,
+ expected: new(uint32),
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ filter := transfers.TransferFilter{
+ CriteriaSet: make([]*logdb.TransferCriteria, 0),
+ Range: nil,
+ Options: &events.Options{Limit: 5, IncludeIndexes: tc.includeIndexes},
+ Order: logdb.DESC,
+ }
+
+ res, statusCode, err := tclient.RawHTTPClient().RawHTTPPost("/logs/transfers", filter)
+ assert.NoError(t, err)
+ assert.Equal(t, http.StatusOK, statusCode)
+ var tLogs []*transfers.FilteredTransfer
+ if err := json.Unmarshal(res, &tLogs); err != nil {
+ t.Fatal(err)
+ }
+ assert.Equal(t, http.StatusOK, statusCode)
+ assert.Equal(t, 5, len(tLogs))
+
+ for _, tLog := range tLogs {
+ assert.Equal(t, tc.expected, tLog.Meta.TxIndex)
+ assert.Equal(t, tc.expected, tLog.Meta.LogIndex)
+ }
+ })
+ }
+}
+
// Test functions
func testTransferBadRequest(t *testing.T) {
badBody := []byte{0x00, 0x01, 0x02}
diff --git a/api/transfers/types.go b/api/transfers/types.go
index 29ad9b328..1574acf5a 100644
--- a/api/transfers/types.go
+++ b/api/transfers/types.go
@@ -19,6 +19,8 @@ type LogMeta struct {
TxID thor.Bytes32 `json:"txID"`
TxOrigin thor.Address `json:"txOrigin"`
ClauseIndex uint32 `json:"clauseIndex"`
+ TxIndex *uint32 `json:"txIndex,omitempty"`
+ LogIndex *uint32 `json:"logIndex,omitempty"`
}
type FilteredTransfer struct {
@@ -28,9 +30,9 @@ type FilteredTransfer struct {
Meta LogMeta `json:"meta"`
}
-func convertTransfer(transfer *logdb.Transfer) *FilteredTransfer {
+func convertTransfer(transfer *logdb.Transfer, addIndexes bool) *FilteredTransfer {
v := math.HexOrDecimal256(*transfer.Amount)
- return &FilteredTransfer{
+ ft := &FilteredTransfer{
Sender: transfer.Sender,
Recipient: transfer.Recipient,
Amount: &v,
@@ -43,11 +45,18 @@ func convertTransfer(transfer *logdb.Transfer) *FilteredTransfer {
ClauseIndex: transfer.ClauseIndex,
},
}
+
+ if addIndexes {
+ ft.Meta.TxIndex = &transfer.TxIndex
+ ft.Meta.LogIndex = &transfer.LogIndex
+ }
+
+ return ft
}
type TransferFilter struct {
CriteriaSet []*logdb.TransferCriteria
Range *events.Range
- Options *logdb.Options
+ Options *events.Options
Order logdb.Order //default asc
}
diff --git a/cmd/thor/sync_logdb.go b/cmd/thor/sync_logdb.go
index 9fccf3127..edfb78793 100644
--- a/cmd/thor/sync_logdb.go
+++ b/cmd/thor/sync_logdb.go
@@ -285,6 +285,8 @@ func verifyLogDBPerBlock(
n := block.Header().Number()
id := block.Header().ID()
ts := block.Header().Timestamp()
+ evCount := 0
+ trCount := 0
var expectedEvLogs []*logdb.Event
var expectedTrLogs []*logdb.Transfer
@@ -292,6 +294,8 @@ func verifyLogDBPerBlock(
for txIndex, r := range receipts {
tx := txs[txIndex]
origin, _ := tx.Origin()
+ evCount = 0
+ trCount = 0
for clauseIndex, output := range r.Outputs {
for _, ev := range output.Events {
@@ -301,7 +305,7 @@ func verifyLogDBPerBlock(
}
expectedEvLogs = append(expectedEvLogs, &logdb.Event{
BlockNumber: n,
- Index: uint32(len(expectedEvLogs)),
+ LogIndex: uint32(evCount),
BlockID: id,
BlockTime: ts,
TxID: tx.ID(),
@@ -310,12 +314,14 @@ func verifyLogDBPerBlock(
Address: ev.Address,
Topics: convertTopics(ev.Topics),
Data: data,
+ TxIndex: uint32(txIndex),
})
+ evCount++
}
for _, tr := range output.Transfers {
expectedTrLogs = append(expectedTrLogs, &logdb.Transfer{
BlockNumber: n,
- Index: uint32(len(expectedTrLogs)),
+ LogIndex: uint32(trCount),
BlockID: id,
BlockTime: ts,
TxID: tx.ID(),
@@ -324,7 +330,9 @@ func verifyLogDBPerBlock(
Sender: tr.Sender,
Recipient: tr.Recipient,
Amount: tr.Amount,
+ TxIndex: uint32(txIndex),
})
+ trCount++
}
}
}
diff --git a/logdb/logdb.go b/logdb/logdb.go
index b1979813f..f172ebf1d 100644
--- a/logdb/logdb.go
+++ b/logdb/logdb.go
@@ -9,7 +9,6 @@ import (
"context"
"database/sql"
"fmt"
- "math"
"math/big"
sqlite3 "github.com/mattn/go-sqlite3"
@@ -118,10 +117,10 @@ FROM (%v) e
if filter.Range != nil {
subQuery += " AND seq >= ?"
- args = append(args, newSequence(filter.Range.From, 0))
+ args = append(args, newSequence(filter.Range.From, 0, 0))
if filter.Range.To >= filter.Range.From {
subQuery += " AND seq <= ?"
- args = append(args, newSequence(filter.Range.To, uint32(math.MaxInt32)))
+ args = append(args, newSequence(filter.Range.To, txIndexMask, logIndexMask))
}
}
@@ -184,10 +183,10 @@ FROM (%v) t
if filter.Range != nil {
subQuery += " AND seq >= ?"
- args = append(args, newSequence(filter.Range.From, 0))
+ args = append(args, newSequence(filter.Range.From, 0, 0))
if filter.Range.To >= filter.Range.From {
subQuery += " AND seq <= ?"
- args = append(args, newSequence(filter.Range.To, uint32(math.MaxInt32)))
+ args = append(args, newSequence(filter.Range.To, txIndexMask, logIndexMask))
}
}
@@ -272,10 +271,11 @@ func (db *LogDB) queryEvents(ctx context.Context, query string, args ...interfac
}
event := &Event{
BlockNumber: seq.BlockNumber(),
- Index: seq.Index(),
+ LogIndex: seq.LogIndex(),
BlockID: thor.BytesToBytes32(blockID),
BlockTime: blockTime,
TxID: thor.BytesToBytes32(txID),
+ TxIndex: seq.TxIndex(),
TxOrigin: thor.BytesToAddress(txOrigin),
ClauseIndex: clauseIndex,
Address: thor.BytesToAddress(address),
@@ -334,10 +334,11 @@ func (db *LogDB) queryTransfers(ctx context.Context, query string, args ...inter
}
trans := &Transfer{
BlockNumber: seq.BlockNumber(),
- Index: seq.Index(),
+ LogIndex: seq.LogIndex(),
BlockID: thor.BytesToBytes32(blockID),
BlockTime: blockTime,
TxID: thor.BytesToBytes32(txID),
+ TxIndex: seq.TxIndex(),
TxOrigin: thor.BytesToAddress(txOrigin),
ClauseIndex: clauseIndex,
Sender: thor.BytesToAddress(sender),
@@ -376,7 +377,7 @@ func (db *LogDB) HasBlockID(id thor.Bytes32) (bool, error) {
UNION
SELECT * FROM (SELECT seq FROM event WHERE seq=? AND blockID=` + refIDQuery + ` LIMIT 1))`
- seq := newSequence(block.Number(id), 0)
+ seq := newSequence(block.Number(id), 0, 0)
row := db.stmtCache.MustPrepare(query).QueryRow(seq, id[:], seq, id[:])
var count int
if err := row.Scan(&count); err != nil {
@@ -426,7 +427,7 @@ type Writer struct {
// Truncate truncates the database by deleting logs after blockNum (included).
func (w *Writer) Truncate(blockNum uint32) error {
- seq := newSequence(blockNum, 0)
+ seq := newSequence(blockNum, 0, 0)
if err := w.exec("DELETE FROM event WHERE seq >= ?", seq); err != nil {
return err
}
@@ -443,8 +444,6 @@ func (w *Writer) Write(b *block.Block, receipts tx.Receipts) error {
blockNum = b.Header().Number()
blockTimestamp = b.Header().Timestamp()
txs = b.Transactions()
- eventCount,
- transferCount uint32
isReceiptEmpty = func(r *tx.Receipt) bool {
for _, o := range r.Outputs {
if len(o.Events) > 0 || len(o.Transfers) > 0 {
@@ -453,20 +452,24 @@ func (w *Writer) Write(b *block.Block, receipts tx.Receipts) error {
}
return true
}
+ blockIDInserted bool
)
for i, r := range receipts {
+ eventCount, transferCount := uint32(0), uint32(0)
+
if isReceiptEmpty(r) {
continue
}
- if eventCount == 0 && transferCount == 0 {
+ if !blockIDInserted {
// block id is not yet inserted
if err := w.exec(
"INSERT OR IGNORE INTO ref(data) VALUES(?)",
blockID[:]); err != nil {
return err
}
+ blockIDInserted = true
}
var (
@@ -478,6 +481,8 @@ func (w *Writer) Write(b *block.Block, receipts tx.Receipts) error {
txID = tx.ID()
txOrigin, _ = tx.Origin()
}
+
+ txIndex := i
if err := w.exec(
"INSERT OR IGNORE INTO ref(data) VALUES(?),(?)",
txID[:], txOrigin[:]); err != nil {
@@ -517,7 +522,7 @@ func (w *Writer) Write(b *block.Block, receipts tx.Receipts) error {
if err := w.exec(
query,
- newSequence(blockNum, eventCount),
+ newSequence(blockNum, uint32(txIndex), eventCount),
blockTimestamp,
clauseIndex,
eventData,
@@ -552,7 +557,7 @@ func (w *Writer) Write(b *block.Block, receipts tx.Receipts) error {
if err := w.exec(
query,
- newSequence(blockNum, transferCount),
+ newSequence(blockNum, uint32(txIndex), transferCount),
blockTimestamp,
clauseIndex,
tr.Amount.Bytes(),
diff --git a/logdb/logdb_test.go b/logdb/logdb_test.go
index fc7c6af56..454d3a1e8 100644
--- a/logdb/logdb_test.go
+++ b/logdb/logdb_test.go
@@ -146,7 +146,8 @@ func TestEvents(t *testing.T) {
origin, _ := tx.Origin()
allEvents = append(allEvents, &Event{
BlockNumber: b.Header().Number(),
- Index: uint32(j),
+ LogIndex: uint32(0),
+ TxIndex: uint32(j),
BlockID: b.Header().ID(),
BlockTime: b.Header().Timestamp(),
TxID: tx.ID(),
@@ -159,7 +160,8 @@ func TestEvents(t *testing.T) {
allTransfers = append(allTransfers, &Transfer{
BlockNumber: b.Header().Number(),
- Index: uint32(j),
+ LogIndex: uint32(0),
+ TxIndex: uint32(j),
BlockID: b.Header().ID(),
BlockTime: b.Header().Timestamp(),
TxID: tx.ID(),
diff --git a/logdb/sequence.go b/logdb/sequence.go
index 52909ffe4..b76ad4821 100644
--- a/logdb/sequence.go
+++ b/logdb/sequence.go
@@ -5,21 +5,44 @@
package logdb
-import "math"
-
type sequence int64
-func newSequence(blockNum uint32, index uint32) sequence {
- if (index & math.MaxInt32) != index {
- panic("index too large")
+// Adjust these constants based on your bit allocation requirements
+const (
+ blockNumBits = 28
+ txIndexBits = 15
+ logIndexBits = 21
+ // Max = 2^28 - 1 = 268,435,455
+ blockNumMask = (1 << blockNumBits) - 1
+ // Max = 2^15 - 1 = 32,767
+ txIndexMask = (1 << txIndexBits) - 1
+ // Max = 2^21 - 1 = 2,097,151
+ logIndexMask = (1 << logIndexBits) - 1
+)
+
+func newSequence(blockNum uint32, txIndex uint32, logIndex uint32) sequence {
+ if blockNum > blockNumMask {
+ panic("block number too large")
+ }
+ if txIndex > txIndexMask {
+ panic("transaction index too large")
}
- return (sequence(blockNum) << 31) | sequence(index)
+ if logIndex > logIndexMask {
+ panic("log index too large")
+ }
+ return (sequence(blockNum) << (txIndexBits + logIndexBits)) |
+ (sequence(txIndex) << logIndexBits) |
+ sequence(logIndex)
}
func (s sequence) BlockNumber() uint32 {
- return uint32(s >> 31)
+ return uint32(s>>(txIndexBits+logIndexBits)) & blockNumMask
+}
+
+func (s sequence) TxIndex() uint32 {
+ return uint32((s >> logIndexBits) & txIndexMask)
}
-func (s sequence) Index() uint32 {
- return uint32(s & math.MaxInt32)
+func (s sequence) LogIndex() uint32 {
+ return uint32(s & logIndexMask)
}
diff --git a/logdb/sequence_test.go b/logdb/sequence_test.go
index 9fa19fff0..b16e2d0da 100644
--- a/logdb/sequence_test.go
+++ b/logdb/sequence_test.go
@@ -6,33 +6,36 @@
package logdb
import (
- "math"
"testing"
)
func TestSequence(t *testing.T) {
type args struct {
blockNum uint32
- index uint32
+ txIndex uint32
+ logIndex uint32
}
tests := []struct {
name string
args args
- want args
}{
- {"regular", args{1, 2}, args{1, 2}},
- {"max bn", args{math.MaxUint32, 1}, args{math.MaxUint32, 1}},
- {"max index", args{5, math.MaxInt32}, args{5, math.MaxInt32}},
- {"both max", args{math.MaxUint32, math.MaxInt32}, args{math.MaxUint32, math.MaxInt32}},
+ {"regular", args{1, 2, 3}},
+ {"max bn", args{blockNumMask, 1, 2}},
+ {"max tx index", args{5, txIndexMask, 4}},
+ {"max log index", args{5, 4, logIndexMask}},
+ {"both max", args{blockNumMask, txIndexMask, logIndexMask}},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- got := newSequence(tt.args.blockNum, tt.args.index)
- if bn := got.BlockNumber(); bn != tt.want.blockNum {
- t.Errorf("seq.blockNum() = %v, want %v", bn, tt.want.blockNum)
+ got := newSequence(tt.args.blockNum, tt.args.txIndex, tt.args.logIndex)
+ if bn := got.BlockNumber(); bn != tt.args.blockNum {
+ t.Errorf("seq.blockNum() = %v, want %v", bn, tt.args.blockNum)
}
- if i := got.Index(); i != tt.want.index {
- t.Errorf("seq.index() = %v, want %v", i, tt.want.index)
+ if ti := got.TxIndex(); ti != tt.args.txIndex {
+ t.Errorf("seq.txIndex() = %v, want %v", ti, tt.args.txIndex)
+ }
+ if i := got.LogIndex(); i != tt.args.logIndex {
+ t.Errorf("seq.index() = %v, want %v", i, tt.args.logIndex)
}
})
}
@@ -42,5 +45,12 @@ func TestSequence(t *testing.T) {
t.Errorf("newSequence should panic on 2nd arg > math.MaxInt32")
}
}()
- newSequence(1, math.MaxInt32+1)
+ newSequence(1, txIndexMask+1, 5)
+
+ defer func() {
+ if e := recover(); e == nil {
+ t.Errorf("newSequence should panic on 3rd arg > math.MaxInt32")
+ }
+ }()
+ newSequence(1, 5, logIndexMask+1)
}
diff --git a/logdb/types.go b/logdb/types.go
index 7aa5ce990..8e772cc0c 100644
--- a/logdb/types.go
+++ b/logdb/types.go
@@ -15,10 +15,11 @@ import (
// Event represents tx.Event that can be stored in db.
type Event struct {
BlockNumber uint32
- Index uint32
+ LogIndex uint32
BlockID thor.Bytes32
BlockTime uint64
TxID thor.Bytes32
+ TxIndex uint32
TxOrigin thor.Address //contract caller
ClauseIndex uint32
Address thor.Address // always a contract address
@@ -29,10 +30,11 @@ type Event struct {
// Transfer represents tx.Transfer that can be stored in db.
type Transfer struct {
BlockNumber uint32
- Index uint32
+ LogIndex uint32
BlockID thor.Bytes32
BlockTime uint64
TxID thor.Bytes32
+ TxIndex uint32
TxOrigin thor.Address
ClauseIndex uint32
Sender thor.Address
diff --git a/thor/params.go b/thor/params.go
index 5912c46c9..3ec8462f8 100644
--- a/thor/params.go
+++ b/thor/params.go
@@ -12,6 +12,11 @@ import (
"github.com/ethereum/go-ethereum/params"
)
+/*
+ NOTE: any changes to gas limit or block interval may affect how the txIndex and blockNumber are stored in logdb/sequence.go:
+ - an increase in gas limit may require more bits for txIndex;
+ - if block frequency is increased, blockNumber will increment faster, potentially exhausting the allocated bits sooner than expected.
+*/
// Constants of block chain.
const (
BlockInterval uint64 = 10 // time interval between two consecutive blocks.
From 7b0a7ca73466afd392f7fa2d479fef6701988de5 Mon Sep 17 00:00:00 2001
From: Darren Kelly <107671032+darrenvechain@users.noreply.github.com>
Date: Mon, 18 Nov 2024 12:01:16 +0000
Subject: [PATCH 03/25] fix: set range.To to max logDB block (#880)
---
logdb/logdb.go | 6 ++++++
1 file changed, 6 insertions(+)
diff --git a/logdb/logdb.go b/logdb/logdb.go
index f172ebf1d..9125ce464 100644
--- a/logdb/logdb.go
+++ b/logdb/logdb.go
@@ -117,6 +117,9 @@ FROM (%v) e
if filter.Range != nil {
subQuery += " AND seq >= ?"
+ if filter.Range.To > blockNumMask {
+ filter.Range.To = blockNumMask
+ }
args = append(args, newSequence(filter.Range.From, 0, 0))
if filter.Range.To >= filter.Range.From {
subQuery += " AND seq <= ?"
@@ -183,6 +186,9 @@ FROM (%v) t
if filter.Range != nil {
subQuery += " AND seq >= ?"
+ if filter.Range.To > blockNumMask {
+ filter.Range.To = blockNumMask
+ }
args = append(args, newSequence(filter.Range.From, 0, 0))
if filter.Range.To >= filter.Range.From {
subQuery += " AND seq <= ?"
From a1ffbd45a9c1d2d354b70460d828c4f45c210c21 Mon Sep 17 00:00:00 2001
From: Pedro Gomes
Date: Mon, 18 Nov 2024 14:22:18 +0000
Subject: [PATCH 04/25] Update Convert Filter to match BlockMask bit space
(#881)
* Update Convert Filter to match BlockMask bit space
* use exported BlockNumMask
---
api/events/types.go | 16 ++++++++++++++--
logdb/logdb.go | 8 ++++----
logdb/sequence.go | 8 ++++----
logdb/sequence_test.go | 4 ++--
4 files changed, 24 insertions(+), 12 deletions(-)
diff --git a/api/events/types.go b/api/events/types.go
index 575f8d855..2a107ed54 100644
--- a/api/events/types.go
+++ b/api/events/types.go
@@ -173,8 +173,20 @@ func ConvertRange(chain *chain.Chain, r *Range) (*logdb.Range, error) {
To: toHeader.Number(),
}, nil
}
+
+ // Units are blocks, locked a max of 28bits in the logdb
+ from := uint32(r.From)
+ to := uint32(r.To)
+
+ // Ensure the values are capped at the 28-bit maximum
+ if uint32(r.From) > logdb.BlockNumMask {
+ from = logdb.BlockNumMask
+ }
+ if uint32(r.To) > logdb.BlockNumMask {
+ to = logdb.BlockNumMask
+ }
return &logdb.Range{
- From: uint32(r.From),
- To: uint32(r.To),
+ From: from,
+ To: to,
}, nil
}
diff --git a/logdb/logdb.go b/logdb/logdb.go
index 9125ce464..610a2cac1 100644
--- a/logdb/logdb.go
+++ b/logdb/logdb.go
@@ -117,8 +117,8 @@ FROM (%v) e
if filter.Range != nil {
subQuery += " AND seq >= ?"
- if filter.Range.To > blockNumMask {
- filter.Range.To = blockNumMask
+ if filter.Range.To > BlockNumMask {
+ return nil, fmt.Errorf("invalid block number range")
}
args = append(args, newSequence(filter.Range.From, 0, 0))
if filter.Range.To >= filter.Range.From {
@@ -186,8 +186,8 @@ FROM (%v) t
if filter.Range != nil {
subQuery += " AND seq >= ?"
- if filter.Range.To > blockNumMask {
- filter.Range.To = blockNumMask
+ if filter.Range.To > BlockNumMask {
+ return nil, fmt.Errorf("invalid block number range")
}
args = append(args, newSequence(filter.Range.From, 0, 0))
if filter.Range.To >= filter.Range.From {
diff --git a/logdb/sequence.go b/logdb/sequence.go
index b76ad4821..efe41e55d 100644
--- a/logdb/sequence.go
+++ b/logdb/sequence.go
@@ -12,8 +12,8 @@ const (
blockNumBits = 28
txIndexBits = 15
logIndexBits = 21
- // Max = 2^28 - 1 = 268,435,455
- blockNumMask = (1 << blockNumBits) - 1
+ // BlockNumMask Max = 2^28 - 1 = 268,435,455 (unsigned int 28)
+ BlockNumMask = (1 << blockNumBits) - 1
// Max = 2^15 - 1 = 32,767
txIndexMask = (1 << txIndexBits) - 1
// Max = 2^21 - 1 = 2,097,151
@@ -21,7 +21,7 @@ const (
)
func newSequence(blockNum uint32, txIndex uint32, logIndex uint32) sequence {
- if blockNum > blockNumMask {
+ if blockNum > BlockNumMask {
panic("block number too large")
}
if txIndex > txIndexMask {
@@ -36,7 +36,7 @@ func newSequence(blockNum uint32, txIndex uint32, logIndex uint32) sequence {
}
func (s sequence) BlockNumber() uint32 {
- return uint32(s>>(txIndexBits+logIndexBits)) & blockNumMask
+ return uint32(s>>(txIndexBits+logIndexBits)) & BlockNumMask
}
func (s sequence) TxIndex() uint32 {
diff --git a/logdb/sequence_test.go b/logdb/sequence_test.go
index b16e2d0da..6442ad689 100644
--- a/logdb/sequence_test.go
+++ b/logdb/sequence_test.go
@@ -20,10 +20,10 @@ func TestSequence(t *testing.T) {
args args
}{
{"regular", args{1, 2, 3}},
- {"max bn", args{blockNumMask, 1, 2}},
+ {"max bn", args{BlockNumMask, 1, 2}},
{"max tx index", args{5, txIndexMask, 4}},
{"max log index", args{5, 4, logIndexMask}},
- {"both max", args{blockNumMask, txIndexMask, logIndexMask}},
+ {"both max", args{BlockNumMask, txIndexMask, logIndexMask}},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
From 4aaf122719ef121cff7274f1478f41413a0dfbc3 Mon Sep 17 00:00:00 2001
From: libotony
Date: Tue, 19 Nov 2024 21:30:18 +0800
Subject: [PATCH 05/25] return error in newSequence (#885)
* return error in newSequence
* revert type change of sequence
* adjust sequence to 63bit
* fix test
---------
Co-authored-by: otherview
---
api/events/types.go | 13 +++----
api/events/types_test.go | 73 +++++++++++++++++++++-------------------
logdb/logdb.go | 51 +++++++++++++++++++++-------
logdb/sequence.go | 26 ++++++++------
logdb/sequence_test.go | 28 ++++++---------
5 files changed, 111 insertions(+), 80 deletions(-)
diff --git a/api/events/types.go b/api/events/types.go
index 2a107ed54..6cecfe9a8 100644
--- a/api/events/types.go
+++ b/api/events/types.go
@@ -174,17 +174,18 @@ func ConvertRange(chain *chain.Chain, r *Range) (*logdb.Range, error) {
}, nil
}
- // Units are blocks, locked a max of 28bits in the logdb
+ // Units are block numbers - numbers will have a max ceiling at chain head block number
+ headNum := block.Number(chain.HeadID())
from := uint32(r.From)
to := uint32(r.To)
- // Ensure the values are capped at the 28-bit maximum
- if uint32(r.From) > logdb.BlockNumMask {
- from = logdb.BlockNumMask
+ if from > headNum {
+ from = headNum
}
- if uint32(r.To) > logdb.BlockNumMask {
- to = logdb.BlockNumMask
+ if to > headNum {
+ to = headNum
}
+
return &logdb.Range{
From: from,
To: to,
diff --git a/api/events/types_test.go b/api/events/types_test.go
index 75eafe3a7..e223bb158 100644
--- a/api/events/types_test.go
+++ b/api/events/types_test.go
@@ -11,17 +11,17 @@ import (
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/stretchr/testify/assert"
- "github.com/vechain/thor/v2/chain"
+ "github.com/stretchr/testify/require"
"github.com/vechain/thor/v2/genesis"
"github.com/vechain/thor/v2/logdb"
- "github.com/vechain/thor/v2/muxdb"
- "github.com/vechain/thor/v2/state"
+ "github.com/vechain/thor/v2/test/testchain"
"github.com/vechain/thor/v2/thor"
+ "github.com/vechain/thor/v2/tx"
)
func TestEventsTypes(t *testing.T) {
c := initChain(t)
- for name, tt := range map[string]func(*testing.T, *chain.Chain){
+ for name, tt := range map[string]func(*testing.T, *testchain.Chain){
"testConvertRangeWithBlockRangeType": testConvertRangeWithBlockRangeType,
"testConvertRangeWithTimeRangeTypeLessThenGenesis": testConvertRangeWithTimeRangeTypeLessThenGenesis,
"testConvertRangeWithTimeRangeType": testConvertRangeWithTimeRangeType,
@@ -33,21 +33,38 @@ func TestEventsTypes(t *testing.T) {
}
}
-func testConvertRangeWithBlockRangeType(t *testing.T, chain *chain.Chain) {
+func testConvertRangeWithBlockRangeType(t *testing.T, chain *testchain.Chain) {
rng := &Range{
Unit: BlockRangeType,
From: 1,
To: 2,
}
- convertedRng, err := ConvertRange(chain, rng)
+ convertedRng, err := ConvertRange(chain.Repo().NewBestChain(), rng)
assert.NoError(t, err)
assert.Equal(t, uint32(rng.From), convertedRng.From)
assert.Equal(t, uint32(rng.To), convertedRng.To)
+
+ // ensure wild block numbers have a max ceiling of chain.head
+ rng = &Range{
+ Unit: BlockRangeType,
+ From: 100,
+ To: 200,
+ }
+
+ convertedRng, err = ConvertRange(chain.Repo().NewBestChain(), rng)
+ require.NoError(t, err)
+
+ bestBlock, err := chain.BestBlock()
+ require.NoError(t, err)
+
+ assert.NoError(t, err)
+ assert.Equal(t, bestBlock.Header().Number(), convertedRng.From)
+ assert.Equal(t, bestBlock.Header().Number(), convertedRng.To)
}
-func testConvertRangeWithTimeRangeTypeLessThenGenesis(t *testing.T, chain *chain.Chain) {
+func testConvertRangeWithTimeRangeTypeLessThenGenesis(t *testing.T, chain *testchain.Chain) {
rng := &Range{
Unit: TimeRangeType,
From: 1,
@@ -58,17 +75,15 @@ func testConvertRangeWithTimeRangeTypeLessThenGenesis(t *testing.T, chain *chain
To: math.MaxUint32,
}
- convRng, err := ConvertRange(chain, rng)
+ convRng, err := ConvertRange(chain.Repo().NewBestChain(), rng)
assert.NoError(t, err)
assert.Equal(t, expectedEmptyRange, convRng)
}
-func testConvertRangeWithTimeRangeType(t *testing.T, chain *chain.Chain) {
- genesis, err := chain.GetBlockHeader(0)
- if err != nil {
- t.Fatal(err)
- }
+func testConvertRangeWithTimeRangeType(t *testing.T, chain *testchain.Chain) {
+ genesis := chain.GenesisBlock().Header()
+
rng := &Range{
Unit: TimeRangeType,
From: 1,
@@ -79,17 +94,15 @@ func testConvertRangeWithTimeRangeType(t *testing.T, chain *chain.Chain) {
To: 0,
}
- convRng, err := ConvertRange(chain, rng)
+ convRng, err := ConvertRange(chain.Repo().NewBestChain(), rng)
assert.NoError(t, err)
assert.Equal(t, expectedZeroRange, convRng)
}
-func testConvertRangeWithFromGreaterThanGenesis(t *testing.T, chain *chain.Chain) {
- genesis, err := chain.GetBlockHeader(0)
- if err != nil {
- t.Fatal(err)
- }
+func testConvertRangeWithFromGreaterThanGenesis(t *testing.T, chain *testchain.Chain) {
+ genesis := chain.GenesisBlock().Header()
+
rng := &Range{
Unit: TimeRangeType,
From: genesis.Timestamp() + 1_000,
@@ -100,29 +113,21 @@ func testConvertRangeWithFromGreaterThanGenesis(t *testing.T, chain *chain.Chain
To: math.MaxUint32,
}
- convRng, err := ConvertRange(chain, rng)
+ convRng, err := ConvertRange(chain.Repo().NewBestChain(), rng)
assert.NoError(t, err)
assert.Equal(t, expectedEmptyRange, convRng)
}
// Init functions
-func initChain(t *testing.T) *chain.Chain {
- muxDb := muxdb.NewMem()
- stater := state.NewStater(muxDb)
- gene := genesis.NewDevnet()
-
- b, _, _, err := gene.Build(stater)
- if err != nil {
- t.Fatal(err)
- }
+func initChain(t *testing.T) *testchain.Chain {
+ thorChain, err := testchain.NewIntegrationTestChain()
+ require.NoError(t, err)
- repo, err := chain.NewRepository(muxDb, b)
- if err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, thorChain.MintBlock(genesis.DevAccounts()[0], []*tx.Transaction{}...))
+ require.NoError(t, thorChain.MintBlock(genesis.DevAccounts()[0], []*tx.Transaction{}...))
- return repo.NewBestChain()
+ return thorChain
}
func TestConvertEvent(t *testing.T) {
diff --git a/logdb/logdb.go b/logdb/logdb.go
index 610a2cac1..af043e9d2 100644
--- a/logdb/logdb.go
+++ b/logdb/logdb.go
@@ -117,13 +117,18 @@ FROM (%v) e
if filter.Range != nil {
subQuery += " AND seq >= ?"
- if filter.Range.To > BlockNumMask {
- return nil, fmt.Errorf("invalid block number range")
+ from, err := newSequence(filter.Range.From, 0, 0)
+ if err != nil {
+ return nil, err
}
- args = append(args, newSequence(filter.Range.From, 0, 0))
+ args = append(args, from)
if filter.Range.To >= filter.Range.From {
subQuery += " AND seq <= ?"
- args = append(args, newSequence(filter.Range.To, txIndexMask, logIndexMask))
+ to, err := newSequence(filter.Range.To, txIndexMask, logIndexMask)
+ if err != nil {
+ return nil, err
+ }
+ args = append(args, to)
}
}
@@ -186,13 +191,18 @@ FROM (%v) t
if filter.Range != nil {
subQuery += " AND seq >= ?"
- if filter.Range.To > BlockNumMask {
- return nil, fmt.Errorf("invalid block number range")
+ from, err := newSequence(filter.Range.From, 0, 0)
+ if err != nil {
+ return nil, err
}
- args = append(args, newSequence(filter.Range.From, 0, 0))
+ args = append(args, from)
if filter.Range.To >= filter.Range.From {
subQuery += " AND seq <= ?"
- args = append(args, newSequence(filter.Range.To, txIndexMask, logIndexMask))
+ to, err := newSequence(filter.Range.To, txIndexMask, logIndexMask)
+ if err != nil {
+ return nil, err
+ }
+ args = append(args, to)
}
}
@@ -383,7 +393,10 @@ func (db *LogDB) HasBlockID(id thor.Bytes32) (bool, error) {
UNION
SELECT * FROM (SELECT seq FROM event WHERE seq=? AND blockID=` + refIDQuery + ` LIMIT 1))`
- seq := newSequence(block.Number(id), 0, 0)
+ seq, err := newSequence(block.Number(id), 0, 0)
+ if err != nil {
+ return false, err
+ }
row := db.stmtCache.MustPrepare(query).QueryRow(seq, id[:], seq, id[:])
var count int
if err := row.Scan(&count); err != nil {
@@ -433,7 +446,11 @@ type Writer struct {
// Truncate truncates the database by deleting logs after blockNum (included).
func (w *Writer) Truncate(blockNum uint32) error {
- seq := newSequence(blockNum, 0, 0)
+ seq, err := newSequence(blockNum, 0, 0)
+ if err != nil {
+ return err
+ }
+
if err := w.exec("DELETE FROM event WHERE seq >= ?", seq); err != nil {
return err
}
@@ -526,9 +543,14 @@ func (w *Writer) Write(b *block.Block, receipts tx.Receipts) error {
eventData = ev.Data
}
+ seq, err := newSequence(blockNum, uint32(txIndex), eventCount)
+ if err != nil {
+ return err
+ }
+
if err := w.exec(
query,
- newSequence(blockNum, uint32(txIndex), eventCount),
+ seq,
blockTimestamp,
clauseIndex,
eventData,
@@ -561,9 +583,14 @@ func (w *Writer) Write(b *block.Block, receipts tx.Receipts) error {
refIDQuery + "," +
refIDQuery + ")"
+ seq, err := newSequence(blockNum, uint32(txIndex), transferCount)
+ if err != nil {
+ return err
+ }
+
if err := w.exec(
query,
- newSequence(blockNum, uint32(txIndex), transferCount),
+ seq,
blockTimestamp,
clauseIndex,
tr.Amount.Bytes(),
diff --git a/logdb/sequence.go b/logdb/sequence.go
index efe41e55d..1e98458b7 100644
--- a/logdb/sequence.go
+++ b/logdb/sequence.go
@@ -5,38 +5,42 @@
package logdb
+import "errors"
+
type sequence int64
// Adjust these constants based on your bit allocation requirements
+// 64th bit is the sign bit so we have 63 bits to use
const (
blockNumBits = 28
txIndexBits = 15
- logIndexBits = 21
- // BlockNumMask Max = 2^28 - 1 = 268,435,455 (unsigned int 28)
- BlockNumMask = (1 << blockNumBits) - 1
+ logIndexBits = 20
+ // Max = 2^28 - 1 = 268,435,455 (unsigned int 28)
+ blockNumMask = (1 << blockNumBits) - 1
// Max = 2^15 - 1 = 32,767
txIndexMask = (1 << txIndexBits) - 1
- // Max = 2^21 - 1 = 2,097,151
+ // Max = 2^20 - 1 = 1,048,575
logIndexMask = (1 << logIndexBits) - 1
)
-func newSequence(blockNum uint32, txIndex uint32, logIndex uint32) sequence {
- if blockNum > BlockNumMask {
- panic("block number too large")
+func newSequence(blockNum uint32, txIndex uint32, logIndex uint32) (sequence, error) {
+ if blockNum > blockNumMask {
+ return 0, errors.New("block number out of range: uint28")
}
if txIndex > txIndexMask {
- panic("transaction index too large")
+ return 0, errors.New("tx index out of range: uint15")
}
if logIndex > logIndexMask {
- panic("log index too large")
+ return 0, errors.New("log index out of range: uint21")
}
+
return (sequence(blockNum) << (txIndexBits + logIndexBits)) |
(sequence(txIndex) << logIndexBits) |
- sequence(logIndex)
+ sequence(logIndex), nil
}
func (s sequence) BlockNumber() uint32 {
- return uint32(s>>(txIndexBits+logIndexBits)) & BlockNumMask
+ return uint32(s>>(txIndexBits+logIndexBits)) & blockNumMask
}
func (s sequence) TxIndex() uint32 {
diff --git a/logdb/sequence_test.go b/logdb/sequence_test.go
index 6442ad689..2855447f9 100644
--- a/logdb/sequence_test.go
+++ b/logdb/sequence_test.go
@@ -7,6 +7,8 @@ package logdb
import (
"testing"
+
+ "github.com/stretchr/testify/assert"
)
func TestSequence(t *testing.T) {
@@ -20,14 +22,20 @@ func TestSequence(t *testing.T) {
args args
}{
{"regular", args{1, 2, 3}},
- {"max bn", args{BlockNumMask, 1, 2}},
+ {"max bn", args{blockNumMask, 1, 2}},
{"max tx index", args{5, txIndexMask, 4}},
{"max log index", args{5, 4, logIndexMask}},
- {"both max", args{BlockNumMask, txIndexMask, logIndexMask}},
+ {"close to max", args{blockNumMask - 5, txIndexMask - 5, logIndexMask - 5}},
+ {"both max", args{blockNumMask, txIndexMask, logIndexMask}},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- got := newSequence(tt.args.blockNum, tt.args.txIndex, tt.args.logIndex)
+ got, err := newSequence(tt.args.blockNum, tt.args.txIndex, tt.args.logIndex)
+ if err != nil {
+ t.Error(err)
+ }
+
+ assert.True(t, got > 0, "sequence should be positive")
if bn := got.BlockNumber(); bn != tt.args.blockNum {
t.Errorf("seq.blockNum() = %v, want %v", bn, tt.args.blockNum)
}
@@ -39,18 +47,4 @@ func TestSequence(t *testing.T) {
}
})
}
-
- defer func() {
- if e := recover(); e == nil {
- t.Errorf("newSequence should panic on 2nd arg > math.MaxInt32")
- }
- }()
- newSequence(1, txIndexMask+1, 5)
-
- defer func() {
- if e := recover(); e == nil {
- t.Errorf("newSequence should panic on 3rd arg > math.MaxInt32")
- }
- }()
- newSequence(1, 5, logIndexMask+1)
}
From e4e5913bdfd6d1af081b8f4b85e0430f0da7da87 Mon Sep 17 00:00:00 2001
From: libotony
Date: Fri, 22 Nov 2024 15:20:43 +0800
Subject: [PATCH 06/25] add safety guard to the sequence (#887)
* add safety guard to the sequence
* move bit distribution to tests
---
logdb/sequence_test.go | 41 +++++++++++++++++++++++++++++++++++++++++
1 file changed, 41 insertions(+)
diff --git a/logdb/sequence_test.go b/logdb/sequence_test.go
index 2855447f9..4b4d4e421 100644
--- a/logdb/sequence_test.go
+++ b/logdb/sequence_test.go
@@ -6,6 +6,7 @@
package logdb
import (
+ "math/rand/v2"
"testing"
"github.com/stretchr/testify/assert"
@@ -48,3 +49,43 @@ func TestSequence(t *testing.T) {
})
}
}
+
+// In case someone messes up the bit allocation
+func TestSequenceValue(t *testing.T) {
+ //#nosec G404
+ for i := 0; i < 2; i++ {
+ blk := rand.Uint32N(blockNumMask)
+ txIndex := rand.Uint32N(txIndexMask)
+ logIndex := rand.Uint32N(logIndexMask)
+
+ seq, err := newSequence(blk, txIndex, logIndex)
+ assert.Nil(t, err)
+ assert.True(t, seq > 0, "sequence should be positive")
+
+ a := rand.Uint32N(blockNumMask)
+ b := rand.Uint32N(txIndexMask)
+ c := rand.Uint32N(logIndexMask)
+
+ seq1, err := newSequence(a, b, c)
+ assert.Nil(t, err)
+ assert.True(t, seq1 > 0, "sequence should be positive")
+
+ expected := func() bool {
+ if blk != a {
+ return blk > a
+ }
+ if txIndex != b {
+ return txIndex > b
+ }
+ if logIndex != c {
+ return logIndex > c
+ }
+ return false
+ }()
+ assert.Equal(t, expected, seq > seq1)
+ }
+}
+
+func TestBitDistribution(t *testing.T) {
+ assert.Less(t, blockNumBits+txIndexBits+logIndexBits, 64, "total bits in sequence should be less than 64")
+}
From 60e80d6f79551cb38b7d28294d0ba5c27bf8fd2d Mon Sep 17 00:00:00 2001
From: Darren Kelly
Date: Fri, 29 Nov 2024 13:55:13 +0000
Subject: [PATCH 07/25] chore: update version for mainDB v4
---
cmd/thor/VERSION | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/cmd/thor/VERSION b/cmd/thor/VERSION
index 7d2ed7c70..ccbccc3dc 100644
--- a/cmd/thor/VERSION
+++ b/cmd/thor/VERSION
@@ -1 +1 @@
-2.1.4
+2.2.0
From ddc157088335ec833c0aefa72b0bb5259d11fbcb Mon Sep 17 00:00:00 2001
From: libotony
Date: Mon, 9 Dec 2024 14:06:50 +0000
Subject: [PATCH 08/25] maindb v4 (#868)
* fix(documentation): use absolute links in markdown (#889)
* Add benchmark test to node block process (#892)
* Add benchmark test to node block process
* added file-based storage
* use tempdir
* update dependency go-ethereum (#895)
* chore: update API metrics bucket and endpoint names (#893)
* chore: update API metrics bucket and endpoint names
* fix: typo & tests
* fix: lint
* chore: add websocket total counter
* fix: txs endpoints names & ws subject
* fix: unit tests
* chore: standardise naming convention
* chore: add websocket duration & http code
* chore: add websocket duration & http code
* fix: lint issues
* fix: sync issues with metrics
* chore: update websocket durations bucket
* fix: PR comments - use sync.Once
* chore: update builtin generation (#896)
* chore: update builtin generation
* fix: update GHA
* Thor client (#818)
* feat: add thorclient
* refactor: remove roundTripper
* refactor: change null check
* clean: remove commented code
* feat: add account revision and pending tx
* fix: add licence headers and fix linter issue
* refactor: rename package
* refactor: change revision type to string
* refactor: rename GetLogs and GetTransfers to FilterEvents and FilterTransfers
* refactor: change FilterEvents and FilterTransactions request type to EventFilter
* Adding common.EventWrapper to handle channel errors
* tweak
* update rawclient + update account tests
* tidy up names
* update tests
* pr comments
* adding raw tx
* Tidy up method names and calls
* options client
* tweaks
* pr comments
* Update thorclient/common/common.go
Co-authored-by: libotony
* pr comments
* Adding Subscriptions
* Pr comments
* adjust func orders
* pr comments
* changing subscribe to use the channel close vs multiple channels
* adding go-doc
* no error after unsubscribe
* pr comments
* checking status code is 2xx
* fix: change FilterTransfers argument
---------
Co-authored-by: otherview
Co-authored-by: libotony
* Add testchain package (#844)
* Refactor thor node
* thorchain allows insertion of blocks
* remove thorNode, added testchain
* clean up + comments
* adding license headers
* adding templating tests for thorclient
* Remove test event hacks
* remove types
* removed chain_builder + added logdb to testchain
* pr comments
* Update test/testchain/chain.go
Co-authored-by: libotony
---------
Co-authored-by: libotony
* cmd/thor: update instance dir to v4
* trie: implement varint-prefix coder
* deps: add github.com/qianbin/drlp
* trie: implement appendHexToCompact & compactLen
* trie: temporarily remove merkle proof stuff
* trie: many changes
* disk usage reduced by 33% (force embedding shortnode)
* new encoding method for storing nodes
* optimize trie hashing
* versioning standalone nodes
* remove extended trie
* improve trie interface
* simplify NodeIterator, remove unused codes
* trie: optimize full-node encoding/decoding
* trie: tweak shortnode encoding
* muxdb: move engine pkg
* trie: add Version() method for node interface
* muxdb: refactor due to trie updates and:
* remove leafbank stuff
* simplify muxdb.Trie implementation
* improve root node cache using ttl eviction
* add leaf key filter
* chain: a lot of changes
* improve block content storage scheme
* remove steady block tracking
* remove tx & receipt cache
* state: changes due to update of trie
* lowrlp: remove this pkg
* txpool: changes due to underlying pkg update
* genesis: changes due to underlying pkg update
* consensus: changes due to underlying pkg update
* builtin: changes due to underlying pkg update
* runtime: changes due to underlying pkg update
* api: changes due to underlying pkg update
* cmd/thor/pruner: rename pkg optimizer to pruner
* cmd/thor: changes due to underlying pkg update
* muxdb: abandon leaf filter
* cmd/thor/pruner: use smaller period when nearly synced
* muxdb: improve trie node path encoding
* trie: treat short nodes as standalone nodes when skipping hash
* cmd/thor: fix disablePrunerFlag not work
* trie: improve refNode encoding/decoding
* muxdb: improve history node key encoding
* cmd/thor: adjust pruner parameters
* build: fix test cases
* lint: fix lint error
* muxdb: fix ver encoding in node blob cache
* muxdb: add test cases for cache
* runtime: fix test compile error
* make build and test pass after rebase
* add back pruner tests
* add tests for node encoding
* minor typo
* update named store space prefix
* add more tests
* fix block summary in repo
* make build and test pass after rebase
* add back pruner tests
* remove SetBestBlockID from tests
* minor improvement
* pr comments
* adding a comment
* Metrics: Cache hit/miss (#886)
* change
* reverted to previous format
* Add dummy cache for inmem ops (#883)
* Add empty cache for inmem ops
* changing name to dummyCache
* Metrics: Reuse `shouldLog` so we get cache hit/miss data at the same pace (#888)
* first commit
* first commit
* Metrics: Disk IO reads and writes (#890)
* changes
* removed log
* sleeping any way
* pr review
* Pedro/maindb v4/benchmarks (#891)
* Adding Benchmark tests
* processing txs
* Working benchmarks
* lint
* adding tempdir
* improve cache stats log and metric
* totally removed SetBestBlockID
* Maindb v4 Transaction benchmark plus cache (#894)
* Thor client (#818)
* feat: add thorclient
* refactor: remove roundTripper
* refactor: change null check
* clean: remove commented code
* feat: add account revision and pending tx
* fix: add licence headers and fix linter issue
* refactor: rename package
* refactor: change revision type to string
* refactor: rename GetLogs and GetTransfers to FilterEvents and FilterTransfers
* refactor: change FilterEvents and FilterTransactions request type to EventFilter
* Adding common.EventWrapper to handle channel errors
* tweak
* update rawclient + update account tests
* tidy up names
* update tests
* pr comments
* adding raw tx
* Tidy up method names and calls
* options client
* tweaks
* pr comments
* Update thorclient/common/common.go
Co-authored-by: libotony
* pr comments
* Adding Subscriptions
* Pr comments
* adjust func orders
* pr comments
* changing subscribe to use the channel close vs multiple channels
* adding go-doc
* no error after unsubscribe
* pr comments
* checking status code is 2xx
* fix: change FilterTransfers argument
---------
Co-authored-by: otherview
Co-authored-by: libotony
* Add testchain package (#844)
* Refactor thor node
* thorchain allows insertion of blocks
* remove thorNode, added testchain
* clean up + comments
* adding license headers
* adding templating tests for thorclient
* Remove test event hacks
* remove types
* removed chain_builder + added logdb to testchain
* pr comments
* Update test/testchain/chain.go
Co-authored-by: libotony
---------
Co-authored-by: libotony
* cmd/thor: update instance dir to v4
* trie: implement varint-prefix coder
* deps: add github.com/qianbin/drlp
* trie: implement appendHexToCompact & compactLen
* trie: temporarily remove merkle proof stuff
* trie: many changes
* disk usage reduced by 33% (force embedding shortnode)
* new encoding method for storing nodes
* optimize trie hashing
* versioning standalone nodes
* remove extended trie
* improve trie interface
* simplify NodeIterator, remove unused codes
* trie: optimize full-node encoding/decoding
* trie: tweak shortnode encoding
* muxdb: move engine pkg
* trie: add Version() method for node interface
* muxdb: refactor due to trie updates and:
* remove leafbank stuff
* simplify muxdb.Trie implementation
* improve root node cache using ttl eviction
* add leaf key filter
* chain: a lot of changes
* improve block content storage scheme
* remove steady block tracking
* remove tx & receipt cache
* state: changes due to update of trie
* lowrlp: remove this pkg
* txpool: changes due to underlying pkg update
* genesis: changes due to underlying pkg update
* consensus: changes due to underlying pkg update
* builtin: changes due to underlying pkg update
* runtime: changes due to underlying pkg update
* api: changes due to underlying pkg update
* cmd/thor/pruner: rename pkg optimizer to pruner
* cmd/thor: changes due to underlying pkg update
* muxdb: abandon leaf filter
* cmd/thor/pruner: use smaller period when nearly synced
* muxdb: improve trie node path encoding
* trie: treat short nodes as standalone nodes when skipping hash
* cmd/thor: fix disablePrunerFlag not work
* trie: improve refNode encoding/decoding
* muxdb: improve history node key encoding
* cmd/thor: adjust pruner parameters
* build: fix test cases
* lint: fix lint error
* muxdb: fix ver encoding in node blob cache
* muxdb: add test cases for cache
* runtime: fix test compile error
* make build and test pass after rebase
* add back pruner tests
* add tests for node encoding
* minor typo
* update named store space prefix
* add more tests
* fix block summary in repo
* make build and test pass after rebase
* add back pruner tests
* remove SetBestBlockID from tests
* minor improvement
* pr comments
* adding a comment
* Metrics: Cache hit/miss (#886)
* change
* reverted to previous format
* Add dummy cache for inmem ops (#883)
* Add empty cache for inmem ops
* changing name to dummyCache
* Metrics: Reuse `shouldLog` so we get cache hit/miss data at the same pace (#888)
* first commit
* first commit
* Metrics: Disk IO reads and writes (#890)
* changes
* removed log
* sleeping any way
* pr review
* Pedro/maindb v4/benchmarks (#891)
* Adding Benchmark tests
* processing txs
* Working benchmarks
* lint
* adding tempdir
* Adding transactions benchmark + repository cache
* improve cache stats log and metric
* totally removed SetBestBlockID
* removed unused tests
* update bench tests
* getreceipts metrics + lint
---------
Co-authored-by: Paolo Galli
Co-authored-by: libotony
Co-authored-by: qianbin
Co-authored-by: Darren Kelly
Co-authored-by: Miguel Angel Rojo
* reduce clauses() allocations
* bug: fix logs endpoints query (#900)
* Thor client (#818)
* feat: add thorclient
* refactor: remove roundTripper
* refactor: change null check
* clean: remove commented code
* feat: add account revision and pending tx
* fix: add licence headers and fix linter issue
* refactor: rename package
* refactor: change revision type to string
* refactor: rename GetLogs and GetTransfers to FilterEvents and FilterTransfers
* refactor: change FilterEvents and FilterTransactions request type to EventFilter
* Adding common.EventWrapper to handle channel errors
* tweak
* update rawclient + update account tests
* tidy up names
* update tests
* pr comments
* adding raw tx
* Tidy up method names and calls
* options client
* tweaks
* pr comments
* Update thorclient/common/common.go
Co-authored-by: libotony
* pr comments
* Adding Subscriptions
* Pr comments
* adjust func orders
* pr comments
* changing subscribe to use the channel close vs multiple channels
* adding go-doc
* no error after unsubscribe
* pr comments
* checking status code is 2xx
* fix: change FilterTransfers argument
---------
Co-authored-by: otherview
Co-authored-by: libotony
* Add testchain package (#844)
* Refactor thor node
* thorchain allows insertion of blocks
* remove thorNode, added testchain
* clean up + comments
* adding license headers
* adding templating tests for thorclient
* Remove test event hacks
* remove types
* removed chain_builder + added logdb to testchain
* pr comments
* Update test/testchain/chain.go
Co-authored-by: libotony
---------
Co-authored-by: libotony
* cmd/thor: update instance dir to v4
* trie: implement varint-prefix coder
* deps: add github.com/qianbin/drlp
* trie: implement appendHexToCompact & compactLen
* trie: temporarily remove merkle proof stuff
* trie: many changes
* disk usage reduced by 33% (force embedding shortnode)
* new encoding method for storing nodes
* optimize trie hashing
* versioning standalone nodes
* remove extended trie
* improve trie interface
* simplify NodeIterator, remove unused codes
* trie: optimize full-node encoding/decoding
* trie: tweak shortnode encoding
* muxdb: move engine pkg
* trie: add Version() method for node interface
* muxdb: refactor due to trie updates and:
* remove leafbank stuff
* simplify muxdb.Trie implementation
* improve root node cache using ttl eviction
* add leaf key filter
* chain: a lot of changes
* improve block content storage scheme
* remove steady block tracking
* remove tx & receipt cache
* state: changes due to update of trie
* lowrlp: remove this pkg
* txpool: changes due to underlying pkg update
* genesis: changes due to underlying pkg update
* consensus: changes due to underlying pkg update
* builtin: changes due to underlying pkg update
* runtime: changes due to underlying pkg update
* api: changes due to underlying pkg update
* cmd/thor/pruner: rename pkg optimizer to pruner
* cmd/thor: changes due to underlying pkg update
* muxdb: abandon leaf filter
* cmd/thor/pruner: use smaller period when nearly synced
* muxdb: improve trie node path encoding
* trie: treat short nodes as standalone nodes when skipping hash
* cmd/thor: fix disablePrunerFlag not work
* trie: improve refNode encoding/decoding
* muxdb: improve history node key encoding
* cmd/thor: adjust pruner parameters
* build: fix test cases
* lint: fix lint error
* muxdb: fix ver encoding in node blob cache
* muxdb: add test cases for cache
* runtime: fix test compile error
* make build and test pass after rebase
* add back pruner tests
* add tests for node encoding
* minor typo
* update named store space prefix
* add more tests
* fix block summary in repo
* make build and test pass after rebase
* add back pruner tests
* remove SetBestBlockID from tests
* minor improvement
* pr comments
* adding a comment
* Metrics: Cache hit/miss (#886)
* change
* reverted to previous format
* Add dummy cache for inmem ops (#883)
* Add empty cache for inmem ops
* changing name to dummyCache
* Metrics: Reuse `shouldLog` so we get cache hit/miss data at the same pace (#888)
* first commit
* first commit
* Metrics: Disk IO reads and writes (#890)
* changes
* removed log
* sleeping any way
* pr review
* Pedro/maindb v4/benchmarks (#891)
* Adding Benchmark tests
* processing txs
* Working benchmarks
* lint
* adding tempdir
* improve cache stats log and metric
* totally removed SetBestBlockID
* fix: logs API not returning results when to=0,from=omitted
* minor update
* fix: PR comments + lint
* improve convert range logic
* chore: remove debug log
---------
Co-authored-by: Paolo Galli
Co-authored-by: otherview
Co-authored-by: libotony
Co-authored-by: qianbin
Co-authored-by: Miguel Angel Rojo
* chore(chain): add repo cache metrics (#910)
* chore(chain): add repo cache metrics
* refactor(chain): cache hit miss
---------
Co-authored-by: Darren Kelly <107671032+darrenvechain@users.noreply.github.com>
Co-authored-by: Pedro Gomes
Co-authored-by: Paolo Galli
Co-authored-by: qianbin
Co-authored-by: Miguel Angel Rojo
Co-authored-by: Darren Kelly
---
.github/workflows/lint-go.yaml | 7 +
README.md | 14 +-
api/accounts/accounts.go | 12 +-
api/blocks/blocks.go | 2 +-
api/debug/debug.go | 6 +-
api/debug/debug_test.go | 7 +-
api/doc/README.md | 4 +-
api/events/events.go | 2 +-
api/events/events_test.go | 105 ++--
api/events/types.go | 58 +-
api/events/types_test.go | 42 +-
api/metrics.go | 28 +-
api/metrics_test.go | 24 +-
api/node/node.go | 2 +-
api/subscriptions/pending_tx_test.go | 5 +-
api/transactions/transactions.go | 22 +-
.../transactions_benchmark_test.go | 530 +++++++++++++++++
api/transactions/types.go | 8 +-
api/transfers/transfers.go | 2 +-
api/utils/revisions.go | 5 +-
bft/engine.go | 2 +-
bft/engine_test.go | 26 +-
builtin/authority/authority_test.go | 4 +-
builtin/energy/energy_test.go | 22 +-
builtin/executor_test.go | 3 +-
builtin/gen/bindata.go | 27 +-
builtin/gen/gen.go | 5 +-
builtin/native_calls_test.go | 37 +-
builtin/params/params_test.go | 4 +-
builtin/prototype/prototype_test.go | 4 +-
builtin/prototype_native.go | 4 +-
chain/block_reader_test.go | 33 +-
chain/chain.go | 127 ++--
chain/chain_test.go | 56 +-
chain/metrics.go | 12 +
chain/persist.go | 70 +--
chain/repository.go | 274 ++++-----
chain/repository_test.go | 98 ++-
cmd/thor/main.go | 14 +-
cmd/thor/node/node.go | 21 +-
cmd/thor/node/node_benchmark_test.go | 541 +++++++++++++++++
cmd/thor/node/packer_loop.go | 23 +-
cmd/thor/optimizer/optimizer.go | 292 ---------
cmd/thor/pruner/pruner.go | 241 ++++++++
.../pruner_test.go} | 159 ++---
cmd/thor/{optimizer => pruner}/status.go | 5 +-
cmd/thor/solo/solo.go | 14 +-
cmd/thor/solo/solo_test.go | 2 +-
cmd/thor/utils.go | 8 +-
consensus/consensus.go | 4 +-
consensus/consensus_test.go | 6 +-
consensus/validator.go | 3 +-
docs/CONTRIBUTING.md | 2 +-
docs/hosting-a-node.md | 2 +-
docs/usage.md | 4 +-
genesis/builder.go | 9 +-
genesis/genesis_test.go | 9 +-
go.mod | 15 +-
go.sum | 32 +-
logdb/sequence.go | 2 +
lowrlp/encoder.go | 236 --------
metrics/noop.go | 7 +-
metrics/prometheus.go | 61 ++
metrics/telemetry.go | 18 +-
muxdb/backend.go | 94 +++
muxdb/cache.go | 230 ++++++++
muxdb/cache_test.go | 95 +++
muxdb/{internal => }/engine/engine.go | 0
muxdb/{internal => }/engine/leveldb.go | 0
muxdb/internal/trie/cache.go | 213 -------
muxdb/internal/trie/leaf_bank.go | 253 --------
muxdb/internal/trie/leaf_bank_test.go | 78 ---
muxdb/internal/trie/trie.go | 456 --------------
muxdb/internal/trie/trie_test.go | 122 ----
muxdb/internal/trie/util.go | 85 ---
muxdb/internal/trie/util_test.go | 32 -
muxdb/metrics.go | 7 +
muxdb/muxdb.go | 81 +--
muxdb/muxdb_test.go | 144 +++++
muxdb/trie.go | 227 +++++++
muxdb/trie_test.go | 78 +++
packer/flow.go | 3 +-
packer/packer.go | 4 +-
packer/packer_test.go | 10 +-
poa/candidates_test.go | 4 +-
poa/seed_test.go | 15 +-
runtime/native_return_gas_test.go | 4 +-
runtime/resolved_tx_test.go | 2 +-
runtime/runtime_test.go | 17 +-
runtime/statedb/statedb_test.go | 6 +-
state/account.go | 29 +-
state/account_test.go | 31 +-
state/cached_object.go | 16 +-
state/cached_object_test.go | 9 +-
state/stage.go | 12 +-
state/stage_test.go | 14 +-
state/state.go | 132 +++--
state/state_test.go | 29 +-
state/stater.go | 6 +-
state/stater_test.go | 13 +-
test/datagen/bytes.go | 14 +
test/datagen/numbers.go | 4 +
test/testchain/chain.go | 47 +-
tracers/tracers_test.go | 5 +-
trie/derive_root.go | 17 +-
trie/derive_root_test.go | 39 +-
trie/encoding.go | 29 +
trie/encoding_test.go | 14 +
trie/errors.go | 10 +-
trie/extended.go | 201 -------
trie/fast_node_encoder.go | 71 ---
trie/hasher.go | 294 ++++-----
trie/iterator.go | 439 ++------------
trie/iterator_test.go | 243 ++------
trie/node.go | 557 +++++++++++-------
trie/node_test.go | 261 ++++++--
trie/proof.go | 145 -----
trie/proof_test.go | 159 -----
trie/trie.go | 382 ++++++------
trie/trie_test.go | 356 +++++------
trie/vp.go | 52 ++
trie/vp_test.go | 46 ++
txpool/tx_object_map_test.go | 14 +-
txpool/tx_object_test.go | 14 +-
txpool/tx_pool.go | 4 +-
txpool/tx_pool_test.go | 28 +-
126 files changed, 4524 insertions(+), 4895 deletions(-)
create mode 100644 api/transactions/transactions_benchmark_test.go
create mode 100644 chain/metrics.go
create mode 100644 cmd/thor/node/node_benchmark_test.go
delete mode 100644 cmd/thor/optimizer/optimizer.go
create mode 100644 cmd/thor/pruner/pruner.go
rename cmd/thor/{optimizer/optimizer_test.go => pruner/pruner_test.go} (61%)
rename cmd/thor/{optimizer => pruner}/status.go (92%)
delete mode 100644 lowrlp/encoder.go
create mode 100644 muxdb/backend.go
create mode 100644 muxdb/cache.go
create mode 100644 muxdb/cache_test.go
rename muxdb/{internal => }/engine/engine.go (100%)
rename muxdb/{internal => }/engine/leveldb.go (100%)
delete mode 100644 muxdb/internal/trie/cache.go
delete mode 100644 muxdb/internal/trie/leaf_bank.go
delete mode 100644 muxdb/internal/trie/leaf_bank_test.go
delete mode 100644 muxdb/internal/trie/trie.go
delete mode 100644 muxdb/internal/trie/trie_test.go
delete mode 100644 muxdb/internal/trie/util.go
delete mode 100644 muxdb/internal/trie/util_test.go
create mode 100644 muxdb/metrics.go
create mode 100644 muxdb/muxdb_test.go
create mode 100644 muxdb/trie.go
create mode 100644 muxdb/trie_test.go
create mode 100644 test/datagen/bytes.go
delete mode 100644 trie/extended.go
delete mode 100644 trie/fast_node_encoder.go
delete mode 100644 trie/proof.go
delete mode 100644 trie/proof_test.go
create mode 100644 trie/vp.go
create mode 100644 trie/vp_test.go
diff --git a/.github/workflows/lint-go.yaml b/.github/workflows/lint-go.yaml
index ea4d2195a..0f49ceb39 100644
--- a/.github/workflows/lint-go.yaml
+++ b/.github/workflows/lint-go.yaml
@@ -17,6 +17,13 @@ jobs:
with:
go-version: '1.22'
cache: false
+
+ - name: Check `builtins` directory
+ # if it has any changes in the 'builtins' dir after running `go generate`, echo an error and fail the workflow
+ run: |
+ go generate ./builtin/gen
+ git diff --exit-code builtin/gen || (echo "\n\n\nbuiltin/gen directory is not up to date, run 'go generate ./...' to update it" && exit 1)
+
- name: golangci-lint
uses: golangci/golangci-lint-action@v6
with:
diff --git a/README.md b/README.md
index 50ee4e85c..c41d0923a 100644
--- a/README.md
+++ b/README.md
@@ -1,8 +1,8 @@
-
-
+
+
@@ -44,9 +44,9 @@ ___
## Documentation
-- [Build](./docs/build.md) - How to build the `thor` binary.
-- [Usage](./docs/usage.md) - How to run thor with different configurations.
-- [Hosting a Node](./docs/hosting-a-node.md) - Considerations and requirements for hosting a node.
+- [Build](https://github.com/vechain/thor/blob/master/docs/build.md) - How to build the `thor` binary.
+- [Usage](https://github.com/vechain/thor/blob/master/docs/usage.md) - How to run thor with different configurations.
+- [Hosting a Node](https://github.com/vechain/thor/blob/master/docs/hosting-a-node.md) - Considerations and requirements for hosting a node.
- [Core Concepts](https://docs.vechain.org/core-concepts) - Core concepts of the VeChainThor blockchain.
- [API Reference](https://mainnet.vechain.org) - The API reference for the VeChainThor blockchain.
@@ -67,7 +67,7 @@ To chat with other community members you can join:
-Do note that our [Code of Conduct](./docs/CODE_OF_CONDUCT.md) applies to all VeChain community channels. Users are
+Do note that our [Code of Conduct](https://github.com/vechain/thor/blob/master/docs/CODE_OF_CONDUCT.md) applies to all VeChain community channels. Users are
**highly encouraged** to read and adhere to them to avoid repercussions.
---
@@ -75,7 +75,7 @@ Do note that our [Code of Conduct](./docs/CODE_OF_CONDUCT.md) applies to all VeC
## Contributing
Contributions to VeChainThor are welcome and highly appreciated. However, before you jump right into it, we would like
-you to review our [Contribution Guidelines](./docs/CONTRIBUTING.md) to make sure you have a smooth experience
+you to review our [Contribution Guidelines](https://github.com/vechain/thor/blob/master/docs/CONTRIBUTING.md) to make sure you have a smooth experience
contributing to VeChainThor.
---
diff --git a/api/accounts/accounts.go b/api/accounts/accounts.go
index 4bd940709..54058a160 100644
--- a/api/accounts/accounts.go
+++ b/api/accounts/accounts.go
@@ -358,27 +358,27 @@ func (a *Accounts) Mount(root *mux.Router, pathPrefix string) {
sub.Path("/*").
Methods(http.MethodPost).
- Name("accounts_call_batch_code").
+ Name("POST /accounts/*").
HandlerFunc(utils.WrapHandlerFunc(a.handleCallBatchCode))
sub.Path("/{address}").
Methods(http.MethodGet).
- Name("accounts_get_account").
+ Name("GET /accounts/{address}").
HandlerFunc(utils.WrapHandlerFunc(a.handleGetAccount))
sub.Path("/{address}/code").
Methods(http.MethodGet).
- Name("accounts_get_code").
+ Name("GET /accounts/{address}/code").
HandlerFunc(utils.WrapHandlerFunc(a.handleGetCode))
sub.Path("/{address}/storage/{key}").
Methods("GET").
- Name("accounts_get_storage").
+ Name("GET /accounts/{address}/storage").
HandlerFunc(utils.WrapHandlerFunc(a.handleGetStorage))
// These two methods are currently deprecated
sub.Path("").
Methods(http.MethodPost).
- Name("accounts_call_contract").
+ Name("POST /accounts").
HandlerFunc(utils.WrapHandlerFunc(a.handleCallContract))
sub.Path("/{address}").
Methods(http.MethodPost).
- Name("accounts_call_contract_address").
+ Name("POST /accounts/{address}").
HandlerFunc(utils.WrapHandlerFunc(a.handleCallContract))
}
diff --git a/api/blocks/blocks.go b/api/blocks/blocks.go
index bddb3ac12..ff86e02e6 100644
--- a/api/blocks/blocks.go
+++ b/api/blocks/blocks.go
@@ -95,6 +95,6 @@ func (b *Blocks) Mount(root *mux.Router, pathPrefix string) {
sub := root.PathPrefix(pathPrefix).Subrouter()
sub.Path("/{revision}").
Methods(http.MethodGet).
- Name("blocks_get_block").
+ Name("GET /blocks/{revision}").
HandlerFunc(utils.WrapHandlerFunc(b.handleGetBlock))
}
diff --git a/api/debug/debug.go b/api/debug/debug.go
index e84a88d57..5ff54f1dc 100644
--- a/api/debug/debug.go
+++ b/api/debug/debug.go
@@ -466,14 +466,14 @@ func (d *Debug) Mount(root *mux.Router, pathPrefix string) {
sub.Path("/tracers").
Methods(http.MethodPost).
- Name("debug_trace_clause").
+ Name("POST /debug/tracers").
HandlerFunc(utils.WrapHandlerFunc(d.handleTraceClause))
sub.Path("/tracers/call").
Methods(http.MethodPost).
- Name("debug_trace_call").
+ Name("POST /debug/tracers/call").
HandlerFunc(utils.WrapHandlerFunc(d.handleTraceCall))
sub.Path("/storage-range").
Methods(http.MethodPost).
- Name("debug_trace_storage").
+ Name("POST /debug/storage-range").
HandlerFunc(utils.WrapHandlerFunc(d.handleDebugStorage))
}
diff --git a/api/debug/debug_test.go b/api/debug/debug_test.go
index 1275a9030..0b0b2d3b3 100644
--- a/api/debug/debug_test.go
+++ b/api/debug/debug_test.go
@@ -29,6 +29,7 @@ import (
"github.com/vechain/thor/v2/thor"
"github.com/vechain/thor/v2/thorclient"
"github.com/vechain/thor/v2/tracers/logger"
+ "github.com/vechain/thor/v2/trie"
"github.com/vechain/thor/v2/tx"
// Force-load the tracer native engines to trigger registration
@@ -94,8 +95,7 @@ func TestDebug(t *testing.T) {
}
func TestStorageRangeFunc(t *testing.T) {
- db := muxdb.NewMem()
- state := state.New(db, thor.Bytes32{}, 0, 0, 0)
+ state := state.New(muxdb.NewMem(), trie.Root{})
// Create an account and set storage values
addr := thor.BytesToAddress([]byte("account1"))
@@ -124,8 +124,7 @@ func TestStorageRangeFunc(t *testing.T) {
}
func TestStorageRangeMaxResult(t *testing.T) {
- db := muxdb.NewMem()
- state := state.New(db, thor.Bytes32{}, 0, 0, 0)
+ state := state.New(muxdb.NewMem(), trie.Root{})
addr := thor.BytesToAddress([]byte("account1"))
for i := 0; i < 1001; i++ {
diff --git a/api/doc/README.md b/api/doc/README.md
index 2f2c0ee62..644641053 100644
--- a/api/doc/README.md
+++ b/api/doc/README.md
@@ -1,7 +1,7 @@
## Swagger
swagger-ui from https://github.com/swagger-api/swagger-ui @v5.11.2
-- Created [window-observer.js](swagger-ui/window-observer.js) to remove `Try it out` functionality for subscription endpoints
+- Created [window-observer.js](./swagger-ui/window-observer.js) to remove `Try it out` functionality for subscription endpoints
```bash
curl https://unpkg.com/swagger-ui-dist@5.11.2/swagger-ui.css > swagger-ui/swagger-ui.css
@@ -11,7 +11,7 @@ curl https://unpkg.com/swagger-ui-dist@5.11.2/swagger-ui-standalone-preset.js >
## Stoplight
Spotlight UI from https://github.com/stoplightio/elements @v8.0.3
-- Created [window-observer.js](stoplight-ui/window-observer.js) to remove `Send API Request` functionality for subscription endpoints
+- Created [window-observer.js](./stoplight-ui/window-observer.js) to remove `Send API Request` functionality for subscription endpoints
```bash
curl https://unpkg.com/@stoplight/elements@8.0.3/styles.min.css > stoplight-ui/styles.min.css
diff --git a/api/events/events.go b/api/events/events.go
index b4c93fadc..d203212db 100644
--- a/api/events/events.go
+++ b/api/events/events.go
@@ -85,6 +85,6 @@ func (e *Events) Mount(root *mux.Router, pathPrefix string) {
sub.Path("").
Methods(http.MethodPost).
- Name("logs_filter_event").
+ Name("POST /logs/event").
HandlerFunc(utils.WrapHandlerFunc(e.handleFilter))
}
diff --git a/api/events/events_test.go b/api/events/events_test.go
index 89aafd36f..1054266fe 100644
--- a/api/events/events_test.go
+++ b/api/events/events_test.go
@@ -7,6 +7,7 @@ package events_test
import (
"encoding/json"
+ "math/big"
"net/http"
"net/http/httptest"
"strings"
@@ -16,8 +17,10 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/vechain/thor/v2/api/events"
- "github.com/vechain/thor/v2/block"
+ "github.com/vechain/thor/v2/builtin"
+ "github.com/vechain/thor/v2/genesis"
"github.com/vechain/thor/v2/logdb"
+ "github.com/vechain/thor/v2/test/datagen"
"github.com/vechain/thor/v2/test/testchain"
"github.com/vechain/thor/v2/thor"
"github.com/vechain/thor/v2/thorclient"
@@ -28,8 +31,6 @@ const defaultLogLimit uint64 = 1000
var (
ts *httptest.Server
- addr = thor.BytesToAddress([]byte("address"))
- topic = thor.BytesToBytes32([]byte("topic"))
tclient *thorclient.Client
)
@@ -52,14 +53,14 @@ func TestEvents(t *testing.T) {
blocksToInsert := 5
tclient = thorclient.New(ts.URL)
- insertBlocks(t, thorChain.LogDB(), blocksToInsert)
+ insertBlocks(t, thorChain, blocksToInsert)
testEventWithBlocks(t, blocksToInsert)
}
func TestOptionalIndexes(t *testing.T) {
thorChain := initEventServer(t, defaultLogLimit)
defer ts.Close()
- insertBlocks(t, thorChain.LogDB(), 5)
+ insertBlocks(t, thorChain, 5)
tclient = thorclient.New(ts.URL)
testCases := []struct {
@@ -109,7 +110,7 @@ func TestOptionalIndexes(t *testing.T) {
func TestOption(t *testing.T) {
thorChain := initEventServer(t, 5)
defer ts.Close()
- insertBlocks(t, thorChain.LogDB(), 5)
+ insertBlocks(t, thorChain, 5)
tclient = thorclient.New(ts.URL)
filter := events.EventFilter{
@@ -143,13 +144,47 @@ func TestOption(t *testing.T) {
assert.Equal(t, 5, len(tLogs))
// when the filtered events exceed the limit, should return the forbidden
- insertBlocks(t, thorChain.LogDB(), 6)
+ insertBlocks(t, thorChain, 6)
res, statusCode, err = tclient.RawHTTPClient().RawHTTPPost("/logs/event", filter)
require.NoError(t, err)
assert.Equal(t, http.StatusForbidden, statusCode)
assert.Equal(t, "the number of filtered logs exceeds the maximum allowed value of 5, please use pagination", strings.Trim(string(res), "\n"))
}
+func TestZeroFrom(t *testing.T) {
+ thorChain := initEventServer(t, 5)
+ defer ts.Close()
+ insertBlocks(t, thorChain, 5)
+
+ tclient = thorclient.New(ts.URL)
+ transferTopic := thor.MustParseBytes32("0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef")
+ criteria := []*events.EventCriteria{
+ {
+ TopicSet: events.TopicSet{
+ Topic0: &transferTopic,
+ },
+ },
+ }
+
+ from := uint64(0)
+ filter := events.EventFilter{
+ CriteriaSet: criteria,
+ Range: &events.Range{From: &from},
+ Options: nil,
+ Order: logdb.DESC,
+ }
+
+ res, statusCode, err := tclient.RawHTTPClient().RawHTTPPost("/logs/event", filter)
+ require.NoError(t, err)
+ var tLogs []*events.FilteredEvent
+ if err := json.Unmarshal(res, &tLogs); err != nil {
+ t.Fatal(err)
+ }
+
+ assert.Equal(t, http.StatusOK, statusCode)
+ assert.NotEmpty(t, tLogs)
+}
+
// Test functions
func testEventsBadRequest(t *testing.T) {
badBody := []byte{0x00, 0x01, 0x02}
@@ -199,16 +234,14 @@ func testEventWithBlocks(t *testing.T, expectedBlocks int) {
assert.NotEmpty(t, tLog)
}
+ transferEvent := thor.MustParseBytes32("0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef")
+
// Test with matching filter
matchingFilter := events.EventFilter{
CriteriaSet: []*events.EventCriteria{{
- Address: &addr,
+ Address: &builtin.Energy.Address,
TopicSet: events.TopicSet{
- &topic,
- &topic,
- &topic,
- &topic,
- &topic,
+ Topic0: &transferEvent,
},
}},
}
@@ -239,41 +272,17 @@ func initEventServer(t *testing.T, limit uint64) *testchain.Chain {
}
// Utilities functions
-func insertBlocks(t *testing.T, db *logdb.LogDB, n int) {
- b := new(block.Builder).Build()
- for i := 0; i < n; i++ {
- b = new(block.Builder).
- ParentID(b.Header().ID()).
- Build()
- receipts := tx.Receipts{newReceipt()}
-
- w := db.NewWriter()
- if err := w.Write(b, receipts); err != nil {
- t.Fatal(err)
- }
-
- if err := w.Commit(); err != nil {
- t.Fatal(err)
- }
- }
-}
+func insertBlocks(t *testing.T, chain *testchain.Chain, n int) {
+ transferABI, ok := builtin.Energy.ABI.MethodByName("transfer")
+ require.True(t, ok)
-func newReceipt() *tx.Receipt {
- return &tx.Receipt{
- Outputs: []*tx.Output{
- {
- Events: tx.Events{{
- Address: addr,
- Topics: []thor.Bytes32{
- topic,
- topic,
- topic,
- topic,
- topic,
- },
- Data: []byte("0x0"),
- }},
- },
- },
+ encoded, err := transferABI.EncodeInput(genesis.DevAccounts()[2].Address, new(big.Int).SetUint64(datagen.RandUint64()))
+ require.NoError(t, err)
+
+ transferClause := tx.NewClause(&builtin.Energy.Address).WithData(encoded)
+
+ for i := 0; i < n; i++ {
+ err := chain.MintClauses(genesis.DevAccounts()[0], []*tx.Clause{transferClause})
+ require.NoError(t, err)
}
}
diff --git a/api/events/types.go b/api/events/types.go
index 6cecfe9a8..bfb032095 100644
--- a/api/events/types.go
+++ b/api/events/types.go
@@ -6,8 +6,6 @@
package events
import (
- "math"
-
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/vechain/thor/v2/block"
"github.com/vechain/thor/v2/chain"
@@ -129,43 +127,50 @@ const (
type Range struct {
Unit RangeType
- From uint64
- To uint64
+ From *uint64 `json:"from,omitempty"`
+ To *uint64 `json:"to,omitempty"`
+}
+
+var emptyRange = logdb.Range{
+ From: logdb.MaxBlockNumber,
+ To: logdb.MaxBlockNumber,
}
func ConvertRange(chain *chain.Chain, r *Range) (*logdb.Range, error) {
if r == nil {
return nil, nil
}
- if r.Unit == TimeRangeType {
- emptyRange := logdb.Range{
- From: math.MaxUint32,
- To: math.MaxUint32,
- }
+ if r.Unit == TimeRangeType {
genesis, err := chain.GetBlockHeader(0)
if err != nil {
return nil, err
}
- if r.To < genesis.Timestamp() {
+ if r.To != nil && *r.To < genesis.Timestamp() {
return &emptyRange, nil
}
head, err := chain.GetBlockHeader(block.Number(chain.HeadID()))
if err != nil {
return nil, err
}
- if r.From > head.Timestamp() {
+ if r.From != nil && *r.From > head.Timestamp() {
return &emptyRange, nil
}
- fromHeader, err := chain.FindBlockHeaderByTimestamp(r.From, 1)
- if err != nil {
- return nil, err
+ fromHeader := genesis
+ if r.From != nil {
+ fromHeader, err = chain.FindBlockHeaderByTimestamp(*r.From, 1)
+ if err != nil {
+ return nil, err
+ }
}
- toHeader, err := chain.FindBlockHeaderByTimestamp(r.To, -1)
- if err != nil {
- return nil, err
+ toHeader := head
+ if r.To != nil {
+ toHeader, err = chain.FindBlockHeaderByTimestamp(*r.To, -1)
+ if err != nil {
+ return nil, err
+ }
}
return &logdb.Range{
@@ -174,16 +179,19 @@ func ConvertRange(chain *chain.Chain, r *Range) (*logdb.Range, error) {
}, nil
}
- // Units are block numbers - numbers will have a max ceiling at chain head block number
- headNum := block.Number(chain.HeadID())
- from := uint32(r.From)
- to := uint32(r.To)
+ // Units are block numbers - numbers will have a max ceiling at logdb.MaxBlockNumber
+ if r.From != nil && *r.From > logdb.MaxBlockNumber {
+ return &emptyRange, nil
+ }
- if from > headNum {
- from = headNum
+ from := uint32(0)
+ if r.From != nil {
+ from = uint32(*r.From)
}
- if to > headNum {
- to = headNum
+
+ to := uint32(logdb.MaxBlockNumber)
+ if r.To != nil && *r.To < logdb.MaxBlockNumber {
+ to = uint32(*r.To)
}
return &logdb.Range{
diff --git a/api/events/types_test.go b/api/events/types_test.go
index e223bb158..7b911b453 100644
--- a/api/events/types_test.go
+++ b/api/events/types_test.go
@@ -19,6 +19,14 @@ import (
"github.com/vechain/thor/v2/tx"
)
+func newRange(unit RangeType, from uint64, to uint64) *Range {
+ return &Range{
+ Unit: unit,
+ From: &from,
+ To: &to,
+ }
+}
+
func TestEventsTypes(t *testing.T) {
c := initChain(t)
for name, tt := range map[string]func(*testing.T, *testchain.Chain){
@@ -34,24 +42,16 @@ func TestEventsTypes(t *testing.T) {
}
func testConvertRangeWithBlockRangeType(t *testing.T, chain *testchain.Chain) {
- rng := &Range{
- Unit: BlockRangeType,
- From: 1,
- To: 2,
- }
+ rng := newRange(BlockRangeType, 1, 2)
convertedRng, err := ConvertRange(chain.Repo().NewBestChain(), rng)
assert.NoError(t, err)
- assert.Equal(t, uint32(rng.From), convertedRng.From)
- assert.Equal(t, uint32(rng.To), convertedRng.To)
+ assert.Equal(t, uint32(*rng.From), convertedRng.From)
+ assert.Equal(t, uint32(*rng.To), convertedRng.To)
// ensure wild block numbers have a max ceiling of chain.head
- rng = &Range{
- Unit: BlockRangeType,
- From: 100,
- To: 200,
- }
+ rng = newRange(BlockRangeType, 100, 2200)
convertedRng, err = ConvertRange(chain.Repo().NewBestChain(), rng)
require.NoError(t, err)
@@ -65,11 +65,7 @@ func testConvertRangeWithBlockRangeType(t *testing.T, chain *testchain.Chain) {
}
func testConvertRangeWithTimeRangeTypeLessThenGenesis(t *testing.T, chain *testchain.Chain) {
- rng := &Range{
- Unit: TimeRangeType,
- From: 1,
- To: 2,
- }
+ rng := newRange(TimeRangeType, 100, 2200)
expectedEmptyRange := &logdb.Range{
From: math.MaxUint32,
To: math.MaxUint32,
@@ -84,11 +80,7 @@ func testConvertRangeWithTimeRangeTypeLessThenGenesis(t *testing.T, chain *testc
func testConvertRangeWithTimeRangeType(t *testing.T, chain *testchain.Chain) {
genesis := chain.GenesisBlock().Header()
- rng := &Range{
- Unit: TimeRangeType,
- From: 1,
- To: genesis.Timestamp(),
- }
+ rng := newRange(TimeRangeType, 1, genesis.Timestamp())
expectedZeroRange := &logdb.Range{
From: 0,
To: 0,
@@ -103,11 +95,7 @@ func testConvertRangeWithTimeRangeType(t *testing.T, chain *testchain.Chain) {
func testConvertRangeWithFromGreaterThanGenesis(t *testing.T, chain *testchain.Chain) {
genesis := chain.GenesisBlock().Header()
- rng := &Range{
- Unit: TimeRangeType,
- From: genesis.Timestamp() + 1_000,
- To: genesis.Timestamp() + 10_000,
- }
+ rng := newRange(TimeRangeType, genesis.Timestamp()+1_000, genesis.Timestamp()+10_000)
expectedEmptyRange := &logdb.Range{
From: math.MaxUint32,
To: math.MaxUint32,
diff --git a/api/metrics.go b/api/metrics.go
index 9fd5c3d94..57631be71 100644
--- a/api/metrics.go
+++ b/api/metrics.go
@@ -19,9 +19,15 @@ import (
)
var (
+ websocketDurations = []int64{
+ 0, 1, 2, 5, 10, 25, 50, 100, 250, 500, 1_000, 2_500, 5_000, 10_000, 25_000,
+ 50_000, 100_000, 250_000, 500_000, 1_000_000, 2_500_000, 5_000_000, 10_000_000,
+ }
metricHTTPReqCounter = metrics.LazyLoadCounterVec("api_request_count", []string{"name", "code", "method"})
metricHTTPReqDuration = metrics.LazyLoadHistogramVec("api_duration_ms", []string{"name", "code", "method"}, metrics.BucketHTTPReqs)
- metricActiveWebsocketCount = metrics.LazyLoadGaugeVec("api_active_websocket_count", []string{"subject"})
+ metricWebsocketDuration = metrics.LazyLoadHistogramVec("api_websocket_duration", []string{"name", "code"}, websocketDurations)
+ metricActiveWebsocketGauge = metrics.LazyLoadGaugeVec("api_active_websocket_gauge", []string{"name"})
+ metricWebsocketCounter = metrics.LazyLoadCounterVec("api_websocket_counter", []string{"name"})
)
// metricsResponseWriter is a wrapper around http.ResponseWriter that captures the status code.
@@ -62,7 +68,7 @@ func metricsMiddleware(next http.Handler) http.Handler {
var (
enabled = false
name = ""
- subscription = ""
+ subscription = false
)
// all named route will be recorded
@@ -70,24 +76,24 @@ func metricsMiddleware(next http.Handler) http.Handler {
enabled = true
name = rt.GetName()
if strings.HasPrefix(name, "subscriptions") {
- // example path: /subscriptions/txpool -> subject = txpool
- paths := strings.Split(r.URL.Path, "/")
- if len(paths) > 2 {
- subscription = paths[2]
- }
+ subscription = true
+ name = "WS " + r.URL.Path
}
}
now := time.Now()
mrw := newMetricsResponseWriter(w)
- if subscription != "" {
- metricActiveWebsocketCount().AddWithLabel(1, map[string]string{"subject": subscription})
+ if subscription {
+ metricActiveWebsocketGauge().AddWithLabel(1, map[string]string{"name": name})
+ metricWebsocketCounter().AddWithLabel(1, map[string]string{"name": name})
}
next.ServeHTTP(mrw, r)
- if subscription != "" {
- metricActiveWebsocketCount().AddWithLabel(-1, map[string]string{"subject": subscription})
+ if subscription {
+ metricActiveWebsocketGauge().AddWithLabel(-1, map[string]string{"name": name})
+ // record websocket duration in seconds, not MS
+ metricWebsocketDuration().ObserveWithLabels(time.Since(now).Milliseconds()/1000, map[string]string{"name": name, "code": strconv.Itoa(mrw.statusCode)})
} else if enabled {
metricHTTPReqCounter().AddWithLabel(1, map[string]string{"name": name, "code": strconv.Itoa(mrw.statusCode), "method": r.Method})
metricHTTPReqDuration().ObserveWithLabels(time.Since(now).Milliseconds(), map[string]string{"name": name, "code": strconv.Itoa(mrw.statusCode), "method": r.Method})
diff --git a/api/metrics_test.go b/api/metrics_test.go
index 7cb1794e4..b7b1b7e0d 100644
--- a/api/metrics_test.go
+++ b/api/metrics_test.go
@@ -38,7 +38,7 @@ func TestMetricsMiddleware(t *testing.T) {
require.NoError(t, err)
// inject some invalid data to db
- data := thorChain.Database().NewStore("chain.data")
+ data := thorChain.Database().NewStore("chain.hdr")
var blkID thor.Bytes32
rand.Read(blkID[:])
data.Put(blkID[:], []byte("invalid data"))
@@ -77,7 +77,7 @@ func TestMetricsMiddleware(t *testing.T) {
assert.Equal(t, "method", labels[1].GetName())
assert.Equal(t, "GET", labels[1].GetValue())
assert.Equal(t, "name", labels[2].GetName())
- assert.Equal(t, "accounts_get_account", labels[2].GetValue())
+ assert.Equal(t, "GET /accounts/{address}", labels[2].GetValue())
labels = m[1].GetLabel()
assert.Equal(t, 3, len(labels))
@@ -86,7 +86,7 @@ func TestMetricsMiddleware(t *testing.T) {
assert.Equal(t, "method", labels[1].GetName())
assert.Equal(t, "GET", labels[1].GetValue())
assert.Equal(t, "name", labels[2].GetName())
- assert.Equal(t, "accounts_get_account", labels[2].GetValue())
+ assert.Equal(t, "GET /accounts/{address}", labels[2].GetValue())
labels = m[2].GetLabel()
assert.Equal(t, 3, len(labels))
@@ -95,7 +95,7 @@ func TestMetricsMiddleware(t *testing.T) {
assert.Equal(t, "method", labels[1].GetName())
assert.Equal(t, "GET", labels[1].GetValue())
assert.Equal(t, "name", labels[2].GetName())
- assert.Equal(t, "accounts_get_account", labels[2].GetValue())
+ assert.Equal(t, "GET /accounts/{address}", labels[2].GetValue())
}
func TestWebsocketMetrics(t *testing.T) {
@@ -120,13 +120,13 @@ func TestWebsocketMetrics(t *testing.T) {
metrics, err := parser.TextToMetricFamilies(bytes.NewReader(body))
assert.Nil(t, err)
- m := metrics["thor_metrics_api_active_websocket_count"].GetMetric()
+ m := metrics["thor_metrics_api_active_websocket_gauge"].GetMetric()
assert.Equal(t, 1, len(m), "should be 1 metric entries")
assert.Equal(t, float64(1), m[0].GetGauge().GetValue())
labels := m[0].GetLabel()
- assert.Equal(t, "subject", labels[0].GetName())
- assert.Equal(t, "beat", labels[0].GetValue())
+ assert.Equal(t, "name", labels[0].GetName())
+ assert.Equal(t, "WS /subscriptions/beat", labels[0].GetValue())
// initiate 1 beat subscription, active websocket should be 2
conn2, _, err := websocket.DefaultDialer.Dial(u.String(), nil)
@@ -137,7 +137,7 @@ func TestWebsocketMetrics(t *testing.T) {
metrics, err = parser.TextToMetricFamilies(bytes.NewReader(body))
assert.Nil(t, err)
- m = metrics["thor_metrics_api_active_websocket_count"].GetMetric()
+ m = metrics["thor_metrics_api_active_websocket_gauge"].GetMetric()
assert.Equal(t, 1, len(m), "should be 1 metric entries")
assert.Equal(t, float64(2), m[0].GetGauge().GetValue())
@@ -151,16 +151,16 @@ func TestWebsocketMetrics(t *testing.T) {
metrics, err = parser.TextToMetricFamilies(bytes.NewReader(body))
assert.Nil(t, err)
- m = metrics["thor_metrics_api_active_websocket_count"].GetMetric()
+ m = metrics["thor_metrics_api_active_websocket_gauge"].GetMetric()
assert.Equal(t, 2, len(m), "should be 2 metric entries")
// both m[0] and m[1] should have the value of 1
assert.Equal(t, float64(2), m[0].GetGauge().GetValue())
assert.Equal(t, float64(1), m[1].GetGauge().GetValue())
- // m[1] should have the subject of block
+ // m[1] should have the name of block
labels = m[1].GetLabel()
- assert.Equal(t, "subject", labels[0].GetName())
- assert.Equal(t, "block", labels[0].GetValue())
+ assert.Equal(t, "name", labels[0].GetName())
+ assert.Equal(t, "WS /subscriptions/block", labels[0].GetValue())
}
func httpGet(t *testing.T, url string) ([]byte, int) {
diff --git a/api/node/node.go b/api/node/node.go
index 11c1ce7e1..21c69dcdf 100644
--- a/api/node/node.go
+++ b/api/node/node.go
@@ -35,6 +35,6 @@ func (n *Node) Mount(root *mux.Router, pathPrefix string) {
sub.Path("/network/peers").
Methods(http.MethodGet).
- Name("node_get_peers").
+ Name("GET /node/network/peers").
HandlerFunc(utils.WrapHandlerFunc(n.handleNetwork))
}
diff --git a/api/subscriptions/pending_tx_test.go b/api/subscriptions/pending_tx_test.go
index 00e6a0140..d22c87d01 100644
--- a/api/subscriptions/pending_tx_test.go
+++ b/api/subscriptions/pending_tx_test.go
@@ -133,10 +133,7 @@ func addNewBlock(repo *chain.Repository, stater *state.Stater, b0 *block.Block,
if _, err := stage.Commit(); err != nil {
t.Fatal(err)
}
- if err := repo.AddBlock(blk, receipts, 0); err != nil {
- t.Fatal(err)
- }
- if err := repo.SetBestBlockID(blk.Header().ID()); err != nil {
+ if err := repo.AddBlock(blk, receipts, 0, true); err != nil {
t.Fatal(err)
}
}
diff --git a/api/transactions/transactions.go b/api/transactions/transactions.go
index af32cb6da..e4846bbde 100644
--- a/api/transactions/transactions.go
+++ b/api/transactions/transactions.go
@@ -51,7 +51,7 @@ func (t *Transactions) getRawTransaction(txID thor.Bytes32, head thor.Bytes32, a
return nil, err
}
- summary, err := t.repo.GetBlockSummary(meta.BlockID)
+ header, err := chain.GetBlockHeader(meta.BlockNum)
if err != nil {
return nil, err
}
@@ -62,9 +62,9 @@ func (t *Transactions) getRawTransaction(txID thor.Bytes32, head thor.Bytes32, a
return &RawTransaction{
RawTx: RawTx{hexutil.Encode(raw)},
Meta: &TxMeta{
- BlockID: summary.Header.ID(),
- BlockNumber: summary.Header.Number(),
- BlockTimestamp: summary.Header.Timestamp(),
+ BlockID: header.ID(),
+ BlockNumber: header.Number(),
+ BlockTimestamp: header.Timestamp(),
},
}, nil
}
@@ -84,11 +84,11 @@ func (t *Transactions) getTransactionByID(txID thor.Bytes32, head thor.Bytes32,
return nil, err
}
- summary, err := t.repo.GetBlockSummary(meta.BlockID)
+ header, err := chain.GetBlockHeader(meta.BlockNum)
if err != nil {
return nil, err
}
- return convertTransaction(tx, summary.Header), nil
+ return convertTransaction(tx, header), nil
}
// GetTransactionReceiptByID get tx's receipt
@@ -107,12 +107,12 @@ func (t *Transactions) getTransactionReceiptByID(txID thor.Bytes32, head thor.By
return nil, err
}
- summary, err := t.repo.GetBlockSummary(meta.BlockID)
+ header, err := chain.GetBlockHeader(meta.BlockNum)
if err != nil {
return nil, err
}
- return convertReceipt(receipt, summary.Header, tx)
+ return convertReceipt(receipt, header, tx)
}
func (t *Transactions) handleSendTransaction(w http.ResponseWriter, req *http.Request) error {
var rawTx *RawTx
@@ -218,14 +218,14 @@ func (t *Transactions) Mount(root *mux.Router, pathPrefix string) {
sub.Path("").
Methods(http.MethodPost).
- Name("transactions_send_tx").
+ Name("POST /transactions").
HandlerFunc(utils.WrapHandlerFunc(t.handleSendTransaction))
sub.Path("/{id}").
Methods(http.MethodGet).
- Name("transactions_get_tx").
+ Name("GET /transactions/{id}").
HandlerFunc(utils.WrapHandlerFunc(t.handleGetTransactionByID))
sub.Path("/{id}/receipt").
Methods(http.MethodGet).
- Name("transactions_get_receipt").
+ Name("GET /transactions/{id}/receipt").
HandlerFunc(utils.WrapHandlerFunc(t.handleGetTransactionReceiptByID))
}
diff --git a/api/transactions/transactions_benchmark_test.go b/api/transactions/transactions_benchmark_test.go
new file mode 100644
index 000000000..be8080455
--- /dev/null
+++ b/api/transactions/transactions_benchmark_test.go
@@ -0,0 +1,530 @@
+// Copyright (c) 2024 The VeChainThor developers
+
+// Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying
+// file LICENSE or
+
+package transactions
+
+import (
+ "crypto/ecdsa"
+ "crypto/rand"
+ "fmt"
+ "math"
+ "math/big"
+ "path/filepath"
+ "runtime/debug"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/elastic/gosigar"
+ "github.com/ethereum/go-ethereum/common/fdlimit"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/pkg/errors"
+ "github.com/stretchr/testify/require"
+ "github.com/vechain/thor/v2/block"
+ "github.com/vechain/thor/v2/chain"
+ "github.com/vechain/thor/v2/cmd/thor/solo"
+ "github.com/vechain/thor/v2/genesis"
+ "github.com/vechain/thor/v2/logdb"
+ "github.com/vechain/thor/v2/muxdb"
+ "github.com/vechain/thor/v2/packer"
+ "github.com/vechain/thor/v2/state"
+ "github.com/vechain/thor/v2/test/datagen"
+ "github.com/vechain/thor/v2/test/testchain"
+ "github.com/vechain/thor/v2/thor"
+ "github.com/vechain/thor/v2/tx"
+ "github.com/vechain/thor/v2/txpool"
+)
+
+var (
+ cachedAccounts []genesis.DevAccount
+ once sync.Once
+ blockCount = 1_000
+)
+
+func getCachedAccounts(b *testing.B) []genesis.DevAccount {
+ once.Do(func() {
+ now := time.Now()
+ cachedAccounts = createAccounts(b, 10_000)
+ b.Logf("Created accounts in: %f secs", time.Since(now).Seconds())
+ })
+ return cachedAccounts
+}
+
+func BenchmarkFetchTx_RealDB_RandomSigners_ManyClausesPerTx(b *testing.B) {
+ // create state accounts
+ accounts := getCachedAccounts(b)
+
+ // randomly pick a signer for signing the transactions
+ randomSignerFunc := randomPickSignerFunc(accounts, createManyClausesPerTx)
+
+ // create test db - will be automagically removed when the benchmark ends
+ db, err := openTempMainDB(b.TempDir())
+ require.NoError(b, err)
+
+ // create blocks
+ newChain, transactions := createPackedChain(b, db, blockCount, accounts, randomSignerFunc)
+
+ // shuffle the transaction into a randomized order
+ randomizedTransactions := shuffleSlice(transactions)
+ b.Logf("About to process %d txs", len(randomizedTransactions))
+
+ // run the benchmarks
+ b.Run("getTransaction", func(b *testing.B) {
+ benchmarkGetTransaction(b, newChain, randomizedTransactions)
+ })
+
+ b.Run("getReceipt", func(b *testing.B) {
+ benchmarkGetReceipt(b, newChain, randomizedTransactions)
+ })
+}
+
+func BenchmarkFetchTx_RealDB_RandomSigners_OneClausePerTx(b *testing.B) {
+ // create state accounts
+ accounts := getCachedAccounts(b)
+
+ // randomly pick a signer for signing the transactions
+ randomSignerFunc := randomPickSignerFunc(accounts, createOneClausePerTx)
+
+ // create test db - will be automagically removed when the benchmark ends
+ db, err := openTempMainDB(b.TempDir())
+ require.NoError(b, err)
+
+ // create blocks
+ newChain, transactions := createPackedChain(b, db, blockCount, accounts, randomSignerFunc)
+
+ // shuffle the transaction into a randomized order
+ randomizedTransactions := shuffleSlice(transactions)
+ b.Logf("About to process %d txs", len(randomizedTransactions))
+
+ // run the benchmarks
+ b.Run("getTransaction", func(b *testing.B) {
+ benchmarkGetTransaction(b, newChain, randomizedTransactions)
+ })
+
+ b.Run("getReceipt", func(b *testing.B) {
+ benchmarkGetReceipt(b, newChain, randomizedTransactions)
+ })
+}
+
+func BenchmarkFetchTx_RandomSigners_ManyClausesPerTx(b *testing.B) {
+ // create state accounts
+ accounts := getCachedAccounts(b)
+
+ // randomly pick a signer for signing the transactions
+ randomSignerFunc := randomPickSignerFunc(accounts, createManyClausesPerTx)
+
+ // create blocks
+ newChain, transactions := createPackedChain(b, muxdb.NewMem(), blockCount, accounts, randomSignerFunc)
+
+ // shuffle the transaction into a randomized order
+ randomizedTransactions := shuffleSlice(transactions)
+ b.Logf("About to process %d txs", len(randomizedTransactions))
+
+ // run the benchmarks
+ b.Run("getTransaction", func(b *testing.B) {
+ benchmarkGetTransaction(b, newChain, randomizedTransactions)
+ })
+
+ b.Run("getReceipt", func(b *testing.B) {
+ benchmarkGetReceipt(b, newChain, randomizedTransactions)
+ })
+}
+
+func BenchmarkFetchTx_RandomSigners_OneClausePerTx(b *testing.B) {
+ // Setup phase: Not part of the benchmark timing
+ b.StopTimer()
+
+ // create state accounts
+ accounts := getCachedAccounts(b)
+
+ // randomly pick a signer for signing the transactions
+ randomSignerFunc := randomPickSignerFunc(accounts, createOneClausePerTx)
+
+ // create blocks
+ newChain, transactions := createPackedChain(b, muxdb.NewMem(), blockCount, accounts, randomSignerFunc)
+
+ // shuffle the transaction into a randomized order
+ randomizedTransactions := shuffleSlice(transactions)
+ b.Logf("About to process %d txs", len(randomizedTransactions))
+
+ // run the benchmarks
+ b.Run("getTransaction", func(b *testing.B) {
+ benchmarkGetTransaction(b, newChain, randomizedTransactions)
+ })
+
+ b.Run("getReceipt", func(b *testing.B) {
+ benchmarkGetReceipt(b, newChain, randomizedTransactions)
+ })
+}
+
+func benchmarkGetTransaction(b *testing.B, thorChain *testchain.Chain, randTxs tx.Transactions) {
+ mempool := txpool.New(thorChain.Repo(), thorChain.Stater(), txpool.Options{Limit: 10, LimitPerAccount: 16, MaxLifetime: 10 * time.Minute})
+ transactionAPI := New(thorChain.Repo(), mempool)
+ head := thorChain.Repo().BestBlockSummary().Header.ID()
+ var err error
+
+ // Measure memory usage
+ b.ReportAllocs()
+
+ // Benchmark execution
+ b.ResetTimer()
+
+ for _, randTx := range randTxs {
+ _, err = transactionAPI.getRawTransaction(randTx.ID(), head, false)
+ if err != nil {
+ b.Fatalf("getRawTransaction failed: %v", err)
+ }
+ }
+}
+
+func benchmarkGetReceipt(b *testing.B, thorChain *testchain.Chain, randTxs tx.Transactions) {
+ mempool := txpool.New(thorChain.Repo(), thorChain.Stater(), txpool.Options{Limit: 10, LimitPerAccount: 16, MaxLifetime: 10 * time.Minute})
+ transactionAPI := New(thorChain.Repo(), mempool)
+ head := thorChain.Repo().BestBlockSummary().Header.ID()
+ var err error
+
+ // Measure memory usage
+ b.ReportAllocs()
+
+ // Benchmark execution
+ b.ResetTimer()
+
+ for _, randTx := range randTxs {
+ _, err = transactionAPI.getTransactionReceiptByID(randTx.ID(), head)
+ if err != nil {
+ b.Fatalf("getTransactionReceiptByID failed: %v", err)
+ }
+ }
+}
+
+func createPackedChain(b *testing.B, db *muxdb.MuxDB, noBlocks int, accounts []genesis.DevAccount, createTxFunc func(chain *testchain.Chain) (tx.Transactions, error)) (*testchain.Chain, tx.Transactions) {
+ proposer := &accounts[0]
+
+ // mock a fake chain for block production
+ fakeChain, err := createChain(db, accounts)
+ require.NoError(b, err)
+
+ // pre-alloc blocks
+ var transactions tx.Transactions
+
+ // Start from the Genesis block
+ previousBlock := fakeChain.GenesisBlock()
+ for i := 0; i < noBlocks; i++ {
+ newTxs, err := createTxFunc(fakeChain)
+ require.NoError(b, err)
+ previousBlock, err = packTxsIntoBlock(
+ fakeChain,
+ proposer,
+ previousBlock,
+ newTxs,
+ )
+ require.NoError(b, err)
+ transactions = append(transactions, newTxs...)
+ }
+
+ return fakeChain, transactions
+}
+
+func createOneClausePerTx(signerPK *ecdsa.PrivateKey, thorChain *testchain.Chain) (tx.Transactions, error) {
+ var transactions tx.Transactions
+ gasUsed := uint64(0)
+ for gasUsed < 9_500_000 {
+ toAddr := datagen.RandAddress()
+ cla := tx.NewClause(&toAddr).WithValue(big.NewInt(10000))
+ transaction := new(tx.Builder).
+ ChainTag(thorChain.Repo().ChainTag()).
+ GasPriceCoef(1).
+ Expiration(math.MaxUint32 - 1).
+ Gas(21_000).
+ Nonce(uint64(datagen.RandInt())).
+ Clause(cla).
+ BlockRef(tx.NewBlockRef(0)).
+ Build()
+
+ sig, err := crypto.Sign(transaction.SigningHash().Bytes(), signerPK)
+ if err != nil {
+ return nil, err
+ }
+ transaction = transaction.WithSignature(sig)
+
+ gasUsed += 21_000 // Gas per transaction
+ transactions = append(transactions, transaction)
+ }
+ return transactions, nil
+}
+
+func createManyClausesPerTx(signerPK *ecdsa.PrivateKey, thorChain *testchain.Chain) (tx.Transactions, error) {
+ var transactions tx.Transactions
+ gasUsed := uint64(0)
+ txGas := uint64(42_000)
+
+ transactionBuilder := new(tx.Builder).
+ ChainTag(thorChain.Repo().ChainTag()).
+ GasPriceCoef(1).
+ Expiration(math.MaxUint32 - 1).
+ Nonce(uint64(datagen.RandInt())).
+ BlockRef(tx.NewBlockRef(0))
+
+ for ; gasUsed < 9_500_000; gasUsed += txGas {
+ toAddr := datagen.RandAddress()
+ transactionBuilder.Clause(tx.NewClause(&toAddr).WithValue(big.NewInt(10000)))
+ }
+
+ transaction := transactionBuilder.Gas(gasUsed).Build()
+
+ sig, err := crypto.Sign(transaction.SigningHash().Bytes(), signerPK)
+ if err != nil {
+ return nil, err
+ }
+ transaction = transaction.WithSignature(sig)
+
+ transactions = append(transactions, transaction)
+
+ return transactions, nil
+}
+
+func packTxsIntoBlock(thorChain *testchain.Chain, proposerAccount *genesis.DevAccount, parentBlk *block.Block, transactions tx.Transactions) (*block.Block, error) {
+ p := packer.New(thorChain.Repo(), thorChain.Stater(), proposerAccount.Address, &proposerAccount.Address, thorChain.GetForkConfig())
+
+ parentSum, err := thorChain.Repo().GetBlockSummary(parentBlk.Header().ID())
+ if err != nil {
+ return nil, err
+ }
+
+ flow, err := p.Schedule(parentSum, parentBlk.Header().Timestamp()+1)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, transaction := range transactions {
+ err = flow.Adopt(transaction)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ b1, stage, receipts, err := flow.Pack(proposerAccount.PrivateKey, 0, false)
+ if err != nil {
+ return nil, err
+ }
+
+ if _, err := stage.Commit(); err != nil {
+ return nil, err
+ }
+
+ if err := thorChain.Repo().AddBlock(b1, receipts, 0, true); err != nil {
+ return nil, err
+ }
+
+ return b1, nil
+}
+
+func createChain(db *muxdb.MuxDB, accounts []genesis.DevAccount) (*testchain.Chain, error) {
+ forkConfig := thor.NoFork
+ forkConfig.VIP191 = 1
+ forkConfig.BLOCKLIST = 0
+ forkConfig.VIP214 = 2
+
+ // Create the state manager (Stater) with the initialized database.
+ stater := state.NewStater(db)
+
+ authAccs := make([]genesis.Authority, 0, len(accounts))
+ stateAccs := make([]genesis.Account, 0, len(accounts))
+
+ for _, acc := range accounts {
+ authAccs = append(authAccs, genesis.Authority{
+ MasterAddress: acc.Address,
+ EndorsorAddress: acc.Address,
+ Identity: thor.BytesToBytes32([]byte("master")),
+ })
+ bal, _ := new(big.Int).SetString("1000000000000000000000000000", 10)
+ stateAccs = append(stateAccs, genesis.Account{
+ Address: acc.Address,
+ Balance: (*genesis.HexOrDecimal256)(bal),
+ Energy: (*genesis.HexOrDecimal256)(bal),
+ Code: "",
+ Storage: nil,
+ })
+ }
+ mbp := uint64(1_000)
+ genConfig := genesis.CustomGenesis{
+ LaunchTime: 1526400000,
+ GasLimit: thor.InitialGasLimit,
+ ExtraData: "",
+ ForkConfig: &forkConfig,
+ Authority: authAccs,
+ Accounts: stateAccs,
+ Params: genesis.Params{
+ MaxBlockProposers: &mbp,
+ },
+ }
+
+ builder, err := genesis.NewCustomNet(&genConfig)
+ if err != nil {
+ return nil, err
+ }
+
+ // Initialize the genesis and retrieve the genesis block
+ //gene := genesis.NewDevnet()
+ geneBlk, _, _, err := builder.Build(stater)
+ if err != nil {
+ return nil, err
+ }
+
+ // Create the repository which manages chain data, using the database and genesis block.
+ repo, err := chain.NewRepository(db, geneBlk)
+ if err != nil {
+ return nil, err
+ }
+
+ // Create an inMemory logdb
+ logDb, err := logdb.NewMem()
+ if err != nil {
+ return nil, err
+ }
+
+ return testchain.New(
+ db,
+ builder,
+ solo.NewBFTEngine(repo),
+ repo,
+ stater,
+ geneBlk,
+ logDb,
+ thor.NoFork,
+ ), nil
+}
+
+func randomPickSignerFunc(
+ accounts []genesis.DevAccount,
+ createTxFun func(signerPK *ecdsa.PrivateKey, thorChain *testchain.Chain) (tx.Transactions, error),
+) func(chain *testchain.Chain) (tx.Transactions, error) {
+ return func(chain *testchain.Chain) (tx.Transactions, error) {
+ // Ensure there are accounts available
+ if len(accounts) == 0 {
+ return nil, fmt.Errorf("no accounts available to pick a random sender")
+ }
+
+ // Securely pick a random index
+ maxLen := big.NewInt(int64(len(accounts)))
+ randomIndex, err := rand.Int(rand.Reader, maxLen)
+ if err != nil {
+ return nil, fmt.Errorf("failed to generate random index: %v", err)
+ }
+
+ // Use the selected account to create transactions
+ sender := accounts[randomIndex.Int64()]
+ return createTxFun(sender.PrivateKey, chain)
+ }
+}
+
+func createAccounts(b *testing.B, accountNo int) []genesis.DevAccount {
+ var accs []genesis.DevAccount
+
+ for i := 0; i < accountNo; i++ {
+ pk, err := crypto.GenerateKey()
+ require.NoError(b, err)
+ addr := crypto.PubkeyToAddress(pk.PublicKey)
+ accs = append(accs, genesis.DevAccount{Address: thor.Address(addr), PrivateKey: pk})
+ }
+
+ return accs
+}
+
+func openTempMainDB(dir string) (*muxdb.MuxDB, error) {
+ cacheMB := normalizeCacheSize(4096)
+
+ fdCache := suggestFDCache()
+
+ opts := muxdb.Options{
+ TrieNodeCacheSizeMB: cacheMB,
+ TrieCachedNodeTTL: 30, // 5min
+ TrieDedupedPartitionFactor: math.MaxUint32,
+ TrieWillCleanHistory: true,
+ OpenFilesCacheCapacity: fdCache,
+ ReadCacheMB: 256, // rely on os page cache other than huge db read cache.
+ WriteBufferMB: 128,
+ }
+
+ // go-ethereum stuff
+ // Ensure Go's GC ignores the database cache for trigger percentage
+ totalCacheMB := cacheMB + opts.ReadCacheMB + opts.WriteBufferMB*2
+ gogc := math.Max(10, math.Min(100, 50/(float64(totalCacheMB)/1024)))
+
+ debug.SetGCPercent(int(gogc))
+
+ if opts.TrieWillCleanHistory {
+ opts.TrieHistPartitionFactor = 256
+ } else {
+ opts.TrieHistPartitionFactor = 524288
+ }
+
+ db, err := muxdb.Open(filepath.Join(dir, "maindb"), &opts)
+ if err != nil {
+ return nil, errors.Wrapf(err, "open main database [%v]", dir)
+ }
+ return db, nil
+}
+
+func normalizeCacheSize(sizeMB int) int {
+ if sizeMB < 128 {
+ sizeMB = 128
+ }
+
+ var mem gosigar.Mem
+ if err := mem.Get(); err != nil {
+ fmt.Println("failed to get total mem:", "err", err)
+ } else {
+ total := int(mem.Total / 1024 / 1024)
+ half := total / 2
+
+ // limit to not less than total/2 and up to total-2GB
+ limitMB := total - 2048
+ if limitMB < half {
+ limitMB = half
+ }
+
+ if sizeMB > limitMB {
+ sizeMB = limitMB
+ fmt.Println("cache size(MB) limited", "limit", limitMB)
+ }
+ }
+ return sizeMB
+}
+
+func suggestFDCache() int {
+ limit, err := fdlimit.Current()
+ if err != nil {
+ fmt.Println("unable to get fdlimit", "error", err)
+ return 500
+ }
+ if limit <= 1024 {
+ fmt.Println("low fd limit, increase it if possible", "limit", limit)
+ }
+
+ n := limit / 2
+ if n > 5120 {
+ return 5120
+ }
+ return n
+}
+
+func shuffleSlice(slice tx.Transactions) tx.Transactions {
+ shuffled := make(tx.Transactions, len(slice))
+ copy(shuffled, slice)
+
+ for i := len(shuffled) - 1; i > 0; i-- {
+ n, err := rand.Int(rand.Reader, big.NewInt(int64(i+1)))
+ if err != nil {
+ panic(err) // Handle errors appropriately in real code
+ }
+
+ // Swap the current element with the random index
+ j := int(n.Int64())
+ shuffled[i], shuffled[j] = shuffled[j], shuffled[i]
+ }
+
+ return shuffled
+}
diff --git a/api/transactions/types.go b/api/transactions/types.go
index 7c3a892ac..93cae1660 100644
--- a/api/transactions/types.go
+++ b/api/transactions/types.go
@@ -89,8 +89,9 @@ func convertTransaction(tx *tx.Transaction, header *block.Header) *Transaction {
origin, _ := tx.Origin()
delegator, _ := tx.Delegator()
- cls := make(Clauses, len(tx.Clauses()))
- for i, c := range tx.Clauses() {
+ txClauses := tx.Clauses()
+ cls := make(Clauses, len(txClauses))
+ for i, c := range txClauses {
cls[i] = convertClause(c)
}
br := tx.BlockRef()
@@ -187,9 +188,10 @@ func convertReceipt(txReceipt *tx.Receipt, header *block.Header, tx *tx.Transact
origin,
},
}
+ txClauses := tx.Clauses()
receipt.Outputs = make([]*Output, len(txReceipt.Outputs))
for i, output := range txReceipt.Outputs {
- clause := tx.Clauses()[i]
+ clause := txClauses[i]
var contractAddr *thor.Address
if clause.To() == nil {
cAddr := thor.CreateContractAddress(tx.ID(), uint32(i), 0)
diff --git a/api/transfers/transfers.go b/api/transfers/transfers.go
index 2a6cbfb9e..a036f30bd 100644
--- a/api/transfers/transfers.go
+++ b/api/transfers/transfers.go
@@ -94,6 +94,6 @@ func (t *Transfers) Mount(root *mux.Router, pathPrefix string) {
sub.Path("").
Methods(http.MethodPost).
- Name("logs_filter_transfer").
+ Name("POST /logs/transfer").
HandlerFunc(utils.WrapHandlerFunc(t.handleFilterTransferLogs))
}
diff --git a/api/utils/revisions.go b/api/utils/revisions.go
index de64473aa..11df0364f 100644
--- a/api/utils/revisions.go
+++ b/api/utils/revisions.go
@@ -136,7 +136,7 @@ func GetSummaryAndState(rev *Revision, repo *chain.Repository, bft bft.Committer
mocked := builder.Build()
// state is also reused from the parent block
- st := stater.NewState(best.Header.StateRoot(), best.Header.Number(), best.Conflicts, best.SteadyNum)
+ st := stater.NewState(best.Root())
// rebuild the block summary with the next header (mocked) AND the best block status
return &chain.BlockSummary{
@@ -144,7 +144,6 @@ func GetSummaryAndState(rev *Revision, repo *chain.Repository, bft bft.Committer
Txs: best.Txs,
Size: uint64(mocked.Size()),
Conflicts: best.Conflicts,
- SteadyNum: best.SteadyNum,
}, st, nil
}
sum, err := GetSummary(rev, repo, bft)
@@ -152,6 +151,6 @@ func GetSummaryAndState(rev *Revision, repo *chain.Repository, bft bft.Committer
return nil, nil, err
}
- st := stater.NewState(sum.Header.StateRoot(), sum.Header.Number(), sum.Conflicts, sum.SteadyNum)
+ st := stater.NewState(sum.Root())
return sum, st, nil
}
diff --git a/bft/engine.go b/bft/engine.go
index d4e893702..3e0c88059 100644
--- a/bft/engine.go
+++ b/bft/engine.go
@@ -392,7 +392,7 @@ func (engine *Engine) findCheckpointByQuality(target uint32, finalized, headID t
}
func (engine *Engine) getMaxBlockProposers(sum *chain.BlockSummary) (uint64, error) {
- state := engine.stater.NewState(sum.Header.StateRoot(), sum.Header.Number(), sum.Conflicts, sum.SteadyNum)
+ state := engine.stater.NewState(sum.Root())
params, err := builtin.Params.Native(state).Get(thor.KeyMaxBlockProposers)
if err != nil {
return 0, err
diff --git a/bft/engine_test.go b/bft/engine_test.go
index 54e2e8bec..d36494587 100644
--- a/bft/engine_test.go
+++ b/bft/engine_test.go
@@ -113,7 +113,7 @@ func (test *TestBFT) reCreateEngine() error {
return nil
}
-func (test *TestBFT) newBlock(parentSummary *chain.BlockSummary, master genesis.DevAccount, shouldVote bool) (*chain.BlockSummary, error) {
+func (test *TestBFT) newBlock(parentSummary *chain.BlockSummary, master genesis.DevAccount, shouldVote bool, asBest bool) (*chain.BlockSummary, error) {
packer := packer.New(test.repo, test.stater, master.Address, &thor.Address{}, test.fc)
flow, err := packer.Mock(parentSummary, parentSummary.Header.Timestamp()+thor.BlockInterval, parentSummary.Header.GasLimit())
if err != nil {
@@ -134,7 +134,7 @@ func (test *TestBFT) newBlock(parentSummary *chain.BlockSummary, master genesis.
return nil, err
}
- if err = test.repo.AddBlock(b, nil, conflicts); err != nil {
+ if err = test.repo.AddBlock(b, nil, conflicts, asBest); err != nil {
return nil, err
}
@@ -155,13 +155,13 @@ func (test *TestBFT) fastForward(cnt int) error {
acc := devAccounts[(int(parent.Header.Number())+1)%devCnt]
var err error
- parent, err = test.newBlock(parent, acc, true)
+ parent, err = test.newBlock(parent, acc, true, true)
if err != nil {
return err
}
}
- return test.repo.SetBestBlockID(parent.Header.ID())
+ return nil
}
func (test *TestBFT) fastForwardWithMinority(cnt int) error {
@@ -172,13 +172,13 @@ func (test *TestBFT) fastForwardWithMinority(cnt int) error {
acc := devAccounts[(int(parent.Header.Number())+1)%(devCnt/3)]
var err error
- parent, err = test.newBlock(parent, acc, true)
+ parent, err = test.newBlock(parent, acc, true, true)
if err != nil {
return err
}
}
- return test.repo.SetBestBlockID(parent.Header.ID())
+ return nil
}
func (test *TestBFT) buildBranch(cnt int) (*chain.Chain, error) {
@@ -189,7 +189,7 @@ func (test *TestBFT) buildBranch(cnt int) (*chain.Chain, error) {
acc := devAccounts[(int(parent.Header.Number())+1+4)%devCnt]
var err error
- parent, err = test.newBlock(parent, acc, true)
+ parent, err = test.newBlock(parent, acc, true, false)
if err != nil {
return nil, err
}
@@ -197,14 +197,14 @@ func (test *TestBFT) buildBranch(cnt int) (*chain.Chain, error) {
return test.repo.NewChain(parent.Header.ID()), nil
}
-func (test *TestBFT) pack(parentID thor.Bytes32, shouldVote bool, best bool) (*chain.BlockSummary, error) {
+func (test *TestBFT) pack(parentID thor.Bytes32, shouldVote bool, asBest bool) (*chain.BlockSummary, error) {
acc := devAccounts[len(devAccounts)-1]
parent, err := test.repo.GetBlockSummary(parentID)
if err != nil {
return nil, err
}
- blk, err := test.newBlock(parent, acc, shouldVote)
+ blk, err := test.newBlock(parent, acc, shouldVote, asBest)
if err != nil {
return nil, err
}
@@ -215,12 +215,6 @@ func (test *TestBFT) pack(parentID thor.Bytes32, shouldVote bool, best bool) (*c
}
}
- if best {
- if err := test.repo.SetBestBlockID(blk.Header.ID()); err != nil {
- return nil, err
- }
- }
-
return test.repo.GetBlockSummary(blk.Header.ID())
}
@@ -255,7 +249,7 @@ func TestNewBlock(t *testing.T) {
PrivateKey: priv,
}
- summary, err := testBFT.newBlock(testBFT.repo.BestBlockSummary(), master, true)
+ summary, err := testBFT.newBlock(testBFT.repo.BestBlockSummary(), master, true, false)
if err != nil {
t.Fatal(err)
}
diff --git a/builtin/authority/authority_test.go b/builtin/authority/authority_test.go
index 7f07ae21d..b60b6fc88 100644
--- a/builtin/authority/authority_test.go
+++ b/builtin/authority/authority_test.go
@@ -13,6 +13,7 @@ import (
"github.com/vechain/thor/v2/muxdb"
"github.com/vechain/thor/v2/state"
"github.com/vechain/thor/v2/thor"
+ "github.com/vechain/thor/v2/trie"
)
func M(a ...interface{}) []interface{} {
@@ -20,8 +21,7 @@ func M(a ...interface{}) []interface{} {
}
func TestAuthority(t *testing.T) {
- db := muxdb.NewMem()
- st := state.New(db, thor.Bytes32{}, 0, 0, 0)
+ st := state.New(muxdb.NewMem(), trie.Root{})
p1 := thor.BytesToAddress([]byte("p1"))
p2 := thor.BytesToAddress([]byte("p2"))
diff --git a/builtin/energy/energy_test.go b/builtin/energy/energy_test.go
index e9a2c2373..065b563e0 100644
--- a/builtin/energy/energy_test.go
+++ b/builtin/energy/energy_test.go
@@ -13,6 +13,7 @@ import (
"github.com/vechain/thor/v2/muxdb"
"github.com/vechain/thor/v2/state"
"github.com/vechain/thor/v2/thor"
+ "github.com/vechain/thor/v2/trie"
)
func M(a ...interface{}) []interface{} {
@@ -20,8 +21,7 @@ func M(a ...interface{}) []interface{} {
}
func TestEnergy(t *testing.T) {
- db := muxdb.NewMem()
- st := state.New(db, thor.Bytes32{}, 0, 0, 0)
+ st := state.New(muxdb.NewMem(), trie.Root{})
acc := thor.BytesToAddress([]byte("a1"))
@@ -45,8 +45,7 @@ func TestEnergy(t *testing.T) {
}
func TestInitialSupply(t *testing.T) {
- db := muxdb.NewMem()
- st := state.New(db, thor.Bytes32{}, 0, 0, 0)
+ st := state.New(muxdb.NewMem(), trie.Root{})
eng := New(thor.BytesToAddress([]byte("eng")), st, 0)
@@ -63,8 +62,7 @@ func TestInitialSupply(t *testing.T) {
}
func TestInitialSupplyError(t *testing.T) {
- db := muxdb.NewMem()
- st := state.New(db, thor.Bytes32{}, 0, 0, 0)
+ st := state.New(muxdb.NewMem(), trie.Root{})
eng := New(thor.BytesToAddress([]byte("a1")), st, 0)
@@ -77,8 +75,7 @@ func TestInitialSupplyError(t *testing.T) {
}
func TestTotalSupply(t *testing.T) {
- db := muxdb.NewMem()
- st := state.New(db, thor.Bytes32{}, 0, 0, 0)
+ st := state.New(muxdb.NewMem(), trie.Root{})
eng := New(thor.BytesToAddress([]byte("eng")), st, 0)
@@ -91,8 +88,7 @@ func TestTotalSupply(t *testing.T) {
}
func TestTokenTotalSupply(t *testing.T) {
- db := muxdb.NewMem()
- st := state.New(db, thor.Bytes32{}, 0, 0, 0)
+ st := state.New(muxdb.NewMem(), trie.Root{})
eng := New(thor.BytesToAddress([]byte("eng")), st, 0)
@@ -105,8 +101,7 @@ func TestTokenTotalSupply(t *testing.T) {
}
func TestTotalBurned(t *testing.T) {
- db := muxdb.NewMem()
- st := state.New(db, thor.Bytes32{}, 0, 0, 0)
+ st := state.New(muxdb.NewMem(), trie.Root{})
eng := New(thor.BytesToAddress([]byte("eng")), st, 0)
@@ -119,8 +114,7 @@ func TestTotalBurned(t *testing.T) {
}
func TestEnergyGrowth(t *testing.T) {
- db := muxdb.NewMem()
- st := state.New(db, thor.Bytes32{}, 0, 0, 0)
+ st := state.New(muxdb.NewMem(), trie.Root{})
acc := thor.BytesToAddress([]byte("a1"))
diff --git a/builtin/executor_test.go b/builtin/executor_test.go
index 2053f15b4..41df7f7aa 100644
--- a/builtin/executor_test.go
+++ b/builtin/executor_test.go
@@ -19,6 +19,7 @@ import (
"github.com/vechain/thor/v2/runtime"
"github.com/vechain/thor/v2/state"
"github.com/vechain/thor/v2/thor"
+ "github.com/vechain/thor/v2/trie"
"github.com/vechain/thor/v2/tx"
"github.com/vechain/thor/v2/xenv"
)
@@ -73,7 +74,7 @@ func initExectorTest() *ctest {
})
repo, _ := chain.NewRepository(db, b0)
- st := state.New(db, b0.Header().StateRoot(), 0, 0, 0)
+ st := state.New(db, trie.Root{Hash: b0.Header().StateRoot()})
chain := repo.NewChain(b0.Header().ID())
rt := runtime.New(chain, st, &xenv.BlockContext{Time: uint64(time.Now().Unix())}, thor.NoFork)
diff --git a/builtin/gen/bindata.go b/builtin/gen/bindata.go
index 8f1ffbc5d..c0724a53d 100644
--- a/builtin/gen/bindata.go
+++ b/builtin/gen/bindata.go
@@ -1,4 +1,4 @@
-// Package gen Code generated by go-bindata. (@generated) DO NOT EDIT.
+// Code generated by go-bindata. DO NOT EDIT.
// sources:
// compiled/Authority.abi
// compiled/Authority.bin-runtime
@@ -76,32 +76,21 @@ type bindataFileInfo struct {
modTime time.Time
}
-// Name return file name
func (fi bindataFileInfo) Name() string {
return fi.name
}
-
-// Size return file size
func (fi bindataFileInfo) Size() int64 {
return fi.size
}
-
-// Mode return file mode
func (fi bindataFileInfo) Mode() os.FileMode {
return fi.mode
}
-
-// Mode return file modify time
func (fi bindataFileInfo) ModTime() time.Time {
return fi.modTime
}
-
-// IsDir return file whether a directory
func (fi bindataFileInfo) IsDir() bool {
- return fi.mode&os.ModeDir != 0
+ return false
}
-
-// Sys return file is sys mode
func (fi bindataFileInfo) Sys() interface{} {
return nil
}
@@ -794,11 +783,13 @@ var _bindata = map[string]func() (*asset, error){
// directory embedded in the file by go-bindata.
// For example if you run go-bindata on data/... and data contains the
// following hierarchy:
-// data/
-// foo.txt
-// img/
-// a.png
-// b.png
+//
+// data/
+// foo.txt
+// img/
+// a.png
+// b.png
+//
// then AssetDir("data") would return []string{"foo.txt", "img"}
// AssetDir("data/img") would return []string{"a.png", "b.png"}
// AssetDir("foo.txt") and AssetDir("notexist") would return an error
diff --git a/builtin/gen/gen.go b/builtin/gen/gen.go
index d08c2f179..ce5fed8e1 100644
--- a/builtin/gen/gen.go
+++ b/builtin/gen/gen.go
@@ -6,5 +6,6 @@
package gen
//go:generate rm -rf ./compiled/
-//go:generate solc --optimize-runs 200 --overwrite --bin-runtime --abi -o ./compiled authority.sol energy.sol executor.sol extension.sol extension-v2.sol measure.sol params.sol prototype.sol
-//go:generate go-bindata -nometadata -ignore=_ -pkg gen -o bindata.go compiled/
+//go:generate docker run -v ./:/solidity ethereum/solc:0.4.24 --optimize-runs 200 --overwrite --bin-runtime --abi -o /solidity/compiled authority.sol energy.sol executor.sol extension.sol extension-v2.sol measure.sol params.sol prototype.sol
+//go:generate go run github.com/go-bindata/go-bindata/go-bindata@v1.0.0 -nometadata -ignore=_ -pkg gen -o bindata.go compiled/
+//go:generate go fmt
diff --git a/builtin/native_calls_test.go b/builtin/native_calls_test.go
index 48d53d3f4..ca6af57ff 100644
--- a/builtin/native_calls_test.go
+++ b/builtin/native_calls_test.go
@@ -26,6 +26,7 @@ import (
"github.com/vechain/thor/v2/runtime"
"github.com/vechain/thor/v2/state"
"github.com/vechain/thor/v2/thor"
+ "github.com/vechain/thor/v2/trie"
"github.com/vechain/thor/v2/tx"
"github.com/vechain/thor/v2/vm"
"github.com/vechain/thor/v2/xenv"
@@ -121,7 +122,7 @@ func (c *ccase) Assert(t *testing.T) *ccase {
assert.True(t, ok, "should have method")
constant := method.Const()
- stage, err := c.rt.State().Stage(0, 0)
+ stage, err := c.rt.State().Stage(trie.Version{})
assert.Nil(t, err, "should stage state")
stateRoot := stage.Hash()
@@ -140,7 +141,7 @@ func (c *ccase) Assert(t *testing.T) *ccase {
vmout, _, err := exec()
assert.Nil(t, err)
if constant || vmout.VMErr != nil {
- stage, err := c.rt.State().Stage(0, 0)
+ stage, err := c.rt.State().Stage(trie.Version{})
assert.Nil(t, err, "should stage state")
newStateRoot := stage.Hash()
assert.Equal(t, stateRoot, newStateRoot)
@@ -195,7 +196,7 @@ func TestParamsNative(t *testing.T) {
return nil
})
repo, _ := chain.NewRepository(db, b0)
- st := state.New(db, b0.Header().StateRoot(), 0, 0, 0)
+ st := state.New(db, trie.Root{Hash: b0.Header().StateRoot()})
chain := repo.NewChain(b0.Header().ID())
rt := runtime.New(chain, st, &xenv.BlockContext{}, thor.NoFork)
@@ -263,7 +264,7 @@ func TestAuthorityNative(t *testing.T) {
return nil
})
repo, _ := chain.NewRepository(db, b0)
- st := state.New(db, b0.Header().StateRoot(), 0, 0, 0)
+ st := state.New(db, trie.Root{Hash: b0.Header().StateRoot()})
chain := repo.NewChain(b0.Header().ID())
rt := runtime.New(chain, st, &xenv.BlockContext{}, thor.NoFork)
@@ -369,7 +370,7 @@ func TestEnergyNative(t *testing.T) {
})
repo, _ := chain.NewRepository(db, b0)
- st := state.New(db, b0.Header().StateRoot(), 0, 0, 0)
+ st := state.New(db, trie.Root{Hash: b0.Header().StateRoot()})
chain := repo.NewChain(b0.Header().ID())
st.SetEnergy(addr, eng, b0.Header().Timestamp())
@@ -495,7 +496,7 @@ func TestPrototypeNative(t *testing.T) {
gene := genesis.NewDevnet()
genesisBlock, _, _, _ := gene.Build(state.NewStater(db))
repo, _ := chain.NewRepository(db, genesisBlock)
- st := state.New(db, genesisBlock.Header().StateRoot(), 0, 0, 0)
+ st := state.New(db, trie.Root{Hash: genesisBlock.Header().StateRoot()})
chain := repo.NewChain(genesisBlock.Header().ID())
st.SetStorage(thor.Address(acc1), key, value)
@@ -768,14 +769,14 @@ func TestPrototypeNativeWithLongerBlockNumber(t *testing.T) {
db := muxdb.NewMem()
gene := genesis.NewDevnet()
genesisBlock, _, _, _ := gene.Build(state.NewStater(db))
- st := state.New(db, genesisBlock.Header().StateRoot(), 0, 0, 0)
+ st := state.New(db, trie.Root{Hash: genesisBlock.Header().StateRoot()})
repo, _ := chain.NewRepository(db, genesisBlock)
launchTime := genesisBlock.Header().Timestamp()
for i := 1; i < 100; i++ {
st.SetBalance(acc1, big.NewInt(int64(i)))
st.SetEnergy(acc1, big.NewInt(int64(i)), launchTime+uint64(i)*10)
- stage, _ := st.Stage(uint32(i), 0)
+ stage, _ := st.Stage(trie.Version{Major: uint32(i)})
stateRoot, _ := stage.Commit()
b := new(block.Builder).
ParentID(repo.BestBlockSummary().Header.ID()).
@@ -784,11 +785,10 @@ func TestPrototypeNativeWithLongerBlockNumber(t *testing.T) {
StateRoot(stateRoot).
Build().
WithSignature(sig[:])
- repo.AddBlock(b, tx.Receipts{}, 0)
- repo.SetBestBlockID(b.Header().ID())
+ repo.AddBlock(b, tx.Receipts{}, 0, true)
}
- st = state.New(db, repo.BestBlockSummary().Header.StateRoot(), repo.BestBlockSummary().Header.Number(), 0, 0)
+ st = state.New(db, repo.BestBlockSummary().Root())
chain := repo.NewBestChain()
rt := runtime.New(chain, st, &xenv.BlockContext{
@@ -838,14 +838,14 @@ func TestPrototypeNativeWithBlockNumber(t *testing.T) {
db := muxdb.NewMem()
gene := genesis.NewDevnet()
genesisBlock, _, _, _ := gene.Build(state.NewStater(db))
- st := state.New(db, genesisBlock.Header().StateRoot(), 0, 0, 0)
+ st := state.New(db, trie.Root{Hash: genesisBlock.Header().StateRoot()})
repo, _ := chain.NewRepository(db, genesisBlock)
launchTime := genesisBlock.Header().Timestamp()
for i := 1; i < 100; i++ {
st.SetBalance(acc1, big.NewInt(int64(i)))
st.SetEnergy(acc1, big.NewInt(int64(i)), launchTime+uint64(i)*10)
- stage, _ := st.Stage(uint32(i), 0)
+ stage, _ := st.Stage(trie.Version{Major: uint32(i)})
stateRoot, _ := stage.Commit()
b := new(block.Builder).
ParentID(repo.BestBlockSummary().Header.ID()).
@@ -854,11 +854,10 @@ func TestPrototypeNativeWithBlockNumber(t *testing.T) {
StateRoot(stateRoot).
Build().
WithSignature(sig[:])
- repo.AddBlock(b, tx.Receipts{}, 0)
- repo.SetBestBlockID(b.Header().ID())
+ repo.AddBlock(b, tx.Receipts{}, 0, true)
}
- st = state.New(db, repo.BestBlockSummary().Header.StateRoot(), repo.BestBlockSummary().Header.Number(), 0, repo.BestBlockSummary().SteadyNum)
+ st = state.New(db, repo.BestBlockSummary().Root())
chain := repo.NewBestChain()
rt := runtime.New(chain, st, &xenv.BlockContext{
@@ -898,7 +897,7 @@ func newBlock(parent *block.Block, score uint64, timestamp uint64, privateKey *e
func TestExtensionNative(t *testing.T) {
db := muxdb.NewMem()
- st := state.New(db, thor.Bytes32{}, 0, 0, 0)
+ st := state.New(db, trie.Root{})
gene := genesis.NewDevnet()
genesisBlock, _, _, _ := gene.Build(state.NewStater(db))
repo, _ := chain.NewRepository(db, genesisBlock)
@@ -920,9 +919,9 @@ func TestExtensionNative(t *testing.T) {
gasPayer := thor.BytesToAddress([]byte("gasPayer"))
- err := repo.AddBlock(b1, nil, 0)
+ err := repo.AddBlock(b1, nil, 0, false)
assert.Equal(t, err, nil)
- err = repo.AddBlock(b2, nil, 0)
+ err = repo.AddBlock(b2, nil, 0, false)
assert.Equal(t, err, nil)
assert.Equal(t, builtin.Extension.Address, builtin.Extension.Address)
diff --git a/builtin/params/params_test.go b/builtin/params/params_test.go
index 484442b14..277e99930 100644
--- a/builtin/params/params_test.go
+++ b/builtin/params/params_test.go
@@ -13,11 +13,11 @@ import (
"github.com/vechain/thor/v2/muxdb"
"github.com/vechain/thor/v2/state"
"github.com/vechain/thor/v2/thor"
+ "github.com/vechain/thor/v2/trie"
)
func TestParamsGetSet(t *testing.T) {
- db := muxdb.NewMem()
- st := state.New(db, thor.Bytes32{}, 0, 0, 0)
+ st := state.New(muxdb.NewMem(), trie.Root{})
setv := big.NewInt(10)
key := thor.BytesToBytes32([]byte("key"))
p := New(thor.BytesToAddress([]byte("par")), st)
diff --git a/builtin/prototype/prototype_test.go b/builtin/prototype/prototype_test.go
index 6cdf127af..d187cc16d 100644
--- a/builtin/prototype/prototype_test.go
+++ b/builtin/prototype/prototype_test.go
@@ -14,6 +14,7 @@ import (
"github.com/vechain/thor/v2/muxdb"
"github.com/vechain/thor/v2/state"
"github.com/vechain/thor/v2/thor"
+ "github.com/vechain/thor/v2/trie"
)
func M(a ...interface{}) []interface{} {
@@ -21,8 +22,7 @@ func M(a ...interface{}) []interface{} {
}
func TestPrototype(t *testing.T) {
- db := muxdb.NewMem()
- st := state.New(db, thor.Bytes32{}, 0, 0, 0)
+ st := state.New(muxdb.NewMem(), trie.Root{})
proto := prototype.New(thor.BytesToAddress([]byte("proto")), st)
binding := proto.Bind(thor.BytesToAddress([]byte("binding")))
diff --git a/builtin/prototype_native.go b/builtin/prototype_native.go
index 97e72fce1..5a039bd2e 100644
--- a/builtin/prototype_native.go
+++ b/builtin/prototype_native.go
@@ -94,7 +94,7 @@ func init() {
}
env.UseGas(thor.SloadGas)
- state := env.State().Checkout(summary.Header.StateRoot(), summary.Header.Number(), summary.Conflicts, summary.SteadyNum)
+ state := env.State().Checkout(summary.Root())
env.UseGas(thor.GetBalanceGas)
val, err := state.GetBalance(thor.Address(args.Self))
@@ -136,7 +136,7 @@ func init() {
}
env.UseGas(thor.SloadGas)
- state := env.State().Checkout(summary.Header.StateRoot(), summary.Header.Number(), summary.Conflicts, summary.SteadyNum)
+ state := env.State().Checkout(summary.Root())
env.UseGas(thor.GetBalanceGas)
val, err := state.GetEnergy(thor.Address(args.Self), summary.Header.Timestamp())
diff --git a/chain/block_reader_test.go b/chain/block_reader_test.go
index 7d4c306e3..804c91ce6 100644
--- a/chain/block_reader_test.go
+++ b/chain/block_reader_test.go
@@ -3,14 +3,13 @@
// Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying
// file LICENSE or
-package chain_test
+package chain
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/vechain/thor/v2/block"
- "github.com/vechain/thor/v2/chain"
)
func TestBlockReader(t *testing.T) {
@@ -18,22 +17,20 @@ func TestBlockReader(t *testing.T) {
b0 := repo.GenesisBlock()
b1 := newBlock(b0, 10)
- repo.AddBlock(b1, nil, 0)
+ repo.AddBlock(b1, nil, 0, false)
b2 := newBlock(b1, 20)
- repo.AddBlock(b2, nil, 0)
+ repo.AddBlock(b2, nil, 0, false)
b3 := newBlock(b2, 30)
- repo.AddBlock(b3, nil, 0)
+ repo.AddBlock(b3, nil, 0, false)
b4 := newBlock(b3, 40)
- repo.AddBlock(b4, nil, 0)
-
- repo.SetBestBlockID(b4.Header().ID())
+ repo.AddBlock(b4, nil, 0, true)
br := repo.NewBlockReader(b2.Header().ID())
- var blks []*chain.ExtendedBlock
+ var blks []*ExtendedBlock
for {
r, err := br.Read()
@@ -46,7 +43,7 @@ func TestBlockReader(t *testing.T) {
blks = append(blks, r...)
}
- assert.Equal(t, []*chain.ExtendedBlock{
+ assert.Equal(t, []*ExtendedBlock{
{block.Compose(b3.Header(), b3.Transactions()), false},
{block.Compose(b4.Header(), b4.Transactions()), false}},
blks)
@@ -57,25 +54,23 @@ func TestBlockReaderFork(t *testing.T) {
b0 := repo.GenesisBlock()
b1 := newBlock(b0, 10)
- repo.AddBlock(b1, nil, 0)
+ repo.AddBlock(b1, nil, 0, false)
b2 := newBlock(b1, 20)
- repo.AddBlock(b2, nil, 0)
+ repo.AddBlock(b2, nil, 0, false)
b2x := newBlock(b1, 20)
- repo.AddBlock(b2x, nil, 1)
+ repo.AddBlock(b2x, nil, 1, false)
b3 := newBlock(b2, 30)
- repo.AddBlock(b3, nil, 0)
+ repo.AddBlock(b3, nil, 0, false)
b4 := newBlock(b3, 40)
- repo.AddBlock(b4, nil, 0)
-
- repo.SetBestBlockID(b4.Header().ID())
+ repo.AddBlock(b4, nil, 0, true)
br := repo.NewBlockReader(b2x.Header().ID())
- var blks []*chain.ExtendedBlock
+ var blks []*ExtendedBlock
for {
r, err := br.Read()
@@ -89,7 +84,7 @@ func TestBlockReaderFork(t *testing.T) {
blks = append(blks, r...)
}
- assert.Equal(t, []*chain.ExtendedBlock{
+ assert.Equal(t, []*ExtendedBlock{
{block.Compose(b2x.Header(), b2x.Transactions()), true},
{block.Compose(b2.Header(), b2.Transactions()), false},
{block.Compose(b3.Header(), b3.Transactions()), false},
diff --git a/chain/chain.go b/chain/chain.go
index 0ee205402..c7e30d9e9 100644
--- a/chain/chain.go
+++ b/chain/chain.go
@@ -7,6 +7,7 @@ package chain
import (
"encoding/binary"
+ "fmt"
"math"
"sort"
@@ -34,8 +35,8 @@ type storageTxMeta struct {
// TxMeta contains tx location and reversal state.
type TxMeta struct {
- // The block id this tx is involved.
- BlockID thor.Bytes32
+ // The number of block this tx is involved.
+ BlockNum, BlockConflicts uint32
// Index the position of the tx in block's txs.
Index uint64 // rlp require uint64.
@@ -64,9 +65,9 @@ func newChain(repo *Repository, headID thor.Bytes32) *Chain {
func() (*muxdb.Trie, error) {
if indexTrie == nil && initErr == nil {
if summary, err := repo.GetBlockSummary(headID); err == nil {
- indexTrie = repo.db.NewNonCryptoTrie(IndexTrieName, trie.NonCryptoNodeHash, summary.Header.Number(), summary.Conflicts)
+ indexTrie = repo.db.NewTrie(IndexTrieName, summary.IndexRoot())
} else {
- initErr = errors.Wrap(err, "lazy init chain")
+ initErr = errors.Wrap(err, fmt.Sprintf("lazy init chain, head=%v", headID))
}
}
return indexTrie, initErr
@@ -106,35 +107,31 @@ func (c *Chain) GetBlockID(num uint32) (thor.Bytes32, error) {
// GetTransactionMeta returns tx meta by given tx id.
func (c *Chain) GetTransactionMeta(id thor.Bytes32) (*TxMeta, error) {
- // precheck. point access is faster than range access.
- if has, err := c.repo.txIndexer.Has(id[:]); err != nil {
- return nil, err
- } else if !has {
- return nil, errNotFound
- }
-
iter := c.repo.txIndexer.Iterate(kv.Range(*util.BytesPrefix(id[:])))
defer iter.Release()
for iter.Next() {
- if len(iter.Key()) != 64 { // skip the pure txid key
+ ver := iter.Key()[32:]
+ blockNum, n := binary.Uvarint(ver)
+ conflicts, _ := binary.Uvarint(ver[n:])
+
+ if blockNum > uint64(block.Number(c.headID)) {
continue
}
- blockID := thor.BytesToBytes32(iter.Key()[32:])
-
- has, err := c.HasBlock(blockID)
+ s, err := c.GetBlockSummary(uint32(blockNum))
if err != nil {
- return nil, err
+ return nil, errors.Wrap(err, "block missing")
}
- if has {
+ if s.Conflicts == uint32(conflicts) {
var sMeta storageTxMeta
if err := rlp.DecodeBytes(iter.Value(), &sMeta); err != nil {
return nil, err
}
return &TxMeta{
- BlockID: blockID,
- Index: sMeta.Index,
- Reverted: sMeta.Reverted,
+ BlockNum: uint32(blockNum),
+ BlockConflicts: uint32(conflicts),
+ Index: sMeta.Index,
+ Reverted: sMeta.Reverted,
}, nil
}
}
@@ -152,30 +149,55 @@ func (c *Chain) HasTransaction(txid thor.Bytes32, txBlockRef uint32) (bool, erro
if txBlockRef > headNum {
return false, nil
}
- // tx block ref too old, fallback to retrieve tx meta.
- if headNum-txBlockRef > 100 {
- if _, err := c.GetTransactionMeta(txid); err != nil {
- if c.IsNotFound(err) {
- return false, nil
+
+	// the ref block is recent; scan block summaries from head down to the ref block, if any.
+ if headNum-txBlockRef < 100 {
+ // iterate block summaries from head block to ref block,
+ // to match tx id.
+ for nextID := c.headID; block.Number(nextID) >= txBlockRef && block.Number(nextID) != math.MaxUint32; {
+ s, err := c.repo.GetBlockSummary(nextID)
+ if err != nil {
+ return false, err
}
- return false, err
+ for _, _txid := range s.Txs {
+ if _txid == txid {
+ return true, nil
+ }
+ }
+ nextID = s.Header.ParentID()
}
- return true, nil
+ return false, nil
+ }
+
+ // tx block ref too old, fallback to check tx meta.
+ if has, err := c.repo.txIndexer.Has(txid[:txFilterKeyLen]); err != nil {
+ return false, err
+ } else if !has {
+ return false, nil
}
- // iterate block summaries from head block to ref block,
- // to match tx id.
- for nextID := c.headID; block.Number(nextID) >= txBlockRef && block.Number(nextID) != math.MaxUint32; {
- s, err := c.repo.GetBlockSummary(nextID)
+ iter := c.repo.txIndexer.Iterate(kv.Range(*util.BytesPrefix(txid[:])))
+ defer iter.Release()
+ for iter.Next() {
+ ver := iter.Key()[32:]
+ blockNum, n := binary.Uvarint(ver)
+ conflicts, _ := binary.Uvarint(ver[n:])
+
+ if blockNum > uint64(block.Number(c.headID)) {
+ continue
+ }
+
+ s, err := c.GetBlockSummary(uint32(blockNum))
if err != nil {
- return false, err
+ return false, errors.Wrap(err, "block missing")
}
- for _, _txid := range s.Txs {
- if _txid == txid {
- return true, nil
- }
+
+ if s.Conflicts == uint32(conflicts) {
+ return true, nil
}
- nextID = s.Header.ParentID()
+ }
+ if err := iter.Error(); err != nil {
+ return false, err
}
return false, nil
}
@@ -190,7 +212,7 @@ func (c *Chain) GetBlockHeader(num uint32) (*block.Header, error) {
}
// GetBlockSummary returns block summary by given block number.
-func (c Chain) GetBlockSummary(num uint32) (*BlockSummary, error) {
+func (c *Chain) GetBlockSummary(num uint32) (*BlockSummary, error) {
id, err := c.GetBlockID(num)
if err != nil {
return nil, err
@@ -214,8 +236,7 @@ func (c *Chain) GetTransaction(id thor.Bytes32) (*tx.Transaction, *TxMeta, error
return nil, nil, err
}
- key := makeTxKey(txMeta.BlockID, txInfix)
- key.SetIndex(txMeta.Index)
+ key := appendTxKey(nil, txMeta.BlockNum, txMeta.BlockConflicts, txMeta.Index, txFlag)
tx, err := c.repo.getTransaction(key)
if err != nil {
return nil, nil, err
@@ -230,8 +251,7 @@ func (c *Chain) GetTransactionReceipt(txID thor.Bytes32) (*tx.Receipt, error) {
return nil, err
}
- key := makeTxKey(txMeta.BlockID, receiptInfix)
- key.SetIndex(txMeta.Index)
+ key := appendTxKey(nil, txMeta.BlockNum, txMeta.BlockConflicts, txMeta.Index, receiptFlag)
receipt, err := c.repo.getReceipt(key)
if err != nil {
return nil, err
@@ -352,22 +372,15 @@ func (r *Repository) NewChain(headID thor.Bytes32) *Chain {
return newChain(r, headID)
}
-func (r *Repository) indexBlock(parentConflicts uint32, newBlockID thor.Bytes32, newConflicts uint32) error {
- var (
- newNum = block.Number(newBlockID)
- root thor.Bytes32
- )
-
- if newNum != 0 { // not a genesis block
- root = trie.NonCryptoNodeHash
- }
-
- trie := r.db.NewNonCryptoTrie(IndexTrieName, root, newNum-1, parentConflicts)
+func (r *Repository) indexBlock(parentRoot trie.Root, newBlockID thor.Bytes32, newConflicts uint32) error {
+ t := r.db.NewTrie(IndexTrieName, parentRoot)
// map block number to block ID
- if err := trie.Update(newBlockID[:4], newBlockID[:], nil); err != nil {
+ if err := t.Update(newBlockID[:4], newBlockID[:], nil); err != nil {
return err
}
-
- _, commit := trie.Stage(newNum, newConflicts)
- return commit()
+ return t.Commit(
+ trie.Version{
+ Major: block.Number(newBlockID),
+ Minor: newConflicts},
+ true)
}
diff --git a/chain/chain_test.go b/chain/chain_test.go
index d61b38c52..1b6a3f970 100644
--- a/chain/chain_test.go
+++ b/chain/chain_test.go
@@ -3,15 +3,16 @@
// Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying
// file LICENSE or
-package chain_test
+package chain
import (
"testing"
"github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/rlp"
"github.com/stretchr/testify/assert"
"github.com/vechain/thor/v2/block"
- "github.com/vechain/thor/v2/chain"
+ "github.com/vechain/thor/v2/test/datagen"
"github.com/vechain/thor/v2/thor"
"github.com/vechain/thor/v2/tx"
)
@@ -29,18 +30,18 @@ func TestChain(t *testing.T) {
_, repo := newTestRepo()
b1 := newBlock(repo.GenesisBlock(), 10, tx1)
- tx1Meta := &chain.TxMeta{BlockID: b1.Header().ID(), Index: 0, Reverted: false}
+ tx1Meta := &TxMeta{BlockNum: 1, Index: 0, Reverted: false}
tx1Receipt := &tx.Receipt{}
- repo.AddBlock(b1, tx.Receipts{tx1Receipt}, 0)
+ repo.AddBlock(b1, tx.Receipts{tx1Receipt}, 0, false)
b2 := newBlock(b1, 20)
- repo.AddBlock(b2, nil, 0)
+ repo.AddBlock(b2, nil, 0, false)
b3 := newBlock(b2, 30)
- repo.AddBlock(b3, nil, 0)
+ repo.AddBlock(b3, nil, 0, false)
b3x := newBlock(b2, 30)
- repo.AddBlock(b3x, nil, 1)
+ repo.AddBlock(b3x, nil, 1, false)
c := repo.NewChain(b3.Header().ID())
@@ -48,13 +49,26 @@ func TestChain(t *testing.T) {
assert.Equal(t, M(b3.Header().ID(), nil), M(c.GetBlockID(3)))
assert.Equal(t, M(b3.Header(), nil), M(c.GetBlockHeader(3)))
assert.Equal(t, M(block.Compose(b3.Header(), b3.Transactions()), nil), M(c.GetBlock(3)))
+ assert.Equal(t, repo.NewBestChain().GenesisID(), repo.GenesisBlock().Header().ID())
_, err := c.GetBlockID(4)
assert.True(t, c.IsNotFound(err))
assert.Equal(t, M(tx1Meta, nil), M(c.GetTransactionMeta(tx1.ID())))
- assert.Equal(t, M(tx1, tx1Meta, nil), M(c.GetTransaction(tx1.ID())))
- assert.Equal(t, M(tx1Receipt, nil), M(c.GetTransactionReceipt(tx1.ID())))
+ {
+ tx, meta, err := c.GetTransaction(tx1.ID())
+ assert.Nil(t, err)
+ assert.Equal(t, tx1Meta, meta)
+ assert.Equal(t, tx1.ID(), tx.ID())
+ }
+ {
+ r, err := c.GetTransactionReceipt(tx1.ID())
+ assert.Nil(t, err)
+ got, _ := rlp.EncodeToBytes(r)
+ want, _ := rlp.EncodeToBytes(tx1Receipt)
+ assert.Equal(t, want, got)
+ }
+
_, err = c.GetTransactionMeta(thor.Bytes32{})
assert.True(t, c.IsNotFound(err))
@@ -84,3 +98,27 @@ func TestChain(t *testing.T) {
_, err = dangleChain.Exclude(c1)
assert.Error(t, err)
}
+
+func TestHasTransaction(t *testing.T) {
+ _, repo := newTestRepo()
+
+ parent := repo.GenesisBlock()
+ for i := 1; i <= 101; i++ {
+ b := newBlock(parent, uint64(i)*10)
+ asBest := i == 101
+ repo.AddBlock(b, nil, 0, asBest)
+ parent = b
+ }
+
+ has, err := repo.NewBestChain().HasTransaction(datagen.RandomHash(), 0)
+ assert.Nil(t, err)
+ assert.False(t, has)
+
+ tx1 := newTx()
+ bx := newBlock(parent, 10020, tx1)
+ repo.AddBlock(bx, tx.Receipts{&tx.Receipt{}}, 0, true)
+
+ has, err = repo.NewBestChain().HasTransaction(tx1.ID(), 0)
+ assert.Nil(t, err)
+ assert.True(t, has)
+}
diff --git a/chain/metrics.go b/chain/metrics.go
new file mode 100644
index 000000000..8c9a764d4
--- /dev/null
+++ b/chain/metrics.go
@@ -0,0 +1,12 @@
+// Copyright (c) 2024 The VeChainThor developers
+//
+// Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying
+// file LICENSE or
+
+package chain
+
+import "github.com/vechain/thor/v2/metrics"
+
+var (
+ metricCacheHitMiss = metrics.LazyLoadCounterVec("repo_cache_hit_miss_count", []string{"type", "event"})
+)
diff --git a/chain/persist.go b/chain/persist.go
index fa1f97a9d..0a73b98ac 100644
--- a/chain/persist.go
+++ b/chain/persist.go
@@ -12,13 +12,16 @@ import (
"github.com/vechain/thor/v2/block"
"github.com/vechain/thor/v2/kv"
"github.com/vechain/thor/v2/thor"
- "github.com/vechain/thor/v2/tx"
+ "github.com/vechain/thor/v2/trie"
)
-const (
- txInfix = byte(0)
- receiptInfix = byte(1)
-)
+// appendTxKey composes the key to access tx or receipt.
+func appendTxKey(buf []byte, blockNum, blockConflicts uint32, index uint64, flag byte) []byte {
+ buf = binary.BigEndian.AppendUint32(buf, blockNum)
+ buf = binary.AppendUvarint(buf, uint64(blockConflicts))
+ buf = append(buf, flag)
+ return binary.AppendUvarint(buf, index)
+}
// BlockSummary presents block summary.
type BlockSummary struct {
@@ -26,21 +29,29 @@ type BlockSummary struct {
Txs []thor.Bytes32
Size uint64
Conflicts uint32
- SteadyNum uint32
}
-// the key for tx/receipt.
-// it consists of: ( block id | infix | index )
-type txKey [32 + 1 + 8]byte
-
-func makeTxKey(blockID thor.Bytes32, infix byte) (k txKey) {
- copy(k[:], blockID[:])
- k[32] = infix
- return
+// Root returns state root for accessing state trie.
+func (s *BlockSummary) Root() trie.Root {
+ return trie.Root{
+ Hash: s.Header.StateRoot(),
+ Ver: trie.Version{
+ Major: s.Header.Number(),
+ Minor: s.Conflicts,
+ },
+ }
}
-func (k *txKey) SetIndex(i uint64) {
- binary.BigEndian.PutUint64(k[33:], i)
+// IndexRoot returns index root for accessing index trie.
+func (s *BlockSummary) IndexRoot() trie.Root {
+ return trie.Root{
+		// the index trie skips hashing, so just provide a non-zero hash here
+ Hash: thor.BytesToBytes32([]byte{1}),
+ Ver: trie.Version{
+ Major: s.Header.Number(),
+ Minor: s.Conflicts,
+ },
+ }
}
func saveRLP(w kv.Putter, key []byte, val interface{}) error {
@@ -63,6 +74,9 @@ func saveBlockSummary(w kv.Putter, summary *BlockSummary) error {
return saveRLP(w, summary.Header.ID().Bytes(), summary)
}
+// indexChainHead puts a header into the store: it writes the block id and deletes the parent id,
+// so only one block id is stored per branch (fork). As a result, every fork's head can be
+// discovered by iterating the index store.
func indexChainHead(w kv.Putter, header *block.Header) error {
if err := w.Delete(header.ParentID().Bytes()); err != nil {
return err
@@ -78,27 +92,3 @@ func loadBlockSummary(r kv.Getter, id thor.Bytes32) (*BlockSummary, error) {
}
return &summary, nil
}
-
-func saveTransaction(w kv.Putter, key txKey, tx *tx.Transaction) error {
- return saveRLP(w, key[:], tx)
-}
-
-func loadTransaction(r kv.Getter, key txKey) (*tx.Transaction, error) {
- var tx tx.Transaction
- if err := loadRLP(r, key[:], &tx); err != nil {
- return nil, err
- }
- return &tx, nil
-}
-
-func saveReceipt(w kv.Putter, key txKey, receipt *tx.Receipt) error {
- return saveRLP(w, key[:], receipt)
-}
-
-func loadReceipt(r kv.Getter, key txKey) (*tx.Receipt, error) {
- var receipt tx.Receipt
- if err := loadRLP(r, key[:], &receipt); err != nil {
- return nil, err
- }
- return &receipt, nil
-}
diff --git a/chain/repository.go b/chain/repository.go
index 2460d6a3c..5b69d88e2 100644
--- a/chain/repository.go
+++ b/chain/repository.go
@@ -16,20 +16,25 @@ import (
"github.com/vechain/thor/v2/kv"
"github.com/vechain/thor/v2/muxdb"
"github.com/vechain/thor/v2/thor"
+ "github.com/vechain/thor/v2/trie"
"github.com/vechain/thor/v2/tx"
)
const (
- dataStoreName = "chain.data"
- propStoreName = "chain.props"
- headStoreName = "chain.heads"
- txIndexStoreName = "chain.txi"
+ hdrStoreName = "chain.hdr" // for block headers
+ bodyStoreName = "chain.body" // for block bodies and receipts
+ propStoreName = "chain.props" // for property-named blocks such as best block
+ headStoreName = "chain.heads" // for chain heads ( including uncles )
+ txIndexStoreName = "chain.txi" // for tx metadata
+
+ txFlag = byte(0) // flag byte of the key for saving tx blob
+	receiptFlag = byte(1) // flag byte of the key for saving receipt blob
+ txFilterKeyLen = 8
)
var (
- errNotFound = errors.New("not found")
- bestBlockIDKey = []byte("best-block-id")
- steadyBlockIDKey = []byte("steady-block-id")
+ errNotFound = errors.New("not found")
+ bestBlockIDKey = []byte("best-block-id")
)
// Repository stores block headers, txs and receipts.
@@ -37,15 +42,16 @@ var (
// It's thread-safe.
type Repository struct {
db *muxdb.MuxDB
- data kv.Store
- head kv.Store
- props kv.Store
+ hdrStore kv.Store
+ bodyStore kv.Store
+ propStore kv.Store
+ headStore kv.Store
txIndexer kv.Store
- genesis *block.Block
+ genesis *block.Block
+ tag byte
+
bestSummary atomic.Value
- steadyID atomic.Value
- tag byte
tick co.Signal
caches struct {
@@ -67,9 +73,10 @@ func NewRepository(db *muxdb.MuxDB, genesis *block.Block) (*Repository, error) {
genesisID := genesis.Header().ID()
repo := &Repository{
db: db,
- data: db.NewStore(dataStoreName),
- head: db.NewStore(headStoreName),
- props: db.NewStore(propStoreName),
+ hdrStore: db.NewStore(hdrStoreName),
+ bodyStore: db.NewStore(bodyStoreName),
+ propStore: db.NewStore(propStoreName),
+ headStore: db.NewStore(headStoreName),
txIndexer: db.NewStore(txIndexStoreName),
genesis: genesis,
tag: genesisID[31],
@@ -79,17 +86,15 @@ func NewRepository(db *muxdb.MuxDB, genesis *block.Block) (*Repository, error) {
repo.caches.txs = newCache(2048)
repo.caches.receipts = newCache(2048)
- if val, err := repo.props.Get(bestBlockIDKey); err != nil {
- if !repo.props.IsNotFound(err) {
+ if val, err := repo.propStore.Get(bestBlockIDKey); err != nil {
+ if !repo.propStore.IsNotFound(err) {
return nil, err
}
- if err := repo.indexBlock(0, genesis.Header().ID(), 0); err != nil {
+ if err := repo.indexBlock(trie.Root{}, genesis.Header().ID(), 0); err != nil {
return nil, err
}
- if summary, err := repo.saveBlock(genesis, nil, 0, 0); err != nil {
- return nil, err
- } else if err := repo.setBestBlockSummary(summary); err != nil {
+ if _, err := repo.saveBlock(genesis, nil, 0, true); err != nil {
return nil, err
}
} else {
@@ -109,14 +114,6 @@ func NewRepository(db *muxdb.MuxDB, genesis *block.Block) (*Repository, error) {
repo.bestSummary.Store(summary)
}
- if val, err := repo.props.Get(steadyBlockIDKey); err != nil {
- if !repo.props.IsNotFound(err) {
- return nil, err
- }
- repo.steadyID.Store(genesis.Header().ID())
- } else {
- repo.steadyID.Store(thor.BytesToBytes32(val))
- }
return repo, nil
}
@@ -135,115 +132,89 @@ func (r *Repository) BestBlockSummary() *BlockSummary {
return r.bestSummary.Load().(*BlockSummary)
}
-// SetBestBlockID set the given block id as best block id.
-func (r *Repository) SetBestBlockID(id thor.Bytes32) (err error) {
- defer func() {
- if err == nil {
- r.tick.Broadcast()
- }
- }()
- summary, err := r.GetBlockSummary(id)
- if err != nil {
- return err
- }
- return r.setBestBlockSummary(summary)
-}
-
-func (r *Repository) setBestBlockSummary(summary *BlockSummary) error {
- if err := r.props.Put(bestBlockIDKey, summary.Header.ID().Bytes()); err != nil {
- return err
- }
- r.bestSummary.Store(summary)
- return nil
-}
-
-// SteadyBlockID return the head block id of the steady chain.
-func (r *Repository) SteadyBlockID() thor.Bytes32 {
- return r.steadyID.Load().(thor.Bytes32)
-}
-
-// SetSteadyBlockID set the given block id as the head block id of the steady chain.
-func (r *Repository) SetSteadyBlockID(id thor.Bytes32) error {
- prev := r.steadyID.Load().(thor.Bytes32)
-
- if has, err := r.NewChain(id).HasBlock(prev); err != nil {
- return err
- } else if !has {
- // the previous steady id is not on the chain of the new id.
- return errors.New("invalid new steady block id")
- }
- if err := r.props.Put(steadyBlockIDKey, id[:]); err != nil {
- return err
- }
- r.steadyID.Store(id)
- return nil
-}
-
-func (r *Repository) saveBlock(block *block.Block, receipts tx.Receipts, conflicts, steadyNum uint32) (*BlockSummary, error) {
+func (r *Repository) saveBlock(block *block.Block, receipts tx.Receipts, conflicts uint32, asBest bool) (*BlockSummary, error) {
var (
- header = block.Header()
- id = header.ID()
- txs = block.Transactions()
- summary = BlockSummary{header, []thor.Bytes32{}, uint64(block.Size()), conflicts, steadyNum}
- bulk = r.db.NewStore("").Bulk()
- indexPutter = kv.Bucket(txIndexStoreName).NewPutter(bulk)
- dataPutter = kv.Bucket(dataStoreName).NewPutter(bulk)
- headPutter = kv.Bucket(headStoreName).NewPutter(bulk)
+ header = block.Header()
+ id = header.ID()
+ num = header.Number()
+ txs = block.Transactions()
+ txIDs = []thor.Bytes32{}
+ bulk = r.db.NewStore("").Bulk()
+ hdrPutter = kv.Bucket(hdrStoreName).NewPutter(bulk)
+ bodyPutter = kv.Bucket(bodyStoreName).NewPutter(bulk)
+ propPutter = kv.Bucket(propStoreName).NewPutter(bulk)
+ headPutter = kv.Bucket(headStoreName).NewPutter(bulk)
+ txIndexPutter = kv.Bucket(txIndexStoreName).NewPutter(bulk)
+ keyBuf []byte
)
if len(txs) > 0 {
- // index txs
- buf := make([]byte, 64)
- copy(buf[32:], id[:])
+ // index and save txs
for i, tx := range txs {
txid := tx.ID()
- summary.Txs = append(summary.Txs, txid)
+ txIDs = append(txIDs, txid)
- // to accelerate point access
- if err := indexPutter.Put(txid[:], nil); err != nil {
+ // write the filter key
+ if err := txIndexPutter.Put(txid[:txFilterKeyLen], nil); err != nil {
return nil, err
}
+ // write tx metadata
+ keyBuf = append(keyBuf[:0], txid[:]...)
+ keyBuf = binary.AppendUvarint(keyBuf, uint64(header.Number()))
+ keyBuf = binary.AppendUvarint(keyBuf, uint64(conflicts))
- copy(buf, txid[:])
- if err := saveRLP(indexPutter, buf, &storageTxMeta{
+ if err := saveRLP(txIndexPutter, keyBuf, &storageTxMeta{
Index: uint64(i),
Reverted: receipts[i].Reverted,
}); err != nil {
return nil, err
}
- }
- // save tx & receipt data
- key := makeTxKey(id, txInfix)
- for i, tx := range txs {
- key.SetIndex(uint64(i))
- if err := saveTransaction(dataPutter, key, tx); err != nil {
+ // write the tx blob
+ keyBuf = appendTxKey(keyBuf[:0], num, conflicts, uint64(i), txFlag)
+ if err := saveRLP(bodyPutter, keyBuf[:], tx); err != nil {
return nil, err
}
- r.caches.txs.Add(key, tx)
+ r.caches.txs.Add(string(keyBuf), tx)
}
- key = makeTxKey(id, receiptInfix)
+
+ // save receipts
for i, receipt := range receipts {
- key.SetIndex(uint64(i))
- if err := saveReceipt(dataPutter, key, receipt); err != nil {
+ keyBuf = appendTxKey(keyBuf[:0], num, conflicts, uint64(i), receiptFlag)
+ if err := saveRLP(bodyPutter, keyBuf, receipt); err != nil {
return nil, err
}
- r.caches.receipts.Add(key, receipt)
+ r.caches.receipts.Add(string(keyBuf), receipt)
}
}
if err := indexChainHead(headPutter, header); err != nil {
return nil, err
}
- if err := saveBlockSummary(dataPutter, &summary); err != nil {
+ summary := BlockSummary{header, txIDs, uint64(block.Size()), conflicts}
+ if err := saveBlockSummary(hdrPutter, &summary); err != nil {
+ return nil, err
+ }
+
+ if asBest {
+ if err := propPutter.Put(bestBlockIDKey, id[:]); err != nil {
+ return nil, err
+ }
+ }
+
+ if err := bulk.Write(); err != nil {
return nil, err
}
r.caches.summaries.Add(id, &summary)
- return &summary, bulk.Write()
+ if asBest {
+ r.bestSummary.Store(&summary)
+ r.tick.Broadcast()
+ }
+ return &summary, nil
}
// AddBlock add a new block with its receipts into repository.
-func (r *Repository) AddBlock(newBlock *block.Block, receipts tx.Receipts, conflicts uint32) error {
+func (r *Repository) AddBlock(newBlock *block.Block, receipts tx.Receipts, conflicts uint32, asBest bool) error {
parentSummary, err := r.GetBlockSummary(newBlock.Header().ParentID())
if err != nil {
if r.IsNotFound(err) {
@@ -251,21 +222,11 @@ func (r *Repository) AddBlock(newBlock *block.Block, receipts tx.Receipts, confl
}
return err
}
- if err := r.indexBlock(parentSummary.Conflicts, newBlock.Header().ID(), conflicts); err != nil {
+ if err := r.indexBlock(parentSummary.IndexRoot(), newBlock.Header().ID(), conflicts); err != nil {
return err
}
- steadyNum := parentSummary.SteadyNum // initially inherits parent's steady num.
- newSteadyID := r.steadyID.Load().(thor.Bytes32)
- if newSteadyNum := block.Number(newSteadyID); steadyNum != newSteadyNum {
- if has, err := r.NewChain(parentSummary.Header.ID()).HasBlock(newSteadyID); err != nil {
- return err
- } else if has {
- // the chain of the new block contains the new steady id,
- steadyNum = newSteadyNum
- }
- }
- if _, err := r.saveBlock(newBlock, receipts, conflicts, steadyNum); err != nil {
+ if _, err := r.saveBlock(newBlock, receipts, conflicts, asBest); err != nil {
return err
}
return nil
@@ -273,27 +234,28 @@ func (r *Repository) AddBlock(newBlock *block.Block, receipts tx.Receipts, confl
// ScanConflicts returns the count of saved blocks with the given blockNum.
func (r *Repository) ScanConflicts(blockNum uint32) (uint32, error) {
- var prefix [4]byte
- binary.BigEndian.PutUint32(prefix[:], blockNum)
+ prefix := binary.BigEndian.AppendUint32(nil, blockNum)
- iter := r.data.Iterate(kv.Range(*util.BytesPrefix(prefix[:])))
+ iter := r.hdrStore.Iterate(kv.Range(*util.BytesPrefix(prefix)))
defer iter.Release()
count := uint32(0)
for iter.Next() {
- if len(iter.Key()) == 32 {
- count++
- }
+ count++
}
return count, iter.Error()
}
// ScanHeads returns all head blockIDs from the given blockNum(included) in descending order.
+// It returns the head block id of every fork stored in the local database after the given block number.
+// The following example will return B' and C.
+// A -> B -> C
+//
+// \ -> B'
func (r *Repository) ScanHeads(from uint32) ([]thor.Bytes32, error) {
- var start [4]byte
- binary.BigEndian.PutUint32(start[:], from)
+ start := binary.BigEndian.AppendUint32(nil, from)
- iter := r.head.Iterate(kv.Range{Start: start[:]})
+ iter := r.headStore.Iterate(kv.Range{Start: start})
defer iter.Release()
heads := make([]thor.Bytes32, 0, 16)
@@ -311,7 +273,7 @@ func (r *Repository) ScanHeads(from uint32) ([]thor.Bytes32, error) {
// GetMaxBlockNum returns the max committed block number.
func (r *Repository) GetMaxBlockNum() (uint32, error) {
- iter := r.data.Iterate(kv.Range{})
+ iter := r.hdrStore.Iterate(kv.Range{})
defer iter.Release()
if iter.Last() {
@@ -322,23 +284,37 @@ func (r *Repository) GetMaxBlockNum() (uint32, error) {
// GetBlockSummary get block summary by block id.
func (r *Repository) GetBlockSummary(id thor.Bytes32) (summary *BlockSummary, err error) {
- var cached interface{}
- if cached, err = r.caches.summaries.GetOrLoad(id, func() (interface{}, error) {
- return loadBlockSummary(r.data, id)
+ var blk interface{}
+ result := "hit"
+ if blk, err = r.caches.summaries.GetOrLoad(id, func() (interface{}, error) {
+ result = "miss"
+ return loadBlockSummary(r.hdrStore, id)
}); err != nil {
return
}
- return cached.(*BlockSummary), nil
+ metricCacheHitMiss().AddWithLabel(1, map[string]string{"type": "block-summary", "event": result})
+ return blk.(*BlockSummary), nil
}
-func (r *Repository) getTransaction(key txKey) (*tx.Transaction, error) {
- cached, err := r.caches.txs.GetOrLoad(key, func() (interface{}, error) {
- return loadTransaction(r.data, key)
+func (r *Repository) getTransaction(key []byte) (*tx.Transaction, error) {
+ result := "hit"
+ trx, err := r.caches.txs.GetOrLoad(string(key), func() (interface{}, error) {
+ result = "miss"
+ return loadTransaction(r.bodyStore, key)
})
if err != nil {
return nil, err
}
- return cached.(*tx.Transaction), nil
+ metricCacheHitMiss().AddWithLabel(1, map[string]string{"type": "transaction", "event": result})
+ return trx.(*tx.Transaction), nil
+}
+
+func loadTransaction(r kv.Getter, key []byte) (*tx.Transaction, error) {
+ var tx tx.Transaction
+ if err := loadRLP(r, key[:], &tx); err != nil {
+ return nil, err
+ }
+ return &tx, nil
}
// GetBlockTransactions get all transactions of the block for given block id.
@@ -350,9 +326,9 @@ func (r *Repository) GetBlockTransactions(id thor.Bytes32) (tx.Transactions, err
if n := len(summary.Txs); n > 0 {
txs := make(tx.Transactions, n)
- key := makeTxKey(id, txInfix)
+ var key []byte
for i := range summary.Txs {
- key.SetIndex(uint64(i))
+ key := appendTxKey(key[:0], summary.Header.Number(), summary.Conflicts, uint64(i), txFlag)
txs[i], err = r.getTransaction(key)
if err != nil {
return nil, err
@@ -376,14 +352,26 @@ func (r *Repository) GetBlock(id thor.Bytes32) (*block.Block, error) {
return block.Compose(summary.Header, txs), nil
}
-func (r *Repository) getReceipt(key txKey) (*tx.Receipt, error) {
- cached, err := r.caches.receipts.GetOrLoad(key, func() (interface{}, error) {
- return loadReceipt(r.data, key)
+
+func (r *Repository) getReceipt(key []byte) (*tx.Receipt, error) {
+ result := "hit"
+ receipt, err := r.caches.receipts.GetOrLoad(string(key), func() (interface{}, error) {
+ result = "miss"
+ return loadReceipt(r.bodyStore, key)
})
if err != nil {
return nil, err
}
- return cached.(*tx.Receipt), nil
+ metricCacheHitMiss().AddWithLabel(1, map[string]string{"type": "receipt", "event": result})
+ return receipt.(*tx.Receipt), nil
+}
+
+func loadReceipt(r kv.Getter, key []byte) (*tx.Receipt, error) {
+ var receipt tx.Receipt
+ if err := loadRLP(r, key[:], &receipt); err != nil {
+ return nil, err
+ }
+ return &receipt, nil
}
// GetBlockReceipts get all tx receipts of the block for given block id.
@@ -395,9 +383,9 @@ func (r *Repository) GetBlockReceipts(id thor.Bytes32) (tx.Receipts, error) {
if n := len(summary.Txs); n > 0 {
receipts := make(tx.Receipts, n)
- key := makeTxKey(id, receiptInfix)
+ var key []byte
for i := range summary.Txs {
- key.SetIndex(uint64(i))
+ key := appendTxKey(key[:0], summary.Header.Number(), summary.Conflicts, uint64(i), receiptFlag)
receipts[i], err = r.getReceipt(key)
if err != nil {
return nil, err
diff --git a/chain/repository_test.go b/chain/repository_test.go
index 1391acb8d..81bef17a5 100644
--- a/chain/repository_test.go
+++ b/chain/repository_test.go
@@ -3,7 +3,7 @@
// Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying
// file LICENSE or
-package chain_test
+package chain
import (
"testing"
@@ -11,10 +11,7 @@ import (
"github.com/ethereum/go-ethereum/crypto"
"github.com/stretchr/testify/assert"
"github.com/vechain/thor/v2/block"
- "github.com/vechain/thor/v2/chain"
- "github.com/vechain/thor/v2/genesis"
"github.com/vechain/thor/v2/muxdb"
- "github.com/vechain/thor/v2/state"
"github.com/vechain/thor/v2/thor"
"github.com/vechain/thor/v2/tx"
)
@@ -23,26 +20,19 @@ func M(args ...interface{}) []interface{} {
return args
}
-func newTestRepo() (*muxdb.MuxDB, *chain.Repository) {
+func newTestRepo() (*muxdb.MuxDB, *Repository) {
db := muxdb.NewMem()
- g := genesis.NewDevnet()
- b0, _, _, _ := g.Build(state.NewStater(db))
+ b0 := new(block.Builder).
+ ParentID(thor.Bytes32{0xff, 0xff, 0xff, 0xff}).
+ Build()
- repo, err := chain.NewRepository(db, b0)
+ repo, err := NewRepository(db, b0)
if err != nil {
panic(err)
}
return db, repo
}
-func reopenRepo(db *muxdb.MuxDB, b0 *block.Block) *chain.Repository {
- repo, err := chain.NewRepository(db, b0)
- if err != nil {
- panic(err)
- }
- return repo
-}
-
func newBlock(parent *block.Block, ts uint64, txs ...*tx.Transaction) *block.Block {
builder := new(block.Builder).
ParentID(parent.Header().ID()).
@@ -58,12 +48,11 @@ func newBlock(parent *block.Block, ts uint64, txs ...*tx.Transaction) *block.Blo
return b.WithSignature(sig)
}
-func TestRepository(t *testing.T) {
- db := muxdb.NewMem()
- g := genesis.NewDevnet()
- b0, _, _, _ := g.Build(state.NewStater(db))
+func TestRepositoryFunc(t *testing.T) {
+ db, repo1 := newTestRepo()
+ b0 := repo1.GenesisBlock()
- repo1, err := chain.NewRepository(db, b0)
+ repo1, err := NewRepository(db, b0)
if err != nil {
panic(err)
}
@@ -75,14 +64,15 @@ func TestRepository(t *testing.T) {
receipt1 := &tx.Receipt{}
b1 := newBlock(repo1.GenesisBlock(), 10, tx1)
- assert.Nil(t, repo1.AddBlock(b1, tx.Receipts{receipt1}, 0))
-
+ assert.Nil(t, repo1.AddBlock(b1, tx.Receipts{receipt1}, 0, false))
// best block not set, so still 0
assert.Equal(t, uint32(0), repo1.BestBlockSummary().Header.Number())
- repo1.SetBestBlockID(b1.Header().ID())
- repo2, _ := chain.NewRepository(db, b0)
- for _, repo := range []*chain.Repository{repo1, repo2} {
+ assert.Nil(t, repo1.AddBlock(b1, tx.Receipts{receipt1}, 0, true))
+ assert.Equal(t, uint32(1), repo1.BestBlockSummary().Header.Number())
+
+ repo2, _ := NewRepository(db, b0)
+ for _, repo := range []*Repository{repo1, repo2} {
assert.Equal(t, b1.Header().ID(), repo.BestBlockSummary().Header.ID())
s, err := repo.GetBlockSummary(b1.Header().ID())
assert.Nil(t, err)
@@ -99,54 +89,32 @@ func TestRepository(t *testing.T) {
}
}
+func TestAddBlock(t *testing.T) {
+ _, repo := newTestRepo()
+
+ err := repo.AddBlock(new(block.Builder).Build(), nil, 0, false)
+ assert.Error(t, err, "parent missing")
+
+ b1 := newBlock(repo.GenesisBlock(), 10)
+ assert.Nil(t, repo.AddBlock(b1, nil, 0, false))
+}
+
func TestConflicts(t *testing.T) {
_, repo := newTestRepo()
b0 := repo.GenesisBlock()
b1 := newBlock(b0, 10)
- repo.AddBlock(b1, nil, 0)
+ repo.AddBlock(b1, nil, 0, false)
assert.Equal(t, []interface{}{uint32(1), nil}, M(repo.GetMaxBlockNum()))
assert.Equal(t, []interface{}{uint32(1), nil}, M(repo.ScanConflicts(1)))
b1x := newBlock(b0, 20)
- repo.AddBlock(b1x, nil, 1)
+ repo.AddBlock(b1x, nil, 1, false)
assert.Equal(t, []interface{}{uint32(1), nil}, M(repo.GetMaxBlockNum()))
assert.Equal(t, []interface{}{uint32(2), nil}, M(repo.ScanConflicts(1)))
}
-func TestSteadyBlockID(t *testing.T) {
- db, repo := newTestRepo()
- b0 := repo.GenesisBlock()
-
- assert.Equal(t, b0.Header().ID(), repo.SteadyBlockID())
-
- b1 := newBlock(b0, 10)
- repo.AddBlock(b1, nil, 0)
-
- assert.Nil(t, repo.SetSteadyBlockID(b1.Header().ID()))
- assert.Equal(t, b1.Header().ID(), repo.SteadyBlockID())
-
- b2 := newBlock(b1, 10)
- repo.AddBlock(b2, nil, 0)
-
- assert.Nil(t, repo.SetSteadyBlockID(b2.Header().ID()))
- assert.Equal(t, b2.Header().ID(), repo.SteadyBlockID())
-
- b2x := newBlock(b1, 10)
- repo.AddBlock(b2x, nil, 1)
- assert.Error(t, repo.SetSteadyBlockID(b2x.Header().ID()))
- assert.Equal(t, b2.Header().ID(), repo.SteadyBlockID())
-
- b3 := newBlock(b2, 10)
- repo.AddBlock(b3, nil, 0)
- assert.Nil(t, repo.SetSteadyBlockID(b3.Header().ID()))
- assert.Equal(t, b3.Header().ID(), repo.SteadyBlockID())
-
- repo = reopenRepo(db, b0)
- assert.Equal(t, b3.Header().ID(), repo.SteadyBlockID())
-}
-
func TestScanHeads(t *testing.T) {
_, repo := newTestRepo()
@@ -156,14 +124,14 @@ func TestScanHeads(t *testing.T) {
assert.Equal(t, []thor.Bytes32{repo.GenesisBlock().Header().ID()}, heads)
b1 := newBlock(repo.GenesisBlock(), 10)
- err = repo.AddBlock(b1, nil, 0)
+ err = repo.AddBlock(b1, nil, 0, false)
assert.Nil(t, err)
heads, err = repo.ScanHeads(0)
assert.Nil(t, err)
assert.Equal(t, []thor.Bytes32{b1.Header().ID()}, heads)
b2 := newBlock(b1, 20)
- err = repo.AddBlock(b2, nil, 0)
+ err = repo.AddBlock(b2, nil, 0, false)
assert.Nil(t, err)
heads, err = repo.ScanHeads(0)
assert.Nil(t, err)
@@ -174,7 +142,7 @@ func TestScanHeads(t *testing.T) {
assert.Equal(t, 0, len(heads))
b2x := newBlock(b1, 20)
- err = repo.AddBlock(b2x, nil, 0)
+ err = repo.AddBlock(b2x, nil, 0, false)
assert.Nil(t, err)
heads, err = repo.ScanHeads(0)
assert.Nil(t, err)
@@ -186,7 +154,7 @@ func TestScanHeads(t *testing.T) {
}
b3 := newBlock(b2, 30)
- err = repo.AddBlock(b3, nil, 0)
+ err = repo.AddBlock(b3, nil, 0, false)
assert.Nil(t, err)
heads, err = repo.ScanHeads(0)
assert.Nil(t, err)
@@ -201,7 +169,7 @@ func TestScanHeads(t *testing.T) {
assert.Equal(t, []thor.Bytes32{b3.Header().ID()}, heads)
b3x := newBlock(b2, 30)
- err = repo.AddBlock(b3x, nil, 0)
+ err = repo.AddBlock(b3x, nil, 0, false)
assert.Nil(t, err)
heads, err = repo.ScanHeads(0)
assert.Nil(t, err)
diff --git a/cmd/thor/main.go b/cmd/thor/main.go
index 4b934bc13..ce8d1f965 100644
--- a/cmd/thor/main.go
+++ b/cmd/thor/main.go
@@ -22,7 +22,7 @@ import (
"github.com/vechain/thor/v2/api"
"github.com/vechain/thor/v2/bft"
"github.com/vechain/thor/v2/cmd/thor/node"
- "github.com/vechain/thor/v2/cmd/thor/optimizer"
+ "github.com/vechain/thor/v2/cmd/thor/pruner"
"github.com/vechain/thor/v2/cmd/thor/solo"
"github.com/vechain/thor/v2/genesis"
"github.com/vechain/thor/v2/log"
@@ -282,8 +282,10 @@ func defaultAction(ctx *cli.Context) error {
}
defer p2pCommunicator.Stop()
- optimizer := optimizer.New(mainDB, repo, !ctx.Bool(disablePrunerFlag.Name))
- defer func() { log.Info("stopping optimizer..."); optimizer.Stop() }()
+ if !ctx.Bool(disablePrunerFlag.Name) {
+ pruner := pruner.New(mainDB, repo)
+ defer func() { log.Info("stopping pruner..."); pruner.Stop() }()
+ }
return node.New(
master,
@@ -437,8 +439,10 @@ func soloAction(ctx *cli.Context) error {
printStartupMessage2(gene, apiURL, "", metricsURL, adminURL)
- optimizer := optimizer.New(mainDB, repo, !ctx.Bool(disablePrunerFlag.Name))
- defer func() { log.Info("stopping optimizer..."); optimizer.Stop() }()
+ if !ctx.Bool(disablePrunerFlag.Name) {
+ pruner := pruner.New(mainDB, repo)
+ defer func() { log.Info("stopping pruner..."); pruner.Stop() }()
+ }
return solo.New(repo,
state.NewStater(mainDB),
diff --git a/cmd/thor/node/node.go b/cmd/thor/node/node.go
index d103f227a..251e421b0 100644
--- a/cmd/thor/node/node.go
+++ b/cmd/thor/node/node.go
@@ -360,8 +360,16 @@ func (n *Node) processBlock(newBlock *block.Block, stats *blockStats) (bool, err
return errors.Wrap(err, "commit state")
}
+ // sync the log-writing task
+ if logEnabled {
+ if err := n.logWorker.Sync(); err != nil {
+ log.Warn("failed to write logs", "err", err)
+ n.logDBFailed = true
+ }
+ }
+
// add the new block into repository
- if err := n.repo.AddBlock(newBlock, receipts, conflicts); err != nil {
+ if err := n.repo.AddBlock(newBlock, receipts, conflicts, becomeNewBest); err != nil {
return errors.Wrap(err, "add block")
}
@@ -374,18 +382,7 @@ func (n *Node) processBlock(newBlock *block.Block, stats *blockStats) (bool, err
realElapsed := mclock.Now() - startTime
- // sync the log-writing task
- if logEnabled {
- if err := n.logWorker.Sync(); err != nil {
- logger.Warn("failed to write logs", "err", err)
- n.logDBFailed = true
- }
- }
-
if becomeNewBest {
- if err := n.repo.SetBestBlockID(newBlock.Header().ID()); err != nil {
- return err
- }
n.processFork(newBlock, oldBest.Header.ID())
}
diff --git a/cmd/thor/node/node_benchmark_test.go b/cmd/thor/node/node_benchmark_test.go
new file mode 100644
index 000000000..bdcf26f0b
--- /dev/null
+++ b/cmd/thor/node/node_benchmark_test.go
@@ -0,0 +1,541 @@
+// Copyright (c) 2024 The VeChainThor developers
+
+// Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying
+// file LICENSE or
+
+package node
+
+import (
+ "crypto/ecdsa"
+ "crypto/rand"
+ "fmt"
+ "math"
+ "math/big"
+ "path/filepath"
+ "runtime/debug"
+ "sync"
+ "testing"
+
+ "github.com/elastic/gosigar"
+ "github.com/ethereum/go-ethereum/common/fdlimit"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/pkg/errors"
+ "github.com/stretchr/testify/require"
+ "github.com/vechain/thor/v2/bft"
+ "github.com/vechain/thor/v2/block"
+ "github.com/vechain/thor/v2/chain"
+ "github.com/vechain/thor/v2/cmd/thor/solo"
+ "github.com/vechain/thor/v2/genesis"
+ "github.com/vechain/thor/v2/logdb"
+ "github.com/vechain/thor/v2/muxdb"
+ "github.com/vechain/thor/v2/packer"
+ "github.com/vechain/thor/v2/state"
+ "github.com/vechain/thor/v2/test/datagen"
+ "github.com/vechain/thor/v2/test/testchain"
+ "github.com/vechain/thor/v2/thor"
+ "github.com/vechain/thor/v2/tx"
+)
+
+var (
+ cachedAccounts []genesis.DevAccount
+ once sync.Once
+ blockCount = 1_000
+)
+
+func getCachedAccounts(b *testing.B) []genesis.DevAccount {
+ once.Do(func() {
+ cachedAccounts = createAccounts(b, 1_000)
+ })
+ return cachedAccounts
+}
+
+func BenchmarkBlockProcess_RandomSigners_ManyClausesPerTx_RealDB(b *testing.B) {
+ // create state accounts
+ accounts := getCachedAccounts(b)
+
+ // randomly pick a signer for signing the transactions
+	randomSignerFunc := randomPickSignerFunc(accounts, createManyClausesPerTx)
+
+ // create blocks
+ blocks := createBlocks(b, blockCount, accounts, randomSignerFunc)
+
+ // create test db - will be automagically removed when the benchmark ends
+ db, err := openTempMainDB(b.TempDir())
+ require.NoError(b, err)
+
+ // run the benchmark
+ benchmarkBlockProcess(b, db, accounts, blocks)
+}
+func BenchmarkBlockProcess_RandomSigners_OneClausePerTx_RealDB(b *testing.B) {
+ // create state accounts
+ accounts := getCachedAccounts(b)
+
+ // randomly pick a signer for signing the transactions
+	randomSignerFunc := randomPickSignerFunc(accounts, createOneClausePerTx)
+
+ // create blocks
+ blocks := createBlocks(b, blockCount, accounts, randomSignerFunc)
+
+ // create test db - will be automagically removed when the benchmark ends
+ db, err := openTempMainDB(b.TempDir())
+ require.NoError(b, err)
+
+ // run the benchmark
+ benchmarkBlockProcess(b, db, accounts, blocks)
+}
+func BenchmarkBlockProcess_ManyClausesPerTx_RealDB(b *testing.B) {
+ // create state accounts
+ accounts := getCachedAccounts(b)
+
+ // Use one signer for signing the transactions
+ singleSignerFun := randomPickSignerFunc([]genesis.DevAccount{accounts[0]}, createManyClausesPerTx)
+
+ // create blocks
+ blocks := createBlocks(b, blockCount, accounts, singleSignerFun)
+
+ // create test db - will be automagically removed when the benchmark ends
+ db, err := openTempMainDB(b.TempDir())
+ require.NoError(b, err)
+
+ // run the benchmark
+ benchmarkBlockProcess(b, db, accounts, blocks)
+}
+func BenchmarkBlockProcess_OneClausePerTx_RealDB(b *testing.B) {
+ // create state accounts
+ accounts := getCachedAccounts(b)
+
+ // Use one signer for signing the transactions
+ singleSignerFun := randomPickSignerFunc([]genesis.DevAccount{accounts[0]}, createOneClausePerTx)
+
+ // create blocks
+ blocks := createBlocks(b, blockCount, accounts, singleSignerFun)
+
+ // create test db - will be automagically removed when the benchmark ends
+ db, err := openTempMainDB(b.TempDir())
+ require.NoError(b, err)
+
+ // run the benchmark
+ benchmarkBlockProcess(b, db, accounts, blocks)
+}
+
+func BenchmarkBlockProcess_RandomSigners_ManyClausesPerTx(b *testing.B) {
+ // create state accounts
+ accounts := getCachedAccounts(b)
+
+ // randomly pick a signer for signing the transactions
+	randomSignerFunc := randomPickSignerFunc(accounts, createManyClausesPerTx)
+
+ // create blocks
+ blocks := createBlocks(b, blockCount, accounts, randomSignerFunc)
+
+ // create test db
+ db := muxdb.NewMem()
+
+ // run the benchmark
+ benchmarkBlockProcess(b, db, accounts, blocks)
+}
+
+func BenchmarkBlockProcess_RandomSigners_OneClausePerTx(b *testing.B) {
+ // create state accounts
+ accounts := getCachedAccounts(b)
+
+ // randomly pick a signer for signing the transactions
+	randomSignerFunc := randomPickSignerFunc(accounts, createOneClausePerTx)
+
+ // create blocks
+ blocks := createBlocks(b, blockCount, accounts, randomSignerFunc)
+
+ // create test db
+ db := muxdb.NewMem()
+
+ // run the benchmark
+ benchmarkBlockProcess(b, db, accounts, blocks)
+}
+
+func BenchmarkBlockProcess_ManyClausesPerTx(b *testing.B) {
+ // create state accounts
+ accounts := getCachedAccounts(b)
+
+ // Use one signer for signing the transactions
+ singleSignerFun := randomPickSignerFunc([]genesis.DevAccount{accounts[0]}, createManyClausesPerTx)
+
+ // create blocks
+ blocks := createBlocks(b, blockCount, accounts, singleSignerFun)
+
+ // create test db
+ db := muxdb.NewMem()
+
+ // run the benchmark
+ benchmarkBlockProcess(b, db, accounts, blocks)
+}
+
+func BenchmarkBlockProcess_OneClausePerTx(b *testing.B) {
+ // create state accounts
+ accounts := getCachedAccounts(b)
+
+ // Use one signer for signing the transactions
+ singleSignerFun := randomPickSignerFunc([]genesis.DevAccount{accounts[0]}, createOneClausePerTx)
+
+ // create blocks
+ blocks := createBlocks(b, blockCount, accounts, singleSignerFun)
+
+ // create test db
+ db := muxdb.NewMem()
+
+ // run the benchmark
+ benchmarkBlockProcess(b, db, accounts, blocks)
+}
+
+func benchmarkBlockProcess(b *testing.B, db *muxdb.MuxDB, accounts []genesis.DevAccount, blocks []*block.Block) {
+ // Initialize the test chain and dependencies
+ thorChain, err := createChain(db, accounts)
+ require.NoError(b, err)
+
+ proposer := &accounts[0]
+
+ engine, err := bft.NewEngine(thorChain.Repo(), thorChain.Database(), thorChain.GetForkConfig(), proposer.Address)
+ require.NoError(b, err)
+
+ node := New(
+ &Master{
+ PrivateKey: proposer.PrivateKey,
+ },
+ thorChain.Repo(),
+ engine,
+ thorChain.Stater(),
+ nil,
+ nil,
+ "",
+ nil,
+ 10_000_000,
+ true,
+ thor.NoFork,
+ )
+
+ stats := &blockStats{}
+
+ // Measure memory usage
+ b.ReportAllocs()
+
+ // Benchmark execution
+ b.ResetTimer()
+ for _, blk := range blocks {
+ _, err = node.processBlock(blk, stats)
+ if err != nil {
+ b.Fatalf("processBlock failed: %v", err)
+ }
+ }
+}
+
+func createBlocks(b *testing.B, noBlocks int, accounts []genesis.DevAccount, createTxFunc func(chain *testchain.Chain) (tx.Transactions, error)) []*block.Block {
+ proposer := &accounts[0]
+
+ // mock a fake chain for block production
+ fakeChain, err := createChain(muxdb.NewMem(), accounts)
+ require.NoError(b, err)
+
+ // pre-alloc blocks
+ var blocks []*block.Block
+ var transactions tx.Transactions
+
+ // Start from the Genesis block
+ previousBlock := fakeChain.GenesisBlock()
+ for i := 0; i < noBlocks; i++ {
+ transactions, err = createTxFunc(fakeChain)
+ require.NoError(b, err)
+ previousBlock, err = packTxsIntoBlock(
+ fakeChain,
+ proposer,
+ previousBlock,
+ transactions,
+ )
+ require.NoError(b, err)
+ blocks = append(blocks, previousBlock)
+ }
+
+ return blocks
+}
+
+func createOneClausePerTx(signerPK *ecdsa.PrivateKey, thorChain *testchain.Chain) (tx.Transactions, error) {
+ var transactions tx.Transactions
+ gasUsed := uint64(0)
+ for gasUsed < 9_500_000 {
+ toAddr := datagen.RandAddress()
+ cla := tx.NewClause(&toAddr).WithValue(big.NewInt(10000))
+ transaction := new(tx.Builder).
+ ChainTag(thorChain.Repo().ChainTag()).
+ GasPriceCoef(1).
+ Expiration(math.MaxUint32 - 1).
+ Gas(21_000).
+ Nonce(uint64(datagen.RandInt())).
+ Clause(cla).
+ BlockRef(tx.NewBlockRef(0)).
+ Build()
+
+ sig, err := crypto.Sign(transaction.SigningHash().Bytes(), signerPK)
+ if err != nil {
+ return nil, err
+ }
+ transaction = transaction.WithSignature(sig)
+
+ gasUsed += 21_000 // Gas per transaction
+ transactions = append(transactions, transaction)
+ }
+ return transactions, nil
+}
+
+func createManyClausesPerTx(signerPK *ecdsa.PrivateKey, thorChain *testchain.Chain) (tx.Transactions, error) {
+ var transactions tx.Transactions
+ gasUsed := uint64(0)
+ txGas := uint64(42_000)
+
+ transactionBuilder := new(tx.Builder).
+ ChainTag(thorChain.Repo().ChainTag()).
+ GasPriceCoef(1).
+ Expiration(math.MaxUint32 - 1).
+ Nonce(uint64(datagen.RandInt())).
+ BlockRef(tx.NewBlockRef(0))
+
+ for ; gasUsed < 9_500_000; gasUsed += txGas {
+ toAddr := datagen.RandAddress()
+ transactionBuilder.Clause(tx.NewClause(&toAddr).WithValue(big.NewInt(10000)))
+ }
+
+ transaction := transactionBuilder.Gas(gasUsed).Build()
+
+ sig, err := crypto.Sign(transaction.SigningHash().Bytes(), signerPK)
+ if err != nil {
+ return nil, err
+ }
+ transaction = transaction.WithSignature(sig)
+
+ transactions = append(transactions, transaction)
+
+ return transactions, nil
+}
+
+func packTxsIntoBlock(thorChain *testchain.Chain, proposerAccount *genesis.DevAccount, parentBlk *block.Block, transactions tx.Transactions) (*block.Block, error) {
+ p := packer.New(thorChain.Repo(), thorChain.Stater(), proposerAccount.Address, &proposerAccount.Address, thorChain.GetForkConfig())
+
+ parentSum, err := thorChain.Repo().GetBlockSummary(parentBlk.Header().ID())
+ if err != nil {
+ return nil, err
+ }
+
+ flow, err := p.Schedule(parentSum, parentBlk.Header().Timestamp()+1)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, transaction := range transactions {
+ err = flow.Adopt(transaction)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ b1, stage, receipts, err := flow.Pack(proposerAccount.PrivateKey, 0, false)
+ if err != nil {
+ return nil, err
+ }
+
+ if _, err := stage.Commit(); err != nil {
+ return nil, err
+ }
+
+ if err := thorChain.Repo().AddBlock(b1, receipts, 0, true); err != nil {
+ return nil, err
+ }
+
+ return b1, nil
+}
+
+func createChain(db *muxdb.MuxDB, accounts []genesis.DevAccount) (*testchain.Chain, error) {
+ forkConfig := thor.NoFork
+ forkConfig.VIP191 = 1
+ forkConfig.BLOCKLIST = 0
+ forkConfig.VIP214 = 2
+
+ // Create the state manager (Stater) with the initialized database.
+ stater := state.NewStater(db)
+
+ authAccs := make([]genesis.Authority, 0, len(accounts))
+ stateAccs := make([]genesis.Account, 0, len(accounts))
+
+ for _, acc := range accounts {
+ authAccs = append(authAccs, genesis.Authority{
+ MasterAddress: acc.Address,
+ EndorsorAddress: acc.Address,
+ Identity: thor.BytesToBytes32([]byte("master")),
+ })
+ bal, _ := new(big.Int).SetString("1000000000000000000000000000", 10)
+ stateAccs = append(stateAccs, genesis.Account{
+ Address: acc.Address,
+ Balance: (*genesis.HexOrDecimal256)(bal),
+ Energy: (*genesis.HexOrDecimal256)(bal),
+ Code: "",
+ Storage: nil,
+ })
+ }
+ mbp := uint64(1_000)
+ genConfig := genesis.CustomGenesis{
+ LaunchTime: 1526400000,
+ GasLimit: thor.InitialGasLimit,
+ ExtraData: "",
+ ForkConfig: &forkConfig,
+ Authority: authAccs,
+ Accounts: stateAccs,
+ Params: genesis.Params{
+ MaxBlockProposers: &mbp,
+ },
+ }
+
+ builder, err := genesis.NewCustomNet(&genConfig)
+ if err != nil {
+ return nil, err
+ }
+
+ // Initialize the genesis and retrieve the genesis block
+ //gene := genesis.NewDevnet()
+ geneBlk, _, _, err := builder.Build(stater)
+ if err != nil {
+ return nil, err
+ }
+
+ // Create the repository which manages chain data, using the database and genesis block.
+ repo, err := chain.NewRepository(db, geneBlk)
+ if err != nil {
+ return nil, err
+ }
+
+ // Create an inMemory logdb
+ logDb, err := logdb.NewMem()
+ if err != nil {
+ return nil, err
+ }
+
+ return testchain.New(
+ db,
+ builder,
+ solo.NewBFTEngine(repo),
+ repo,
+ stater,
+ geneBlk,
+ logDb,
+ thor.NoFork,
+ ), nil
+}
+
+func randomPickSignerFunc(
+ accounts []genesis.DevAccount,
+ createTxFun func(signerPK *ecdsa.PrivateKey, thorChain *testchain.Chain) (tx.Transactions, error),
+) func(chain *testchain.Chain) (tx.Transactions, error) {
+ return func(chain *testchain.Chain) (tx.Transactions, error) {
+ // Ensure there are accounts available
+ if len(accounts) == 0 {
+ return nil, fmt.Errorf("no accounts available to pick a random sender")
+ }
+
+ // Securely pick a random index
+ maxLen := big.NewInt(int64(len(accounts)))
+ randomIndex, err := rand.Int(rand.Reader, maxLen)
+ if err != nil {
+ return nil, fmt.Errorf("failed to generate random index: %v", err)
+ }
+
+ // Use the selected account to create transactions
+ sender := accounts[randomIndex.Int64()]
+ return createTxFun(sender.PrivateKey, chain)
+ }
+}
+
+func createAccounts(b *testing.B, accountNo int) []genesis.DevAccount {
+ var accs []genesis.DevAccount
+
+ for i := 0; i < accountNo; i++ {
+ pk, err := crypto.GenerateKey()
+ require.NoError(b, err)
+ addr := crypto.PubkeyToAddress(pk.PublicKey)
+ accs = append(accs, genesis.DevAccount{Address: thor.Address(addr), PrivateKey: pk})
+ }
+
+ return accs
+}
+
+func openTempMainDB(dir string) (*muxdb.MuxDB, error) {
+ cacheMB := normalizeCacheSize(4096)
+
+ fdCache := suggestFDCache()
+
+ opts := muxdb.Options{
+ TrieNodeCacheSizeMB: cacheMB,
+ TrieCachedNodeTTL: 30, // 5min
+ TrieDedupedPartitionFactor: math.MaxUint32,
+ TrieWillCleanHistory: true,
+ OpenFilesCacheCapacity: fdCache,
+ ReadCacheMB: 256, // rely on os page cache other than huge db read cache.
+ WriteBufferMB: 128,
+ }
+
+ // go-ethereum stuff
+ // Ensure Go's GC ignores the database cache for trigger percentage
+ totalCacheMB := cacheMB + opts.ReadCacheMB + opts.WriteBufferMB*2
+ gogc := math.Max(10, math.Min(100, 50/(float64(totalCacheMB)/1024)))
+
+ debug.SetGCPercent(int(gogc))
+
+ if opts.TrieWillCleanHistory {
+ opts.TrieHistPartitionFactor = 256
+ } else {
+ opts.TrieHistPartitionFactor = 524288
+ }
+
+ db, err := muxdb.Open(filepath.Join(dir, "maindb"), &opts)
+ if err != nil {
+ return nil, errors.Wrapf(err, "open main database [%v]", dir)
+ }
+ return db, nil
+}
+
+func normalizeCacheSize(sizeMB int) int {
+ if sizeMB < 128 {
+ sizeMB = 128
+ }
+
+ var mem gosigar.Mem
+ if err := mem.Get(); err != nil {
+ fmt.Println("failed to get total mem:", "err", err)
+ } else {
+ total := int(mem.Total / 1024 / 1024)
+ half := total / 2
+
+ // limit to not less than total/2 and up to total-2GB
+ limitMB := total - 2048
+ if limitMB < half {
+ limitMB = half
+ }
+
+ if sizeMB > limitMB {
+ sizeMB = limitMB
+ fmt.Println("cache size(MB) limited", "limit", limitMB)
+ }
+ }
+ return sizeMB
+}
+
+func suggestFDCache() int {
+ limit, err := fdlimit.Current()
+ if err != nil {
+ fmt.Println("unable to get fdlimit", "error", err)
+ return 500
+ }
+ if limit <= 1024 {
+ fmt.Println("low fd limit, increase it if possible", "limit", limit)
+ }
+
+ n := limit / 2
+ if n > 5120 {
+ return 5120
+ }
+ return n
+}
diff --git a/cmd/thor/node/packer_loop.go b/cmd/thor/node/packer_loop.go
index 675ab041b..f7d43413f 100644
--- a/cmd/thor/node/packer_loop.go
+++ b/cmd/thor/node/packer_loop.go
@@ -13,6 +13,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/pkg/errors"
+ "github.com/vechain/thor/v2/log"
"github.com/vechain/thor/v2/packer"
"github.com/vechain/thor/v2/thor"
"github.com/vechain/thor/v2/tx"
@@ -168,8 +169,16 @@ func (n *Node) pack(flow *packer.Flow) (err error) {
return errors.Wrap(err, "commit state")
}
+ // sync the log-writing task
+ if logEnabled {
+ if err := n.logWorker.Sync(); err != nil {
+ log.Warn("failed to write logs", "err", err)
+ n.logDBFailed = true
+ }
+ }
+
// add the new block into repository
- if err := n.repo.AddBlock(newBlock, receipts, conflicts); err != nil {
+ if err := n.repo.AddBlock(newBlock, receipts, conflicts, true); err != nil {
return errors.Wrap(err, "add block")
}
@@ -181,18 +190,6 @@ func (n *Node) pack(flow *packer.Flow) (err error) {
}
realElapsed := mclock.Now() - startTime
- // sync the log-writing task
- if logEnabled {
- if err := n.logWorker.Sync(); err != nil {
- logger.Warn("failed to write logs", "err", err)
- n.logDBFailed = true
- }
- }
-
- if err := n.repo.SetBestBlockID(newBlock.Header().ID()); err != nil {
- return err
- }
-
n.processFork(newBlock, oldBest.Header.ID())
commitElapsed := mclock.Now() - startTime - execElapsed
diff --git a/cmd/thor/optimizer/optimizer.go b/cmd/thor/optimizer/optimizer.go
deleted file mode 100644
index b61e75813..000000000
--- a/cmd/thor/optimizer/optimizer.go
+++ /dev/null
@@ -1,292 +0,0 @@
-// Copyright (c) 2019 The VeChainThor developers
-
-// Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying
-// file LICENSE or
-
-package optimizer
-
-import (
- "context"
- "fmt"
- "math"
- "time"
-
- "github.com/ethereum/go-ethereum/rlp"
- "github.com/pkg/errors"
- "github.com/vechain/thor/v2/block"
- "github.com/vechain/thor/v2/chain"
- "github.com/vechain/thor/v2/co"
- "github.com/vechain/thor/v2/log"
- "github.com/vechain/thor/v2/muxdb"
- "github.com/vechain/thor/v2/state"
- "github.com/vechain/thor/v2/thor"
- "github.com/vechain/thor/v2/trie"
-)
-
-var logger = log.WithContext("pkg", "optimizer")
-
-const (
- propsStoreName = "optimizer.props"
- statusKey = "status"
-)
-
-// Optimizer is a background task to optimize tries.
-type Optimizer struct {
- db *muxdb.MuxDB
- repo *chain.Repository
- ctx context.Context
- cancel func()
- goes co.Goes
-}
-
-// New creates and starts the optimizer.
-func New(db *muxdb.MuxDB, repo *chain.Repository, prune bool) *Optimizer {
- ctx, cancel := context.WithCancel(context.Background())
- o := &Optimizer{
- db: db,
- repo: repo,
- ctx: ctx,
- cancel: cancel,
- }
- o.goes.Go(func() {
- if err := o.loop(prune); err != nil {
- if err != context.Canceled && errors.Cause(err) != context.Canceled {
- logger.Warn("optimizer interrupted", "error", err)
- }
- }
- })
- return o
-}
-
-// Stop stops the optimizer.
-func (p *Optimizer) Stop() {
- p.cancel()
- p.goes.Wait()
-}
-
-// loop is the main loop.
-func (p *Optimizer) loop(prune bool) error {
- logger.Info("optimizer started")
-
- const (
- period = 2000 // the period to update leafbank.
- prunePeriod = 10000 // the period to prune tries.
- pruneReserved = 70000 // must be > thor.MaxStateHistory
- )
-
- var (
- status status
- lastLogTime = time.Now().UnixNano()
- propsStore = p.db.NewStore(propsStoreName)
- )
- if err := status.Load(propsStore); err != nil {
- return errors.Wrap(err, "load status")
- }
-
- for {
- // select target
- target := status.Base + period
-
- targetChain, err := p.awaitUntilSteady(target)
- if err != nil {
- return errors.Wrap(err, "awaitUntilSteady")
- }
- startTime := time.Now().UnixNano()
-
- // dump account/storage trie leaves into leafbank
- if err := p.dumpStateLeaves(targetChain, status.Base, target); err != nil {
- return errors.Wrap(err, "dump state trie leaves")
- }
-
- // prune index/account/storage tries
- if prune && target > pruneReserved {
- if pruneTarget := target - pruneReserved; pruneTarget >= status.PruneBase+prunePeriod {
- if err := p.pruneTries(targetChain, status.PruneBase, pruneTarget); err != nil {
- return errors.Wrap(err, "prune tries")
- }
- status.PruneBase = pruneTarget
- }
- }
-
- if now := time.Now().UnixNano(); now-lastLogTime > int64(time.Second*20) {
- lastLogTime = now
- logger.Info("optimized tries",
- "range", fmt.Sprintf("#%v+%v", status.Base, target-status.Base),
- "et", time.Duration(now-startTime),
- )
- }
- status.Base = target
- if err := status.Save(propsStore); err != nil {
- return errors.Wrap(err, "save status")
- }
- }
-}
-
-// newStorageTrieIfUpdated creates a storage trie object from the account leaf if the storage trie updated since base.
-func (p *Optimizer) newStorageTrieIfUpdated(accLeaf *trie.Leaf, base uint32) *muxdb.Trie {
- if len(accLeaf.Meta) == 0 {
- return nil
- }
-
- var (
- acc state.Account
- meta state.AccountMetadata
- )
- if err := rlp.DecodeBytes(accLeaf.Value, &acc); err != nil {
- panic(errors.Wrap(err, "decode account"))
- }
-
- if err := rlp.DecodeBytes(accLeaf.Meta, &meta); err != nil {
- panic(errors.Wrap(err, "decode account metadata"))
- }
-
- if meta.StorageCommitNum >= base {
- return p.db.NewTrie(
- state.StorageTrieName(meta.StorageID),
- thor.BytesToBytes32(acc.StorageRoot),
- meta.StorageCommitNum,
- meta.StorageDistinctNum,
- )
- }
- return nil
-}
-
-// dumpStateLeaves dumps account/storage trie leaves updated within [base, target) into leafbank.
-func (p *Optimizer) dumpStateLeaves(targetChain *chain.Chain, base, target uint32) error {
- h, err := targetChain.GetBlockSummary(target - 1)
- if err != nil {
- return err
- }
- accTrie := p.db.NewTrie(state.AccountTrieName, h.Header.StateRoot(), h.Header.Number(), h.Conflicts)
- accTrie.SetNoFillCache(true)
-
- var sTries []*muxdb.Trie
- if err := accTrie.DumpLeaves(p.ctx, base, h.Header.Number(), func(leaf *trie.Leaf) *trie.Leaf {
- if sTrie := p.newStorageTrieIfUpdated(leaf, base); sTrie != nil {
- sTries = append(sTries, sTrie)
- }
- return leaf
- }); err != nil {
- return err
- }
- for _, sTrie := range sTries {
- sTrie.SetNoFillCache(true)
- if err := sTrie.DumpLeaves(p.ctx, base, h.Header.Number(), func(leaf *trie.Leaf) *trie.Leaf {
- return &trie.Leaf{Value: leaf.Value} // skip metadata to save space
- }); err != nil {
- return err
- }
- }
- return nil
-}
-
-// dumpTrieNodes dumps index/account/storage trie nodes committed within [base, target] into deduped space.
-func (p *Optimizer) dumpTrieNodes(targetChain *chain.Chain, base, target uint32) error {
- summary, err := targetChain.GetBlockSummary(target - 1)
- if err != nil {
- return err
- }
-
- // dump index trie
- indexTrie := p.db.NewNonCryptoTrie(chain.IndexTrieName, trie.NonCryptoNodeHash, summary.Header.Number(), summary.Conflicts)
- indexTrie.SetNoFillCache(true)
-
- if err := indexTrie.DumpNodes(p.ctx, base, nil); err != nil {
- return err
- }
-
- // dump account trie
- accTrie := p.db.NewTrie(state.AccountTrieName, summary.Header.StateRoot(), summary.Header.Number(), summary.Conflicts)
- accTrie.SetNoFillCache(true)
-
- var sTries []*muxdb.Trie
- if err := accTrie.DumpNodes(p.ctx, base, func(leaf *trie.Leaf) {
- if sTrie := p.newStorageTrieIfUpdated(leaf, base); sTrie != nil {
- sTries = append(sTries, sTrie)
- }
- }); err != nil {
- return err
- }
-
- // dump storage tries
- for _, sTrie := range sTries {
- sTrie.SetNoFillCache(true)
- if err := sTrie.DumpNodes(p.ctx, base, nil); err != nil {
- return err
- }
- }
- return nil
-}
-
-// pruneTries prunes index/account/storage tries in the range [base, target).
-func (p *Optimizer) pruneTries(targetChain *chain.Chain, base, target uint32) error {
- if err := p.dumpTrieNodes(targetChain, base, target); err != nil {
- return errors.Wrap(err, "dump trie nodes")
- }
-
- cleanBase := base
- if base == 0 {
- // keeps genesis state history like the previous version.
- cleanBase = 1
- }
- if err := p.db.CleanTrieHistory(p.ctx, cleanBase, target); err != nil {
- return errors.Wrap(err, "clean trie history")
- }
- return nil
-}
-
-// awaitUntilSteady waits until the target block number becomes almost final(steady),
-// and returns the steady chain.
-func (p *Optimizer) awaitUntilSteady(target uint32) (*chain.Chain, error) {
- // the knowned steady id is newer than target
- if steadyID := p.repo.SteadyBlockID(); block.Number(steadyID) >= target {
- return p.repo.NewChain(steadyID), nil
- }
-
- const windowSize = 100000
-
- backoff := uint32(0)
- for {
- best := p.repo.BestBlockSummary()
- bestNum := best.Header.Number()
- if bestNum > target+backoff {
- var meanScore float64
- if bestNum > windowSize {
- baseNum := bestNum - windowSize
- baseHeader, err := p.repo.NewChain(best.Header.ID()).GetBlockHeader(baseNum)
- if err != nil {
- return nil, err
- }
- meanScore = math.Round(float64(best.Header.TotalScore()-baseHeader.TotalScore()) / float64(windowSize))
- } else {
- meanScore = math.Round(float64(best.Header.TotalScore()) / float64(bestNum))
- }
- set := make(map[thor.Address]struct{})
- // reverse iterate the chain and collect signers.
- for i, prev := 0, best.Header; i < int(meanScore*3) && prev.Number() >= target; i++ {
- signer, _ := prev.Signer()
- set[signer] = struct{}{}
- if len(set) >= int(math.Round((meanScore+1)/2)) {
- // got enough unique signers
- steadyID := prev.ID()
- if err := p.repo.SetSteadyBlockID(steadyID); err != nil {
- return nil, err
- }
- return p.repo.NewChain(steadyID), nil
- }
- parent, err := p.repo.GetBlockSummary(prev.ParentID())
- if err != nil {
- return nil, err
- }
- prev = parent.Header
- }
- backoff += uint32(meanScore)
- } else {
- select {
- case <-p.ctx.Done():
- return nil, p.ctx.Err()
- case <-time.After(time.Second):
- }
- }
- }
-}
diff --git a/cmd/thor/pruner/pruner.go b/cmd/thor/pruner/pruner.go
new file mode 100644
index 000000000..2fca9da92
--- /dev/null
+++ b/cmd/thor/pruner/pruner.go
@@ -0,0 +1,241 @@
+// Copyright (c) 2019 The VeChainThor developers
+
+// Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying
+// file LICENSE or
+
+package pruner
+
+import (
+ "context"
+ "fmt"
+ "math"
+ "time"
+
+ "github.com/ethereum/go-ethereum/rlp"
+ "github.com/pkg/errors"
+ "github.com/vechain/thor/v2/chain"
+ "github.com/vechain/thor/v2/co"
+ "github.com/vechain/thor/v2/log"
+ "github.com/vechain/thor/v2/muxdb"
+ "github.com/vechain/thor/v2/state"
+ "github.com/vechain/thor/v2/thor"
+ "github.com/vechain/thor/v2/trie"
+)
+
+var logger = log.WithContext("pkg", "pruner")
+
+const (
+ propsStoreName = "pruner.props"
+ statusKey = "status"
+)
+
+// Pruner is a background task to prune tries.
+type Pruner struct {
+ db *muxdb.MuxDB
+ repo *chain.Repository
+ ctx context.Context
+ cancel func()
+ goes co.Goes
+}
+
+// New creates and starts the pruner.
+func New(db *muxdb.MuxDB, repo *chain.Repository) *Pruner {
+ ctx, cancel := context.WithCancel(context.Background())
+ o := &Pruner{
+ db: db,
+ repo: repo,
+ ctx: ctx,
+ cancel: cancel,
+ }
+ o.goes.Go(func() {
+ if err := o.loop(); err != nil {
+ if err != context.Canceled && errors.Cause(err) != context.Canceled {
+ logger.Warn("pruner interrupted", "error", err)
+ }
+ }
+ })
+ return o
+}
+
+// Stop stops the pruner.
+func (p *Pruner) Stop() {
+ p.cancel()
+ p.goes.Wait()
+}
+
+// loop is the main loop.
+func (p *Pruner) loop() error {
+ logger.Info("pruner started")
+
+ var (
+ status status
+ propsStore = p.db.NewStore(propsStoreName)
+ )
+ if err := status.Load(propsStore); err != nil {
+ return errors.Wrap(err, "load status")
+ }
+
+ for {
+ period := uint32(65536)
+ if int64(p.repo.BestBlockSummary().Header.Timestamp()) > time.Now().Unix()-10*24*3600 {
+ // use smaller period when nearly synced
+ period = 8192
+ }
+
+ // select target
+ target := status.Base + period
+
+ targetChain, err := p.awaitUntilSteady(target + thor.MaxStateHistory)
+ if err != nil {
+ return errors.Wrap(err, "awaitUntilSteady")
+ }
+ startTime := time.Now().UnixNano()
+
+ // prune index/account/storage tries
+ if err := p.pruneTries(targetChain, status.Base, target); err != nil {
+ return errors.Wrap(err, "prune tries")
+ }
+
+ logger.Info("prune tries",
+ "range", fmt.Sprintf("#%v+%v", status.Base, target-status.Base),
+ "et", time.Duration(time.Now().UnixNano()-startTime),
+ )
+
+ status.Base = target
+ if err := status.Save(propsStore); err != nil {
+ return errors.Wrap(err, "save status")
+ }
+ }
+}
+
+// newStorageTrieIfUpdated creates a storage trie object from the account leaf if the storage trie updated since base.
+func (p *Pruner) newStorageTrieIfUpdated(accLeaf *trie.Leaf, base uint32) *muxdb.Trie {
+ if len(accLeaf.Meta) == 0 {
+ return nil
+ }
+
+ var (
+ acc state.Account
+ meta state.AccountMetadata
+ )
+ if err := rlp.DecodeBytes(accLeaf.Value, &acc); err != nil {
+ panic(errors.Wrap(err, "decode account"))
+ }
+
+ if err := rlp.DecodeBytes(accLeaf.Meta, &meta); err != nil {
+ panic(errors.Wrap(err, "decode account metadata"))
+ }
+
+ if meta.StorageMajorVer >= base {
+ return p.db.NewTrie(
+ state.StorageTrieName(meta.StorageID),
+ trie.Root{
+ Hash: thor.BytesToBytes32(acc.StorageRoot),
+ Ver: trie.Version{
+ Major: meta.StorageMajorVer,
+ Minor: meta.StorageMinorVer,
+ },
+ })
+ }
+ return nil
+}
+
+// checkpointTries transfers tries' standalone nodes, whose major version within [base, target).
+func (p *Pruner) checkpointTries(targetChain *chain.Chain, base, target uint32) error {
+ summary, err := targetChain.GetBlockSummary(target - 1)
+ if err != nil {
+ return err
+ }
+
+ // checkpoint index trie
+ indexTrie := p.db.NewTrie(chain.IndexTrieName, summary.IndexRoot())
+ indexTrie.SetNoFillCache(true)
+
+ if err := indexTrie.Checkpoint(p.ctx, base, nil); err != nil {
+ return err
+ }
+
+ // checkpoint account trie
+ accTrie := p.db.NewTrie(state.AccountTrieName, summary.Root())
+ accTrie.SetNoFillCache(true)
+
+ var sTries []*muxdb.Trie
+ if err := accTrie.Checkpoint(p.ctx, base, func(leaf *trie.Leaf) {
+ if sTrie := p.newStorageTrieIfUpdated(leaf, base); sTrie != nil {
+ sTries = append(sTries, sTrie)
+ }
+ }); err != nil {
+ return err
+ }
+
+ // checkpoint storage tries
+ for _, sTrie := range sTries {
+ sTrie.SetNoFillCache(true)
+ if err := sTrie.Checkpoint(p.ctx, base, nil); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// pruneTries prunes index/account/storage tries in the range [base, target).
+func (p *Pruner) pruneTries(targetChain *chain.Chain, base, target uint32) error {
+ if err := p.checkpointTries(targetChain, base, target); err != nil {
+ return errors.Wrap(err, "checkpoint tries")
+ }
+
+ if err := p.db.DeleteTrieHistoryNodes(p.ctx, base, target); err != nil {
+ return errors.Wrap(err, "delete trie history")
+ }
+ return nil
+}
+
+// awaitUntilSteady waits until the target block number becomes almost final(steady),
+// and returns the steady chain.
+//
+// TODO: using finality flag
+func (p *Pruner) awaitUntilSteady(target uint32) (*chain.Chain, error) {
+ const windowSize = 100000
+
+ backoff := uint32(0)
+ for {
+ best := p.repo.BestBlockSummary()
+ bestNum := best.Header.Number()
+ if bestNum > target+backoff {
+ var meanScore float64
+ if bestNum > windowSize {
+ baseNum := bestNum - windowSize
+ baseHeader, err := p.repo.NewChain(best.Header.ID()).GetBlockHeader(baseNum)
+ if err != nil {
+ return nil, err
+ }
+ meanScore = math.Round(float64(best.Header.TotalScore()-baseHeader.TotalScore()) / float64(windowSize))
+ } else {
+ meanScore = math.Round(float64(best.Header.TotalScore()) / float64(bestNum))
+ }
+ set := make(map[thor.Address]struct{})
+ // reverse iterate the chain and collect signers.
+ for i, prev := 0, best.Header; i < int(meanScore*3) && prev.Number() >= target; i++ {
+ signer, _ := prev.Signer()
+ set[signer] = struct{}{}
+ if len(set) >= int(math.Round((meanScore+1)/2)) {
+ // got enough unique signers
+ steadyID := prev.ID()
+ return p.repo.NewChain(steadyID), nil
+ }
+ parent, err := p.repo.GetBlockSummary(prev.ParentID())
+ if err != nil {
+ return nil, err
+ }
+ prev = parent.Header
+ }
+ backoff += uint32(meanScore)
+ } else {
+ select {
+ case <-p.ctx.Done():
+ return nil, p.ctx.Err()
+ case <-time.After(time.Second):
+ }
+ }
+ }
+}
diff --git a/cmd/thor/optimizer/optimizer_test.go b/cmd/thor/pruner/pruner_test.go
similarity index 61%
rename from cmd/thor/optimizer/optimizer_test.go
rename to cmd/thor/pruner/pruner_test.go
index af3f729c7..fb714fa80 100644
--- a/cmd/thor/optimizer/optimizer_test.go
+++ b/cmd/thor/pruner/pruner_test.go
@@ -3,7 +3,7 @@
// Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying
// file LICENSE or
-package optimizer
+package pruner
import (
"context"
@@ -14,6 +14,7 @@ import (
"os"
"path/filepath"
"testing"
+ "time"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/rlp"
@@ -28,14 +29,18 @@ import (
"github.com/vechain/thor/v2/tx"
)
-func fastForwardTo(from uint32, to uint32, db *muxdb.MuxDB, steadyID thor.Bytes32) (thor.Bytes32, error) {
- id := thor.Bytes32{}
+func fastForwardTo(from uint32, to uint32, db *muxdb.MuxDB) (thor.Bytes32, error) {
+ var (
+ parentID thor.Bytes32
+ id thor.Bytes32
+ )
+ binary.BigEndian.PutUint32(parentID[:], to-1)
binary.BigEndian.PutUint32(id[:], to)
+ blk := new(block.Builder).ParentID(parentID).Build()
var summary = &chain.BlockSummary{
- Header: &block.Header{},
+ Header: blk.Header(),
Conflicts: 0,
- SteadyNum: block.Number(steadyID),
}
data, err := rlp.EncodeToBytes(summary)
@@ -43,33 +48,32 @@ func fastForwardTo(from uint32, to uint32, db *muxdb.MuxDB, steadyID thor.Bytes3
return thor.Bytes32{}, err
}
- store := db.NewStore("chain.data")
+ store := db.NewStore("chain.hdr")
err = store.Put(id.Bytes(), data)
if err != nil {
return thor.Bytes32{}, err
}
- trie := db.NewNonCryptoTrie("i", trie.NonCryptoNodeHash, from, 0)
- if err := trie.Update(id[:4], id[:], nil); err != nil {
+ indexTrie := db.NewTrie("i", trie.Root{
+ Hash: thor.BytesToBytes32([]byte{1}),
+ Ver: trie.Version{
+ Major: from,
+ Minor: 0,
+ },
+ })
+ if err := indexTrie.Update(id[:4], id[:], nil); err != nil {
return thor.Bytes32{}, err
}
- if steadyID == (thor.Bytes32{}) {
- if err := trie.Update(steadyID[:4], steadyID[:], nil); err != nil {
- return thor.Bytes32{}, err
- }
- }
-
- _, commit := trie.Stage(to, 0)
- err = commit()
- if err != nil {
+ if err := indexTrie.Commit(trie.Version{Major: to, Minor: 0}, true); err != nil {
return thor.Bytes32{}, err
}
return id, nil
}
func newBlock(parentID thor.Bytes32, score uint64, stateRoot thor.Bytes32, priv *ecdsa.PrivateKey) *block.Block {
- blk := new(block.Builder).ParentID(parentID).TotalScore(score).StateRoot(stateRoot).Build()
+ now := uint64(time.Now().Unix())
+ blk := new(block.Builder).ParentID(parentID).TotalScore(score).StateRoot(stateRoot).Timestamp(now - now%10 - 10).Build()
if priv != nil {
sig, _ := crypto.Sign(blk.Header().SigningHash().Bytes(), priv)
@@ -79,18 +83,14 @@ func newBlock(parentID thor.Bytes32, score uint64, stateRoot thor.Bytes32, priv
}
func TestStatus(t *testing.T) {
- db := muxdb.NewMem()
-
- store := db.NewStore("test")
+ store := muxdb.NewMem().NewStore("test")
s := &status{}
err := s.Load(store)
assert.Nil(t, err, "load should not error")
assert.Equal(t, uint32(0), s.Base)
- assert.Equal(t, uint32(0), s.PruneBase)
s.Base = 1
- s.PruneBase = 2
err = s.Save(store)
assert.Nil(t, err, "save should not error")
@@ -99,18 +99,17 @@ func TestStatus(t *testing.T) {
err = s2.Load(store)
assert.Nil(t, err, "load should not error")
assert.Equal(t, uint32(1), s.Base)
- assert.Equal(t, uint32(2), s.PruneBase)
}
-func TestNewOptimizer(t *testing.T) {
+func TestNewPruner(t *testing.T) {
db := muxdb.NewMem()
stater := state.NewStater(db)
gene := genesis.NewDevnet()
b0, _, _, _ := gene.Build(stater)
repo, _ := chain.NewRepository(db, b0)
- op := New(db, repo, false)
- op.Stop()
+ pr := New(db, repo)
+ pr.Stop()
}
func newTempFileDB() (*muxdb.MuxDB, func() error, error) {
@@ -118,9 +117,7 @@ func newTempFileDB() (*muxdb.MuxDB, func() error, error) {
opts := muxdb.Options{
TrieNodeCacheSizeMB: 128,
- TrieRootCacheCapacity: 256,
TrieCachedNodeTTL: 30, // 5min
- TrieLeafBankSlotCapacity: 256,
TrieDedupedPartitionFactor: math.MaxUint32,
TrieWillCleanHistory: true,
OpenFilesCacheCapacity: 512,
@@ -134,7 +131,7 @@ func newTempFileDB() (*muxdb.MuxDB, func() error, error) {
return nil, nil, err
}
- closeFunc := func() error {
+ close := func() error {
err = db.Close()
if err != nil {
return err
@@ -146,65 +143,7 @@ func newTempFileDB() (*muxdb.MuxDB, func() error, error) {
return nil
}
- return db, closeFunc, nil
-}
-
-func TestProcessDump(t *testing.T) {
- db, closeDB, err := newTempFileDB()
- assert.Nil(t, err)
- stater := state.NewStater(db)
- gene := genesis.NewDevnet()
- b0, _, _, _ := gene.Build(stater)
- repo, _ := chain.NewRepository(db, b0)
-
- devAccounts := genesis.DevAccounts()
-
- // fast forward to 1999
- parentID, err := fastForwardTo(0, 1999, db, repo.SteadyBlockID())
- assert.Nil(t, err)
-
- var parentScore uint64 = 1999 * 2
- // add new blocks with signature
- for i := 0; i < 3; i++ {
- blk := newBlock(parentID, parentScore+2, b0.Header().StateRoot(), devAccounts[i%2].PrivateKey)
- err := repo.AddBlock(blk, tx.Receipts{}, 0)
- assert.Nil(t, err)
-
- parentID = blk.Header().ID()
- parentScore = blk.Header().TotalScore()
- }
-
- repo.SetBestBlockID(parentID)
-
- op := New(db, repo, false)
- op.Stop()
-
- var s status
- assert.Nil(t, s.Load(op.db.NewStore(propsStoreName)))
- assert.Equal(t, uint32(2000), s.Base)
-
- // fast forward to 3999
- parentID, err = fastForwardTo(block.Number(parentID), 3999, db, repo.SteadyBlockID())
- assert.Nil(t, err)
-
- // add new blocks with signature
- for i := 0; i < 3; i++ {
- blk := newBlock(parentID, parentScore+2, b0.Header().StateRoot(), devAccounts[i%2].PrivateKey)
- err := repo.AddBlock(blk, tx.Receipts{}, 0)
- assert.Nil(t, err)
-
- parentID = blk.Header().ID()
- parentScore = blk.Header().TotalScore()
- }
- repo.SetBestBlockID(parentID)
-
- op = New(db, repo, true)
- op.Stop()
-
- assert.Nil(t, s.Load(op.db.NewStore(propsStoreName)))
- assert.Equal(t, uint32(4000), s.Base)
-
- closeDB()
+ return db, close, nil
}
func TestWaitUntil(t *testing.T) {
@@ -216,7 +155,7 @@ func TestWaitUntil(t *testing.T) {
devAccounts := genesis.DevAccounts()
ctx, cancel := context.WithCancel(context.Background())
- op := &Optimizer{
+ pruner := &Pruner{
repo: repo,
db: db,
ctx: ctx,
@@ -224,18 +163,17 @@ func TestWaitUntil(t *testing.T) {
}
parentID := b0.Header().ID()
- var parentScore uint64
+ var parentScore uint64 = 0
for i := 0; i < 6; i++ {
blk := newBlock(parentID, parentScore+2, b0.Header().StateRoot(), devAccounts[0].PrivateKey)
- err := repo.AddBlock(blk, tx.Receipts{}, 0)
+ err := repo.AddBlock(blk, tx.Receipts{}, 0, true)
assert.Nil(t, err)
parentID = blk.Header().ID()
parentScore = blk.Header().TotalScore()
}
- repo.SetBestBlockID(parentID)
- parentID, err := fastForwardTo(block.Number(parentID), 100000-1, db, repo.SteadyBlockID())
+ parentID, err := fastForwardTo(block.Number(parentID), 100000-1, db)
assert.Nil(t, err)
parentScore = (100000 - 1) * 2
@@ -243,13 +181,12 @@ func TestWaitUntil(t *testing.T) {
signer := devAccounts[0].PrivateKey
score := parentScore + 1
blk := newBlock(parentID, score, b0.Header().StateRoot(), signer)
- err := repo.AddBlock(blk, tx.Receipts{}, 0)
+ err := repo.AddBlock(blk, tx.Receipts{}, 0, true)
assert.Nil(t, err)
parentID = blk.Header().ID()
parentScore = blk.Header().TotalScore()
}
- repo.SetBestBlockID(parentID)
go func() {
cancel()
@@ -258,7 +195,7 @@ func TestWaitUntil(t *testing.T) {
// not enough signer, will wait for 1 sec
// backoff will increase for more waiting
// cancel here and restart a new test case
- _, err = op.awaitUntilSteady(100000)
+ _, err = pruner.awaitUntilSteady(100000)
assert.NotNil(t, err)
for i := 0; i < 3; i++ {
@@ -266,24 +203,23 @@ func TestWaitUntil(t *testing.T) {
score := parentScore + 2
blk := newBlock(parentID, score, b0.Header().StateRoot(), signer)
- err := repo.AddBlock(blk, tx.Receipts{}, 0)
+ err := repo.AddBlock(blk, tx.Receipts{}, 0, true)
assert.Nil(t, err)
parentID = blk.Header().ID()
parentScore = blk.Header().TotalScore()
}
- repo.SetBestBlockID(parentID)
ctx, cancel = context.WithCancel(context.Background())
- op.ctx = ctx
- op.cancel = cancel
+ pruner.ctx = ctx
+ pruner.cancel = cancel
- chain, err := op.awaitUntilSteady(100000)
+ chain, err := pruner.awaitUntilSteady(100000)
assert.Nil(t, err)
assert.True(t, block.Number(chain.HeadID()) >= 10000)
}
-func TestDumpAndPrune(t *testing.T) {
+func TestPrune(t *testing.T) {
db, closeDB, err := newTempFileDB()
assert.Nil(t, err)
@@ -294,7 +230,7 @@ func TestDumpAndPrune(t *testing.T) {
devAccounts := genesis.DevAccounts()
ctx, cancel := context.WithCancel(context.Background())
- op := &Optimizer{
+ pruner := &Pruner{
repo: repo,
db: db,
ctx: ctx,
@@ -311,31 +247,26 @@ func TestDumpAndPrune(t *testing.T) {
for i := 0; i < 9; i++ {
blk := newBlock(parentID, 10, b0.Header().StateRoot(), nil)
- err := repo.AddBlock(blk, tx.Receipts{}, 0)
+ err := repo.AddBlock(blk, tx.Receipts{}, 0, false)
assert.Nil(t, err)
parentID = blk.Header().ID()
}
- st := stater.NewState(b0.Header().StateRoot(), b0.Header().Number(), 0, 0)
+ st := stater.NewState(trie.Root{Hash: b0.Header().StateRoot(), Ver: trie.Version{Major: 0, Minor: 0}})
st.SetBalance(acc1, big.NewInt(1e18))
st.SetCode(acc2, code)
st.SetStorage(acc2, key, value)
- stage, err := st.Stage(10, 0)
+ stage, err := st.Stage(trie.Version{Major: 10, Minor: 0})
assert.Nil(t, err)
root, err := stage.Commit()
assert.Nil(t, err)
blk := newBlock(parentID, 10, root, devAccounts[0].PrivateKey)
- err = repo.AddBlock(blk, tx.Receipts{}, 0)
+ err = repo.AddBlock(blk, tx.Receipts{}, 0, true)
assert.Nil(t, err)
parentID = blk.Header().ID()
- repo.SetBestBlockID(parentID)
-
- err = op.dumpStateLeaves(repo.NewBestChain(), 0, block.Number(parentID)+1)
- assert.Nil(t, err)
-
- err = op.pruneTries(repo.NewBestChain(), 0, block.Number(parentID)+1)
+ err = pruner.pruneTries(repo.NewBestChain(), 0, block.Number(parentID)+1)
assert.Nil(t, err)
closeDB()
diff --git a/cmd/thor/optimizer/status.go b/cmd/thor/pruner/status.go
similarity index 92%
rename from cmd/thor/optimizer/status.go
rename to cmd/thor/pruner/status.go
index 8980a128e..202dfe98a 100644
--- a/cmd/thor/optimizer/status.go
+++ b/cmd/thor/pruner/status.go
@@ -3,7 +3,7 @@
// Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying
// file LICENSE or
-package optimizer
+package pruner
import (
"encoding/json"
@@ -12,8 +12,7 @@ import (
)
type status struct {
- Base uint32
- PruneBase uint32
+ Base uint32
}
func (s *status) Load(getter kv.Getter) error {
diff --git a/cmd/thor/solo/solo.go b/cmd/thor/solo/solo.go
index 638aa74ff..fefae0e74 100644
--- a/cmd/thor/solo/solo.go
+++ b/cmd/thor/solo/solo.go
@@ -174,12 +174,6 @@ func (s *Solo) packing(pendingTxs tx.Transactions, onDemand bool) error {
return errors.WithMessage(err, "commit state")
}
- // ignore fork when solo
- if err := s.repo.AddBlock(b, receipts, 0); err != nil {
- return errors.WithMessage(err, "commit block")
- }
- realElapsed := mclock.Now() - startTime
-
if !s.skipLogs {
w := s.logDB.NewWriter()
if err := w.Write(b, receipts); err != nil {
@@ -191,9 +185,11 @@ func (s *Solo) packing(pendingTxs tx.Transactions, onDemand bool) error {
}
}
- if err := s.repo.SetBestBlockID(b.Header().ID()); err != nil {
- return errors.WithMessage(err, "set best block")
+ // ignore fork when solo
+ if err := s.repo.AddBlock(b, receipts, 0, true); err != nil {
+ return errors.WithMessage(err, "commit block")
}
+ realElapsed := mclock.Now() - startTime
commitElapsed := mclock.Now() - startTime - execElapsed
@@ -216,7 +212,7 @@ func (s *Solo) packing(pendingTxs tx.Transactions, onDemand bool) error {
// The init function initializes the chain parameters.
func (s *Solo) init(ctx context.Context) error {
best := s.repo.BestBlockSummary()
- newState := s.stater.NewState(best.Header.StateRoot(), best.Header.Number(), best.Conflicts, best.SteadyNum)
+ newState := s.stater.NewState(best.Root())
currentBGP, err := builtin.Params.Native(newState).Get(thor.KeyBaseGasPrice)
if err != nil {
return errors.WithMessage(err, "failed to get the current base gas price")
diff --git a/cmd/thor/solo/solo_test.go b/cmd/thor/solo/solo_test.go
index a4df3f35d..6fa2fde73 100644
--- a/cmd/thor/solo/solo_test.go
+++ b/cmd/thor/solo/solo_test.go
@@ -42,7 +42,7 @@ func TestInitSolo(t *testing.T) {
// check the gas price
best := solo.repo.BestBlockSummary()
- newState := solo.stater.NewState(best.Header.StateRoot(), best.Header.Number(), best.Conflicts, best.SteadyNum)
+ newState := solo.stater.NewState(best.Root())
currentBGP, err := builtin.Params.Native(newState).Get(thor.KeyBaseGasPrice)
assert.Nil(t, err)
assert.Equal(t, baseGasPrice, currentBGP)
diff --git a/cmd/thor/utils.go b/cmd/thor/utils.go
index 5c6799354..396b153ae 100644
--- a/cmd/thor/utils.go
+++ b/cmd/thor/utils.go
@@ -296,7 +296,7 @@ func makeInstanceDir(ctx *cli.Context, gene *genesis.Genesis) (string, error) {
suffix = "-full"
}
- instanceDir := filepath.Join(dataDir, fmt.Sprintf("instance-%x-v3", gene.ID().Bytes()[24:])+suffix)
+ instanceDir := filepath.Join(dataDir, fmt.Sprintf("instance-%x-v4", gene.ID().Bytes()[24:])+suffix)
if err := os.MkdirAll(instanceDir, 0700); err != nil {
return "", errors.Wrapf(err, "create instance dir [%v]", instanceDir)
}
@@ -312,9 +312,7 @@ func openMainDB(ctx *cli.Context, dir string) (*muxdb.MuxDB, error) {
opts := muxdb.Options{
TrieNodeCacheSizeMB: cacheMB,
- TrieRootCacheCapacity: 256,
TrieCachedNodeTTL: 30, // 5min
- TrieLeafBankSlotCapacity: 256,
TrieDedupedPartitionFactor: math.MaxUint32,
TrieWillCleanHistory: !ctx.Bool(disablePrunerFlag.Name),
OpenFilesCacheCapacity: fdCache,
@@ -331,9 +329,9 @@ func openMainDB(ctx *cli.Context, dir string) (*muxdb.MuxDB, error) {
debug.SetGCPercent(int(gogc))
if opts.TrieWillCleanHistory {
- opts.TrieHistPartitionFactor = 1000
+ opts.TrieHistPartitionFactor = 256
} else {
- opts.TrieHistPartitionFactor = 500000
+ opts.TrieHistPartitionFactor = 524288
}
path := filepath.Join(dir, "main.db")
diff --git a/consensus/consensus.go b/consensus/consensus.go
index 8d6f7a9c3..fd9a78e8a 100644
--- a/consensus/consensus.go
+++ b/consensus/consensus.go
@@ -48,7 +48,7 @@ func New(repo *chain.Repository, stater *state.Stater, forkConfig thor.ForkConfi
// Process process a block.
func (c *Consensus) Process(parentSummary *chain.BlockSummary, blk *block.Block, nowTimestamp uint64, blockConflicts uint32) (*state.Stage, tx.Receipts, error) {
header := blk.Header()
- state := c.stater.NewState(parentSummary.Header.StateRoot(), parentSummary.Header.Number(), parentSummary.Conflicts, parentSummary.SteadyNum)
+ state := c.stater.NewState(parentSummary.Root())
var features tx.Features
if header.Number() >= c.forkConfig.VIP191 {
@@ -79,7 +79,7 @@ func (c *Consensus) NewRuntimeForReplay(header *block.Header, skipPoA bool) (*ru
}
return nil, errors.New("parent block is missing")
}
- state := c.stater.NewState(parentSummary.Header.StateRoot(), parentSummary.Header.Number(), parentSummary.Conflicts, parentSummary.SteadyNum)
+ state := c.stater.NewState(parentSummary.Root())
if !skipPoA {
if _, err := c.validateProposer(header, parentSummary.Header, state); err != nil {
return nil, err
diff --git a/consensus/consensus_test.go b/consensus/consensus_test.go
index 5bac09763..0e97e0022 100644
--- a/consensus/consensus_test.go
+++ b/consensus/consensus_test.go
@@ -122,11 +122,7 @@ func newTestConsensus() (*testConsensus, error) {
return nil, err
}
- if err := repo.AddBlock(b1, receipts, 0); err != nil {
- return nil, err
- }
-
- if err := repo.SetBestBlockID(b1.Header().ID()); err != nil {
+ if err := repo.AddBlock(b1, receipts, 0, true); err != nil {
return nil, err
}
diff --git a/consensus/validator.go b/consensus/validator.go
index dc7ee85b3..4749fcff8 100644
--- a/consensus/validator.go
+++ b/consensus/validator.go
@@ -16,6 +16,7 @@ import (
"github.com/vechain/thor/v2/runtime"
"github.com/vechain/thor/v2/state"
"github.com/vechain/thor/v2/thor"
+ "github.com/vechain/thor/v2/trie"
"github.com/vechain/thor/v2/tx"
"github.com/vechain/thor/v2/xenv"
)
@@ -341,7 +342,7 @@ func (c *Consensus) verifyBlock(blk *block.Block, state *state.State, blockConfl
}
}
- stage, err := state.Stage(header.Number(), blockConflicts)
+ stage, err := state.Stage(trie.Version{Major: header.Number(), Minor: blockConflicts})
if err != nil {
return nil, nil, err
}
diff --git a/docs/CONTRIBUTING.md b/docs/CONTRIBUTING.md
index 407dc0b2a..e5a45b559 100644
--- a/docs/CONTRIBUTING.md
+++ b/docs/CONTRIBUTING.md
@@ -1,7 +1,7 @@
# Contributing to VechainThor
Welcome to VechainThor! We appreciate your interest in contributing. By participating in this project, you agree to
-abide by our [Code of Conduct](https://github.com/vechain/thor/blob/master/CODE_OF_CONDUCT.md).
+abide by our [Code of Conduct](https://github.com/vechain/thor/blob/master/docs/CODE_OF_CONDUCT.md).
## VeChain Improvement Proposals (VIPs)
diff --git a/docs/hosting-a-node.md b/docs/hosting-a-node.md
index 1dc1e12ce..6212d7360 100644
--- a/docs/hosting-a-node.md
+++ b/docs/hosting-a-node.md
@@ -21,7 +21,7 @@ state, including the disk space required for various node types.
### Command Line Options
-Please refer to [Command Line Options](./usage.md#command-line-options) in the usage documentation to see a list of all
+Please refer to [Command Line Options](https://github.com/vechain/thor/blob/master/docs/usage.md#command-line-options) in the usage documentation to see a list of all
available options.
---
diff --git a/docs/usage.md b/docs/usage.md
index 7358e3f0e..3a3b7694a 100644
--- a/docs/usage.md
+++ b/docs/usage.md
@@ -20,7 +20,7 @@ ___
### Running from source
-- To install the `thor` binary, follow the instructions in the [build](build) guide.
+- To install the `thor` binary, follow the instructions in the [build](https://github.com/vechain/thor/blob/master/docs/build.md) guide.
Connect to vechain's mainnet:
@@ -47,7 +47,7 @@ ___
### Running a discovery node
-- To install the `disco` binary, follow the instructions in the [build](build) guide.
+- To install the `disco` binary, follow the instructions in the [build](https://github.com/vechain/thor/blob/master/docs/build.md) guide.
Start a discovery node:
diff --git a/genesis/builder.go b/genesis/builder.go
index ea12655c1..95327bf4b 100644
--- a/genesis/builder.go
+++ b/genesis/builder.go
@@ -14,6 +14,7 @@ import (
"github.com/vechain/thor/v2/runtime"
"github.com/vechain/thor/v2/state"
"github.com/vechain/thor/v2/thor"
+ "github.com/vechain/thor/v2/trie"
"github.com/vechain/thor/v2/tx"
"github.com/vechain/thor/v2/xenv"
)
@@ -73,9 +74,7 @@ func (b *Builder) ForkConfig(fc thor.ForkConfig) *Builder {
// ComputeID compute genesis ID.
func (b *Builder) ComputeID() (thor.Bytes32, error) {
- db := muxdb.NewMem()
-
- blk, _, _, err := b.Build(state.NewStater(db))
+ blk, _, _, err := b.Build(state.NewStater(muxdb.NewMem()))
if err != nil {
return thor.Bytes32{}, err
}
@@ -84,7 +83,7 @@ func (b *Builder) ComputeID() (thor.Bytes32, error) {
// Build build genesis block according to presets.
func (b *Builder) Build(stater *state.Stater) (blk *block.Block, events tx.Events, transfers tx.Transfers, err error) {
- state := stater.NewState(thor.Bytes32{}, 0, 0, 0)
+ state := stater.NewState(trie.Root{})
for _, proc := range b.stateProcs {
if err := proc(state); err != nil {
@@ -112,7 +111,7 @@ func (b *Builder) Build(stater *state.Stater) (blk *block.Block, events tx.Event
transfers = append(transfers, out.Transfers...)
}
- stage, err := state.Stage(0, 0)
+ stage, err := state.Stage(trie.Version{})
if err != nil {
return nil, nil, nil, errors.Wrap(err, "stage")
}
diff --git a/genesis/genesis_test.go b/genesis/genesis_test.go
index e6c5c47ce..97b72295d 100644
--- a/genesis/genesis_test.go
+++ b/genesis/genesis_test.go
@@ -13,6 +13,7 @@ import (
"github.com/vechain/thor/v2/muxdb"
"github.com/vechain/thor/v2/state"
"github.com/vechain/thor/v2/thor"
+ "github.com/vechain/thor/v2/trie"
)
func TestTestnetGenesis(t *testing.T) {
@@ -22,13 +23,7 @@ func TestTestnetGenesis(t *testing.T) {
b0, _, _, err := gene.Build(state.NewStater(db))
assert.Nil(t, err)
- id := gene.ID()
- name := gene.Name()
-
- assert.Equal(t, id, thor.MustParseBytes32("0x000000000b2bce3c70bc649a02749e8687721b09ed2e15997f466536b20bb127"))
- assert.Equal(t, name, "testnet")
-
- st := state.New(db, b0.Header().StateRoot(), 0, 0, 0)
+ st := state.New(db, trie.Root{Hash: b0.Header().StateRoot()})
v, err := st.Exists(thor.MustParseAddress("0xe59D475Abe695c7f67a8a2321f33A856B0B4c71d"))
assert.Nil(t, err)
diff --git a/go.mod b/go.mod
index 4ba00ec9a..52ca3fc14 100644
--- a/go.mod
+++ b/go.mod
@@ -18,16 +18,17 @@ require (
github.com/mattn/go-sqlite3 v1.14.22
github.com/mattn/go-tty v0.0.0-20180219170247-931426f7535a
github.com/pborman/uuid v0.0.0-20170612153648-e790cca94e6c
- github.com/pkg/errors v0.8.0
+ github.com/pkg/errors v0.8.1-0.20171216070316-e881fd58d78e
github.com/pmezard/go-difflib v1.0.0
github.com/prometheus/client_golang v1.18.0
github.com/prometheus/client_model v0.5.0
github.com/prometheus/common v0.45.0
github.com/qianbin/directcache v0.9.7
+ github.com/qianbin/drlp v0.0.0-20240102101024-e0e02518b5f9
github.com/stretchr/testify v1.8.4
github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a
github.com/vechain/go-ecvrf v0.0.0-20220525125849-96fa0442e765
- golang.org/x/crypto v0.21.0
+ golang.org/x/crypto v0.22.0
gopkg.in/cheggaaa/pb.v1 v1.0.28
gopkg.in/urfave/cli.v1 v1.20.0
gopkg.in/yaml.v3 v3.0.1
@@ -37,6 +38,7 @@ require (
github.com/aristanetworks/goarista v0.0.0-20180222005525-c41ed3986faa // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/btcsuite/btcd v0.0.0-20171128150713-2e60448ffcc6 // indirect
+ github.com/btcsuite/btcd/btcec/v2 v2.3.4 // indirect
github.com/cespare/cp v1.1.1 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/deckarep/golang-set v1.7.1 // indirect
@@ -46,17 +48,16 @@ require (
github.com/go-sourcemap/sourcemap v2.1.3+incompatible // indirect
github.com/go-stack/stack v1.7.0 // indirect
github.com/golang/snappy v0.0.4 // indirect
- github.com/google/go-cmp v0.6.0 // indirect
github.com/google/pprof v0.0.0-20230207041349-798e818bf904 // indirect
github.com/huin/goupnp v0.0.0-20171109214107-dceda08e705b // indirect
- github.com/jackpal/go-nat-pmp v1.0.1 // indirect
+ github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458 // indirect
github.com/mattn/go-colorable v0.0.9 // indirect
github.com/mattn/go-runewidth v0.0.4 // indirect
github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect
github.com/prometheus/procfs v0.12.0 // indirect
github.com/rjeczalik/notify v0.9.3 // indirect
- golang.org/x/net v0.23.0 // indirect
- golang.org/x/sys v0.18.0 // indirect
+ golang.org/x/net v0.24.0 // indirect
+ golang.org/x/sys v0.19.0 // indirect
golang.org/x/text v0.14.0 // indirect
google.golang.org/protobuf v1.33.0 // indirect
gopkg.in/karalabe/cookiejar.v2 v2.0.0-20150724131613-8dcd6a7f4951 // indirect
@@ -64,4 +65,4 @@ require (
replace github.com/syndtr/goleveldb => github.com/vechain/goleveldb v1.0.1-0.20220809091043-51eb019c8655
-replace github.com/ethereum/go-ethereum => github.com/vechain/go-ethereum v1.8.15-0.20240528020007-2994c2a24b9c
+replace github.com/ethereum/go-ethereum => github.com/vechain/go-ethereum v1.8.15-0.20241126085506-c74017ec91b2
diff --git a/go.sum b/go.sum
index fd12a92aa..b0085450f 100644
--- a/go.sum
+++ b/go.sum
@@ -6,6 +6,8 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/btcsuite/btcd v0.0.0-20171128150713-2e60448ffcc6 h1:Eey/GGQ/E5Xp1P2Lyx1qj007hLZfbi0+CoVeJruGCtI=
github.com/btcsuite/btcd v0.0.0-20171128150713-2e60448ffcc6/go.mod h1:Dmm/EzmjnCiweXmzRIAiUWCInVmPgjkzgv5k4tVyXiQ=
+github.com/btcsuite/btcd/btcec/v2 v2.3.4 h1:3EJjcN70HCu/mwqlUsGK8GcNVyLVxFDlWurTXGPFfiQ=
+github.com/btcsuite/btcd/btcec/v2 v2.3.4/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04=
github.com/cespare/cp v1.1.1 h1:nCb6ZLdB7NRaqsm91JtQTAme2SKJzXVsdPIPkyJr1MU=
github.com/cespare/cp v1.1.1/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
@@ -23,6 +25,7 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/deckarep/golang-set v1.7.1 h1:SCQV0S6gTtp6itiFrTqI+pfmJ4LN85S1YzhDf9rTHJQ=
github.com/deckarep/golang-set v1.7.1/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ=
+github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0=
github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 h1:YLtO71vCjJRCBcrPMtQ9nqBsqpA1m5sE92cU+pd5Mcc=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs=
@@ -84,8 +87,8 @@ github.com/huin/goupnp v0.0.0-20171109214107-dceda08e705b h1:mvnS3LbcRgdM4nBLksE
github.com/huin/goupnp v0.0.0-20171109214107-dceda08e705b/go.mod h1:MZ2ZmwcBpvOoJ22IJsc7va19ZwoheaBk43rKg12SKag=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20220319035150-800ac71e25c2/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w=
-github.com/jackpal/go-nat-pmp v1.0.1 h1:i0LektDkO1QlrTm/cSuP+PyBCDnYvjPLGl4LdWEMiaA=
-github.com/jackpal/go-nat-pmp v1.0.1/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
+github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458 h1:6OvNmYgJyexcZ3pYbTI9jWx5tHo1Dee/tWbLMfPe2TA=
+github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
@@ -119,12 +122,13 @@ github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
-github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw=
github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
+github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk=
+github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0=
github.com/pborman/uuid v0.0.0-20170612153648-e790cca94e6c h1:MUyE44mTvnI5A0xrxIxaMqoWFzPfQvtE2IWUollMDMs=
github.com/pborman/uuid v0.0.0-20170612153648-e790cca94e6c/go.mod h1:VyrYX9gd7irzKovcSS6BIIEwPRkP2Wm2m9ufcdFSJ34=
-github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw=
-github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1-0.20171216070316-e881fd58d78e h1:osn9cOzd93npXpRuTFR/MPjiTvTSNHA7pqbXkPyLqQ4=
+github.com/pkg/errors v0.8.1-0.20171216070316-e881fd58d78e/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk=
@@ -137,6 +141,8 @@ github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
github.com/qianbin/directcache v0.9.7 h1:DH6MdmU0fVjcKry57ju7U6akTFDBnLhHd0xOHZDq948=
github.com/qianbin/directcache v0.9.7/go.mod h1:gZBpa9NqO1Qz7wZKO7t7atBA76bT8X0eM01PdveW4qc=
+github.com/qianbin/drlp v0.0.0-20240102101024-e0e02518b5f9 h1:phutO88A0XihNL/23gAzaih6cqQB25smZ0STd/lM0Ng=
+github.com/qianbin/drlp v0.0.0-20240102101024-e0e02518b5f9/go.mod h1:OnClEjurpFUtR3RUCauP9HxNNl8xjfGAOv0kWYTznOc=
github.com/rjeczalik/notify v0.9.3 h1:6rJAzHTGKXGj76sbRgDiDcYj/HniypXmSJo1SWakZeY=
github.com/rjeczalik/notify v0.9.3/go.mod h1:gF3zSOrafR9DQEWSE8TjfI9NkooDxbyT4UgRGKZA0lc=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
@@ -150,8 +156,8 @@ github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcU
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/vechain/go-ecvrf v0.0.0-20220525125849-96fa0442e765 h1:jvr+TSivjObZmOKVdqlgeLtRhaDG27gE39PMuE2IJ24=
github.com/vechain/go-ecvrf v0.0.0-20220525125849-96fa0442e765/go.mod h1:cwnTMgAVzMb30xMKnGI1LdU1NjMiPllYb7i3ibj/fzE=
-github.com/vechain/go-ethereum v1.8.15-0.20240528020007-2994c2a24b9c h1:YfGsGXMNKI64gR76KumYgGnYSdAFtMA8igtmpFiBt74=
-github.com/vechain/go-ethereum v1.8.15-0.20240528020007-2994c2a24b9c/go.mod h1:EhX+lSkpNdEIxu1zOXtiFZu5nv1i8MX1mQA/qhUE+gw=
+github.com/vechain/go-ethereum v1.8.15-0.20241126085506-c74017ec91b2 h1:ch3DqXvl1ApfJut768bf5Vlhqtw+bxAWTyPDYXQkQZk=
+github.com/vechain/go-ethereum v1.8.15-0.20241126085506-c74017ec91b2/go.mod h1:yPUCNmntAh1PritrMfSi7noK+9vVPStZX3wgh3ieaY0=
github.com/vechain/goleveldb v1.0.1-0.20220809091043-51eb019c8655 h1:CbHcWpCi7wOYfpoErRABh3Slyq9vO0Ay/EHN5GuJSXQ=
github.com/vechain/goleveldb v1.0.1-0.20220809091043-51eb019c8655/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
@@ -160,8 +166,8 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
-golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
+golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30=
+golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -174,8 +180,8 @@ golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
-golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs=
-golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
+golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w=
+golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -199,8 +205,8 @@ golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4=
-golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o=
+golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
diff --git a/logdb/sequence.go b/logdb/sequence.go
index 1e98458b7..04606890a 100644
--- a/logdb/sequence.go
+++ b/logdb/sequence.go
@@ -21,6 +21,8 @@ const (
txIndexMask = (1 << txIndexBits) - 1
// Max = 2^20 - 1 = 1,048,575
logIndexMask = (1 << logIndexBits) - 1
+
+ MaxBlockNumber = blockNumMask
)
func newSequence(blockNum uint32, txIndex uint32, logIndex uint32) (sequence, error) {
diff --git a/lowrlp/encoder.go b/lowrlp/encoder.go
deleted file mode 100644
index 9f5bab37b..000000000
--- a/lowrlp/encoder.go
+++ /dev/null
@@ -1,236 +0,0 @@
-// Copyright (c) 2021 The VeChainThor developers
-
-// Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying
-// file LICENSE or
-
-// Package lowrlp provides methods to perform low-level rlp encoding.
-// Codes are mostly copied from github.com/ethereum/go-ethereum/rlp.
-package lowrlp
-
-import (
- "io"
-)
-
-// Encoder is the low-level rlp encoder.
-type Encoder struct {
- str []byte // string data, contains everything except list headers
- lheads []listhead // all list headers
- lhsize int // sum of sizes of all encoded list headers
- sizebuf [9]byte // auxiliary buffer for uint encoding
-}
-
-// Reset reset the encoder state.
-func (w *Encoder) Reset() {
- w.lhsize = 0
- w.str = w.str[:0]
- w.lheads = w.lheads[:0]
-}
-
-// EncodeString encodes the string value.
-func (w *Encoder) EncodeString(b []byte) {
- if len(b) == 1 && b[0] <= 0x7F {
- // fits single byte, no string header
- w.str = append(w.str, b[0])
- } else {
- w.encodeStringHeader(len(b))
- w.str = append(w.str, b...)
- }
-}
-
-// EncodeUint encodes the uint value.
-func (w *Encoder) EncodeUint(i uint64) {
- if i == 0 {
- w.str = append(w.str, 0x80)
- } else if i < 128 {
- // fits single byte
- w.str = append(w.str, byte(i))
- } else {
- s := putint(w.sizebuf[1:], i)
- w.sizebuf[0] = 0x80 + byte(s)
- w.str = append(w.str, w.sizebuf[:s+1]...)
- }
-}
-
-// EncodeRaw encodes raw value.
-func (w *Encoder) EncodeRaw(r []byte) {
- w.str = append(w.str, r...)
-}
-
-// EncodeEmptyString encodes an empty string.
-// It's equivalent to w.EncodeString(nil), but more efficient.
-func (w *Encoder) EncodeEmptyString() {
- w.str = append(w.str, 0x80)
-}
-
-// EncodeEmptyList encodes an empty list.
-// It's equivalent to w.ListEnd(w.List()), but more efficient.
-func (w *Encoder) EncodeEmptyList() {
- w.str = append(w.str, 0xC0)
-}
-
-// List starts to encode list elements.
-// It returns the offset which is passed to ListEnd when list ended.
-func (w *Encoder) List() int {
- w.lheads = append(w.lheads, listhead{offset: len(w.str), size: w.lhsize})
- return len(w.lheads) - 1
-}
-
-// ListEnd ends the list. offset is the return value of the corresponded List call.
-func (w *Encoder) ListEnd(index int) {
- lh := &w.lheads[index]
- lh.size = w.size() - lh.offset - lh.size
- if lh.size < 56 {
- w.lhsize++ // length encoded into kind tag
- } else {
- w.lhsize += 1 + intsize(uint64(lh.size))
- }
-}
-
-// ToBytes outputs the encode result to byte slice.
-func (w *Encoder) ToBytes() []byte {
- out := make([]byte, w.size())
- strpos := 0
- pos := 0
- for _, head := range w.lheads {
- // write string data before header
- n := copy(out[pos:], w.str[strpos:head.offset])
- pos += n
- strpos += n
- // write the header
- enc := head.encode(out[pos:])
- pos += len(enc)
- }
- // copy string data after the last list header
- copy(out[pos:], w.str[strpos:])
- return out
-}
-
-// ToWriter outputs the encode result to io.Writer.
-func (w *Encoder) ToWriter(out io.Writer) (err error) {
- strpos := 0
- for _, head := range w.lheads {
- // write string data before header
- if head.offset-strpos > 0 {
- n, err := out.Write(w.str[strpos:head.offset])
- strpos += n
- if err != nil {
- return err
- }
- }
- // write the header
- enc := head.encode(w.sizebuf[:])
- if _, err = out.Write(enc); err != nil {
- return err
- }
- }
- if strpos < len(w.str) {
- // write string data after the last list header
- _, err = out.Write(w.str[strpos:])
- }
- return err
-}
-
-func (w *Encoder) encodeStringHeader(size int) {
- if size < 56 {
- w.str = append(w.str, 0x80+byte(size))
- } else {
- sizesize := putint(w.sizebuf[1:], uint64(size))
- w.sizebuf[0] = 0xB7 + byte(sizesize)
- w.str = append(w.str, w.sizebuf[:sizesize+1]...)
- }
-}
-
-func (w *Encoder) size() int {
- return len(w.str) + w.lhsize
-}
-
-type listhead struct {
- offset int // index of this header in string data
- size int // total size of encoded data (including list headers)
-}
-
-// encode writes head to the given buffer, which must be at least
-// 9 bytes long. It returns the encoded bytes.
-func (head *listhead) encode(buf []byte) []byte {
- return buf[:puthead(buf, 0xC0, 0xF7, uint64(head.size))]
-}
-
-// intsize computes the minimum number of bytes required to store i.
-func intsize(i uint64) (size int) {
- for size = 1; ; size++ {
- if i >>= 8; i == 0 {
- return size
- }
- }
-}
-
-// puthead writes a list or string header to buf.
-// buf must be at least 9 bytes long.
-func puthead(buf []byte, smalltag, largetag byte, size uint64) int {
- if size < 56 {
- buf[0] = smalltag + byte(size)
- return 1
- }
- sizesize := putint(buf[1:], size)
- buf[0] = largetag + byte(sizesize)
- return sizesize + 1
-}
-
-// putint writes i to the beginning of b in big endian byte
-// order, using the least number of bytes needed to represent i.
-func putint(b []byte, i uint64) (size int) {
- switch {
- case i < (1 << 8):
- b[0] = byte(i)
- return 1
- case i < (1 << 16):
- b[0] = byte(i >> 8)
- b[1] = byte(i)
- return 2
- case i < (1 << 24):
- b[0] = byte(i >> 16)
- b[1] = byte(i >> 8)
- b[2] = byte(i)
- return 3
- case i < (1 << 32):
- b[0] = byte(i >> 24)
- b[1] = byte(i >> 16)
- b[2] = byte(i >> 8)
- b[3] = byte(i)
- return 4
- case i < (1 << 40):
- b[0] = byte(i >> 32)
- b[1] = byte(i >> 24)
- b[2] = byte(i >> 16)
- b[3] = byte(i >> 8)
- b[4] = byte(i)
- return 5
- case i < (1 << 48):
- b[0] = byte(i >> 40)
- b[1] = byte(i >> 32)
- b[2] = byte(i >> 24)
- b[3] = byte(i >> 16)
- b[4] = byte(i >> 8)
- b[5] = byte(i)
- return 6
- case i < (1 << 56):
- b[0] = byte(i >> 48)
- b[1] = byte(i >> 40)
- b[2] = byte(i >> 32)
- b[3] = byte(i >> 24)
- b[4] = byte(i >> 16)
- b[5] = byte(i >> 8)
- b[6] = byte(i)
- return 7
- default:
- b[0] = byte(i >> 56)
- b[1] = byte(i >> 48)
- b[2] = byte(i >> 40)
- b[3] = byte(i >> 32)
- b[4] = byte(i >> 24)
- b[5] = byte(i >> 16)
- b[6] = byte(i >> 8)
- b[7] = byte(i)
- return 8
- }
-}
diff --git a/metrics/noop.go b/metrics/noop.go
index 6eb909ff9..b804486b6 100644
--- a/metrics/noop.go
+++ b/metrics/noop.go
@@ -5,7 +5,10 @@
package metrics
-import "net/http"
+import (
+ "net/http"
+ "time"
+)
// noopMetrics implements a no operations metrics service
type noopMetrics struct{}
@@ -48,3 +51,5 @@ func (n noopMeters) Set(int64) {}
func (n noopMeters) Observe(int64) {}
func (n *noopMetrics) ObserveWithLabels(int64, map[string]string) {}
+
+func (n *noopMetrics) collectDiskIO(time.Duration) {}
diff --git a/metrics/prometheus.go b/metrics/prometheus.go
index 50745752c..c1a21a345 100644
--- a/metrics/prometheus.go
+++ b/metrics/prometheus.go
@@ -6,8 +6,14 @@
package metrics
import (
+ "bufio"
+ "fmt"
"net/http"
+ "os"
+ "strconv"
+ "strings"
"sync"
+ "time"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
@@ -24,6 +30,8 @@ func InitializePrometheusMetrics() {
// don't allow for reset
if _, ok := metrics.(*prometheusMetrics); !ok {
metrics = newPrometheusMetrics()
+		// collect disk io metrics every 5 seconds
+ go metrics.(*prometheusMetrics).collectDiskIO(5 * time.Second)
}
}
@@ -147,6 +155,59 @@ func (o *prometheusMetrics) newHistogramMeter(name string, buckets []int64) Hist
}
}
+func getIOLineValue(line string) int64 {
+ fields := strings.Fields(line)
+ if len(fields) != 2 {
+ logger.Warn("this io file line is malformed", "err", line)
+ return 0
+ }
+ value, err := strconv.ParseInt(fields[1], 10, 64)
+ if err != nil {
+ logger.Warn("unable to parse int", "err", err)
+ return 0
+ }
+
+ return value
+}
+
+func getDiskIOData() (int64, int64, error) {
+ pid := os.Getpid()
+ ioFilePath := fmt.Sprintf("/proc/%d/io", pid)
+ file, err := os.Open(ioFilePath)
+ if err != nil {
+ return 0, 0, err
+ }
+
+ // Parse the file line by line
+ scanner := bufio.NewScanner(file)
+ var reads, writes int64
+ for scanner.Scan() {
+ line := scanner.Text()
+ if strings.HasPrefix(line, "syscr") {
+ reads = getIOLineValue(line)
+ } else if strings.HasPrefix(line, "syscw") {
+ writes = getIOLineValue(line)
+ }
+ }
+
+ return reads, writes, nil
+}
+
+func (o *prometheusMetrics) collectDiskIO(refresh time.Duration) {
+ for {
+ reads, writes, err := getDiskIOData()
+ if err == nil {
+ readsMeter := o.GetOrCreateGaugeMeter("disk_reads")
+ readsMeter.Set(reads)
+
+ writesMeter := o.GetOrCreateGaugeMeter("disk_writes")
+ writesMeter.Set(writes)
+ }
+
+ time.Sleep(refresh)
+ }
+}
+
type promHistogramMeter struct {
histogram prometheus.Histogram
}
diff --git a/metrics/telemetry.go b/metrics/telemetry.go
index 9ab3bd633..1d1ee96f2 100644
--- a/metrics/telemetry.go
+++ b/metrics/telemetry.go
@@ -5,7 +5,10 @@
package metrics
-import "net/http"
+import (
+ "net/http"
+ "sync"
+)
// metrics is a singleton service that provides global access to a set of meters
// it wraps multiple implementations and defaults to a no-op implementation
@@ -30,7 +33,11 @@ func HTTPHandler() http.Handler {
// Define standard buckets for histograms
var (
Bucket10s = []int64{0, 500, 1000, 2000, 3000, 4000, 5000, 7500, 10_000}
- BucketHTTPReqs = []int64{0, 150, 300, 450, 600, 900, 1200, 1500, 3000}
+ BucketHTTPReqs = []int64{
+ 0, 1, 2, 5, 10, 20, 30, 50, 75, 100,
+ 150, 200, 300, 400, 500, 750, 1000,
+ 1500, 2000, 3000, 4000, 5000, 10000,
+ }
)
// HistogramMeter represents the type of metric that is calculated by aggregating
@@ -96,12 +103,11 @@ func GaugeVec(name string, labels []string) GaugeVecMeter {
// - it avoid metrics definition to determine the singleton to use (noop vs prometheus)
func LazyLoad[T any](f func() T) func() T {
var result T
- var loaded bool
+ var once sync.Once
return func() T {
- if !loaded {
+ once.Do(func() {
result = f()
- loaded = true
- }
+ })
return result
}
}
diff --git a/muxdb/backend.go b/muxdb/backend.go
new file mode 100644
index 000000000..4d63a2d6e
--- /dev/null
+++ b/muxdb/backend.go
@@ -0,0 +1,94 @@
+// Copyright (c) 2024 The VeChainThor developers
+
+// Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying
+// file LICENSE or
+
+package muxdb
+
+import (
+ "context"
+ "encoding/binary"
+ "math"
+
+ "github.com/vechain/thor/v2/kv"
+ "github.com/vechain/thor/v2/trie"
+)
+
+// backend is the backend of the trie.
+type backend struct {
+ Store kv.Store
+ Cache Cache
+ HistPtnFactor, DedupedPtnFactor uint32
+ CachedNodeTTL uint16
+}
+
+// AppendHistNodeKey composes hist node key and appends to buf.
+func (b *backend) AppendHistNodeKey(buf []byte, name string, path []byte, ver trie.Version) []byte {
+ // encoding node keys in this way has the following benefits:
+ // 1. nodes are stored in order of partition id, which is friendly to LSM DB.
+ // 2. adjacent versions of a node are stored together,
+ // so that node data is well compressed (ref https://gist.github.com/qianbin/bffcd248b7312c35d7d526a974018b1b )
+ buf = append(buf, trieHistSpace) // space
+ if b.HistPtnFactor != math.MaxUint32 { // partition id
+ buf = binary.BigEndian.AppendUint32(buf, ver.Major/b.HistPtnFactor)
+ }
+ buf = append(buf, name...) // trie name
+ buf = appendNodePath(buf, path) // path
+
+ // major ver
+ mod := ver.Major % b.HistPtnFactor
+ // more compact encoding
+ switch {
+ case b.HistPtnFactor > (1 << 24):
+ buf = binary.BigEndian.AppendUint32(buf, mod)
+ case b.HistPtnFactor > (1 << 16):
+ buf = append(buf, byte(mod>>16), byte(mod>>8), byte(mod))
+ case b.HistPtnFactor > (1 << 8):
+ buf = append(buf, byte(mod>>8), byte(mod))
+ case b.HistPtnFactor > 1:
+ buf = append(buf, byte(mod))
+ }
+
+ if ver.Minor != 0 { // minor ver
+ buf = binary.AppendUvarint(buf, uint64(ver.Minor))
+ }
+ return buf
+}
+
+// AppendDedupedNodeKey composes deduped node key and appends to buf.
+func (b *backend) AppendDedupedNodeKey(buf []byte, name string, path []byte, ver trie.Version) []byte {
+ buf = append(buf, trieDedupedSpace) // space
+ if b.DedupedPtnFactor != math.MaxUint32 { // partition id
+ buf = binary.BigEndian.AppendUint32(buf, ver.Major/b.DedupedPtnFactor)
+ }
+ buf = append(buf, name...) // trie name
+ buf = appendNodePath(buf, path) // path
+ return buf
+}
+
+// DeleteHistoryNodes deletes trie history nodes within partitions of [startMajorVer, limitMajorVer).
+func (b *backend) DeleteHistoryNodes(ctx context.Context, startMajorVer, limitMajorVer uint32) error {
+ startPtn := startMajorVer / b.HistPtnFactor
+ limitPtn := limitMajorVer / b.HistPtnFactor
+
+ return b.Store.DeleteRange(ctx, kv.Range{
+ Start: binary.BigEndian.AppendUint32([]byte{trieHistSpace}, startPtn),
+ Limit: binary.BigEndian.AppendUint32([]byte{trieHistSpace}, limitPtn),
+ })
+}
+
+// appendNodePath encodes the node path and appends to buf.
+func appendNodePath(buf, path []byte) []byte {
+ switch len(path) {
+ case 0:
+ return append(buf, 0, 0)
+ case 1:
+ return append(buf, path[0], 1)
+ case 2:
+ return append(buf, path[0], (path[1]<<4)|2)
+ default:
+ // has more
+ buf = append(buf, path[0]|0x10, (path[1]<<4)|2)
+ return appendNodePath(buf, path[2:])
+ }
+}
diff --git a/muxdb/cache.go b/muxdb/cache.go
new file mode 100644
index 000000000..ea73b47f7
--- /dev/null
+++ b/muxdb/cache.go
@@ -0,0 +1,230 @@
+// Copyright (c) 2021 The VeChainThor developers
+
+// Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying
+// file LICENSE or
+
+package muxdb
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/qianbin/directcache"
+ "github.com/vechain/thor/v2/trie"
+)
+
+type Cache interface {
+ AddNodeBlob(keyBuf *[]byte, name string, path []byte, ver trie.Version, blob []byte, isCommitting bool)
+ GetNodeBlob(keyBuf *[]byte, name string, path []byte, ver trie.Version, peek bool) []byte
+ AddRootNode(name string, n trie.Node)
+ GetRootNode(name string, ver trie.Version) trie.Node
+}
+
+// cache is the cache layer for trie.
+type cache struct {
+ queriedNodes *directcache.Cache // caches recently queried node blobs.
+ committedNodes *directcache.Cache // caches newly committed node blobs.
+ roots struct { // caches root nodes.
+ m map[string]trie.Node
+ lock sync.RWMutex
+ maxMajor uint32
+ ttl uint32
+ }
+
+ nodeStats cacheStats
+ rootStats cacheStats
+ lastLogTime atomic.Int64
+}
+
+// newCache creates a cache object with the given cache size.
+func newCache(sizeMB int, rootTTL uint32) Cache {
+ sizeBytes := sizeMB * 1024 * 1024
+ cache := &cache{
+ queriedNodes: directcache.New(sizeBytes / 4),
+ committedNodes: directcache.New(sizeBytes - sizeBytes/4),
+ }
+ cache.lastLogTime.Store(time.Now().UnixNano())
+ cache.roots.m = make(map[string]trie.Node)
+ cache.roots.ttl = rootTTL
+ return cache
+}
+
+func (c *cache) log() {
+ now := time.Now().UnixNano()
+ last := c.lastLogTime.Swap(now)
+
+ if now-last > int64(time.Second*20) {
+ shouldNode, hitNode, missNode := c.nodeStats.Stats()
+ shouldRoot, hitRoot, missRoot := c.rootStats.Stats()
+
+	// log the two categories together only if one of the hit rates has
+	// changed compared to the last run, to avoid too many logs.
+ if shouldNode || shouldRoot {
+ logStats("node cache stats", hitNode, missNode)
+ logStats("root cache stats", hitRoot, missRoot)
+ }
+
+		// metrics will be reported every 20 seconds
+ metricCacheHitMissGaugeVec().SetWithLabel(hitRoot, map[string]string{"type": "root", "event": "hit"})
+ metricCacheHitMissGaugeVec().SetWithLabel(missRoot, map[string]string{"type": "root", "event": "miss"})
+ metricCacheHitMissGaugeVec().SetWithLabel(hitNode, map[string]string{"type": "node", "event": "hit"})
+ metricCacheHitMissGaugeVec().SetWithLabel(missNode, map[string]string{"type": "node", "event": "miss"})
+ } else {
+ c.lastLogTime.CompareAndSwap(now, last)
+ }
+}
+
+// AddNodeBlob adds encoded node blob into the cache.
+func (c *cache) AddNodeBlob(keyBuf *[]byte, name string, path []byte, ver trie.Version, blob []byte, isCommitting bool) {
+ // the version part
+ v := binary.AppendUvarint((*keyBuf)[:0], uint64(ver.Major))
+ v = binary.AppendUvarint(v, uint64(ver.Minor))
+ // the full key
+ k := append(v, name...)
+ k = append(k, path...)
+ *keyBuf = k
+
+ if isCommitting {
+ _ = c.committedNodes.AdvSet(k[len(v):], len(blob)+len(v), func(val []byte) {
+ copy(val, v)
+ copy(val[len(v):], blob)
+ })
+ } else {
+ _ = c.queriedNodes.Set(k, blob)
+ }
+}
+
+// GetNodeBlob returns the cached node blob.
+func (c *cache) GetNodeBlob(keyBuf *[]byte, name string, path []byte, ver trie.Version, peek bool) []byte {
+ // the version part
+ v := binary.AppendUvarint((*keyBuf)[:0], uint64(ver.Major))
+ v = binary.AppendUvarint(v, uint64(ver.Minor))
+ // the full key
+ k := append(v, name...)
+ k = append(k, path...)
+ *keyBuf = k
+
+ var blob []byte
+ // lookup from committing cache
+ if c.committedNodes.AdvGet(k[len(v):], func(val []byte) {
+ if bytes.Equal(k[:len(v)], val[:len(v)]) {
+ blob = append([]byte(nil), val[len(v):]...)
+ }
+ }, peek) && len(blob) > 0 {
+ if !peek {
+ c.nodeStats.Hit()
+ }
+ return blob
+ }
+
+ // fallback to querying cache
+ if c.queriedNodes.AdvGet(k, func(val []byte) {
+ blob = append([]byte(nil), val...)
+ }, peek) && len(blob) > 0 {
+ if !peek {
+ c.nodeStats.Hit()
+ }
+ return blob
+ }
+ if !peek {
+ c.nodeStats.Miss()
+ }
+ return nil
+}
+
+// AddRootNode add the root node into the cache.
+func (c *cache) AddRootNode(name string, n trie.Node) {
+ if n == nil {
+ return
+ }
+ c.roots.lock.Lock()
+ defer c.roots.lock.Unlock()
+
+ major := n.Version().Major
+ if major > c.roots.maxMajor {
+ c.roots.maxMajor = major
+ // evict old root nodes
+ for k, r := range c.roots.m {
+ if major-r.Version().Major > c.roots.ttl {
+ delete(c.roots.m, k)
+ }
+ }
+ }
+ c.roots.m[name] = n
+}
+
+// GetRootNode returns the cached root node.
+func (c *cache) GetRootNode(name string, ver trie.Version) trie.Node {
+ c.roots.lock.RLock()
+ defer c.roots.lock.RUnlock()
+
+ if r, has := c.roots.m[name]; has {
+ if r.Version() == ver {
+ if c.rootStats.Hit()%2000 == 0 {
+ c.log()
+ }
+ return r
+ }
+ }
+ c.rootStats.Miss()
+ return nil
+}
+
+type cacheStats struct {
+ hit, miss atomic.Int64
+ flag atomic.Int32
+}
+
+func (cs *cacheStats) Hit() int64 { return cs.hit.Add(1) }
+func (cs *cacheStats) Miss() int64 { return cs.miss.Add(1) }
+
+func (cs *cacheStats) Stats() (bool, int64, int64) {
+ hit := cs.hit.Load()
+ miss := cs.miss.Load()
+ lookups := hit + miss
+
+ hitRate := float64(0)
+ if lookups > 0 {
+ hitRate = float64(hit) / float64(lookups)
+ }
+ flag := int32(hitRate * 1000)
+
+ return cs.flag.Swap(flag) != flag, hit, miss
+}
+
+func logStats(msg string, hit, miss int64) {
+ lookups := hit + miss
+ var str string
+ if lookups > 0 {
+ str = fmt.Sprintf("%.3f", float64(hit)/float64(lookups))
+ } else {
+ str = "n/a"
+ }
+
+ logger.Info(msg,
+ "lookups", lookups,
+ "hitrate", str,
+ )
+}
+
+type dummyCache struct{}
+
+// AddNodeBlob is a no-op.
+func (*dummyCache) AddNodeBlob(_ *[]byte, _ string, _ []byte, _ trie.Version, _ []byte, _ bool) {}
+
+// GetNodeBlob always returns nil.
+func (*dummyCache) GetNodeBlob(_ *[]byte, _ string, _ []byte, _ trie.Version, _ bool) []byte {
+ return nil
+}
+
+// AddRootNode is a no-op.
+func (*dummyCache) AddRootNode(_ string, _ trie.Node) {}
+
+// GetRootNode always returns nil.
+func (*dummyCache) GetRootNode(_ string, _ trie.Version) trie.Node {
+ return nil
+}
diff --git a/muxdb/cache_test.go b/muxdb/cache_test.go
new file mode 100644
index 000000000..20635afc0
--- /dev/null
+++ b/muxdb/cache_test.go
@@ -0,0 +1,95 @@
+// Copyright (c) 2019 The VeChainThor developers
+
+// Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying
+// file LICENSE or
+
+package muxdb
+
+import (
+ "bytes"
+ "crypto/rand"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/vechain/thor/v2/trie"
+)
+
+type mockedRootNode struct {
+ trie.Node
+ ver trie.Version
+}
+
+func (m *mockedRootNode) Version() trie.Version { return m.ver }
+
+func TestCacheRootNode(t *testing.T) {
+ cache := newCache(0, 100)
+
+ n1 := &mockedRootNode{ver: trie.Version{Major: 1, Minor: 1}}
+ cache.AddRootNode("", n1)
+ assert.Equal(t, n1, cache.GetRootNode("", n1.ver))
+
+ // minor ver not matched
+ assert.Equal(t, nil, cache.GetRootNode("", trie.Version{Major: 1}))
+}
+
+func TestCacheNodeBlob(t *testing.T) {
+ var (
+ cache = newCache(1, 0)
+ keyBuf []byte
+ blob = []byte{1, 1, 1}
+ ver = trie.Version{Major: 1, Minor: 1}
+ )
+
+ // add to committing cache
+ cache.AddNodeBlob(&keyBuf, "", nil, ver, blob, true)
+ assert.Equal(t, blob, cache.GetNodeBlob(&keyBuf, "", nil, ver, false))
+ // minor ver not matched
+ assert.Nil(t, cache.GetNodeBlob(&keyBuf, "", nil, trie.Version{Major: 1}, false))
+
+ cache = newCache(1, 0)
+
+ // add to querying cache
+ cache.AddNodeBlob(&keyBuf, "", nil, ver, blob, false)
+ assert.Equal(t, blob, cache.GetNodeBlob(&keyBuf, "", nil, ver, false))
+ // minor ver not matched
+ assert.Nil(t, cache.GetNodeBlob(&keyBuf, "", nil, trie.Version{Major: 1}, false))
+}
+
+func Benchmark_cacheNodeBlob(b *testing.B) {
+ var (
+ cache = newCache(100, 0)
+ keyBuf []byte
+ name = "n"
+ path = []byte{1, 1}
+ blob = make([]byte, 100)
+ )
+ rand.Read(blob)
+
+ for i := 0; i < b.N; i++ {
+ cache.AddNodeBlob(&keyBuf, name, path, trie.Version{}, blob, true)
+ got := cache.GetNodeBlob(&keyBuf, name, path, trie.Version{}, false)
+ if !bytes.Equal(got, blob) {
+ b.Fatalf("want %x, got %x", blob, got)
+ }
+ }
+}
+
+func Benchmark_cacheRootNode(b *testing.B) {
+ var (
+ cache = newCache(1, 0)
+ name = "n"
+ )
+
+ var tr trie.Trie
+ tr.Update([]byte{1}, []byte{2}, []byte{3})
+
+ rn := tr.RootNode()
+
+ for i := 0; i < b.N; i++ {
+ cache.AddRootNode(name, rn)
+ got := cache.GetRootNode(name, trie.Version{})
+ if got != rn {
+ b.Fatalf("want %v, got %v", rn, got)
+ }
+ }
+}
diff --git a/muxdb/internal/engine/engine.go b/muxdb/engine/engine.go
similarity index 100%
rename from muxdb/internal/engine/engine.go
rename to muxdb/engine/engine.go
diff --git a/muxdb/internal/engine/leveldb.go b/muxdb/engine/leveldb.go
similarity index 100%
rename from muxdb/internal/engine/leveldb.go
rename to muxdb/engine/leveldb.go
diff --git a/muxdb/internal/trie/cache.go b/muxdb/internal/trie/cache.go
deleted file mode 100644
index cc7bca300..000000000
--- a/muxdb/internal/trie/cache.go
+++ /dev/null
@@ -1,213 +0,0 @@
-// Copyright (c) 2021 The VeChainThor developers
-
-// Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying
-// file LICENSE or
-
-package trie
-
-import (
- "encoding/binary"
- "fmt"
- "sync/atomic"
- "time"
-
- lru "github.com/hashicorp/golang-lru"
- "github.com/qianbin/directcache"
- "github.com/vechain/thor/v2/trie"
-)
-
-// Cache is the cache layer for trie.
-type Cache struct {
- // caches recently queried node blobs. Using full node key as key.
- queriedNodes *directcache.Cache
- // caches newly committed node blobs. Using node path as key.
- committedNodes *directcache.Cache
- // caches root nodes.
- roots *lru.ARCCache
- nodeStats cacheStats
- rootStats cacheStats
- lastLogTime int64
-}
-
-// NewCache creates a cache object with the given cache size.
-func NewCache(sizeMB int, rootCap int) *Cache {
- sizeBytes := sizeMB * 1024 * 1024
- var cache Cache
- cache.queriedNodes = directcache.New(sizeBytes / 4)
- cache.committedNodes = directcache.New(sizeBytes - sizeBytes/4)
- cache.roots, _ = lru.NewARC(rootCap)
- cache.lastLogTime = time.Now().UnixNano()
- return &cache
-}
-
-func (c *Cache) log() {
- now := time.Now().UnixNano()
- last := atomic.SwapInt64(&c.lastLogTime, now)
-
- if now-last > int64(time.Second*20) {
- log1, ok1 := c.nodeStats.ShouldLog("node cache stats")
- log2, ok2 := c.rootStats.ShouldLog("root cache stats")
-
- if ok1 || ok2 {
- log1()
- log2()
- }
- } else {
- atomic.CompareAndSwapInt64(&c.lastLogTime, now, last)
- }
-}
-
-// AddNodeBlob adds node blob into the cache.
-func (c *Cache) AddNodeBlob(name string, seq sequence, path []byte, blob []byte, isCommitting bool) {
- if c == nil {
- return
- }
- cNum, dNum := seq.CommitNum(), seq.DistinctNum()
- k := bufferPool.Get().(*buffer)
- defer bufferPool.Put(k)
-
- k.buf = append(k.buf[:0], name...)
- k.buf = append(k.buf, path...)
- k.buf = appendUint32(k.buf, dNum)
-
- if isCommitting {
- // committing cache key: name + path + distinctNum
-
- // concat commit number with blob as cache value
- _ = c.committedNodes.AdvSet(k.buf, 4+len(blob), func(val []byte) {
- binary.BigEndian.PutUint32(val, cNum)
- copy(val[4:], blob)
- })
- } else {
- // querying cache key: name + path + distinctNum + commitNum
- k.buf = appendUint32(k.buf, cNum)
- _ = c.queriedNodes.Set(k.buf, blob)
- }
-}
-
-// GetNodeBlob returns the cached node blob.
-func (c *Cache) GetNodeBlob(name string, seq sequence, path []byte, peek bool, dst []byte) []byte {
- if c == nil {
- return nil
- }
-
- cNum, dNum := seq.CommitNum(), seq.DistinctNum()
- lookupQueried := c.queriedNodes.AdvGet
- lookupCommitted := c.committedNodes.AdvGet
-
- k := bufferPool.Get().(*buffer)
- defer bufferPool.Put(k)
-
- k.buf = append(k.buf[:0], name...)
- k.buf = append(k.buf, path...)
- k.buf = appendUint32(k.buf, dNum)
-
- // lookup from committing cache
- var blob []byte
- if lookupCommitted(k.buf, func(b []byte) {
- if binary.BigEndian.Uint32(b) == cNum {
- blob = append(dst, b[4:]...)
- }
- }, peek) && len(blob) > 0 {
- if !peek {
- c.nodeStats.Hit()
- }
- return blob
- }
-
- // fallback to querying cache
- k.buf = appendUint32(k.buf, cNum)
- if lookupQueried(k.buf, func(b []byte) {
- blob = append(dst, b...)
- }, peek); len(blob) > 0 {
- if !peek {
- c.nodeStats.Hit()
- }
- return blob
- }
- if !peek {
- c.nodeStats.Miss()
- }
- return nil
-}
-
-// AddRootNode add the root node into the cache.
-func (c *Cache) AddRootNode(name string, n trie.Node) bool {
- if c == nil {
- return false
- }
- if n.Dirty() {
- return false
- }
- var sub *lru.Cache
- if q, has := c.roots.Get(name); has {
- sub = q.(*lru.Cache)
- } else {
- sub, _ = lru.New(4)
- c.roots.Add(name, sub)
- }
- sub.Add(n.SeqNum(), n)
- return true
-}
-
-// GetRootNode returns the cached root node.
-func (c *Cache) GetRootNode(name string, seq uint64, peek bool) (trie.Node, bool) {
- if c == nil {
- return trie.Node{}, false
- }
-
- getByName := c.roots.Get
- if peek {
- getByName = c.roots.Peek
- }
-
- if sub, has := getByName(name); has {
- getByKey := sub.(*lru.Cache).Get
- if peek {
- getByKey = sub.(*lru.Cache).Peek
- }
- if cached, has := getByKey(seq); has {
- if !peek {
- if c.rootStats.Hit()%2000 == 0 {
- c.log()
- }
- }
- return cached.(trie.Node), true
- }
- }
- if !peek {
- c.rootStats.Miss()
- }
- return trie.Node{}, false
-}
-
-type cacheStats struct {
- hit, miss int64
- flag int32
-}
-
-func (cs *cacheStats) Hit() int64 { return atomic.AddInt64(&cs.hit, 1) }
-func (cs *cacheStats) Miss() int64 { return atomic.AddInt64(&cs.miss, 1) }
-
-func (cs *cacheStats) ShouldLog(msg string) (func(), bool) {
- hit := atomic.LoadInt64(&cs.hit)
- miss := atomic.LoadInt64(&cs.miss)
- lookups := hit + miss
-
- hitrate := float64(hit) / float64(lookups)
- flag := int32(hitrate * 1000)
- return func() {
- var str string
- if lookups > 0 {
- str = fmt.Sprintf("%.3f", hitrate)
- } else {
- str = "n/a"
- }
-
- logger.Info(msg,
- "lookups", lookups,
- "hitrate", str,
- )
- atomic.StoreInt32(&cs.flag, flag)
- }, atomic.LoadInt32(&cs.flag) != flag
-}
diff --git a/muxdb/internal/trie/leaf_bank.go b/muxdb/internal/trie/leaf_bank.go
deleted file mode 100644
index f088a1eb4..000000000
--- a/muxdb/internal/trie/leaf_bank.go
+++ /dev/null
@@ -1,253 +0,0 @@
-// Copyright (c) 2021 The VeChainThor developers
-
-// Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying
-// file LICENSE or
-
-package trie
-
-import (
- "encoding/binary"
- "sync/atomic"
-
- "github.com/ethereum/go-ethereum/rlp"
- lru "github.com/hashicorp/golang-lru"
- "github.com/pkg/errors"
- "github.com/vechain/thor/v2/kv"
- "github.com/vechain/thor/v2/trie"
-)
-
-const (
- entityPrefix = "e"
- deletionJournalPrefix = "d"
-
- slotCacheSize = 64
-)
-
-// LeafRecord presents the queried leaf record.
-type LeafRecord struct {
- *trie.Leaf
- CommitNum uint32 // which commit number the leaf was committed
- SlotCommitNum uint32 // up to which commit number this leaf is valid
-}
-
-// leafEntity is the entity stored in leaf bank.
-type leafEntity struct {
- *trie.Leaf `rlp:"nil"`
- CommitNum uint32
-}
-
-var encodedEmptyLeafEntity, _ = rlp.EncodeToBytes(&leafEntity{})
-
-// trieSlot holds the state of a trie slot.
-type trieSlot struct {
- getter kv.Getter
- commitNum uint32 // the commit number of this slot
- cache *lru.Cache
-}
-
-func (s *trieSlot) getEntity(key []byte) (*leafEntity, error) {
- data, err := s.getter.Get(key)
- if err != nil {
- if !s.getter.IsNotFound(err) {
- return nil, errors.Wrap(err, "get entity from leafbank")
- }
- // never seen, which means it has been an empty leaf until slotCommitNum.
- return nil, nil
- }
-
- // entity found
- var ent leafEntity
- if err := rlp.DecodeBytes(data, &ent); err != nil {
- return nil, errors.Wrap(err, "decode leaf entity")
- }
-
- if ent.Leaf != nil && len(ent.Leaf.Meta) == 0 {
- ent.Meta = nil // normalize
- }
- return &ent, nil
-}
-
-func (s *trieSlot) getRecord(key []byte) (rec *LeafRecord, err error) {
- slotCommitNum := atomic.LoadUint32(&s.commitNum)
- if slotCommitNum == 0 {
- // an empty slot always gives undetermined value.
- return &LeafRecord{}, nil
- }
-
- strKey := string(key)
- if cached, ok := s.cache.Get(strKey); ok {
- return cached.(*LeafRecord), nil
- }
-
- defer func() {
- if err == nil {
- s.cache.Add(strKey, rec)
- }
- }()
-
- ent, err := s.getEntity(key)
- if err != nil {
- return nil, err
- }
-
- if ent == nil { // never seen
- return &LeafRecord{
- Leaf: &trie.Leaf{},
- CommitNum: 0,
- SlotCommitNum: slotCommitNum,
- }, nil
- }
-
- if slotCommitNum < ent.CommitNum {
- slotCommitNum = ent.CommitNum
- }
-
- return &LeafRecord{
- Leaf: ent.Leaf,
- CommitNum: ent.CommitNum,
- SlotCommitNum: slotCommitNum,
- }, nil
-}
-
-// LeafBank records accumulated trie leaves to help accelerate trie leaf access
-// according to VIP-212.
-type LeafBank struct {
- store kv.Store
- space byte
- slots *lru.ARCCache
-}
-
-// NewLeafBank creates a new LeafBank instance.
-// The slotCap indicates the capacity of cached per-trie slots.
-func NewLeafBank(store kv.Store, space byte, slotCap int) *LeafBank {
- b := &LeafBank{store: store, space: space}
- b.slots, _ = lru.NewARC(slotCap)
- return b
-}
-
-func (b *LeafBank) slotBucket(name string) kv.Bucket {
- return kv.Bucket(string(b.space) + entityPrefix + name)
-}
-
-func (b *LeafBank) deletionJournalBucket(name string) kv.Bucket {
- return kv.Bucket(string(b.space) + deletionJournalPrefix + name)
-}
-
-// getSlot gets slot from slots cache or create a new one.
-func (b *LeafBank) getSlot(name string) (*trieSlot, error) {
- if cached, ok := b.slots.Get(name); ok {
- return cached.(*trieSlot), nil
- }
-
- slot := &trieSlot{getter: b.slotBucket(name).NewGetter(b.store)}
- if data, err := slot.getter.Get(nil); err != nil {
- if !slot.getter.IsNotFound(err) {
- return nil, errors.Wrap(err, "get slot from leafbank")
- }
- } else {
- slot.commitNum = binary.BigEndian.Uint32(data)
- }
-
- slot.cache, _ = lru.New(slotCacheSize)
- b.slots.Add(name, slot)
- return slot, nil
-}
-
-// Lookup lookups a leaf record by the given leafKey for the trie named by name.
-// LeafRecord.Leaf might be nil if the leaf can't be determined.
-func (b *LeafBank) Lookup(name string, leafKey []byte) (rec *LeafRecord, err error) {
- slot, err := b.getSlot(name)
- if err != nil {
- return nil, err
- }
- return slot.getRecord(leafKey)
-}
-
-// LogDeletions saves the journal of leaf-key deletions which issued by one trie-commit.
-func (b *LeafBank) LogDeletions(putter kv.Putter, name string, keys []string, commitNum uint32) error {
- if len(keys) == 0 {
- return nil
- }
-
- bkt := b.deletionJournalBucket(name) + kv.Bucket(appendUint32(nil, commitNum))
- putter = bkt.NewPutter(putter)
- for _, k := range keys {
- if err := putter.Put([]byte(k), nil); err != nil {
- return err
- }
- }
- return nil
-}
-
-// NewUpdater creates a leaf-updater for a trie slot with the given name.
-func (b *LeafBank) NewUpdater(name string, baseCommitNum, targetCommitNum uint32) (*LeafUpdater, error) {
- slot, err := b.getSlot(name)
- if err != nil {
- return nil, err
- }
-
- bulk := b.slotBucket(name).
- NewStore(b.store).
- Bulk()
- bulk.EnableAutoFlush()
-
- // traverse the deletion-journal and write to the slot
- iter := b.deletionJournalBucket(name).
- NewStore(b.store).
- Iterate(kv.Range{
- Start: appendUint32(nil, baseCommitNum),
- Limit: appendUint32(nil, targetCommitNum+1),
- })
- defer iter.Release()
- for iter.Next() {
- // skip commit number to get leaf key
- leafKey := iter.Key()[4:]
- // put empty value to mark the leaf to undetermined state
- if err := bulk.Put(leafKey, encodedEmptyLeafEntity); err != nil {
- return nil, err
- }
- }
- if err := iter.Error(); err != nil {
- return nil, err
- }
-
- return &LeafUpdater{
- slot: slot,
- bulk: bulk,
- targetCommitNum: targetCommitNum,
- }, nil
-}
-
-// LeafUpdater helps to record trie leaves.
-type LeafUpdater struct {
- slot *trieSlot
- bulk kv.Bulk
- targetCommitNum uint32
-}
-
-// Update updates the leaf for the given key.
-func (u *LeafUpdater) Update(leafKey []byte, leaf *trie.Leaf, leafCommitNum uint32) error {
- ent := &leafEntity{
- Leaf: leaf,
- CommitNum: leafCommitNum,
- }
- data, err := rlp.EncodeToBytes(ent)
- if err != nil {
- return err
- }
-
- return u.bulk.Put(leafKey, data)
-}
-
-// Commit commits updates into leafbank.
-func (u *LeafUpdater) Commit() error {
- // save slot commit number
- if err := u.bulk.Put(nil, appendUint32(nil, u.targetCommitNum)); err != nil {
- return err
- }
- if err := u.bulk.Write(); err != nil {
- return err
- }
- atomic.StoreUint32(&u.slot.commitNum, u.targetCommitNum)
- return nil
-}
diff --git a/muxdb/internal/trie/leaf_bank_test.go b/muxdb/internal/trie/leaf_bank_test.go
deleted file mode 100644
index a3b8ebde6..000000000
--- a/muxdb/internal/trie/leaf_bank_test.go
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright (c) 2021 The VeChainThor developers
-
-// Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying
-// file LICENSE or
-
-package trie
-
-import (
- "strconv"
- "testing"
-
- "github.com/stretchr/testify/assert"
- "github.com/vechain/thor/v2/trie"
-)
-
-func TestLeafbank(t *testing.T) {
- engine := newEngine()
- space := byte(2)
- slotCap := 10
- lb := NewLeafBank(engine, space, slotCap)
- name := "the trie"
-
- t.Run("empty state", func(t *testing.T) {
- key := []byte("key")
- rec, err := lb.Lookup(name, key)
- assert.NoError(t, err)
- assert.Equal(t, &LeafRecord{}, rec)
- })
-
- t.Run("update and lookup", func(t *testing.T) {
- u, err := lb.NewUpdater(name, 0, 100)
- assert.Nil(t, err)
- for i := 0; i < 10; i++ {
- if err := u.Update([]byte(strconv.Itoa(i)), &trie.Leaf{Value: []byte(strconv.Itoa(i))}, 10); err != nil {
- t.Fatal(err)
- }
- }
- if err := u.Commit(); err != nil {
- t.Fatal(err)
- }
-
- for i := 0; i < 10; i++ {
- rec, err := lb.Lookup(name, []byte(strconv.Itoa(i)))
- assert.NoError(t, err)
- assert.Equal(t, &LeafRecord{
- Leaf: &trie.Leaf{Value: []byte(strconv.Itoa(i))},
- CommitNum: 10,
- SlotCommitNum: 100,
- }, rec)
- }
- })
-
- t.Run("lookup never seen", func(t *testing.T) {
- rec, err := lb.Lookup(name, []byte(strconv.Itoa(11)))
- assert.NoError(t, err)
-
- assert.Equal(t, &LeafRecord{Leaf: &trie.Leaf{}, SlotCommitNum: 100}, rec)
- })
-
- t.Run("lookup deleted", func(t *testing.T) {
- // mark
- err := lb.LogDeletions(engine, name, []string{strconv.Itoa(1)}, 101)
- assert.Nil(t, err)
-
- u, err := lb.NewUpdater(name, 100, 101)
- assert.Nil(t, err)
-
- err = u.Commit()
- assert.Nil(t, err)
-
- // recreate to drop cache
- lb = NewLeafBank(engine, space, slotCap)
-
- rec, err := lb.Lookup(name, []byte(strconv.Itoa(1)))
- assert.NoError(t, err)
- assert.Equal(t, &LeafRecord{SlotCommitNum: 101}, rec)
- })
-}
diff --git a/muxdb/internal/trie/trie.go b/muxdb/internal/trie/trie.go
deleted file mode 100644
index af58fc78f..000000000
--- a/muxdb/internal/trie/trie.go
+++ /dev/null
@@ -1,456 +0,0 @@
-// Copyright (c) 2021 The VeChainThor developers
-
-// Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying
-// file LICENSE or
-
-package trie
-
-import (
- "context"
-
- "github.com/pkg/errors"
- "github.com/vechain/thor/v2/kv"
- "github.com/vechain/thor/v2/log"
- "github.com/vechain/thor/v2/thor"
- "github.com/vechain/thor/v2/trie"
-)
-
-var logger = log.WithContext("pkg", "muxdb.trie")
-
-// Backend is the backend of the trie.
-type Backend struct {
- Store kv.Store
- Cache *Cache
- LeafBank *LeafBank
- HistSpace,
- DedupedSpace byte
- HistPtnFactor,
- DedupedPtnFactor uint32
- CachedNodeTTL uint16
-}
-
-// sequence helps convert sequence number from/to commitNum & distinctNum.
-type sequence uint64
-
-func makeSequence(commitNum, distinctNum uint32) sequence {
- return sequence(commitNum) | (sequence(distinctNum) << 32)
-}
-
-func (s sequence) CommitNum() uint32 { return uint32(s) }
-func (s sequence) DistinctNum() uint32 { return uint32(s >> 32) }
-
-// Trie is the managed trie.
-type Trie struct {
- back *Backend
- name string
- ext *trie.ExtendedTrie
-
- dirty bool
- deletions []string
- noFillCache bool
- fastLeafGet func(nodeCommitNum uint32) (*trie.Leaf, error)
-}
-
-// New creates a managed trie.
-func New(
- back *Backend,
- name string,
- root thor.Bytes32,
- commitNum uint32,
- distinctNum uint32,
- nonCrypto bool,
-) *Trie {
- t := &Trie{
- back: back,
- name: name,
- }
-
- seq := makeSequence(commitNum, distinctNum)
- if rootNode, ok := back.Cache.GetRootNode(name, uint64(seq), false); ok {
- t.ext = trie.NewExtendedCached(rootNode, t.newDatabase(), nonCrypto)
- } else {
- t.ext = trie.NewExtended(root, uint64(seq), t.newDatabase(), nonCrypto)
- }
- t.ext.SetCacheTTL(t.back.CachedNodeTTL)
- return t
-}
-
-// Name returns the trie name.
-func (t *Trie) Name() string {
- return t.name
-}
-
-func (t *Trie) makeHistNodeKey(dst []byte, seq sequence, path []byte) []byte {
- commitNum, distinctNum := seq.CommitNum(), seq.DistinctNum()
- dst = append(dst, t.back.HistSpace) // space
- dst = appendUint32(dst, commitNum/t.back.HistPtnFactor) // partition id
- dst = append(dst, t.name...) // trie name
- dst = encodePath(dst, path) // path
- dst = appendUint32(dst, commitNum%t.back.HistPtnFactor) // commit num mod
- dst = appendUint32(dst, distinctNum) // distinct num
- return dst
-}
-
-func (t *Trie) makeDedupedNodeKey(dst []byte, seq sequence, path []byte) []byte {
- commitNum := seq.CommitNum()
- dst = append(dst, t.back.DedupedSpace) // space
- dst = appendUint32(dst, commitNum/t.back.DedupedPtnFactor) // partition id
- dst = append(dst, t.name...) // trie name
- dst = encodePath(dst, path) // path
- return dst
-}
-
-// newDatabase creates a database instance for low-level trie construction.
-func (t *Trie) newDatabase() trie.Database {
- var (
- thisHash []byte
- thisSeq sequence
- thisPath []byte
- keyBuf []byte
- )
-
- return &struct {
- trie.DatabaseReaderTo
- trie.DatabaseKeyEncoder
- trie.DatabaseReader
- trie.DatabaseWriter
- }{
- databaseGetToFunc(func(_ []byte, dst []byte) (blob []byte, err error) {
- // get from cache
- if blob = t.back.Cache.GetNodeBlob(t.name, thisSeq, thisPath, t.noFillCache, dst); len(blob) > 0 {
- return
- }
- defer func() {
- if err == nil && !t.noFillCache {
- t.back.Cache.AddNodeBlob(t.name, thisSeq, thisPath, blob, false)
- }
- }()
-
- // if cache missed, try fast leaf get
- if t.fastLeafGet != nil {
- if leaf, err := t.fastLeafGet(thisSeq.CommitNum()); err != nil {
- return nil, err
- } else if leaf != nil {
- // good, leaf got. returns a special error to short-circuit further node lookups.
- return nil, &leafAvailable{leaf}
- }
- }
-
- defer func() {
- if err == nil && !t.ext.IsNonCrypto() {
- // to ensure the node is correct, we need to verify the node hash.
- // TODO: later can skip this step
- if ok, err1 := trie.VerifyNodeHash(blob[len(dst):], thisHash); err1 != nil {
- err = errors.Wrap(err1, "verify node hash")
- } else if !ok {
- err = errors.New("node hash checksum error")
- }
- }
- }()
-
- // query in db
- snapshot := t.back.Store.Snapshot()
- defer snapshot.Release()
-
- // get from hist space first
- keyBuf = t.makeHistNodeKey(keyBuf[:0], thisSeq, thisPath)
- if val, err := snapshot.Get(keyBuf); err == nil {
- // found
- return append(dst, val...), nil
- } else if !snapshot.IsNotFound(err) {
- // error
- if !snapshot.IsNotFound(err) {
- return nil, err
- }
- }
-
- // then from deduped space
- keyBuf = t.makeDedupedNodeKey(keyBuf[:0], thisSeq, thisPath)
- if val, err := snapshot.Get(keyBuf); err == nil {
- return append(dst, val...), nil
- }
- return nil, err
- }),
- databaseKeyEncodeFunc(func(hash []byte, seq uint64, path []byte) []byte {
- thisHash = hash
- thisSeq = sequence(seq)
- thisPath = path
- return nil
- }),
- nil,
- nil,
- }
-}
-
-// Copy make a copy of this trie.
-func (t *Trie) Copy() *Trie {
- cpy := *t
- cpy.ext = trie.NewExtendedCached(t.ext.RootNode(), cpy.newDatabase(), t.ext.IsNonCrypto())
- cpy.ext.SetCacheTTL(cpy.back.CachedNodeTTL)
- cpy.fastLeafGet = nil
-
- if len(t.deletions) > 0 {
- cpy.deletions = append([]string(nil), t.deletions...)
- } else {
- cpy.deletions = nil
- }
- return &cpy
-}
-
-// Get returns the value for key stored in the trie.
-// The value bytes must not be modified by the caller.
-func (t *Trie) Get(key []byte) ([]byte, []byte, error) {
- return t.ext.Get(key)
-}
-
-// FastGet uses a fast way to query the value for key stored in the trie.
-// See VIP-212 for detail.
-func (t *Trie) FastGet(key []byte, steadyCommitNum uint32) ([]byte, []byte, error) {
- if t.back.LeafBank == nil {
- return t.ext.Get(key)
- }
-
- // setup fast leaf getter
- var leafRec *LeafRecord
- t.fastLeafGet = func(nodeCommitNum uint32) (*trie.Leaf, error) {
- // short circuit if the node is too new
- if nodeCommitNum > steadyCommitNum {
- return nil, nil
- }
- if leafRec == nil {
- var err error
- if leafRec, err = t.back.LeafBank.Lookup(t.name, key); err != nil {
- return nil, err
- }
- }
-
- // can't be determined
- if leafRec.Leaf == nil {
- return nil, nil
- }
-
- // if [nodeCN, steadyCN] and [leafCN, slotCN] have intersection,
- // the leaf will be the correct one.
- if nodeCommitNum <= leafRec.SlotCommitNum && leafRec.CommitNum <= steadyCommitNum {
- return leafRec.Leaf, nil
- }
- return nil, nil
- }
- defer func() { t.fastLeafGet = nil }()
-
- val, meta, err := t.ext.Get(key)
- if err != nil {
- if miss, ok := err.(*trie.MissingNodeError); ok {
- if la, ok := miss.Err.(*leafAvailable); ok {
- return la.Value, la.Meta, nil
- }
- }
- return nil, nil, err
- }
- return val, meta, nil
-}
-
-// Update associates key with value in the trie. Subsequent calls to
-// Get will return value. If value has length zero, any existing value
-// is deleted from the trie and calls to Get will return nil.
-//
-// The value bytes must not be modified by the caller while they are
-// stored in the trie.
-func (t *Trie) Update(key, val, meta []byte) error {
- t.dirty = true
- if len(val) == 0 { // deletion
- if t.back.LeafBank != nil {
- t.deletions = append(t.deletions, string(key))
- }
- }
- return t.ext.Update(key, val, meta)
-}
-
-// Stage processes trie updates and calculates the new root hash.
-func (t *Trie) Stage(newCommitNum, newDistinctNum uint32) (root thor.Bytes32, commit func() error) {
- var (
- thisPath []byte
- bulk = t.back.Store.Bulk()
- buf []byte
- )
-
- // make a copy of the original trie to perform commit.
- // so later if real commit is discarded, the original trie will be in
- // correct state.
- extCpy := *t.ext
- newSeq := makeSequence(newCommitNum, newDistinctNum)
-
- db := &struct {
- trie.DatabaseWriter
- trie.DatabaseKeyEncoder
- }{
- kv.PutFunc(func(_, blob []byte) error {
- buf = t.makeHistNodeKey(buf[:0], newSeq, thisPath)
- if err := bulk.Put(buf, blob); err != nil {
- return err
- }
- if !t.noFillCache {
- t.back.Cache.AddNodeBlob(t.name, newSeq, thisPath, blob, true)
- }
- return nil
- }),
- databaseKeyEncodeFunc(func(_ []byte, _ uint64, path []byte) []byte {
- thisPath = path
- return nil
- }),
- }
-
- // commit the copied trie without flush to db
- root, err := extCpy.CommitTo(db, uint64(newSeq))
- if err != nil {
- return root, func() error { return err }
- }
-
- commit = func() error {
- if t.back.LeafBank != nil {
- if err := t.back.LeafBank.LogDeletions(bulk, t.name, t.deletions, newCommitNum); err != nil {
- return err
- }
- }
- // real-commit, flush to db
- if err := bulk.Write(); err != nil {
- return err
- }
-
- t.dirty = false
- t.deletions = t.deletions[:0]
-
- // replace with the new root node after the copied trie committed
- newRootNode := extCpy.RootNode()
- t.ext.SetRootNode(newRootNode)
- if !t.noFillCache {
- t.back.Cache.AddRootNode(t.name, newRootNode)
- }
- return nil
- }
- return
-}
-
-// NodeIterator returns an iterator that returns nodes of the trie. Iteration starts at
-// the key after the given start key
-func (t *Trie) NodeIterator(start []byte, baseCommitNum uint32) trie.NodeIterator {
- return t.ext.NodeIterator(start, func(seq uint64) bool {
- return sequence(seq).CommitNum() >= baseCommitNum
- })
-}
-
-// SetNoFillCache enable or disable cache filling.
-func (t *Trie) SetNoFillCache(b bool) {
- t.noFillCache = b
-}
-
-// DumpLeaves dumps leaves in the range of [baseCommitNum, targetCommitNum] into leaf bank.
-// transform transforms leaves before passing into leaf bank.
-func (t *Trie) DumpLeaves(ctx context.Context, baseCommitNum, targetCommitNum uint32, transform func(*trie.Leaf) *trie.Leaf) error {
- if t.dirty {
- return errors.New("dirty trie")
- }
- if t.back.LeafBank == nil {
- return nil
- }
-
- leafUpdater, err := t.back.LeafBank.NewUpdater(t.name, baseCommitNum, targetCommitNum)
- if err != nil {
- return err
- }
- var (
- checkContext = newContextChecker(ctx, 5000)
- iter = t.NodeIterator(nil, baseCommitNum)
- )
-
- for iter.Next(true) {
- if err := checkContext(); err != nil {
- return err
- }
-
- if leaf := iter.Leaf(); leaf != nil {
- seq := sequence(iter.SeqNum())
- if err := leafUpdater.Update(iter.LeafKey(), transform(leaf), seq.CommitNum()); err != nil {
- return err
- }
- }
- }
- if err := iter.Error(); err != nil {
- return err
- }
- return leafUpdater.Commit()
-}
-
-// DumpNodes dumps referenced nodes committed within [baseCommitNum, thisCommitNum], into the deduped space.
-func (t *Trie) DumpNodes(ctx context.Context, baseCommitNum uint32, handleLeaf func(*trie.Leaf)) error {
- if t.dirty {
- return errors.New("dirty trie")
- }
- var (
- checkContext = newContextChecker(ctx, 5000)
- bulk = t.back.Store.Bulk()
- iter = t.NodeIterator(nil, baseCommitNum)
- buf []byte
- )
- bulk.EnableAutoFlush()
-
- for iter.Next(true) {
- if err := checkContext(); err != nil {
- return err
- }
-
- if err := iter.Node(func(blob []byte) error {
- buf = t.makeDedupedNodeKey(buf[:0], sequence(iter.SeqNum()), iter.Path())
- return bulk.Put(buf, blob)
- }); err != nil {
- return err
- }
- if handleLeaf != nil {
- if leaf := iter.Leaf(); leaf != nil {
- handleLeaf(leaf)
- }
- }
- }
- if err := iter.Error(); err != nil {
- return err
- }
- return bulk.Write()
-}
-
-// CleanHistory cleans history nodes within [startCommitNum, limitCommitNum).
-func CleanHistory(ctx context.Context, back *Backend, startCommitNum, limitCommitNum uint32) error {
- startPtn := startCommitNum / back.HistPtnFactor
- limitPtn := limitCommitNum / back.HistPtnFactor
- // preserve ptn 0 to make genesis state always visitable
- if startPtn == 0 {
- startPtn = 1
- }
-
- return back.Store.DeleteRange(ctx, kv.Range{
- Start: appendUint32([]byte{back.HistSpace}, startPtn),
- Limit: appendUint32([]byte{back.HistSpace}, limitPtn),
- })
-}
-
-// individual functions of trie database interface.
-type (
- databaseKeyEncodeFunc func(hash []byte, seq uint64, path []byte) []byte
- databaseGetToFunc func(key, dst []byte) ([]byte, error)
-)
-
-func (f databaseKeyEncodeFunc) Encode(hash []byte, seq uint64, path []byte) []byte {
- return f(hash, seq, path)
-}
-
-func (f databaseGetToFunc) GetTo(key, dst []byte) ([]byte, error) {
- return f(key, dst)
-}
-
-// leafAvailable is a special error type to short circuit trie get method.
-type leafAvailable struct {
- *trie.Leaf
-}
-
-func (*leafAvailable) Error() string {
- return "leaf available"
-}
diff --git a/muxdb/internal/trie/trie_test.go b/muxdb/internal/trie/trie_test.go
deleted file mode 100644
index d8ce9077c..000000000
--- a/muxdb/internal/trie/trie_test.go
+++ /dev/null
@@ -1,122 +0,0 @@
-// Copyright (c) 2021 The VeChainThor developers
-
-// Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying
-// file LICENSE or
-
-package trie
-
-import (
- "context"
- "strconv"
- "testing"
-
- "github.com/stretchr/testify/assert"
- "github.com/syndtr/goleveldb/leveldb"
- "github.com/syndtr/goleveldb/leveldb/storage"
- "github.com/vechain/thor/v2/muxdb/internal/engine"
- "github.com/vechain/thor/v2/thor"
- "github.com/vechain/thor/v2/trie"
-)
-
-func newEngine() engine.Engine {
- db, _ := leveldb.Open(storage.NewMemStorage(), nil)
- return engine.NewLevelEngine(db)
-}
-
-func newBackend() *Backend {
- engine := newEngine()
- return &Backend{
- Store: engine,
- Cache: nil,
- LeafBank: NewLeafBank(engine, 2, 100),
- HistSpace: 0,
- DedupedSpace: 1,
- HistPtnFactor: 1,
- DedupedPtnFactor: 1,
- CachedNodeTTL: 100,
- }
-}
-
-func TestTrie(t *testing.T) {
- name := "the trie"
-
- t.Run("basic", func(t *testing.T) {
- back := newBackend()
- tr := New(back, name, thor.Bytes32{}, 0, 0, false)
- assert.Equal(t, name, tr.Name())
-
- assert.False(t, tr.dirty)
-
- key := []byte("key")
- val := []byte("value")
- tr.Update(key, val, nil)
- assert.True(t, tr.dirty)
-
- _val, _, _ := tr.Get(key)
- assert.Equal(t, val, _val)
- })
-
- t.Run("hash root", func(t *testing.T) {
- back := newBackend()
- tr := New(back, name, thor.Bytes32{}, 0, 0, false)
-
- _tr := new(trie.Trie)
-
- for i := 0; i < 100; i++ {
- for j := 0; j < 100; j++ {
- key := []byte(strconv.Itoa(i) + "_" + strconv.Itoa(j))
- val := []byte("v" + strconv.Itoa(j) + "_" + strconv.Itoa(i))
- tr.Update(key, val, nil)
- _tr.Update(key, val)
- }
- h, _ := tr.Stage(0, 0)
- assert.Equal(t, _tr.Hash(), h)
- }
- })
-
- t.Run("fast get", func(t *testing.T) {
- back := newBackend()
- tr := New(back, name, thor.Bytes32{}, 0, 0, false)
-
- var roots []thor.Bytes32
- for i := 0; i < 100; i++ {
- for j := 0; j < 100; j++ {
- key := []byte(strconv.Itoa(i) + "_" + strconv.Itoa(j))
- val := []byte("v" + strconv.Itoa(j) + "_" + strconv.Itoa(i))
- tr.Update(key, val, nil)
- }
- root, commit := tr.Stage(uint32(i), 0)
- if err := commit(); err != nil {
- t.Fatal(err)
- }
-
- roots = append(roots, root)
- }
-
- tr = New(back, name, roots[10], 10, 0, false)
-
- if err := tr.DumpLeaves(context.Background(), 0, 10, func(l *trie.Leaf) *trie.Leaf {
- return &trie.Leaf{
- Value: l.Value,
- Meta: []byte("from lb"),
- }
- }); err != nil {
- t.Fatal(err)
- }
-
- for i := 0; i < 10; i++ {
- for j := 0; j < 100; j++ {
- key := []byte(strconv.Itoa(i) + "_" + strconv.Itoa(j))
- val := []byte("v" + strconv.Itoa(j) + "_" + strconv.Itoa(i))
-
- _val, _meta, err := tr.FastGet(key, 10)
- if err != nil {
- t.Fatal(err)
- }
-
- assert.Equal(t, val, _val)
- assert.Equal(t, []byte("from lb"), _meta)
- }
- }
- })
-}
diff --git a/muxdb/internal/trie/util.go b/muxdb/internal/trie/util.go
deleted file mode 100644
index 6f4f344af..000000000
--- a/muxdb/internal/trie/util.go
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright (c) 2021 The VeChainThor developers
-
-// Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying
-// file LICENSE or
-
-package trie
-
-import (
- "context"
- "fmt"
- "math"
- "sync"
-)
-
-// encodePath encodes the path into compact form.
-func encodePath(dst []byte, path []byte) []byte {
- d := len(path)
- s := d / 4
- if s > math.MaxUint8 {
- panic(fmt.Errorf("unexpected length of path: %v", d))
- }
- // the prefix s is to split the trie into sub tries with depth 4.
- dst = append(dst, byte(s))
-
- // further on, a sub trie is divided to depth-2 sub tries.
- for i := 0; ; i += 4 {
- switch d - i {
- case 0:
- return append(dst, 0)
- case 1:
- return append(dst, (path[i]<<3)|1)
- case 2:
- t := (uint16(path[i]) << 4) | uint16(path[i+1])
- return appendUint16(dst, 0x8000|(t<<7))
- case 3:
- t := (uint16(path[i]) << 8) | (uint16(path[i+1]) << 4) | uint16(path[i+2])
- return appendUint16(dst, 0x8000|(t<<3)|1)
- default:
- dst = append(dst, (path[i]<<4)|path[i+1], (path[i+2]<<4)|path[i+3])
- }
- }
-}
-
-func appendUint32(b []byte, v uint32) []byte {
- return append(b,
- byte(v>>24),
- byte(v>>16),
- byte(v>>8),
- byte(v),
- )
-}
-
-func appendUint16(b []byte, v uint16) []byte {
- return append(b,
- byte(v>>8),
- byte(v),
- )
-}
-
-// newContextChecker creates a debounced context checker.
-func newContextChecker(ctx context.Context, debounce int) func() error {
- count := 0
- return func() error {
- count++
- if count > debounce {
- count = 0
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
- }
- return nil
- }
-}
-
-type buffer struct {
- buf []byte
-}
-
-var bufferPool = sync.Pool{
- New: func() interface{} {
- return &buffer{}
- },
-}
diff --git a/muxdb/internal/trie/util_test.go b/muxdb/internal/trie/util_test.go
deleted file mode 100644
index f6a8c14fb..000000000
--- a/muxdb/internal/trie/util_test.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright (c) 2021 The VeChainThor developers
-
-// Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying
-// file LICENSE or
-
-package trie
-
-import (
- "reflect"
- "testing"
-)
-
-func Test_encodePath(t *testing.T) {
- tests := []struct {
- path []byte
- want []byte
- }{
- {[]byte{}, []byte{0, 0}},
- {[]byte{8}, []byte{0, (8 << 3) | 1}},
- {[]byte{8, 9}, []byte{0, 0x80 | (8 << 3) | (9 >> 1), 0x80}},
- {[]byte{8, 9, 0xa}, []byte{0, 0xc4, 0x80 | (0xa << 3) | 1}},
- {[]byte{8, 9, 0xa, 0xb}, []byte{1, 0x89, 0xab, 0}},
- {[]byte{8, 9, 0xa, 0xb, 0xc}, []byte{1, 0x89, 0xab, (0xc << 3) | 1}},
- {[]byte{8, 9, 0xa, 0xb, 0xc, 0xd}, []byte{1, 0x89, 0xab, 0x80 | (0xc << 3) | (0xd >> 1), 0x80}},
- {[]byte{8, 9, 0xa, 0xb, 0xc, 0xd, 0xe}, []byte{1, 0x89, 0xab, 0x80 | (0xc << 3) | (0xd >> 1), 0x80 | (0xe << 3) | 1}},
- }
- for _, tt := range tests {
- if got := encodePath(nil, tt.path); !reflect.DeepEqual(got, tt.want) {
- t.Errorf("encodePath() = %v, want %v", got, tt.want)
- }
- }
-}
diff --git a/muxdb/metrics.go b/muxdb/metrics.go
new file mode 100644
index 000000000..ec0ad0707
--- /dev/null
+++ b/muxdb/metrics.go
@@ -0,0 +1,7 @@
+package muxdb
+
+import (
+ "github.com/vechain/thor/v2/metrics"
+)
+
+var metricCacheHitMissGaugeVec = metrics.LazyLoadGaugeVec("cache_hit_miss_count", []string{"type", "event"})
diff --git a/muxdb/muxdb.go b/muxdb/muxdb.go
index 1b0da3971..a0f00ae2c 100644
--- a/muxdb/muxdb.go
+++ b/muxdb/muxdb.go
@@ -17,16 +17,15 @@ import (
"github.com/syndtr/goleveldb/leveldb/opt"
"github.com/syndtr/goleveldb/leveldb/storage"
"github.com/vechain/thor/v2/kv"
- "github.com/vechain/thor/v2/muxdb/internal/engine"
- "github.com/vechain/thor/v2/muxdb/internal/trie"
- "github.com/vechain/thor/v2/thor"
+ "github.com/vechain/thor/v2/log"
+ "github.com/vechain/thor/v2/muxdb/engine"
+ "github.com/vechain/thor/v2/trie"
)
const (
- trieHistSpace = byte(0) // the key space for historical trie nodes.
- trieDedupedSpace = byte(1) // the key space for deduped trie nodes.
- trieLeafBankSpace = byte(2) // the key space for the trie leaf bank.
- namedStoreSpace = byte(3) // the key space for named store.
+ trieHistSpace = byte(0) // the key space for historical trie nodes.
+ trieDedupedSpace = byte(1) // the key space for deduped trie nodes.
+ namedStoreSpace = byte(2) // the key space for named store.
)
const (
@@ -34,19 +33,14 @@ const (
configKey = "config"
)
-// Trie is the managed trie.
-type Trie = trie.Trie
+var logger = log.WithContext("pkg", "muxdb")
// Options optional parameters for MuxDB.
type Options struct {
// TrieNodeCacheSizeMB is the size of the cache for trie node blobs.
TrieNodeCacheSizeMB int
- // TrieRootCacheCapacity is the capacity of the cache for trie root nodes.
- TrieRootCacheCapacity int
// TrieCachedNodeTTL defines the life time(times of commit) of cached trie nodes.
TrieCachedNodeTTL uint16
- // TrieLeafBankSlotCapacity defines max count of cached slot for leaf bank.
- TrieLeafBankSlotCapacity int
// TrieHistPartitionFactor is the partition factor for historical trie nodes.
TrieHistPartitionFactor uint32
// TrieDedupedPartitionFactor is the partition factor for deduped trie nodes.
@@ -65,7 +59,7 @@ type Options struct {
// MuxDB is the database to efficiently store state trie and block-chain data.
type MuxDB struct {
engine engine.Engine
- trieBackend *trie.Backend
+ trieBackend *backend
}
// Open opens or creates DB at the given path.
@@ -109,23 +103,13 @@ func Open(path string, options *Options) (*MuxDB, error) {
return nil, err
}
- trieCache := trie.NewCache(
- options.TrieNodeCacheSizeMB,
- options.TrieRootCacheCapacity)
-
- trieLeafBank := trie.NewLeafBank(
- engine,
- trieLeafBankSpace,
- options.TrieLeafBankSlotCapacity)
-
return &MuxDB{
engine: engine,
- trieBackend: &trie.Backend{
- Store: engine,
- Cache: trieCache,
- LeafBank: trieLeafBank,
- HistSpace: trieHistSpace,
- DedupedSpace: trieDedupedSpace,
+ trieBackend: &backend{
+ Store: engine,
+ Cache: newCache(
+ options.TrieNodeCacheSizeMB,
+ uint32(options.TrieCachedNodeTTL)),
HistPtnFactor: cfg.HistPtnFactor,
DedupedPtnFactor: cfg.DedupedPtnFactor,
CachedNodeTTL: options.TrieCachedNodeTTL,
@@ -141,12 +125,9 @@ func NewMem() *MuxDB {
engine := engine.NewLevelEngine(ldb)
return &MuxDB{
engine: engine,
- trieBackend: &trie.Backend{
+ trieBackend: &backend{
Store: engine,
- Cache: nil,
- LeafBank: nil,
- HistSpace: trieHistSpace,
- DedupedSpace: trieDedupedSpace,
+ Cache: &dummyCache{},
HistPtnFactor: 1,
DedupedPtnFactor: 1,
CachedNodeTTL: 32,
@@ -160,38 +141,18 @@ func (db *MuxDB) Close() error {
}
// NewTrie creates trie with existing root node.
-//
-// If root is zero or blake2b hash of an empty string, the trie is
-// initially empty.
-func (db *MuxDB) NewTrie(name string, root thor.Bytes32, commitNum, distinctNum uint32) *Trie {
- return trie.New(
- db.trieBackend,
+// If root is zero value, the trie is initially empty.
+func (db *MuxDB) NewTrie(name string, root trie.Root) *Trie {
+ return newTrie(
name,
- root,
- commitNum,
- distinctNum,
- false,
- )
-}
-
-// NewNonCryptoTrie creates non-crypto trie with existing root node.
-//
-// If root is zero or blake2b hash of an empty string, the trie is
-// initially empty.
-func (db *MuxDB) NewNonCryptoTrie(name string, root thor.Bytes32, commitNum, distinctNum uint32) *Trie {
- return trie.New(
db.trieBackend,
- name,
root,
- commitNum,
- distinctNum,
- true,
)
}
-// CleanTrieHistory clean trie history within [startCommitNum, limitCommitNum).
-func (db *MuxDB) CleanTrieHistory(ctx context.Context, startCommitNum, limitCommitNum uint32) error {
- return trie.CleanHistory(ctx, db.trieBackend, startCommitNum, limitCommitNum)
+// DeleteTrieHistoryNodes deletes trie history nodes within partitions of [startMajorVer, limitMajorVer).
+func (db *MuxDB) DeleteTrieHistoryNodes(ctx context.Context, startMajorVer, limitMajorVer uint32) error {
+ return db.trieBackend.DeleteHistoryNodes(ctx, startMajorVer, limitMajorVer)
}
// NewStore creates named kv-store.
diff --git a/muxdb/muxdb_test.go b/muxdb/muxdb_test.go
new file mode 100644
index 000000000..e0ecae072
--- /dev/null
+++ b/muxdb/muxdb_test.go
@@ -0,0 +1,144 @@
+// Copyright (c) 2024 The VeChainThor developers
+
+// Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying
+// file LICENSE or
+
+package muxdb
+
+import (
+ "context"
+ "math"
+ "os"
+ "path/filepath"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/vechain/thor/v2/trie"
+)
+
+func TestMuxdb(t *testing.T) {
+ var err error
+ db := NewMem()
+ db.Close()
+
+ dir := os.TempDir()
+
+ opts := Options{
+ TrieNodeCacheSizeMB: 128,
+ TrieCachedNodeTTL: 30, // 5min
+ TrieDedupedPartitionFactor: math.MaxUint32,
+ TrieWillCleanHistory: true,
+ OpenFilesCacheCapacity: 512,
+ ReadCacheMB: 256, // rely on the OS page cache rather than a huge db read cache.
+ WriteBufferMB: 128,
+ TrieHistPartitionFactor: 1000,
+ }
+ path := filepath.Join(dir, "main.db")
+ db, err = Open(path, &opts)
+ assert.Nil(t, err)
+
+ err = db.Close()
+ assert.Nil(t, err)
+
+ os.RemoveAll(path)
+}
+
+func TestStore(t *testing.T) {
+ db := NewMem()
+
+ store := db.NewStore("test")
+ key := []byte("key")
+ val := []byte("val")
+
+ store.Put(key, val)
+ v, err := store.Get(key)
+ assert.Nil(t, err)
+ assert.Equal(t, val, v)
+
+ store.Delete(key)
+ _, err = store.Get(key)
+ assert.True(t, db.IsNotFound(err))
+
+ db.Close()
+}
+
+func TestMuxdbTrie(t *testing.T) {
+ var err error
+ db := NewMem()
+
+ tr := db.NewTrie("test", trie.Root{})
+ tr.SetNoFillCache(true)
+ key := []byte("key")
+ val1 := []byte("val")
+ val2 := []byte("val2")
+
+ ver1 := trie.Version{Major: 1, Minor: 0}
+ ver2 := trie.Version{Major: 100, Minor: 0}
+ ver3 := trie.Version{Major: 101, Minor: 0}
+
+ err = tr.Update(key, val1, nil)
+ assert.Nil(t, err)
+ err = tr.Commit(ver1, false)
+ assert.Nil(t, err)
+
+ root1 := tr.Hash()
+ tr1 := db.NewTrie("test", trie.Root{Hash: root1, Ver: ver1})
+ tr1.SetNoFillCache(true)
+ v, _, err := tr1.Get(key)
+ assert.Nil(t, err)
+ assert.Equal(t, val1, v)
+
+ tr1.Update(key, val2, nil)
+ err = tr1.Commit(ver2, false)
+ assert.Nil(t, err)
+ root2 := tr1.Hash()
+
+ tr2 := db.NewTrie("test", trie.Root{Hash: root2, Ver: ver2})
+ tr2.SetNoFillCache(true)
+ v, _, err = tr2.Get(key)
+ assert.Nil(t, err)
+ assert.Equal(t, val2, v)
+
+ err = tr2.Commit(ver3, false)
+ assert.Nil(t, err)
+ root3 := tr2.Hash()
+
+ // prune trie history within [0, ver3)
+ xtr := db.NewTrie("test", trie.Root{Hash: root2, Ver: ver2})
+ err = xtr.Checkpoint(context.Background(), 0, nil)
+ assert.Nil(t, err)
+ err = db.DeleteTrieHistoryNodes(context.Background(), 0, ver3.Major)
+ assert.Nil(t, err)
+
+ // after deleting history nodes, the history nodes should be gone
+ path := []byte{}
+
+ histKey := xtr.back.AppendHistNodeKey(nil, "test", path, ver1)
+ _, err = xtr.back.Store.Get(histKey)
+ assert.True(t, db.IsNotFound(err))
+
+ histKey = xtr.back.AppendHistNodeKey(nil, "test", path, ver2)
+ _, err = xtr.back.Store.Get(histKey)
+ assert.True(t, db.IsNotFound(err))
+
+ histKey = xtr.back.AppendHistNodeKey(nil, "test", path, ver3)
+ _, err = xtr.back.Store.Get(histKey)
+ assert.Nil(t, err)
+
+ dedupedKey := xtr.back.AppendDedupedNodeKey(nil, "test", path, ver2)
+ blob, err := xtr.back.Store.Get(dedupedKey)
+ assert.Nil(t, err)
+ assert.NotNil(t, blob)
+
+ tr4 := db.NewTrie("test", trie.Root{Hash: root2, Ver: ver2})
+ v, _, err = tr4.Get(key)
+ assert.Nil(t, err)
+ assert.Equal(t, val2, v)
+
+ tr5 := db.NewTrie("test", trie.Root{Hash: root3, Ver: ver3})
+ v, _, err = tr5.Get(key)
+ assert.Nil(t, err)
+ assert.Equal(t, val2, v)
+
+ db.Close()
+}
diff --git a/muxdb/trie.go b/muxdb/trie.go
new file mode 100644
index 000000000..f0da76e5e
--- /dev/null
+++ b/muxdb/trie.go
@@ -0,0 +1,227 @@
+// Copyright (c) 2021 The VeChainThor developers
+
+// Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying
+// file LICENSE or
+
+package muxdb
+
+import (
+ "context"
+
+ "github.com/vechain/thor/v2/thor"
+ "github.com/vechain/thor/v2/trie"
+)
+
+// Trie is the managed trie.
+//
+// It wraps a low-level trie.Trie and wires it to the muxdb backend:
+// node reads go through the cache and the hist/deduped key spaces,
+// node writes (see Commit) go into the hist space.
+type Trie struct {
+	name string
+	back *backend
+	trie *trie.Trie
+	noFillCache bool
+}
+
+// newTrie creates a managed trie.
+//
+// If the cache holds a root node for (name, root.Ver), the trie is rebuilt
+// from it, avoiding a store read for the root; otherwise a fresh trie is
+// opened at the given root.
+func newTrie(
+	name string,
+	back *backend,
+	root trie.Root,
+) *Trie {
+	t := &Trie{
+		name: name,
+		back: back,
+	}
+
+	if rn := back.Cache.GetRootNode(name, root.Ver); rn != nil {
+		t.trie = trie.FromRootNode(rn, t.newDatabaseReader())
+	} else {
+		t.trie = trie.New(root, t.newDatabaseReader())
+	}
+	t.trie.SetCacheTTL(back.CachedNodeTTL)
+	return t
+}
+
+// newDatabaseReader creates a database reader for low-level trie construction.
+//
+// Lookup order for a node blob: cache, then the hist space, then the
+// deduped space. keyBuf is captured by the closure and reused across
+// calls to avoid a key allocation per read.
+func (t *Trie) newDatabaseReader() trie.DatabaseReader {
+	var keyBuf []byte
+
+	return &struct {
+		trie.DatabaseReader
+	}{
+		databaseGetFunc(func(path []byte, ver trie.Version) (blob []byte, err error) {
+			// get from cache
+			if blob = t.back.Cache.GetNodeBlob(&keyBuf, t.name, path, ver, t.noFillCache); len(blob) > 0 {
+				return
+			}
+			// on a successful db read, back-fill the cache (unless disabled)
+			defer func() {
+				if err == nil && !t.noFillCache {
+					t.back.Cache.AddNodeBlob(&keyBuf, t.name, path, ver, blob, false)
+				}
+			}()
+
+			// query in db; a snapshot keeps the two-space lookup consistent
+			snapshot := t.back.Store.Snapshot()
+			defer snapshot.Release()
+
+			// get from hist space first
+			keyBuf = t.back.AppendHistNodeKey(keyBuf[:0], t.name, path, ver)
+			if blob, err = snapshot.Get(keyBuf); err != nil {
+				if !snapshot.IsNotFound(err) {
+					// unexpected store error
+					return
+				}
+				// not found in hist space — fall through to deduped space
+			} else {
+				// found in hist space
+				return
+			}
+
+			// then from deduped space
+			keyBuf = t.back.AppendDedupedNodeKey(keyBuf[:0], t.name, path, ver)
+			return snapshot.Get(keyBuf)
+		}),
+	}
+}
+
+// Copy makes a copy of this trie.
+// The copy is rebuilt from the current root node with its own reader,
+// so it can be read and updated independently of the original.
+func (t *Trie) Copy() *Trie {
+	cpy := *t
+	cpy.trie = trie.FromRootNode(t.trie.RootNode(), cpy.newDatabaseReader())
+	cpy.trie.SetCacheTTL(t.back.CachedNodeTTL)
+	return &cpy
+}
+
+// Get returns the value and metadata for key stored in the trie.
+// The value bytes must not be modified by the caller.
+func (t *Trie) Get(key []byte) ([]byte, []byte, error) {
+	return t.trie.Get(key)
+}
+
+// Update associates key with value in the trie. Subsequent calls to
+// Get will return value. If value has length zero, any existing value
+// is deleted from the trie and calls to Get will return nil.
+//
+// meta is arbitrary metadata stored alongside the value.
+//
+// The value bytes must not be modified by the caller while they are
+// stored in the trie.
+func (t *Trie) Update(key, val, meta []byte) error {
+	return t.trie.Update(key, val, meta)
+}
+
+// Hash returns the root hash of the trie.
+// NOTE(review): presumably hashes any pending (uncommitted) updates —
+// confirm against trie.Trie.Hash.
+func (t *Trie) Hash() thor.Bytes32 {
+	return t.trie.Hash()
+}
+
+// Commit writes all nodes to the trie's database.
+//
+// Committing flushes nodes from memory.
+// Subsequent Get calls will load nodes from the database.
+// If skipHash is true, less disk space is taken up but crypto features of merkle trie lost.
+func (t *Trie) Commit(newVer trie.Version, skipHash bool) error {
+	var (
+		bulk = t.back.Store.Bulk()
+		keyBuf []byte
+	)
+
+	// writer that persists each node blob into the hist space and mirrors
+	// it into the cache (unless cache filling is disabled)
+	db := &struct{ trie.DatabaseWriter }{
+		databasePutFunc(func(path []byte, ver trie.Version, blob []byte) error {
+			keyBuf = t.back.AppendHistNodeKey(keyBuf[:0], t.name, path, ver)
+			if err := bulk.Put(keyBuf, blob); err != nil {
+				return err
+			}
+			if !t.noFillCache {
+				t.back.Cache.AddNodeBlob(&keyBuf, t.name, path, ver, blob, true)
+			}
+			return nil
+		}),
+	}
+
+	if err := t.trie.Commit(db, newVer, skipHash); err != nil {
+		return err
+	}
+
+	// flush all buffered puts to the store
+	if err := bulk.Write(); err != nil {
+		return err
+	}
+
+	// remember the new root node so the next newTrie call can skip a db read
+	if !t.noFillCache {
+		t.back.Cache.AddRootNode(t.name, t.trie.RootNode())
+	}
+	return nil
+}
+
+// NodeIterator returns an iterator that returns nodes of the trie. Iteration starts at
+// the key after the given start key.
+// baseMajorVer is passed down as the minimum version of interest to the
+// underlying iterator.
+func (t *Trie) NodeIterator(start []byte, baseMajorVer uint32) trie.NodeIterator {
+	return t.trie.NodeIterator(start, trie.Version{Major: baseMajorVer})
+}
+
+// SetNoFillCache enables or disables cache filling for reads and commits.
+func (t *Trie) SetNoFillCache(b bool) {
+	t.noFillCache = b
+}
+
+// Checkpoint transfers standalone nodes, whose major version within [baseMajorVer, thisMajorVer], into deduped space.
+// handleLeaf, if non-nil, is invoked for every leaf encountered during the walk.
+func (t *Trie) Checkpoint(ctx context.Context, baseMajorVer uint32, handleLeaf func(*trie.Leaf)) error {
+	var (
+		// poll ctx only once per 5000 iterations to keep the loop cheap
+		checkContext = newContextChecker(ctx, 5000)
+		bulk = t.back.Store.Bulk()
+		iter = t.NodeIterator(nil, baseMajorVer)
+		keyBuf []byte
+	)
+	// let the bulk writer flush periodically so memory stays bounded
+	bulk.EnableAutoFlush()
+
+	for iter.Next(true) {
+		if err := checkContext(); err != nil {
+			return err
+		}
+
+		blob, ver, err := iter.Blob()
+		if err != nil {
+			return err
+		}
+		// skip nodes with an empty blob — nothing to persist for them
+		if len(blob) > 0 {
+			keyBuf = t.back.AppendDedupedNodeKey(keyBuf[:0], t.name, iter.Path(), ver)
+			if err := bulk.Put(keyBuf, blob); err != nil {
+				return err
+			}
+		}
+		if handleLeaf != nil {
+			if leaf := iter.Leaf(); leaf != nil {
+				handleLeaf(leaf)
+			}
+		}
+	}
+	// the iterator may have stopped on an error rather than exhaustion
+	if err := iter.Error(); err != nil {
+		return err
+	}
+	return bulk.Write()
+}
+
+// individual functions of trie database interface.
+type (
+	databaseGetFunc func(path []byte, ver trie.Version) ([]byte, error)
+	databasePutFunc func(path []byte, ver trie.Version, value []byte) error
+)
+
+// Get implements the read side of the trie database interface by
+// delegating to the function itself.
+func (f databaseGetFunc) Get(path []byte, ver trie.Version) ([]byte, error) {
+	return f(path, ver)
+}
+
+// Put implements the write side of the trie database interface by
+// delegating to the function itself.
+func (f databasePutFunc) Put(path []byte, ver trie.Version, value []byte) error {
+	return f(path, ver, value)
+}
+
+// newContextChecker creates a debounced context checker.
+// The returned function reports ctx.Err() but polls ctx.Done() only once
+// per debounce calls, keeping cancellation checks cheap in tight loops.
+func newContextChecker(ctx context.Context, debounce int) func() error {
+	count := 0
+	return func() error {
+		count++
+		if count > debounce {
+			count = 0
+			select {
+			case <-ctx.Done():
+				return ctx.Err()
+			default:
+			}
+		}
+		return nil
+	}
+}
diff --git a/muxdb/trie_test.go b/muxdb/trie_test.go
new file mode 100644
index 000000000..79bec7124
--- /dev/null
+++ b/muxdb/trie_test.go
@@ -0,0 +1,78 @@
+// Copyright (c) 2021 The VeChainThor developers
+
+// Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying
+// file LICENSE or
+
+package muxdb
+
+import (
+ "encoding/binary"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/syndtr/goleveldb/leveldb"
+ "github.com/syndtr/goleveldb/leveldb/storage"
+ "github.com/vechain/thor/v2/muxdb/engine"
+ "github.com/vechain/thor/v2/thor"
+ "github.com/vechain/thor/v2/trie"
+)
+
+// newTestEngine opens an in-memory leveldb and wraps it as a store engine.
+func newTestEngine() engine.Engine {
+	db, _ := leveldb.Open(storage.NewMemStorage(), nil)
+	return engine.NewLevelEngine(db)
+}
+
+// newTestBackend builds a minimal backend for tests: in-memory store,
+// dummy cache, and partition factor 1 for both hist and deduped spaces.
+func newTestBackend() *backend {
+	engine := newTestEngine()
+	return &backend{
+		Store: engine,
+		Cache: &dummyCache{},
+		HistPtnFactor: 1,
+		DedupedPtnFactor: 1,
+		CachedNodeTTL: 100,
+	}
+}
+
+// TestTrie commits one key/value pair per version over many rounds, then
+// re-opens the trie at every recorded root and checks that all keys
+// written up to that version still resolve with the expected value/meta.
+func TestTrie(t *testing.T) {
+	var (
+		name = "the trie"
+		back = newTestBackend()
+		round = uint32(200)
+		roots []trie.Root
+	)
+
+	// phase 1: for each version i, derive a deterministic key/val/meta
+	// triple from i, update the trie and commit at major version i
+	for i := uint32(0); i < round; i++ {
+		var root trie.Root
+		if len(roots) > 0 {
+			root = roots[len(roots)-1]
+		}
+
+		tr := newTrie(name, back, root)
+		key := thor.Blake2b(binary.BigEndian.AppendUint32(nil, i)).Bytes()
+		val := thor.Blake2b(key).Bytes()
+		meta := thor.Blake2b(val).Bytes()
+		err := tr.Update(key, val, meta)
+		assert.Nil(t, err)
+
+		err = tr.Commit(trie.Version{Major: i}, false)
+		assert.Nil(t, err)
+
+		roots = append(roots, trie.Root{
+			Hash: tr.Hash(),
+			Ver: trie.Version{Major: i},
+		})
+	}
+
+	// phase 2: every historical root must still expose all keys committed
+	// up to (and including) its own version
+	for _i, root := range roots {
+		tr := newTrie(name, back, root)
+		for i := uint32(0); i <= uint32(_i); i++ {
+			key := thor.Blake2b(binary.BigEndian.AppendUint32(nil, i)).Bytes()
+			val := thor.Blake2b(key).Bytes()
+			meta := thor.Blake2b(val).Bytes()
+			_val, _meta, err := tr.Get(key)
+			assert.Nil(t, err)
+			assert.Equal(t, val, _val)
+			assert.Equal(t, meta, _meta)
+		}
+	}
+}
diff --git a/packer/flow.go b/packer/flow.go
index 47bd97ae7..ed1530dca 100644
--- a/packer/flow.go
+++ b/packer/flow.go
@@ -14,6 +14,7 @@ import (
"github.com/vechain/thor/v2/runtime"
"github.com/vechain/thor/v2/state"
"github.com/vechain/thor/v2/thor"
+ "github.com/vechain/thor/v2/trie"
"github.com/vechain/thor/v2/tx"
"github.com/vechain/thor/v2/vrf"
)
@@ -156,7 +157,7 @@ func (f *Flow) Pack(privateKey *ecdsa.PrivateKey, newBlockConflicts uint32, shou
return nil, nil, nil, errors.New("private key mismatch")
}
- stage, err := f.runtime.State().Stage(f.Number(), newBlockConflicts)
+ stage, err := f.runtime.State().Stage(trie.Version{Major: f.Number(), Minor: newBlockConflicts})
if err != nil {
return nil, nil, nil, err
}
diff --git a/packer/packer.go b/packer/packer.go
index aa85ade87..212c81c42 100644
--- a/packer/packer.go
+++ b/packer/packer.go
@@ -50,7 +50,7 @@ func New(
// Schedule schedule a packing flow to pack new block upon given parent and clock time.
func (p *Packer) Schedule(parent *chain.BlockSummary, nowTimestamp uint64) (flow *Flow, err error) {
- state := p.stater.NewState(parent.Header.StateRoot(), parent.Header.Number(), parent.Conflicts, parent.SteadyNum)
+ state := p.stater.NewState(parent.Root())
var features tx.Features
if parent.Header.Number()+1 >= p.forkConfig.VIP191 {
@@ -141,7 +141,7 @@ func (p *Packer) Schedule(parent *chain.BlockSummary, nowTimestamp uint64) (flow
// It will skip the PoA verification and scheduling, and the block produced by
// the returned flow is not in consensus.
func (p *Packer) Mock(parent *chain.BlockSummary, targetTime uint64, gasLimit uint64) (*Flow, error) {
- state := p.stater.NewState(parent.Header.StateRoot(), parent.Header.Number(), parent.Conflicts, parent.SteadyNum)
+ state := p.stater.NewState(parent.Root())
var features tx.Features
if parent.Header.Number()+1 >= p.forkConfig.VIP191 {
diff --git a/packer/packer_test.go b/packer/packer_test.go
index da8078379..fbe95fe8c 100644
--- a/packer/packer_test.go
+++ b/packer/packer_test.go
@@ -22,6 +22,7 @@ import (
"github.com/vechain/thor/v2/packer"
"github.com/vechain/thor/v2/state"
"github.com/vechain/thor/v2/thor"
+ "github.com/vechain/thor/v2/trie"
"github.com/vechain/thor/v2/tx"
)
@@ -101,10 +102,9 @@ func TestP(t *testing.T) {
_, _, err = consensus.New(repo, stater, thor.NoFork).Process(best, blk, uint64(time.Now().Unix()*2), 0)
assert.Nil(t, err)
- if err := repo.AddBlock(blk, receipts, 0); err != nil {
+ if err := repo.AddBlock(blk, receipts, 0, true); err != nil {
t.Fatal(err)
}
- repo.SetBestBlockID(blk.Header().ID())
if time.Now().UnixNano() > start+1000*1000*1000*1 {
break
@@ -166,15 +166,15 @@ func TestForkVIP191(t *testing.T) {
t.Fatal(err)
}
- if err := repo.AddBlock(blk, receipts, 0); err != nil {
+ if err := repo.AddBlock(blk, receipts, 0, false); err != nil {
t.Fatal(err)
}
- headState := state.New(db, blk.Header().StateRoot(), blk.Header().Number(), 0, 0)
+ headState := state.New(db, trie.Root{Hash: blk.Header().StateRoot(), Ver: trie.Version{Major: blk.Header().Number()}})
assert.Equal(t, M(builtin.Extension.V2.RuntimeBytecodes(), nil), M(headState.GetCode(builtin.Extension.Address)))
- geneState := state.New(db, b0.Header().StateRoot(), 0, 0, 0)
+ geneState := state.New(db, trie.Root{Hash: b0.Header().StateRoot()})
assert.Equal(t, M(builtin.Extension.RuntimeBytecodes(), nil), M(geneState.GetCode(builtin.Extension.Address)))
}
diff --git a/poa/candidates_test.go b/poa/candidates_test.go
index 16e43fd4e..4b73f50d8 100644
--- a/poa/candidates_test.go
+++ b/poa/candidates_test.go
@@ -14,6 +14,7 @@ import (
"github.com/vechain/thor/v2/muxdb"
"github.com/vechain/thor/v2/state"
"github.com/vechain/thor/v2/thor"
+ "github.com/vechain/thor/v2/trie"
)
func generateCandidateList(candidateCount int) []*authority.Candidate {
@@ -103,8 +104,7 @@ func TestCopy(t *testing.T) {
}
func TestPick(t *testing.T) {
- db := muxdb.NewMem()
- state := state.New(db, thor.Bytes32{}, 0, 0, 0)
+ state := state.New(muxdb.NewMem(), trie.Root{})
candidateList := generateCandidateList(5)
diff --git a/poa/seed_test.go b/poa/seed_test.go
index ce16d13e8..88a41bff1 100644
--- a/poa/seed_test.go
+++ b/poa/seed_test.go
@@ -45,14 +45,12 @@ func TestSeeder_Generate(t *testing.T) {
ParentID(parent.Header().ID()).
Build().WithSignature(sig[:])
- if err := repo.AddBlock(b, nil, 0); err != nil {
+ asBest := i == int(epochInterval*3)
+ if err := repo.AddBlock(b, nil, 0, asBest); err != nil {
t.Fatal(err)
}
parent = b
}
- if err := repo.SetBestBlockID(parent.Header().ID()); err != nil {
- t.Fatal(err)
- }
b30ID, err := repo.NewBestChain().GetBlockID(epochInterval * 3)
if err != nil {
@@ -100,7 +98,7 @@ func TestSeeder_Generate(t *testing.T) {
ParentID(parent.Header().ID()).
Build().WithSignature(sig[:])
- if err := repo.AddBlock(b, nil, 0); err != nil {
+ if err := repo.AddBlock(b, nil, 0, false); err != nil {
t.Fatal(err)
}
parent = b
@@ -142,16 +140,13 @@ func TestSeeder_Generate(t *testing.T) {
b = b.WithSignature(cs)
- if err := repo.AddBlock(b, nil, 0); err != nil {
+ asBest := i == int(epochInterval*2)
+ if err := repo.AddBlock(b, nil, 0, asBest); err != nil {
t.Fatal(err)
}
parent = b
}
- if err := repo.SetBestBlockID(parent.Header().ID()); err != nil {
- t.Fatal(err)
- }
-
chain := repo.NewBestChain()
b40, err := chain.GetBlockHeader(40)
if err != nil {
diff --git a/runtime/native_return_gas_test.go b/runtime/native_return_gas_test.go
index 707169e00..47dad803b 100644
--- a/runtime/native_return_gas_test.go
+++ b/runtime/native_return_gas_test.go
@@ -14,13 +14,13 @@ import (
"github.com/vechain/thor/v2/muxdb"
"github.com/vechain/thor/v2/state"
"github.com/vechain/thor/v2/thor"
+ "github.com/vechain/thor/v2/trie"
"github.com/vechain/thor/v2/tx"
"github.com/vechain/thor/v2/xenv"
)
func TestNativeCallReturnGas(t *testing.T) {
- db := muxdb.NewMem()
- state := state.New(db, thor.Bytes32{}, 0, 0, 0)
+ state := state.New(muxdb.NewMem(), trie.Root{})
state.SetCode(builtin.Measure.Address, builtin.Measure.RuntimeBytecodes())
inner, _ := builtin.Measure.ABI.MethodByName("inner")
diff --git a/runtime/resolved_tx_test.go b/runtime/resolved_tx_test.go
index 37a0e7425..5eae70486 100644
--- a/runtime/resolved_tx_test.go
+++ b/runtime/resolved_tx_test.go
@@ -72,7 +72,7 @@ func newTestResolvedTransaction(t *testing.T) (*testResolvedTransaction, error)
func (tr *testResolvedTransaction) currentState() *state.State {
h := tr.repo.BestBlockSummary()
- return tr.stater.NewState(h.Header.StateRoot(), h.Header.Number(), 0, h.SteadyNum)
+ return tr.stater.NewState(h.Root())
}
func (tr *testResolvedTransaction) TestResolveTransaction() {
diff --git a/runtime/runtime_test.go b/runtime/runtime_test.go
index b86366e11..950154380 100644
--- a/runtime/runtime_test.go
+++ b/runtime/runtime_test.go
@@ -22,6 +22,7 @@ import (
"github.com/vechain/thor/v2/runtime"
"github.com/vechain/thor/v2/state"
"github.com/vechain/thor/v2/thor"
+ "github.com/vechain/thor/v2/trie"
"github.com/vechain/thor/v2/tx"
"github.com/vechain/thor/v2/xenv"
)
@@ -52,7 +53,7 @@ func TestContractSuicide(t *testing.T) {
data, _ := hex.DecodeString("608060405260043610603f576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff168063085da1b3146044575b600080fd5b348015604f57600080fd5b5060566058565b005b3373ffffffffffffffffffffffffffffffffffffffff16ff00a165627a7a723058204cb70b653a3d1821e00e6ade869638e80fa99719931c9fa045cec2189d94086f0029")
time := b0.Header().Timestamp()
addr := thor.BytesToAddress([]byte("acc01"))
- state := stater.NewState(b0.Header().StateRoot(), 0, 0, 0)
+ state := stater.NewState(trie.Root{Hash: b0.Header().StateRoot()})
state.SetCode(addr, data)
state.SetEnergy(addr, big.NewInt(100), time)
state.SetBalance(addr, big.NewInt(200))
@@ -125,7 +126,7 @@ func TestChainID(t *testing.T) {
// }
data, _ := hex.DecodeString("6080604052348015600f57600080fd5b506004361060285760003560e01c8063adc879e914602d575b600080fd5b60336047565b604051603e9190605c565b60405180910390f35b600046905090565b6056816075565b82525050565b6000602082019050606f6000830184604f565b92915050565b600081905091905056fea264697066735822122060b67d944ffa8f0c5ee69f2f47decc3dc175ea2e4341a4de3705d72b868ce2b864736f6c63430008010033")
addr := thor.BytesToAddress([]byte("acc01"))
- state := stater.NewState(b0.Header().StateRoot(), 0, 0, 0)
+ state := stater.NewState(trie.Root{Hash: b0.Header().StateRoot()})
state.SetCode(addr, data)
abi, _ := abi.New([]byte(`[{
@@ -178,7 +179,7 @@ func TestSelfBalance(t *testing.T) {
data, _ := hex.DecodeString("6080604052348015600f57600080fd5b506004361060285760003560e01c8063b0bed0ba14602d575b600080fd5b60336047565b604051603e9190605c565b60405180910390f35b600047905090565b6056816075565b82525050565b6000602082019050606f6000830184604f565b92915050565b600081905091905056fea2646970667358221220eeac1b7322c414db88987af09d3c8bdfde83bb378be9ac0e9ebe3fe34ecbcf2564736f6c63430008010033")
addr := thor.BytesToAddress([]byte("acc01"))
- state := stater.NewState(b0.Header().StateRoot(), 0, 0, 0)
+ state := stater.NewState(trie.Root{Hash: b0.Header().StateRoot()})
state.SetCode(addr, data)
state.SetBalance(addr, big.NewInt(100))
@@ -265,7 +266,7 @@ func TestBlake2(t *testing.T) {
// }
data, _ := hex.DecodeString("608060405234801561001057600080fd5b50600436106100365760003560e01c806372de3cbd1461003b578063fc75ac471461006b575b600080fd5b61005560048036038101906100509190610894565b610089565b6040516100629190610a9b565b60405180910390f35b6100736102e5565b6040516100809190610a9b565b60405180910390f35b61009161063c565b61009961063c565b600087876000600281106100d6577f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b602002015188600160028110610115577f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b602002015188600060048110610154577f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b602002015189600160048110610193577f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b60200201518a6002600481106101d2577f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b60200201518b600360048110610211577f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b60200201518b600060028110610250577f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b60200201518c60016002811061028f577f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b60200201518c6040516020016102ae9a999897969594939291906109e7565b604051602081830303815290604052905060408260d5602084016009600019fa6102d757600080fd5b819250505095945050505050565b6102ed61063c565b6000600c90506102fb61063c565b7f48c9bdf267e6096a3ba7ca8485ae67bb2bf894fe72f36e3cf1361d5f3af54fa581600060028110610356577f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b6020020181815250507fd182e6ad7f520e511f6c3e2b8c68059b6bbd41fbabd9831f79217e1319cde05b816001600281106103ba577f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b6020020181815250506103cb61065e565b7f6162630000000000000000000
00000000000000000000000000000000000000081600060048110610426577f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b60200201818152505060008160016004811061046b577f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b6020020181815250506000816002600481106104b0577f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b6020020181815250506000816003600481106104f5577f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b602002018181525050610506610680565b7f030000000000000000000000000000000000000000000000000000000000000081600060028110610561577f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b602002019077ffffffffffffffffffffffffffffffffffffffffffffffff1916908177ffffffffffffffffffffffffffffffffffffffffffffffff1916815250506000816001600281106105de577f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b602002019077ffffffffffffffffffffffffffffffffffffffffffffffff1916908177ffffffffffffffffffffffffffffffffffffffffffffffff1916815250506000600190506106328585858585610089565b9550505050505090565b6040518060400160405280600290602082028036833780820191505090505090565b6040518060800160405280600490602082028036833780820191505090505090565b6040518060400160405280600290602082028036833780820191505090505090565b60006106b56106b084610adb565b610ab6565b905080828560208602820111156106cb57600080fd5b60005b858110156106fb57816106e18882610855565b8452602084019350602083019250506001810190506106ce565b5050509392505050565b600061071861071384610b01565b610ab6565b9050808285602086028201111561072e57600080fd5b60005b8581101561075e57816107448882610855565b845260208401935060208301925050600181019050610731565b5050509392505050565b600061077b61077684610b27565b610ab6565b9050808285602086028201111561079157600080fd5b60005b858110156107c157816107a7888261086a565b845260208401935060208301925050600181019050610794565
b5050509392505050565b600082601f8301126107dc57600080fd5b60026107e98482856106a2565b91505092915050565b600082601f83011261080357600080fd5b6004610810848285610705565b91505092915050565b600082601f83011261082a57600080fd5b6002610837848285610768565b91505092915050565b60008135905061084f81610ca1565b92915050565b60008135905061086481610cb8565b92915050565b60008135905061087981610ccf565b92915050565b60008135905061088e81610ce6565b92915050565b600080600080600061014086880312156108ad57600080fd5b60006108bb8882890161087f565b95505060206108cc888289016107cb565b94505060606108dd888289016107f2565b93505060e06108ee88828901610819565b92505061012061090088828901610840565b9150509295509295909350565b60006109198383610993565b60208301905092915050565b61092e81610b57565b6109388184610b6f565b925061094382610b4d565b8060005b8381101561097457815161095b878261090d565b965061096683610b62565b925050600181019050610947565b505050505050565b61098d61098882610b7a565b610bfd565b82525050565b61099c81610b86565b82525050565b6109b36109ae82610b86565b610c0f565b82525050565b6109ca6109c582610b90565b610c19565b82525050565b6109e16109dc82610bbc565b610c23565b82525050565b60006109f3828d6109d0565b600482019150610a03828c6109a2565b602082019150610a13828b6109a2565b602082019150610a23828a6109a2565b602082019150610a3382896109a2565b602082019150610a4382886109a2565b602082019150610a5382876109a2565b602082019150610a6382866109b9565b600882019150610a7382856109b9565b600882019150610a83828461097c565b6001820191508190509b9a5050505050505050505050565b6000604082019050610ab06000830184610925565b92915050565b6000610ac0610ad1565b9050610acc8282610bcc565b919050565b6000604051905090565b600067ffffffffffffffff821115610af657610af5610c47565b5b602082029050919050565b600067ffffffffffffffff821115610b1c57610b1b610c47565b5b602082029050919050565b600067ffffffffffffffff821115610b4257610b41610c47565b5b602082029050919050565b6000819050919050565b600060029050919050565b6000602082019050919050565b600081905092915050565b60008115159050919050565b6000819050919050565b60007fffffffffffffffff000000000000000000000000000
00000000000000000000082169050919050565b600063ffffffff82169050919050565b610bd582610c76565b810181811067ffffffffffffffff82111715610bf457610bf3610c47565b5b80604052505050565b6000610c0882610c35565b9050919050565b6000819050919050565b6000819050919050565b6000610c2e82610c87565b9050919050565b6000610c4082610c94565b9050919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b6000601f19601f8301169050919050565b60008160e01b9050919050565b60008160f81b9050919050565b610caa81610b7a565b8114610cb557600080fd5b50565b610cc181610b86565b8114610ccc57600080fd5b50565b610cd881610b90565b8114610ce357600080fd5b50565b610cef81610bbc565b8114610cfa57600080fd5b5056fea2646970667358221220d54d4583b224c049d80665ae690afd0e7e998bf883c6b97472d292d1e2e5fa3e64736f6c63430008010033")
addr := thor.BytesToAddress([]byte("acc01"))
- state := stater.NewState(b0.Header().StateRoot(), 0, 0, 0)
+ state := stater.NewState(trie.Root{Hash: b0.Header().StateRoot()})
state.SetCode(addr, data)
abi, _ := abi.New([]byte(`[{
@@ -349,7 +350,7 @@ func TestCall(t *testing.T) {
repo, _ := chain.NewRepository(db, b0)
- state := state.New(db, b0.Header().StateRoot(), 0, 0, 0)
+ state := state.New(db, trie.Root{Hash: b0.Header().StateRoot()})
rt := runtime.New(repo.NewChain(b0.Header().ID()), state, &xenv.BlockContext{}, thor.NoFork)
@@ -438,7 +439,7 @@ func TestGetValues(t *testing.T) {
repo, _ := chain.NewRepository(db, b0)
- state := state.New(db, b0.Header().StateRoot(), 0, 0, 0)
+ state := state.New(db, trie.Root{Hash: b0.Header().StateRoot()})
rt := runtime.New(repo.NewChain(b0.Header().ID()), state, &xenv.BlockContext{}, thor.NoFork)
runtimeChain := rt.Chain()
@@ -461,7 +462,7 @@ func TestExecuteTransaction(t *testing.T) {
repo, _ := chain.NewRepository(db, b0)
- state := state.New(db, b0.Header().StateRoot(), 0, 0, 0)
+ state := state.New(db, trie.Root{Hash: b0.Header().StateRoot()})
originEnergy := new(big.Int)
originEnergy.SetString("9000000000000000000000000000000000000", 10)
@@ -489,7 +490,7 @@ func TestExecuteTransactionFailure(t *testing.T) {
repo, _ := chain.NewRepository(db, b0)
- state := state.New(db, b0.Header().StateRoot(), 0, 0, 0)
+ state := state.New(db, trie.Root{Hash: b0.Header().StateRoot()})
originEnergy := new(big.Int)
originEnergy.SetString("9000000000000000000000000000000000000", 10)
diff --git a/runtime/statedb/statedb_test.go b/runtime/statedb/statedb_test.go
index dd5fc8b35..5f41a2c52 100644
--- a/runtime/statedb/statedb_test.go
+++ b/runtime/statedb/statedb_test.go
@@ -22,7 +22,7 @@ import (
"github.com/vechain/thor/v2/muxdb"
"github.com/vechain/thor/v2/runtime/statedb"
State "github.com/vechain/thor/v2/state"
- "github.com/vechain/thor/v2/thor"
+ "github.com/vechain/thor/v2/trie"
)
func TestSnapshotRandom(t *testing.T) {
@@ -185,7 +185,7 @@ func (test *snapshotTest) run() bool {
// Run all actions and create snapshots.
var (
db = muxdb.NewMem()
- state = State.New(db, thor.Bytes32{}, 0, 0, 0)
+ state = State.New(db, trie.Root{})
stateDB = statedb.New(state)
snapshotRevs = make([]int, len(test.snapshots))
sindex = 0
@@ -200,7 +200,7 @@ func (test *snapshotTest) run() bool {
// Revert all snapshots in reverse order. Each revert must yield a state
// that is equivalent to fresh state with all actions up the snapshot applied.
for sindex--; sindex >= 0; sindex-- {
- state := State.New(db, thor.Bytes32{}, 0, 0, 0)
+ state := State.New(db, trie.Root{})
checkStateDB := statedb.New(state)
for _, action := range test.actions[:test.snapshots[sindex]] {
action.fn(action, checkStateDB)
diff --git a/state/account.go b/state/account.go
index cccec7eb4..f3cf5dbda 100644
--- a/state/account.go
+++ b/state/account.go
@@ -16,9 +16,9 @@ import (
// AccountMetadata is the account metadata.
type AccountMetadata struct {
- StorageID []byte // the unique id of the storage trie.
- StorageCommitNum uint32 // the commit number of the last storage update.
- StorageDistinctNum uint32 // the distinct number of the last storage update.
+ StorageID []byte // the unique id of the storage trie.
+ StorageMajorVer uint32 // the major version of the last storage update.
+ StorageMinorVer uint32 // the minor version of the last storage update.
}
// Account is the Thor consensus representation of an account.
@@ -69,11 +69,12 @@ func emptyAccount() *Account {
return &a
}
+func secureKey(k []byte) []byte { return thor.Blake2b(k).Bytes() }
+
// loadAccount load an account object and its metadata by address in trie.
// It returns empty account is no account found at the address.
-func loadAccount(trie *muxdb.Trie, addr thor.Address, steadyBlockNum uint32) (*Account, *AccountMetadata, error) {
- hashedKey := thor.Blake2b(addr[:])
- data, meta, err := trie.FastGet(hashedKey[:], steadyBlockNum)
+func loadAccount(trie *muxdb.Trie, addr thor.Address) (*Account, *AccountMetadata, error) {
+ data, meta, err := trie.Get(secureKey(addr[:]))
if err != nil {
return nil, nil, err
}
@@ -98,9 +99,8 @@ func loadAccount(trie *muxdb.Trie, addr thor.Address, steadyBlockNum uint32) (*A
// If the given account is empty, the value for given address is deleted.
func saveAccount(trie *muxdb.Trie, addr thor.Address, a *Account, am *AccountMetadata) error {
if a.IsEmpty() {
- hashedKey := thor.Blake2b(addr[:])
// delete if account is empty
- return trie.Update(hashedKey[:], nil, nil)
+ return trie.Update(secureKey(addr[:]), nil, nil)
}
data, err := rlp.EncodeToBytes(a)
@@ -114,25 +114,20 @@ func saveAccount(trie *muxdb.Trie, addr thor.Address, a *Account, am *AccountMet
return err
}
}
- hashedKey := thor.Blake2b(addr[:])
- return trie.Update(hashedKey[:], data, mdata)
+ return trie.Update(secureKey(addr[:]), data, mdata)
}
// loadStorage load storage data for given key.
-func loadStorage(trie *muxdb.Trie, key thor.Bytes32, steadyBlockNum uint32) (rlp.RawValue, error) {
- hashedKey := thor.Blake2b(key[:])
- v, _, err := trie.FastGet(
- hashedKey[:],
- steadyBlockNum)
+func loadStorage(trie *muxdb.Trie, key thor.Bytes32) (rlp.RawValue, error) {
+ v, _, err := trie.Get(secureKey(key[:]))
return v, err
}
// saveStorage save value for given key.
// If the data is zero, the given key will be deleted.
func saveStorage(trie *muxdb.Trie, key thor.Bytes32, data rlp.RawValue) error {
- hashedKey := thor.Blake2b(key[:])
return trie.Update(
- hashedKey[:],
+ secureKey(key[:]),
data,
bytes.TrimLeft(key[:], "\x00"), // key preimage as metadata
)
diff --git a/state/account_test.go b/state/account_test.go
index d95a281de..9bc84b517 100644
--- a/state/account_test.go
+++ b/state/account_test.go
@@ -13,6 +13,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/vechain/thor/v2/muxdb"
"github.com/vechain/thor/v2/thor"
+ "github.com/vechain/thor/v2/trie"
)
func M(a ...interface{}) []interface{} {
@@ -40,11 +41,11 @@ func TestAccount(t *testing.T) {
func TestTrie(t *testing.T) {
db := muxdb.NewMem()
- trie := db.NewTrie("", thor.Bytes32{}, 0, 0)
+ tr := db.NewTrie("", trie.Root{})
addr := thor.BytesToAddress([]byte("account1"))
assert.Equal(t,
- M(loadAccount(trie, addr, 0)),
+ M(loadAccount(tr, addr)),
M(emptyAccount(), &AccountMetadata{}, nil),
"should load an empty account")
@@ -57,40 +58,40 @@ func TestTrie(t *testing.T) {
[]byte("storage root"),
}
meta1 := AccountMetadata{
- StorageID: []byte("sid"),
- StorageCommitNum: 1,
- StorageDistinctNum: 2,
+ StorageID: []byte("sid"),
+ StorageMajorVer: 1,
+ StorageMinorVer: 2,
}
- saveAccount(trie, addr, &acc1, &meta1)
+ saveAccount(tr, addr, &acc1, &meta1)
assert.Equal(t,
- M(loadAccount(trie, addr, 0)),
+ M(loadAccount(tr, addr)),
M(&acc1, &meta1, nil))
- saveAccount(trie, addr, emptyAccount(), &meta1)
+ saveAccount(tr, addr, emptyAccount(), &meta1)
assert.Equal(t,
- M(trie.Get(addr[:])),
+ M(tr.Get(addr[:])),
M([]byte(nil), []byte(nil), nil),
"empty account should be deleted")
}
func TestStorageTrie(t *testing.T) {
db := muxdb.NewMem()
- trie := db.NewTrie("", thor.Bytes32{}, 0, 0)
+ tr := db.NewTrie("", trie.Root{})
key := thor.BytesToBytes32([]byte("key"))
assert.Equal(t,
- M(loadStorage(trie, key, 0)),
+ M(loadStorage(tr, key)),
M(rlp.RawValue(nil), nil))
value := rlp.RawValue("value")
- saveStorage(trie, key, value)
+ saveStorage(tr, key, value)
assert.Equal(t,
- M(loadStorage(trie, key, 0)),
+ M(loadStorage(tr, key)),
M(value, nil))
- saveStorage(trie, key, nil)
+ saveStorage(tr, key, nil)
assert.Equal(t,
- M(trie.Get(key[:])),
+ M(tr.Get(key[:])),
M([]byte(nil), []byte(nil), nil),
"empty storage value should be deleted")
}
diff --git a/state/cached_object.go b/state/cached_object.go
index 75f34d19a..df9a2275a 100644
--- a/state/cached_object.go
+++ b/state/cached_object.go
@@ -10,6 +10,7 @@ import (
lru "github.com/hashicorp/golang-lru"
"github.com/vechain/thor/v2/muxdb"
"github.com/vechain/thor/v2/thor"
+ "github.com/vechain/thor/v2/trie"
)
var codeCache, _ = lru.NewARC(512)
@@ -43,16 +44,21 @@ func (co *cachedObject) getOrCreateStorageTrie() *muxdb.Trie {
trie := co.db.NewTrie(
StorageTrieName(co.meta.StorageID),
- thor.BytesToBytes32(co.data.StorageRoot),
- co.meta.StorageCommitNum,
- co.meta.StorageDistinctNum)
+ trie.Root{
+ Hash: thor.BytesToBytes32(co.data.StorageRoot),
+ Ver: trie.Version{
+ Major: co.meta.StorageMajorVer,
+ Minor: co.meta.StorageMinorVer,
+ },
+ },
+ )
co.cache.storageTrie = trie
return trie
}
// GetStorage returns storage value for given key.
-func (co *cachedObject) GetStorage(key thor.Bytes32, steadyBlockNum uint32) (rlp.RawValue, error) {
+func (co *cachedObject) GetStorage(key thor.Bytes32) (rlp.RawValue, error) {
cache := &co.cache
// retrieve from storage cache
if cache.storage != nil {
@@ -70,7 +76,7 @@ func (co *cachedObject) GetStorage(key thor.Bytes32, steadyBlockNum uint32) (rlp
}
// load from trie
- v, err := loadStorage(trie, key, steadyBlockNum)
+ v, err := loadStorage(trie, key)
if err != nil {
return nil, err
}
diff --git a/state/cached_object_test.go b/state/cached_object_test.go
index 5a5265217..1f06f0e98 100644
--- a/state/cached_object_test.go
+++ b/state/cached_object_test.go
@@ -14,13 +14,14 @@ import (
"github.com/stretchr/testify/assert"
"github.com/vechain/thor/v2/muxdb"
"github.com/vechain/thor/v2/thor"
+ "github.com/vechain/thor/v2/trie"
)
func TestCachedObject(t *testing.T) {
db := muxdb.NewMem()
addr := thor.Address{}
- stgTrie := db.NewTrie(StorageTrieName([]byte("sid")), thor.Bytes32{}, 0, 0)
+ stgTrie := db.NewTrie(StorageTrieName([]byte("sid")), trie.Root{})
storages := []struct {
k thor.Bytes32
v rlp.RawValue
@@ -35,9 +36,9 @@ func TestCachedObject(t *testing.T) {
saveStorage(stgTrie, s.k, s.v)
}
- storageRoot, commit := stgTrie.Stage(0, 0)
+ storageRoot := stgTrie.Hash()
- err := commit()
+ err := stgTrie.Commit(trie.Version{}, false)
assert.Nil(t, err)
code := make([]byte, 100)
@@ -61,6 +62,6 @@ func TestCachedObject(t *testing.T) {
for _, s := range storages {
assert.Equal(t,
M(s.v, nil),
- M(obj.GetStorage(s.k, 0)))
+ M(obj.GetStorage(s.k)))
}
}
diff --git a/state/stage.go b/state/stage.go
index 5fca2859f..d21cacc1f 100644
--- a/state/stage.go
+++ b/state/stage.go
@@ -9,8 +9,8 @@ import "github.com/vechain/thor/v2/thor"
// Stage abstracts changes on the main accounts trie.
type Stage struct {
- root thor.Bytes32
- commits []func() error
+ root thor.Bytes32
+ commit func() error
}
// Hash computes hash of the main accounts trie.
@@ -20,11 +20,9 @@ func (s *Stage) Hash() thor.Bytes32 {
// Commit commits all changes into main accounts trie and storage tries.
func (s *Stage) Commit() (root thor.Bytes32, err error) {
- for _, c := range s.commits {
- if err = c(); err != nil {
- err = &Error{err}
- return
- }
+ if err = s.commit(); err != nil {
+ err = &Error{err}
+ return
}
return s.root, nil
}
diff --git a/state/stage_test.go b/state/stage_test.go
index f157591fa..2296f2604 100644
--- a/state/stage_test.go
+++ b/state/stage_test.go
@@ -13,11 +13,12 @@ import (
"github.com/stretchr/testify/assert"
"github.com/vechain/thor/v2/muxdb"
"github.com/vechain/thor/v2/thor"
+ "github.com/vechain/thor/v2/trie"
)
func TestStage(t *testing.T) {
db := muxdb.NewMem()
- state := New(db, thor.Bytes32{}, 0, 0, 0)
+ state := New(db, trie.Root{})
addr := thor.BytesToAddress([]byte("acc1"))
balance := big.NewInt(10)
@@ -34,7 +35,7 @@ func TestStage(t *testing.T) {
state.SetStorage(addr, k, v)
}
- stage, err := state.Stage(1, 0)
+ stage, err := state.Stage(trie.Version{Major: 1})
assert.Nil(t, err)
hash := stage.Hash()
@@ -44,7 +45,7 @@ func TestStage(t *testing.T) {
assert.Equal(t, hash, root)
- state = New(db, root, 1, 0, 0)
+ state = New(db, trie.Root{Hash: root, Ver: trie.Version{Major: 1}})
assert.Equal(t, M(balance, nil), M(state.GetBalance(addr)))
assert.Equal(t, M(code, nil), M(state.GetCode(addr)))
@@ -56,8 +57,7 @@ func TestStage(t *testing.T) {
}
func TestStageCommitError(t *testing.T) {
- db := muxdb.NewMem()
- state := New(db, thor.Bytes32{}, 0, 0, 0)
+ state := New(muxdb.NewMem(), trie.Root{})
// Set up the state with an account, balance, code, and storage.
addr := thor.BytesToAddress([]byte("acc1"))
@@ -76,7 +76,7 @@ func TestStageCommitError(t *testing.T) {
}
// Prepare the stage with the current state.
- stage, err := state.Stage(1, 0)
+ stage, err := state.Stage(trie.Version{Major: 1})
assert.Nil(t, err, "Stage should not return an error")
// Mock a commit function to simulate an error.
@@ -85,7 +85,7 @@ func TestStageCommitError(t *testing.T) {
}
// Include the error-producing commit function in the stage's commits.
- stage.commits = append(stage.commits, commitFuncWithError)
+ stage.commit = commitFuncWithError
// Attempt to commit changes.
_, err = stage.Commit()
diff --git a/state/state.go b/state/state.go
index 8fe7237c8..5312e297c 100644
--- a/state/state.go
+++ b/state/state.go
@@ -7,14 +7,15 @@ package state
import (
"bytes"
+ "encoding/binary"
"fmt"
"math/big"
"github.com/ethereum/go-ethereum/rlp"
- "github.com/vechain/thor/v2/lowrlp"
"github.com/vechain/thor/v2/muxdb"
"github.com/vechain/thor/v2/stackedmap"
"github.com/vechain/thor/v2/thor"
+ "github.com/vechain/thor/v2/trie"
)
const (
@@ -45,20 +46,18 @@ func (e *Error) Error() string {
// State manages the world state.
type State struct {
- db *muxdb.MuxDB
- trie *muxdb.Trie // the accounts trie reader
- cache map[thor.Address]*cachedObject // cache of accounts trie
- sm *stackedmap.StackedMap // keeps revisions of accounts state
- steadyBlockNum uint32
+ db *muxdb.MuxDB
+ trie *muxdb.Trie // the accounts trie reader
+ cache map[thor.Address]*cachedObject // cache of accounts trie
+ sm *stackedmap.StackedMap // keeps revisions of accounts state
}
// New create state object.
-func New(db *muxdb.MuxDB, root thor.Bytes32, blockNum, blockConflicts, steadyBlockNum uint32) *State {
+func New(db *muxdb.MuxDB, root trie.Root) *State {
state := State{
- db: db,
- trie: db.NewTrie(AccountTrieName, root, blockNum, blockConflicts),
- cache: make(map[thor.Address]*cachedObject),
- steadyBlockNum: steadyBlockNum,
+ db: db,
+ trie: db.NewTrie(AccountTrieName, root),
+ cache: make(map[thor.Address]*cachedObject),
}
state.sm = stackedmap.New(func(key interface{}) (interface{}, bool, error) {
@@ -68,8 +67,8 @@ func New(db *muxdb.MuxDB, root thor.Bytes32, blockNum, blockConflicts, steadyBlo
}
// Checkout checkouts to another state.
-func (s *State) Checkout(root thor.Bytes32, blockNum, blockConflicts, steadyBlockNum uint32) *State {
- return New(s.db, root, blockNum, blockConflicts, steadyBlockNum)
+func (s *State) Checkout(root trie.Root) *State {
+ return New(s.db, root)
}
// cacheGetter implements stackedmap.MapGetter.
@@ -102,7 +101,7 @@ func (s *State) cacheGetter(key interface{}) (value interface{}, exist bool, err
if err != nil {
return nil, false, err
}
- v, err := obj.GetStorage(k.key, s.steadyBlockNum)
+ v, err := obj.GetStorage(k.key)
if err != nil {
return nil, false, err
}
@@ -117,7 +116,7 @@ func (s *State) getCachedObject(addr thor.Address) (*cachedObject, error) {
if co, ok := s.cache[addr]; ok {
return co, nil
}
- a, am, err := loadAccount(s.trie, addr, s.steadyBlockNum)
+ a, am, err := loadAccount(s.trie, addr)
if err != nil {
return nil, err
}
@@ -359,28 +358,27 @@ func (s *State) RevertTo(revision int) {
}
// BuildStorageTrie build up storage trie for given address with cumulative changes.
-func (s *State) BuildStorageTrie(addr thor.Address) (trie *muxdb.Trie, err error) {
- acc, err := s.getAccount(addr)
+func (s *State) BuildStorageTrie(addr thor.Address) (t *muxdb.Trie, err error) {
+ obj, err := s.getCachedObject(addr)
if err != nil {
return nil, &Error{err}
}
- if len(acc.StorageRoot) > 0 {
- obj, err := s.getCachedObject(addr)
- if err != nil {
- return nil, &Error{err}
- }
- trie = s.db.NewTrie(
+ if len(obj.data.StorageRoot) > 0 {
+ t = s.db.NewTrie(
StorageTrieName(obj.meta.StorageID),
- thor.BytesToBytes32(acc.StorageRoot),
- obj.meta.StorageCommitNum,
- obj.meta.StorageDistinctNum)
+ trie.Root{
+ Hash: thor.BytesToBytes32(obj.data.StorageRoot),
+ Ver: trie.Version{
+ Major: obj.meta.StorageMajorVer,
+ Minor: obj.meta.StorageMinorVer,
+ },
+ },
+ )
} else {
- trie = s.db.NewTrie(
+ t = s.db.NewTrie(
"",
- thor.Bytes32{},
- 0,
- 0,
+ trie.Root{},
)
}
@@ -391,8 +389,7 @@ func (s *State) BuildStorageTrie(addr thor.Address) (trie *muxdb.Trie, err error
switch key := k.(type) {
case storageKey:
if key.barrier == barrier && key.addr == addr {
- err = saveStorage(trie, key.key, v.(rlp.RawValue))
- if err != nil {
+ if err = saveStorage(t, key.key, v.(rlp.RawValue)); err != nil {
return false
}
}
@@ -402,11 +399,11 @@ func (s *State) BuildStorageTrie(addr thor.Address) (trie *muxdb.Trie, err error
if err != nil {
return nil, &Error{err}
}
- return trie, nil
+ return t, nil
}
// Stage makes a stage object to compute hash of trie or commit all changes.
-func (s *State) Stage(newBlockNum, newBlockConflicts uint32) (*Stage, error) {
+func (s *State) Stage(newVer trie.Version) (*Stage, error) {
type changed struct {
data Account
meta AccountMetadata
@@ -460,13 +457,12 @@ func (s *State) Stage(newBlockNum, newBlockConflicts uint32) (*Stage, error) {
}
c.storage[key.key] = v.(rlp.RawValue)
if len(c.meta.StorageID) == 0 {
- // generate storage id for the new storage trie.
- var enc lowrlp.Encoder
- enc.EncodeUint(uint64(newBlockNum))
- enc.EncodeUint(uint64(newBlockConflicts))
- enc.EncodeUint(storageTrieCreationCount)
+ id := binary.BigEndian.AppendUint32(nil, newVer.Major)
+ id = binary.AppendUvarint(id, uint64(newVer.Minor))
+ id = binary.AppendUvarint(id, storageTrieCreationCount)
+
+ c.meta.StorageID = id
storageTrieCreationCount++
- c.meta.StorageID = enc.ToBytes()
}
case storageBarrierKey:
if c, jerr = getChanged(thor.Address(key)); jerr != nil {
@@ -484,7 +480,7 @@ func (s *State) Stage(newBlockNum, newBlockConflicts uint32) (*Stage, error) {
}
trieCpy := s.trie.Copy()
- commits := make([]func() error, 0, len(changes)+2)
+ tries := make([]*muxdb.Trie, 0, len(changes)+2)
for addr, c := range changes {
// skip storage changes if account is empty
@@ -496,44 +492,54 @@ func (s *State) Stage(newBlockNum, newBlockConflicts uint32) (*Stage, error) {
} else {
sTrie = s.db.NewTrie(
StorageTrieName(c.meta.StorageID),
- thor.BytesToBytes32(c.data.StorageRoot),
- c.meta.StorageCommitNum,
- c.meta.StorageDistinctNum)
+ trie.Root{
+ Hash: thor.BytesToBytes32(c.data.StorageRoot),
+ Ver: trie.Version{
+ Major: c.meta.StorageMajorVer,
+ Minor: c.meta.StorageMinorVer,
+ },
+ })
}
for k, v := range c.storage {
if err := saveStorage(sTrie, k, v); err != nil {
return nil, &Error{err}
}
}
- sRoot, commit := sTrie.Stage(newBlockNum, newBlockConflicts)
+ sRoot := sTrie.Hash()
c.data.StorageRoot = sRoot[:]
- c.meta.StorageCommitNum = newBlockNum
- c.meta.StorageDistinctNum = newBlockConflicts
- commits = append(commits, commit)
+ c.meta.StorageMajorVer = newVer.Major
+ c.meta.StorageMinorVer = newVer.Minor
+ tries = append(tries, sTrie)
}
}
if err := saveAccount(trieCpy, addr, &c.data, &c.meta); err != nil {
return nil, &Error{err}
}
}
- root, commitAcc := trieCpy.Stage(newBlockNum, newBlockConflicts)
- commitCodes := func() error {
- if len(codes) > 0 {
- bulk := s.db.NewStore(codeStoreName).Bulk()
- for hash, code := range codes {
- if err := bulk.Put(hash[:], code); err != nil {
+ root := trieCpy.Hash()
+ tries = append(tries, trieCpy)
+
+ return &Stage{
+ root: root,
+ commit: func() error {
+ if len(codes) > 0 {
+ bulk := s.db.NewStore(codeStoreName).Bulk()
+ for hash, code := range codes {
+ if err := bulk.Put(hash[:], code); err != nil {
+ return err
+ }
+ }
+ if err := bulk.Write(); err != nil {
return err
}
}
- return bulk.Write()
- }
- return nil
- }
- commits = append(commits, commitAcc, commitCodes)
-
- return &Stage{
- root: root,
- commits: commits,
+ for _, t := range tries {
+ if err := t.Commit(newVer, false); err != nil {
+ return err
+ }
+ }
+ return nil
+ },
}, nil
}
diff --git a/state/state_test.go b/state/state_test.go
index 94cf3f979..966397600 100644
--- a/state/state_test.go
+++ b/state/state_test.go
@@ -13,12 +13,11 @@ import (
"github.com/stretchr/testify/assert"
"github.com/vechain/thor/v2/muxdb"
"github.com/vechain/thor/v2/thor"
+ "github.com/vechain/thor/v2/trie"
)
func TestStateReadWrite(t *testing.T) {
- db := muxdb.NewMem()
-
- state := New(db, thor.Bytes32{}, 0, 0, 0)
+ state := New(muxdb.NewMem(), trie.Root{})
addr := thor.BytesToAddress([]byte("account1"))
storageKey := thor.BytesToBytes32([]byte("storageKey"))
@@ -57,7 +56,7 @@ func TestStateReadWrite(t *testing.T) {
func TestStateRevert(t *testing.T) {
db := muxdb.NewMem()
- state := New(db, thor.Bytes32{}, 0, 0, 0)
+ state := New(muxdb.NewMem(), trie.Root{})
addr := thor.BytesToAddress([]byte("account1"))
storageKey := thor.BytesToBytes32([]byte("storageKey"))
@@ -92,15 +91,14 @@ func TestStateRevert(t *testing.T) {
assert.Equal(t, M(false, nil), M(state.Exists(addr)))
//
- state = New(db, thor.Bytes32{}, 0, 0, 0)
+ state = New(db, trie.Root{})
assert.Equal(t, state.NewCheckpoint(), 1)
state.RevertTo(0)
assert.Equal(t, state.NewCheckpoint(), 0)
}
func TestEnergy(t *testing.T) {
- db := muxdb.NewMem()
- st := New(db, thor.Bytes32{}, 0, 0, 0)
+ st := New(muxdb.NewMem(), trie.Root{})
acc := thor.BytesToAddress([]byte("a1"))
@@ -119,8 +117,7 @@ func TestEnergy(t *testing.T) {
}
func TestEncodeDecodeStorage(t *testing.T) {
- db := muxdb.NewMem()
- state := New(db, thor.Bytes32{}, 0, 0, 0)
+ state := New(muxdb.NewMem(), trie.Root{})
// Create an account and key
addr := thor.BytesToAddress([]byte("account1"))
@@ -153,8 +150,7 @@ func TestEncodeDecodeStorage(t *testing.T) {
}
func TestBuildStorageTrie(t *testing.T) {
- db := muxdb.NewMem()
- state := New(db, thor.Bytes32{}, 0, 0, 0)
+ state := New(muxdb.NewMem(), trie.Root{})
// Create an account and set storage values
addr := thor.BytesToAddress([]byte("account1"))
@@ -174,8 +170,7 @@ func TestBuildStorageTrie(t *testing.T) {
}
func TestStorage(t *testing.T) {
- db := muxdb.NewMem()
- st := New(db, thor.Bytes32{}, 0, 0, 0)
+ st := New(muxdb.NewMem(), trie.Root{})
addr := thor.BytesToAddress([]byte("addr"))
key := thor.BytesToBytes32([]byte("key"))
@@ -202,7 +197,7 @@ func TestStorage(t *testing.T) {
func TestStorageBarrier(t *testing.T) {
db := muxdb.NewMem()
- st := New(db, thor.Bytes32{}, 0, 0, 0)
+ st := New(db, trie.Root{})
addr := thor.BytesToAddress([]byte("addr"))
key := thor.BytesToBytes32([]byte("key"))
@@ -215,14 +210,14 @@ func TestStorageBarrier(t *testing.T) {
st.SetCode(addr, []byte("code"))
- stage, err := st.Stage(0, 0)
+ stage, err := st.Stage(trie.Version{})
assert.Nil(t, err)
root, err := stage.Commit()
assert.Nil(t, err)
- tr := db.NewTrie(AccountTrieName, root, 0, 0)
- acc, _, err := loadAccount(tr, addr, 0)
+ tr := db.NewTrie(AccountTrieName, trie.Root{Hash: root})
+ acc, _, err := loadAccount(tr, addr)
assert.Nil(t, err)
assert.Equal(t, 0, len(acc.StorageRoot), "should skip storage writes when account deleteed then recreated")
}
diff --git a/state/stater.go b/state/stater.go
index 6a6e476f3..a5be1df36 100644
--- a/state/stater.go
+++ b/state/stater.go
@@ -7,7 +7,7 @@ package state
import (
"github.com/vechain/thor/v2/muxdb"
- "github.com/vechain/thor/v2/thor"
+ "github.com/vechain/thor/v2/trie"
)
// Stater is the state creator.
@@ -21,6 +21,6 @@ func NewStater(db *muxdb.MuxDB) *Stater {
}
// NewState create a new state object.
-func (s *Stater) NewState(root thor.Bytes32, blockNum, blockConflicts, steadyBlockNum uint32) *State {
- return New(s.db, root, blockNum, blockConflicts, steadyBlockNum)
+func (s *Stater) NewState(root trie.Root) *State {
+ return New(s.db, root)
}
diff --git a/state/stater_test.go b/state/stater_test.go
index fb24f03ac..131b8d8f8 100644
--- a/state/stater_test.go
+++ b/state/stater_test.go
@@ -9,20 +9,17 @@ import (
"testing"
"github.com/vechain/thor/v2/muxdb"
- "github.com/vechain/thor/v2/thor"
+ "github.com/vechain/thor/v2/trie"
)
func TestStater(t *testing.T) {
- db := muxdb.NewMem()
- stater := NewStater(db)
+ stater := NewStater(muxdb.NewMem())
// Example State
- root := thor.Bytes32{}
- blockNum := uint32(1)
- blockConflicts := uint32(0)
- steadyBlockNum := uint32(1)
+ var root trie.Root
+ root.Ver.Major = 1
- state := stater.NewState(root, blockNum, blockConflicts, steadyBlockNum)
+ state := stater.NewState(root)
if state == nil {
t.Errorf("NewState returned nil")
diff --git a/test/datagen/bytes.go b/test/datagen/bytes.go
new file mode 100644
index 000000000..e01e2fece
--- /dev/null
+++ b/test/datagen/bytes.go
@@ -0,0 +1,14 @@
+// Copyright (c) 2024 The VeChainThor developers
+
+// Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying
+// file LICENSE or
+
+package datagen
+
+import "crypto/rand"
+
+func RandBytes(n int) []byte {
+ bytes := make([]byte, n)
+ rand.Read(bytes)
+ return bytes
+}
diff --git a/test/datagen/numbers.go b/test/datagen/numbers.go
index 9da0cdc00..b8745c978 100644
--- a/test/datagen/numbers.go
+++ b/test/datagen/numbers.go
@@ -13,6 +13,10 @@ func RandInt() int {
return mathrand.Int() //#nosec G404
}
+func RandUint64() uint64 {
+ return mathrand.Uint64() //#nosec G404
+}
+
func RandIntN(n int) int {
return mathrand.N(n) //#nosec G404
}
diff --git a/test/testchain/chain.go b/test/testchain/chain.go
index b35687d14..bbbe299e5 100644
--- a/test/testchain/chain.go
+++ b/test/testchain/chain.go
@@ -8,9 +8,11 @@ package testchain
import (
"errors"
"fmt"
+ "math/rand/v2"
"slices"
"time"
+ "github.com/ethereum/go-ethereum/crypto"
"github.com/vechain/thor/v2/bft"
"github.com/vechain/thor/v2/block"
"github.com/vechain/thor/v2/chain"
@@ -46,6 +48,7 @@ func New(
stater *state.Stater,
genesisBlock *block.Block,
logDB *logdb.LogDB,
+ forkConfig thor.ForkConfig,
) *Chain {
return &Chain{
db: db,
@@ -55,7 +58,7 @@ func New(
stater: stater,
genesisBlock: genesisBlock,
logDB: logDB,
- forkConfig: thor.GetForkConfig(genesisBlock.Header().ID()),
+ forkConfig: forkConfig,
}
}
@@ -87,6 +90,11 @@ func NewIntegrationTestChain() (*Chain, error) {
return nil, err
}
+ forkConfig := thor.NoFork
+ forkConfig.VIP191 = 1
+ forkConfig.BLOCKLIST = 0
+ forkConfig.VIP214 = 2
+
return New(
db,
gene,
@@ -95,6 +103,7 @@ func NewIntegrationTestChain() (*Chain, error) {
stater,
geneBlk,
logDb,
+ thor.NoFork,
), nil
}
@@ -124,6 +133,29 @@ func (c *Chain) MintTransactions(account genesis.DevAccount, transactions ...*tx
return c.MintBlock(account, transactions...)
}
+// MintClauses creates a transaction with the provided clauses and adds it to the blockchain.
+func (c *Chain) MintClauses(account genesis.DevAccount, clauses []*tx.Clause) error {
+ builer := new(tx.Builder).GasPriceCoef(255).
+ BlockRef(tx.NewBlockRef(c.Repo().BestBlockSummary().Header.Number())).
+ Expiration(1000).
+ ChainTag(c.Repo().ChainTag()).
+ Gas(10e6).
+ Nonce(rand.Uint64()) // nolint
+
+ for _, clause := range clauses {
+ builer.Clause(clause)
+ }
+
+ tx := builer.Build()
+ signature, err := crypto.Sign(tx.SigningHash().Bytes(), account.PrivateKey)
+ if err != nil {
+ return fmt.Errorf("unable to sign tx: %w", err)
+ }
+ tx = tx.WithSignature(signature)
+
+ return c.MintBlock(account, tx)
+}
+
// MintBlock creates and finalizes a new block with the given transactions.
// It schedules a new block, adopts transactions, packs them into a block, and commits it to the chain.
func (c *Chain) MintBlock(account genesis.DevAccount, transactions ...*tx.Transaction) error {
@@ -159,15 +191,18 @@ func (c *Chain) MintBlock(account genesis.DevAccount, transactions ...*tx.Transa
}
// Add the block to the repository.
- if err := c.Repo().AddBlock(newBlk, receipts, 0); err != nil {
+ if err := c.Repo().AddBlock(newBlk, receipts, 0, true); err != nil {
return fmt.Errorf("unable to add tx to repo: %w", err)
}
- // Set the new block as the best (latest) block in the repository.
- if err := c.Repo().SetBestBlockID(newBlk.Header().ID()); err != nil {
- return fmt.Errorf("unable to set best block: %w", err)
+ // Write the new block and receipts to the logdb.
+ w := c.LogDB().NewWriter()
+ if err := w.Write(newBlk, receipts); err != nil {
+ return err
+ }
+ if err := w.Commit(); err != nil {
+ return err
}
-
return nil
}
diff --git a/tracers/tracers_test.go b/tracers/tracers_test.go
index bd44a85c0..ddd6db1f6 100644
--- a/tracers/tracers_test.go
+++ b/tracers/tracers_test.go
@@ -37,6 +37,7 @@ import (
"github.com/vechain/thor/v2/thor"
"github.com/vechain/thor/v2/tracers"
"github.com/vechain/thor/v2/tracers/logger"
+ "github.com/vechain/thor/v2/trie"
"github.com/vechain/thor/v2/tx"
"github.com/vechain/thor/v2/vm"
"github.com/vechain/thor/v2/xenv"
@@ -119,7 +120,7 @@ func RunTracerTest(t *testing.T, data *traceTest, tracerName string) json.RawMes
}
repo, _ := chain.NewRepository(db, gene)
- st := state.New(db, gene.Header().StateRoot(), 0, 0, 0)
+ st := state.New(db, trie.Root{Hash: gene.Header().StateRoot()})
chain := repo.NewChain(gene.Header().ID())
for addr, account := range data.State {
@@ -368,7 +369,7 @@ func TestInternals(t *testing.T) {
}
repo, _ := chain.NewRepository(db, gene)
- st := state.New(db, gene.Header().StateRoot(), 0, 0, 0)
+ st := state.New(db, trie.Root{Hash: gene.Header().StateRoot()})
chain := repo.NewChain(gene.Header().ID())
st.SetCode(to, tc.code)
diff --git a/trie/derive_root.go b/trie/derive_root.go
index 9f03b1096..3eb9d15ea 100644
--- a/trie/derive_root.go
+++ b/trie/derive_root.go
@@ -5,9 +5,7 @@
package trie
import (
- "bytes"
-
- "github.com/ethereum/go-ethereum/rlp"
+ "github.com/qianbin/drlp"
"github.com/vechain/thor/v2/thor"
)
@@ -19,12 +17,15 @@ type DerivableList interface {
}
func DeriveRoot(list DerivableList) thor.Bytes32 {
- keybuf := new(bytes.Buffer)
- trie := new(Trie)
+ var (
+ trie Trie
+ key []byte
+ )
+
for i := 0; i < list.Len(); i++ {
- keybuf.Reset()
- rlp.Encode(keybuf, uint(i))
- trie.Update(keybuf.Bytes(), list.GetRlp(i))
+ key = drlp.AppendUint(key[:0], uint64(i))
+ trie.Update(key, list.GetRlp(i), nil)
}
+
return trie.Hash()
}
diff --git a/trie/derive_root_test.go b/trie/derive_root_test.go
index 3ade78cc1..5e3a95e90 100644
--- a/trie/derive_root_test.go
+++ b/trie/derive_root_test.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2024 The VeChainThor developers
+// Copyright (c) 2023 The VeChainThor developers
// Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying
// file LICENSE or
@@ -7,36 +7,23 @@ package trie
import (
"testing"
-
- "github.com/stretchr/testify/assert"
- "github.com/vechain/thor/v2/thor"
)
-type MockDerivableList struct {
- Elements [][]byte
+type mockedDerivableList struct {
+ n int
+ content []byte
}
-func (m *MockDerivableList) Len() int {
- return len(m.Elements)
-}
+func (l *mockedDerivableList) Len() int { return l.n }
-func (m *MockDerivableList) GetRlp(i int) []byte {
- if i >= len(m.Elements) {
- return nil
- }
- return m.Elements[i]
-}
+func (l *mockedDerivableList) GetRlp(i int) []byte { return l.content }
-func TestDeriveRoot(t *testing.T) {
- mockList := &MockDerivableList{
- Elements: [][]byte{
- {1, 2, 3, 4},
- {1, 2, 3, 4, 5, 6},
- },
+func BenchmarkDeriveRoot(b *testing.B) {
+ list := mockedDerivableList{
+ n: 100,
+ content: make([]byte, 32),
+ }
+ for i := 0; i < b.N; i++ {
+ DeriveRoot(&list)
}
-
- root := DeriveRoot(mockList)
-
- assert.Equal(t, "0x154227caf1172839284ce29cd6eaaee115af0993d5a5a4a08d9bb19ed18edae7", root.String())
- assert.NotEqual(t, thor.Bytes32{}, root, "The root hash should not be empty")
}
diff --git a/trie/encoding.go b/trie/encoding.go
index 1955a3e66..fa463414b 100644
--- a/trie/encoding.go
+++ b/trie/encoding.go
@@ -51,6 +51,35 @@ func hexToCompact(hex []byte) []byte {
return buf
}
+func compactLen(hex []byte) int {
+ hexLen := len(hex)
+ if hasTerm(hex) {
+ hexLen--
+ }
+ return hexLen/2 + 1
+}
+
+func appendHexToCompact(buf, hex []byte) []byte {
+ terminator := byte(0)
+ if hasTerm(hex) {
+ terminator = 1
+ hex = hex[:len(hex)-1]
+ }
+
+ b0 := terminator << 5 // the flag byte
+ if len(hex)&1 == 1 {
+ b0 |= 1 << 4 // odd flag
+ b0 |= hex[0] // first nibble is contained in the first byte
+ hex = hex[1:]
+ }
+ buf = append(buf, b0)
+
+ for bi, ni := 0, 0; ni < len(hex); bi, ni = bi+1, ni+2 {
+ buf = append(buf, hex[ni]<<4|hex[ni+1])
+ }
+ return buf
+}
+
func compactToHex(compact []byte) []byte {
if len(compact) == 0 {
return compact
diff --git a/trie/encoding_test.go b/trie/encoding_test.go
index 97d8da136..dd019d44f 100644
--- a/trie/encoding_test.go
+++ b/trie/encoding_test.go
@@ -39,6 +39,12 @@ func TestHexCompact(t *testing.T) {
if c := hexToCompact(test.hex); !bytes.Equal(c, test.compact) {
t.Errorf("hexToCompact(%x) -> %x, want %x", test.hex, c, test.compact)
}
+ if c := appendHexToCompact(nil, test.hex); !bytes.Equal(c, test.compact) {
+ t.Errorf("appendHexToCompact(%x) -> %x, want %x", test.hex, c, test.compact)
+ }
+ if l := compactLen(test.hex); l != len(test.compact) {
+ t.Errorf("compactLen(%x) -> %v, want %v", test.hex, l, len(test.compact))
+ }
if h := compactToHex(test.compact); !bytes.Equal(h, test.hex) {
t.Errorf("compactToHex(%x) -> %x, want %x", test.compact, h, test.hex)
}
@@ -82,6 +88,14 @@ func BenchmarkHexToCompact(b *testing.B) {
}
}
+func BenchmarkAppendHexToCompact(b *testing.B) {
+ testBytes := []byte{0, 15, 1, 12, 11, 8, 16 /*term*/}
+ var buf []byte
+ for i := 0; i < b.N; i++ {
+ buf = appendHexToCompact(buf[:0], testBytes)
+ }
+}
+
func BenchmarkCompactToHex(b *testing.B) {
testBytes := []byte{0, 15, 1, 12, 11, 8, 16 /*term*/}
for i := 0; i < b.N; i++ {
diff --git a/trie/errors.go b/trie/errors.go
index 92a84d0ef..9815e1f16 100644
--- a/trie/errors.go
+++ b/trie/errors.go
@@ -20,15 +20,15 @@ import (
"fmt"
)
-// MissingNodeError is returned by the trie functions (TryGet, TryUpdate, TryDelete)
+// MissingNodeError is returned by the trie functions (Get, Update)
// in the case where a trie node is not present in the local database. It contains
// information necessary for retrieving the missing node.
type MissingNodeError struct {
- NodeHash *hashNode // hash of the missing node
- Path []byte // hex-encoded path to the missing node
- Err error // the actual error
+ Ref refNode // the ref node of the missing node
+ Path []byte // hex-encoded path to the missing node
+ Err error // the actual error
}
func (err *MissingNodeError) Error() string {
- return fmt.Sprintf("missing trie node %v (#%v path %x) reason: %v", err.NodeHash.Hash, err.NodeHash.seq, err.Path, err.Err)
+ return fmt.Sprintf("missing trie node (path %x hash %x #%v) reason: %v", err.Path, err.Ref.hash, err.Ref.ver, err.Err)
}
diff --git a/trie/extended.go b/trie/extended.go
deleted file mode 100644
index 04b9de9ed..000000000
--- a/trie/extended.go
+++ /dev/null
@@ -1,201 +0,0 @@
-// Copyright (c) 2021 The VeChainThor developers
-
-// Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying
-// file LICENSE or
-
-package trie
-
-import "github.com/vechain/thor/v2/thor"
-
-// ExtendedTrie is an extended Merkle Patricia Trie which supports nodes sequence number
-// and leaf metadata.
-type ExtendedTrie struct {
- trie Trie
- nonCrypto bool
-}
-
-// Node contains the internal node object.
-type Node struct {
- node node
- cacheGen uint16
-}
-
-// Dirty returns if the node is dirty.
-func (n Node) Dirty() bool {
- if n.node != nil {
- _, dirty, _ := n.node.cache()
- return dirty
- }
- return true
-}
-
-// Hash returns the hash of the node. It returns zero hash in case of embedded or not computed.
-func (n Node) Hash() (hash thor.Bytes32) {
- if n.node != nil {
- if h, _, _ := n.node.cache(); h != nil {
- return h.Hash
- }
- }
- return
-}
-
-// SeqNum returns the node's sequence number. 0 is returned if the node is dirty.
-func (n Node) SeqNum() uint64 {
- if n.node != nil {
- return n.node.seqNum()
- }
- return 0
-}
-
-// NewExtended creates an extended trie.
-func NewExtended(root thor.Bytes32, seq uint64, db Database, nonCrypto bool) *ExtendedTrie {
- ext := &ExtendedTrie{trie: Trie{db: db}, nonCrypto: nonCrypto}
- if (root != thor.Bytes32{}) && root != emptyRoot {
- if db == nil {
- panic("trie.NewExtended: cannot use existing root without a database")
- }
- ext.trie.root = &hashNode{Hash: root, seq: seq}
- }
- return ext
-}
-
-// IsNonCrypto returns whether the trie is a non-crypto trie.
-func (e *ExtendedTrie) IsNonCrypto() bool {
- return e.nonCrypto
-}
-
-// NewExtendedCached creates an extended trie with the given root node.
-func NewExtendedCached(rootNode Node, db Database, nonCrypto bool) *ExtendedTrie {
- return &ExtendedTrie{trie: Trie{root: rootNode.node, db: db, cacheGen: rootNode.cacheGen}, nonCrypto: nonCrypto}
-}
-
-// SetCacheTTL sets life time of a cached node.
-func (e *ExtendedTrie) SetCacheTTL(ttl uint16) {
- e.trie.cacheTTL = ttl
-}
-
-// CacheTTL returns the life time of a cached node.
-func (e *ExtendedTrie) CacheTTL() uint16 {
- return e.trie.cacheTTL
-}
-
-// RootNode returns the current root node.
-func (e *ExtendedTrie) RootNode() Node {
- return Node{e.trie.root, e.trie.cacheGen}
-}
-
-// SetRootNode replace the root node with the given one.
-func (e *ExtendedTrie) SetRootNode(root Node) {
- e.trie.root = root.node
- e.trie.cacheGen = root.cacheGen
-}
-
-// NodeIterator returns an iterator that returns nodes of the trie. Iteration starts at
-// the key after the given start key. It filters out nodes satisfy the filter.
-func (e *ExtendedTrie) NodeIterator(start []byte, filter func(seq uint64) bool) NodeIterator {
- t := &e.trie
- return newNodeIterator(t, start, filter, true, e.nonCrypto)
-}
-
-// Get returns the value and metadata for key stored in the trie.
-// The value and meta bytes must not be modified by the caller.
-// If a node was not found in the database, a MissingNodeError is returned.
-func (e *ExtendedTrie) Get(key []byte) (val, meta []byte, err error) {
- t := &e.trie
-
- value, newroot, err := t.tryGet(t.root, keybytesToHex(key), 0)
- if t.root != newroot {
- t.root = newroot
- }
- if err != nil {
- return nil, nil, err
- }
-
- if value != nil {
- return value.Value, value.meta, nil
- }
- return nil, nil, nil
-}
-
-// Update associates key with value and metadata in the trie. Subsequent calls to
-// Get will return value. If value has length zero, any existing value
-// is deleted from the trie and calls to Get will return nil.
-//
-// The value and meta bytes must not be modified by the caller while they are
-// stored in the trie.
-//
-// If a node was not found in the database, a MissingNodeError is returned.
-func (e *ExtendedTrie) Update(key, value, meta []byte) error {
- t := &e.trie
-
- k := keybytesToHex(key)
- if len(value) != 0 {
- _, n, err := t.insert(t.root, nil, k, &valueNode{Value: value, meta: meta})
- if err != nil {
- return err
- }
- t.root = n
- } else {
- _, n, err := t.delete(t.root, nil, k)
- if err != nil {
- return err
- }
- t.root = n
- }
- return nil
-}
-
-// Hash returns the root hash of the trie. It does not write to the
-// database and can be used even if the trie doesn't have one.
-func (e *ExtendedTrie) Hash() thor.Bytes32 {
- t := &e.trie
- return t.Hash()
-}
-
-// Commit writes all nodes with the given sequence number to the trie's database.
-//
-// Committing flushes nodes from memory.
-// Subsequent Get calls will load nodes from the database.
-func (e *ExtendedTrie) Commit(seq uint64) (root thor.Bytes32, err error) {
- t := &e.trie
- if t.db == nil {
- panic("Commit called on trie with nil database")
- }
- return e.CommitTo(t.db, seq)
-}
-
-// CommitTo writes all nodes with the given sequence number to the given database.
-//
-// Committing flushes nodes from memory. Subsequent Get calls will
-// load nodes from the trie's database. Calling code must ensure that
-// the changes made to db are written back to the trie's attached
-// database before using the trie.
-func (e *ExtendedTrie) CommitTo(db DatabaseWriter, seq uint64) (root thor.Bytes32, err error) {
- t := &e.trie
- // ext trie always stores the root node even not changed. so here have to
- // resolve it (since ext trie lazily resolve the root node when initializing).
- if root, ok := t.root.(*hashNode); ok {
- rootnode, err := t.resolveHash(root, nil)
- if err != nil {
- return thor.Bytes32{}, err
- }
- t.root = rootnode
- }
- hash, cached, err := e.hashRoot(db, seq)
- if err != nil {
- return thor.Bytes32{}, err
- }
- t.root = cached
- t.cacheGen++
- return hash.(*hashNode).Hash, nil
-}
-
-func (e *ExtendedTrie) hashRoot(db DatabaseWriter, seq uint64) (node, node, error) {
- t := &e.trie
- if t.root == nil {
- return &hashNode{Hash: emptyRoot}, nil, nil
- }
- h := newHasherExtended(t.cacheGen, t.cacheTTL, seq, e.nonCrypto)
- defer returnHasherToPool(h)
- return h.hash(t.root, db, nil, true)
-}
diff --git a/trie/fast_node_encoder.go b/trie/fast_node_encoder.go
deleted file mode 100644
index f67f5b52a..000000000
--- a/trie/fast_node_encoder.go
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright (c) 2021 The VeChainThor developers
-
-// Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying
-// file LICENSE or
-
-package trie
-
-import (
- "github.com/vechain/thor/v2/lowrlp"
-)
-
-// implements node.encode and node.encodeTrailing
-
-func (n *fullNode) encode(e *lowrlp.Encoder, nonCrypto bool) {
- off := e.List()
- for _, c := range n.Children {
- if c != nil {
- c.encode(e, nonCrypto)
- } else {
- e.EncodeEmptyString()
- }
- }
- e.ListEnd(off)
-}
-
-func (n *fullNode) encodeTrailing(e *lowrlp.Encoder) {
- for _, c := range n.Children {
- if c != nil {
- c.encodeTrailing(e)
- }
- }
-}
-
-func (n *shortNode) encode(e *lowrlp.Encoder, nonCrypto bool) {
- off := e.List()
- e.EncodeString(n.Key)
- if n.Val != nil {
- n.Val.encode(e, nonCrypto)
- } else {
- e.EncodeEmptyString()
- }
- e.ListEnd(off)
-}
-
-func (n *shortNode) encodeTrailing(e *lowrlp.Encoder) {
- if n.Val != nil {
- n.Val.encodeTrailing(e)
- }
-}
-
-func (n *hashNode) encode(e *lowrlp.Encoder, nonCrypto bool) {
- if nonCrypto {
- e.EncodeString(nonCryptoNodeHashPlaceholder)
- } else {
- e.EncodeString(n.Hash[:])
- }
-}
-
-func (n *hashNode) encodeTrailing(e *lowrlp.Encoder) {
- e.EncodeUint(n.seq)
-}
-
-func (n *valueNode) encode(e *lowrlp.Encoder, _ bool) {
- e.EncodeString(n.Value)
-}
-
-func (n *valueNode) encodeTrailing(e *lowrlp.Encoder) {
- if len(n.Value) > 0 {
- e.EncodeString(n.meta)
- }
-}
diff --git a/trie/hasher.go b/trie/hasher.go
index 1b1bb384f..66ee01256 100644
--- a/trie/hasher.go
+++ b/trie/hasher.go
@@ -17,226 +17,148 @@
package trie
import (
+ "fmt"
"sync"
- "github.com/ethereum/go-ethereum/common"
- "github.com/vechain/thor/v2/lowrlp"
"github.com/vechain/thor/v2/thor"
)
type hasher struct {
- enc lowrlp.Encoder
- tmp sliceBuffer
- cacheGen uint16
- cacheTTL uint16
-
- extended bool
- seq uint64
- nonCrypto bool
-}
-
-type sliceBuffer []byte
+ buf []byte
-func (b *sliceBuffer) Write(data []byte) (n int, err error) {
- *b = append(*b, data...)
- return len(data), nil
-}
-
-func (b *sliceBuffer) Reset() {
- *b = (*b)[:0]
+ // parameters for storing nodes
+ newVer Version
+ cacheTTL uint16
+ skipHash bool
}
-// hashers live in a global pool.
+// cache hashers
var hasherPool = sync.Pool{
- New: func() interface{} {
- return &hasher{
- tmp: make(sliceBuffer, 0, 700), // cap is as large as a full fullNode.
- }
+ New: func() any {
+ return &hasher{}
},
}
-func newHasher(cacheGen, cacheTTL uint16) *hasher {
- h := hasherPool.Get().(*hasher)
- h.cacheGen = cacheGen
- h.cacheTTL = cacheTTL
- h.extended = false
- h.seq = 0
- h.nonCrypto = false
- return h
-}
-
-func newHasherExtended(cacheGen, cacheTTL uint16, seq uint64, nonCrypto bool) *hasher {
- h := hasherPool.Get().(*hasher)
- h.cacheGen = cacheGen
- h.cacheTTL = cacheTTL
- h.extended = true
- h.seq = seq
- h.nonCrypto = nonCrypto
- return h
-}
-
-func returnHasherToPool(h *hasher) {
- hasherPool.Put(h)
-}
-
-// hash collapses a node down into a hash node, also returning a copy of the
-// original node initialized with the computed hash to replace the original one.
-func (h *hasher) hash(n node, db DatabaseWriter, path []byte, force bool) (node, node, error) {
- // If we're not storing the node, just hashing, use available cached data
- if hash, dirty, gen := n.cache(); hash != nil {
- if db == nil {
- return hash, n, nil
+// hash computes and returns the hash of n.
+// If force is true, the node is always hashed even smaller than 32 bytes.
+func (h *hasher) hash(n node, force bool) []byte {
+ switch n := n.(type) {
+ case *fullNode:
+ // already hashed
+ if hash := n.flags.ref.hash; hash != nil {
+ return hash
}
-
- if !dirty {
- if !force { // non-root node
- if h.cacheGen-gen > h.cacheTTL { // drop cached nodes exceeds life-time
- return hash, hash, nil
- }
- return hash, n, nil
+ // hash all children
+ for i := 0; i < 16; i++ {
+ if cn := n.children[i]; cn != nil {
+ h.hash(cn, false)
}
+ }
- if !h.extended {
- return hash, n, nil
- }
- // else for extended trie, always store root node regardless of dirty flag
+ h.buf = n.encodeConsensus(h.buf[:0])
+ if len(h.buf) >= 32 || force {
+ n.flags.ref.hash = thor.Blake2b(h.buf).Bytes()
+ return n.flags.ref.hash
}
- }
- // Trie not processed yet or needs storage, walk the children
- collapsed, cached, err := h.hashChildren(n, db, path)
- if err != nil {
- return nil, n, err
- }
- hashed, err := h.store(collapsed, db, path, force)
- if err != nil {
- return nil, n, err
- }
- // Cache the hash of the node for later reuse and remove
- // the dirty flag in commit mode. It's fine to assign these values directly
- // without copying the node first because hashChildren copies it.
- cachedHash, _ := hashed.(*hashNode)
- switch cn := cached.(type) {
+ return nil
case *shortNode:
- cn.flags.hash = cachedHash
- if db != nil {
- cn.flags.dirty = false
+ // already hashed
+ if hash := n.flags.ref.hash; hash != nil {
+ return hash
}
- case *fullNode:
- cn.flags.hash = cachedHash
- if db != nil {
- cn.flags.dirty = false
+
+ // hash child node
+ h.hash(n.child, false)
+
+ h.buf = n.encodeConsensus(h.buf[:0])
+ if len(h.buf) >= 32 || force {
+ n.flags.ref.hash = thor.Blake2b(h.buf).Bytes()
+ return n.flags.ref.hash
}
+ return nil
+ case *refNode:
+ return n.hash
+ case *valueNode:
+ return nil
+ default:
+ panic(fmt.Sprintf("hash %T: unexpected node: %v", n, n))
}
- return hashed, cached, nil
}
-// hashChildren replaces the children of a node with their hashes if the encoded
-// size of the child is larger than a hash, returning the collapsed node as well
-// as a replacement for the original node with the child hashes cached in.
-func (h *hasher) hashChildren(original node, db DatabaseWriter, path []byte) (node, node, error) {
- var err error
-
- switch n := original.(type) {
- case *shortNode:
- // Hash the short node's child, caching the newly hashed subtree
- collapsed, cached := n.copy(), n.copy()
- collapsed.Key = hexToCompact(n.Key)
- cached.Key = common.CopyBytes(n.Key)
-
- if _, ok := n.Val.(*valueNode); !ok {
- collapsed.Val, cached.Val, err = h.hash(n.Val, db, append(path, n.Key...), false)
- if err != nil {
- return original, original, err
- }
- }
- // no need when using frlp
- // if collapsed.Val == nil {
- // collapsed.Val = &valueNode{} // Ensure that nil children are encoded as empty strings.
- // }
- return collapsed, cached, nil
+// store stores node n and all its dirty sub nodes.
+// Root node is always stored regardless of its dirty flag.
+func (h *hasher) store(n node, db DatabaseWriter, path []byte) (node, error) {
+ isRoot := len(path) == 0
+ switch n := n.(type) {
case *fullNode:
- // Hash the full node's children, caching the newly hashed subtrees
- collapsed, cached := n.copy(), n.copy()
-
+ n = n.copy()
for i := 0; i < 16; i++ {
- if n.Children[i] != nil {
- collapsed.Children[i], cached.Children[i], err = h.hash(n.Children[i], db, append(path, byte(i)), false)
- if err != nil {
- return original, original, err
+ cn := n.children[i]
+ switch cn := cn.(type) {
+ case *fullNode, *shortNode:
+ // store the child node if dirty
+ if ref, gen, dirty := cn.cache(); dirty {
+ nn, err := h.store(cn, db, append(path, byte(i)))
+ if err != nil {
+ return nil, err
+ }
+ n.children[i] = nn
+ } else {
+ // drop the cached node by replacing with its ref node when ttl reached
+ if n.flags.gen-gen > h.cacheTTL {
+ n.children[i] = &ref
+ }
}
}
- // no need when using frlp
- // else {
- // collapsed.Children[i] = &valueNode{} // Ensure that nil children are encoded as empty strings.
- // }
}
- // no need when using frlp
- // if collapsed.Children[16] == nil {
- // collapsed.Children[16] = &valueNode{}
- // }
- return collapsed, cached, nil
-
- default:
- // Value and hash nodes don't have children so they're left as were
- return n, original, nil
- }
-}
-func (h *hasher) store(n node, db DatabaseWriter, path []byte, force bool) (node, error) {
- // Don't store hashes or empty nodes.
- if _, isHash := n.(*hashNode); n == nil || isHash {
- return n, nil
- }
- // Generate the RLP encoding of the node
- h.enc.Reset()
- n.encode(&h.enc, h.nonCrypto)
- h.tmp.Reset()
- h.enc.ToWriter(&h.tmp)
-
- if h.nonCrypto {
- // fullnode and shortnode with non-value child are forced
- // just like normal trie.
- switch n := n.(type) {
- case *fullNode:
- force = true
- case *shortNode:
- if _, ok := n.Val.(*valueNode); !ok {
- force = true
+ // full node is stored in case of
+ // 1. it's the root node
+ // 2. it has hash value
+ // 3. hash is being skipped
+ if isRoot || n.flags.ref.hash != nil || h.skipHash {
+ h.buf = n.encode(h.buf[:0], h.skipHash)
+ if err := db.Put(path, h.newVer, h.buf); err != nil {
+ return nil, err
}
+ n.flags.dirty = false
+ n.flags.ref.ver = h.newVer
}
- }
-
- if len(h.tmp) < 32 && !force {
- return n, nil // Nodes smaller than 32 bytes are stored inside their parent
- }
- // Larger nodes are replaced by their hash and stored in the database.
- hash, _, _ := n.cache()
- if hash == nil {
- hash = &hashNode{}
- if h.nonCrypto {
- hash.Hash = NonCryptoNodeHash
- } else {
- hash.Hash = thor.Blake2b(h.tmp)
- }
- } else {
- cpy := *hash
- hash = &cpy
- }
- if db != nil {
- // extended
- if h.extended {
- h.enc.Reset()
- n.encodeTrailing(&h.enc)
- h.enc.ToWriter(&h.tmp)
- hash.seq = h.seq
+ return n, nil
+ case *shortNode:
+ n = n.copy()
+ switch cn := n.child.(type) {
+ case *fullNode, *shortNode:
+ if ref, gen, dirty := cn.cache(); dirty {
+ // store the child node if dirty
+ nn, err := h.store(cn, db, append(path, n.key...))
+ if err != nil {
+ return nil, err
+ }
+ n.child = nn
+ } else {
+ // drop the cached node by replacing with its ref node when ttl reached
+ if n.flags.gen-gen > h.cacheTTL {
+ n.child = &ref
+ }
+ }
}
- key := hash.Hash[:]
- if ke, ok := db.(DatabaseKeyEncoder); ok {
- key = ke.Encode(hash.Hash[:], h.seq, path)
+ // Here is the very significant improvement compared to maindb-v3. A short-node is embedded
+ // in its parent node whenever possible. Doing so can save about 30% storage space for a pruned trie.
+ //
+ // While for a hash-skipped trie, short-nodes are always stored as standalone nodes.
+ if isRoot || h.skipHash {
+ h.buf = n.encode(h.buf[:0], h.skipHash)
+ if err := db.Put(path, h.newVer, h.buf); err != nil {
+ return nil, err
+ }
+ n.flags.dirty = false
+ n.flags.ref.ver = h.newVer
}
- return hash, db.Put(key, h.tmp)
+ return n, nil
+ default:
+ panic(fmt.Sprintf("store %T: unexpected node: %v", n, n))
}
- return hash, nil
}
diff --git a/trie/iterator.go b/trie/iterator.go
index a27702f46..71f7f963a 100644
--- a/trie/iterator.go
+++ b/trie/iterator.go
@@ -18,10 +18,7 @@ package trie
import (
"bytes"
- "container/heap"
"errors"
-
- "github.com/vechain/thor/v2/thor"
)
// Iterator is a key-value trie iterator that traverses a Trie.
@@ -60,12 +57,6 @@ func (it *Iterator) Next() bool {
return false
}
-// Prove generates the Merkle proof for the leaf node the iterator is currently
-// positioned on.
-func (it *Iterator) Prove() [][]byte {
- return it.nodeIt.LeafProof()
-}
-
// Leaf presents the leaf node.
type Leaf struct {
Value []byte
@@ -81,18 +72,9 @@ type NodeIterator interface {
// Error returns the error status of the iterator.
Error() error
- // Hash returns the hash of the current node.
- Hash() thor.Bytes32
-
- // Node calls the handler with the blob of the current node if any.
- Node(handler func(blob []byte) error) error
-
- // SeqNum returns the sequence number of the current node.
- SeqNum() uint64
-
- // Parent returns the hash of the parent of the current node. The hash may be the one
- // grandparent if the immediate parent is an internal node with no hash.
- Parent() thor.Bytes32
+ // Blob returns the encoded blob and version num of the current node.
+ // If the current node is not stored as standalone node, the returned blob has zero length.
+ Blob() ([]byte, Version, error)
// Path returns the hex-encoded path to the current node.
// Callers must not retain references to the return value after calling Next.
@@ -106,31 +88,23 @@ type NodeIterator interface {
// positioned at a leaf. Callers must not retain references to the value after
// calling Next.
LeafKey() []byte
-
- // LeafProof returns the Merkle proof of the leaf. The method panics if the
- // iterator is not positioned at a leaf. Callers must not retain references
- // to the value after calling Next.
- LeafProof() [][]byte
}
// nodeIteratorState represents the iteration state at one particular node of the
// trie, which can be resumed at a later invocation.
type nodeIteratorState struct {
- hash thor.Bytes32 // Hash of the node being iterated (nil if not standalone)
- node node // Trie node being iterated
- parent thor.Bytes32 // Hash of the first full ancestor node (nil if current is the root)
- index int // Child to be processed next
- pathlen int // Length of the path to this node
+ node node // Trie node being iterated
+ index int // Child to be processed next
+ pathlen int // Length of the path to this node
+ blob []byte // Encoded blob of the node
}
type nodeIterator struct {
- trie *Trie // Trie being iterated
- stack []*nodeIteratorState // Hierarchy of trie nodes persisting the iteration state
- path []byte // Path to the current node
- err error // Failure set in case of an internal error in the iterator
- filter func(seq uint64) bool // The filter to filter iterated nodes.
- extended bool // If the trie is extended.
- nonCrypto bool // If the trie is non-crypto.
+ trie *Trie // Trie being iterated
+ stack []*nodeIteratorState // Hierarchy of trie nodes persisting the iteration state
+ path []byte // Path to the current node
+ err error // Failure set in case of an internal error in the iterator
+ minVer Version // Skips nodes whose version lower than minVer
}
// errIteratorEnd is stored in nodeIterator.err when iteration is done.
@@ -146,73 +120,43 @@ func (e seekError) Error() string {
return "seek error: " + e.err.Error()
}
-func newNodeIterator(trie *Trie, start []byte, filter func(seq uint64) bool, extended, nonCrypto bool) NodeIterator {
- if trie.Hash() == emptyState {
- return new(nodeIterator)
- }
+func newNodeIterator(trie *Trie, start []byte, min Version) NodeIterator {
it := &nodeIterator{
- trie: trie,
- filter: filter,
- extended: extended,
- nonCrypto: nonCrypto,
+ trie: trie,
+ minVer: min,
}
it.err = it.seek(start)
return it
}
-func (it *nodeIterator) Hash() thor.Bytes32 {
+func (it *nodeIterator) Blob() (blob []byte, ver Version, err error) {
if len(it.stack) == 0 {
- return thor.Bytes32{}
- }
- return it.stack[len(it.stack)-1].hash
-}
-
-func (it *nodeIterator) Node(handler func(blob []byte) error) error {
- if len(it.stack) == 0 {
- return nil
+ return nil, Version{}, nil
}
st := it.stack[len(it.stack)-1]
- if st.hash.IsZero() {
- return nil
+ ref, _, dirty := st.node.cache()
+ // dirty node has no blob
+ if dirty {
+ return
}
- h := newHasher(0, 0)
- h.extended = it.extended
- h.nonCrypto = it.nonCrypto
- defer returnHasherToPool(h)
-
- collapsed, _, _ := h.hashChildren(st.node, nil, it.path)
-
- h.enc.Reset()
- collapsed.encode(&h.enc, h.nonCrypto)
- if it.extended {
- collapsed.encodeTrailing(&h.enc)
+ if len(st.blob) > 0 {
+ blob, ver = st.blob, ref.ver
+ return
}
- h.tmp.Reset()
- h.enc.ToWriter(&h.tmp)
- return handler(h.tmp)
-}
-
-func (it *nodeIterator) SeqNum() uint64 {
- for i := len(it.stack) - 1; i >= 0; i-- {
- if st := it.stack[i]; !st.hash.IsZero() {
- return st.node.seqNum()
- }
- }
- return 0
-}
-func (it *nodeIterator) Parent() thor.Bytes32 {
- if len(it.stack) == 0 {
- return thor.Bytes32{}
+ // load from db
+ if blob, err = it.trie.db.Get(it.path, ref.ver); err != nil {
+ return
}
- return it.stack[len(it.stack)-1].parent
+ st.blob, ver = blob, ref.ver
+ return
}
func (it *nodeIterator) Leaf() *Leaf {
if len(it.stack) > 0 {
- if node, ok := it.stack[len(it.stack)-1].node.(*valueNode); ok {
- return &Leaf{node.Value, node.meta}
+ if vn, ok := it.stack[len(it.stack)-1].node.(*valueNode); ok {
+ return &Leaf{Value: vn.val, Meta: vn.meta}
}
}
return nil
@@ -227,30 +171,6 @@ func (it *nodeIterator) LeafKey() []byte {
panic("not at leaf")
}
-func (it *nodeIterator) LeafProof() [][]byte {
- if len(it.stack) > 0 {
- if _, ok := it.stack[len(it.stack)-1].node.(*valueNode); ok {
- hasher := newHasher(0, 0)
- defer returnHasherToPool(hasher)
-
- proofs := make([][]byte, 0, len(it.stack))
-
- for i, item := range it.stack[:len(it.stack)-1] {
- // Gather nodes that end up as hash nodes (or the root)
- node, _, _ := hasher.hashChildren(item.node, nil, nil)
- hashed, _ := hasher.store(node, nil, nil, false)
- if _, ok := hashed.(*hashNode); ok || i == 0 {
- hasher.enc.Reset()
- node.encode(&hasher.enc, hasher.nonCrypto)
- proofs = append(proofs, hasher.enc.ToBytes())
- }
- }
- return proofs
- }
- }
- panic("not at leaf")
-}
-
func (it *nodeIterator) Path() []byte {
return it.path
}
@@ -309,19 +229,21 @@ func (it *nodeIterator) seek(prefix []byte) error {
// peek creates the next state of the iterator.
func (it *nodeIterator) peek(descend bool) (*nodeIteratorState, *int, []byte, error) {
if len(it.stack) == 0 {
- if n := it.trie.root; n != nil {
- if !it.filter(n.seqNum()) {
+ n := it.trie.root
+ if n == nil {
+ return nil, nil, nil, errIteratorEnd
+ }
+ if ref, _, dirty := n.cache(); !dirty {
+ if ref.ver.Compare(it.minVer) < 0 {
return nil, nil, nil, errIteratorEnd
}
}
// Initialize the iterator if we've just started.
- root := it.trie.Hash()
state := &nodeIteratorState{node: it.trie.root, index: -1}
- if root != emptyRoot {
- state.hash = root
+ if err := state.resolve(it.trie, nil); err != nil {
+ return nil, nil, nil, err
}
- err := state.resolve(it.trie, nil)
- return state, nil, nil, err
+ return state, nil, nil, nil
}
if !descend {
// If we're skipping children, pop the current node first
@@ -331,11 +253,7 @@ func (it *nodeIterator) peek(descend bool) (*nodeIteratorState, *int, []byte, er
// Continue iteration to the next child
for len(it.stack) > 0 {
parent := it.stack[len(it.stack)-1]
- ancestor := parent.hash
- if (ancestor == thor.Bytes32{}) {
- ancestor = parent.parent
- }
- state, path, ok := it.nextChild(parent, ancestor)
+ state, path, ok := it.nextChild(parent)
if ok {
if err := state.resolve(it.trie, path); err != nil {
return parent, &parent.index, path, err
@@ -349,41 +267,35 @@ func (it *nodeIterator) peek(descend bool) (*nodeIteratorState, *int, []byte, er
}
func (st *nodeIteratorState) resolve(tr *Trie, path []byte) error {
- if hash, ok := st.node.(*hashNode); ok {
- resolved, err := tr.resolveHash(hash, path)
+ if ref, ok := st.node.(*refNode); ok {
+ blob, err := tr.db.Get(path, ref.ver)
if err != nil {
- return err
+ return &MissingNodeError{Ref: *ref, Path: path, Err: err}
}
- st.node = resolved
- st.hash = hash.Hash
+ st.blob = blob
+ st.node = mustDecodeNode(ref, blob, 0)
}
return nil
}
-func (it *nodeIterator) nextChild(parent *nodeIteratorState, ancestor thor.Bytes32) (*nodeIteratorState, []byte, bool) {
+func (it *nodeIterator) nextChild(parent *nodeIteratorState) (*nodeIteratorState, []byte, bool) {
switch node := parent.node.(type) {
case *fullNode:
// Full node, move to the first non-nil child.
- for i := parent.index + 1; i < len(node.Children); i++ {
- child := node.Children[i]
- if child != nil {
- hash, _, _ := child.cache()
- if _, ok := child.(*hashNode); ok || hash != nil {
- if !it.filter(child.seqNum()) {
+ for i := parent.index + 1; i < len(node.children); i++ {
+ if child := node.children[i]; child != nil {
+ if ref, _, dirty := child.cache(); !dirty {
+ if ref.ver.Compare(it.minVer) < 0 {
continue
}
}
state := &nodeIteratorState{
node: child,
- parent: ancestor,
index: -1,
pathlen: len(it.path),
}
- if hash != nil {
- state.hash = hash.Hash
- }
parent.index = i - 1
return state, append(it.path, byte(i)), true
}
@@ -391,25 +303,18 @@ func (it *nodeIterator) nextChild(parent *nodeIteratorState, ancestor thor.Bytes
case *shortNode:
// Short node, return the pointer singleton child
if parent.index < 0 {
- hash, _, _ := node.Val.cache()
-
- if _, ok := node.Val.(*hashNode); ok || hash != nil {
- if !it.filter(node.Val.seqNum()) {
+ if ref, _, dirty := node.child.cache(); !dirty {
+ if ref.ver.Compare(it.minVer) < 0 {
break
}
}
state := &nodeIteratorState{
- node: node.Val,
- parent: ancestor,
+ node: node.child,
index: -1,
pathlen: len(it.path),
}
-
- if hash != nil {
- state.hash = hash.Hash
- }
- return state, append(it.path, node.Key...), true
+ return state, append(it.path, node.key...), true
}
}
return parent, it.path, false
@@ -428,237 +333,3 @@ func (it *nodeIterator) pop() {
it.path = it.path[:parent.pathlen]
it.stack = it.stack[:len(it.stack)-1]
}
-
-func compareNodes(a, b NodeIterator) int {
- if cmp := bytes.Compare(a.Path(), b.Path()); cmp != 0 {
- return cmp
- }
-
- aLeaf := a.Leaf()
- bLeaf := b.Leaf()
-
- if aLeaf != nil && bLeaf == nil {
- return -1
- } else if bLeaf != nil && aLeaf == nil {
- return 1
- }
- if cmp := bytes.Compare(a.Hash().Bytes(), b.Hash().Bytes()); cmp != 0 {
- return cmp
- }
- if aLeaf != nil && bLeaf != nil {
- return bytes.Compare(aLeaf.Value, bLeaf.Value)
- }
- return 0
-}
-
-type differenceIterator struct {
- a, b NodeIterator // Nodes returned are those in b - a.
- eof bool // Indicates a has run out of elements
- count int // Number of nodes scanned on either trie
-}
-
-// NewDifferenceIterator constructs a NodeIterator that iterates over elements in b that
-// are not in a. Returns the iterator, and a pointer to an integer recording the number
-// of nodes seen.
-func NewDifferenceIterator(a, b NodeIterator) (NodeIterator, *int) {
- a.Next(true)
- it := &differenceIterator{
- a: a,
- b: b,
- }
- return it, &it.count
-}
-
-func (it *differenceIterator) Hash() thor.Bytes32 {
- return it.b.Hash()
-}
-
-func (it *differenceIterator) Node(handler func(blob []byte) error) error {
- return it.b.Node(handler)
-}
-
-func (it *differenceIterator) SeqNum() uint64 {
- return it.b.SeqNum()
-}
-
-func (it *differenceIterator) Parent() thor.Bytes32 {
- return it.b.Parent()
-}
-
-func (it *differenceIterator) Leaf() *Leaf {
- return it.b.Leaf()
-}
-
-func (it *differenceIterator) LeafKey() []byte {
- return it.b.LeafKey()
-}
-
-func (it *differenceIterator) LeafProof() [][]byte {
- return it.b.LeafProof()
-}
-
-func (it *differenceIterator) Path() []byte {
- return it.b.Path()
-}
-
-func (it *differenceIterator) Next(bool) bool {
- // Invariants:
- // - We always advance at least one element in b.
- // - At the start of this function, a's path is lexically greater than b's.
- if !it.b.Next(true) {
- return false
- }
- it.count++
-
- if it.eof {
- // a has reached eof, so we just return all elements from b
- return true
- }
-
- for {
- switch compareNodes(it.a, it.b) {
- case -1:
- // b jumped past a; advance a
- if !it.a.Next(true) {
- it.eof = true
- return true
- }
- it.count++
- case 1:
- // b is before a
- return true
- case 0:
- // a and b are identical; skip this whole subtree if the nodes have hashes
- hasHash := it.a.Hash() == thor.Bytes32{}
- if !it.b.Next(hasHash) {
- return false
- }
- it.count++
- if !it.a.Next(hasHash) {
- it.eof = true
- return true
- }
- it.count++
- }
- }
-}
-
-func (it *differenceIterator) Error() error {
- if err := it.a.Error(); err != nil {
- return err
- }
- return it.b.Error()
-}
-
-type nodeIteratorHeap []NodeIterator
-
-func (h nodeIteratorHeap) Len() int { return len(h) }
-func (h nodeIteratorHeap) Less(i, j int) bool { return compareNodes(h[i], h[j]) < 0 }
-func (h nodeIteratorHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] }
-func (h *nodeIteratorHeap) Push(x interface{}) { *h = append(*h, x.(NodeIterator)) }
-func (h *nodeIteratorHeap) Pop() interface{} {
- n := len(*h)
- x := (*h)[n-1]
- *h = (*h)[0 : n-1]
- return x
-}
-
-type unionIterator struct {
- items *nodeIteratorHeap // Nodes returned are the union of the ones in these iterators
- count int // Number of nodes scanned across all tries
-}
-
-// NewUnionIterator constructs a NodeIterator that iterates over elements in the union
-// of the provided NodeIterators. Returns the iterator, and a pointer to an integer
-// recording the number of nodes visited.
-func NewUnionIterator(iters []NodeIterator) (NodeIterator, *int) {
- h := make(nodeIteratorHeap, len(iters))
- copy(h, iters)
- heap.Init(&h)
-
- ui := &unionIterator{items: &h}
- return ui, &ui.count
-}
-
-func (it *unionIterator) Hash() thor.Bytes32 {
- return (*it.items)[0].Hash()
-}
-
-func (it *unionIterator) Node(handler func(blob []byte) error) error {
- return (*it.items)[0].Node(handler)
-}
-
-func (it *unionIterator) SeqNum() uint64 {
- return (*it.items)[0].SeqNum()
-}
-
-func (it *unionIterator) Parent() thor.Bytes32 {
- return (*it.items)[0].Parent()
-}
-
-func (it *unionIterator) Leaf() *Leaf {
- return (*it.items)[0].Leaf()
-}
-
-func (it *unionIterator) LeafKey() []byte {
- return (*it.items)[0].LeafKey()
-}
-
-func (it *unionIterator) LeafProof() [][]byte {
- return (*it.items)[0].LeafProof()
-}
-
-func (it *unionIterator) Path() []byte {
- return (*it.items)[0].Path()
-}
-
-// Next returns the next node in the union of tries being iterated over.
-//
-// It does this by maintaining a heap of iterators, sorted by the iteration
-// order of their next elements, with one entry for each source trie. Each
-// time Next() is called, it takes the least element from the heap to return,
-// advancing any other iterators that also point to that same element. These
-// iterators are called with descend=false, since we know that any nodes under
-// these nodes will also be duplicates, found in the currently selected iterator.
-// Whenever an iterator is advanced, it is pushed back into the heap if it still
-// has elements remaining.
-//
-// In the case that descend=false - eg, we're asked to ignore all subnodes of the
-// current node - we also advance any iterators in the heap that have the current
-// path as a prefix.
-func (it *unionIterator) Next(descend bool) bool {
- if len(*it.items) == 0 {
- return false
- }
-
- // Get the next key from the union
- least := heap.Pop(it.items).(NodeIterator)
-
- // Skip over other nodes as long as they're identical, or, if we're not descending, as
- // long as they have the same prefix as the current node.
- for len(*it.items) > 0 && ((!descend && bytes.HasPrefix((*it.items)[0].Path(), least.Path())) || compareNodes(least, (*it.items)[0]) == 0) {
- skipped := heap.Pop(it.items).(NodeIterator)
- // Skip the whole subtree if the nodes have hashes; otherwise just skip this node
- if skipped.Next(skipped.Hash() == thor.Bytes32{}) {
- it.count++
- // If there are more elements, push the iterator back on the heap
- heap.Push(it.items, skipped)
- }
- }
-
- if least.Next(descend) {
- it.count++
- heap.Push(it.items, least)
- }
-
- return len(*it.items) > 0
-}
-
-func (it *unionIterator) Error() error {
- for i := 0; i < len(*it.items); i++ {
- if err := (*it.items)[i].Error(); err != nil {
- return err
- }
- }
- return nil
-}
diff --git a/trie/iterator_test.go b/trie/iterator_test.go
index bddc99287..5da338417 100644
--- a/trie/iterator_test.go
+++ b/trie/iterator_test.go
@@ -18,22 +18,19 @@ package trie
import (
"bytes"
- "encoding/hex"
"fmt"
"math/rand/v2"
"testing"
"github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/ethdb"
"github.com/stretchr/testify/assert"
- "github.com/vechain/thor/v2/thor"
)
// makeTestTrie create a sample test trie to test node-wise reconstruction.
-func makeTestTrie() (ethdb.Database, *Trie, map[string][]byte) {
+func makeTestTrie() (*memdb, *Trie, map[string][]byte) {
// Create an empty trie
- db := ethdb.NewMemDatabase()
- trie, _ := New(thor.Bytes32{}, db)
+ db := newMemDatabase()
+ trie := New(Root{}, db)
// Fill it with some arbitrary data
content := make(map[string][]byte)
@@ -41,27 +38,28 @@ func makeTestTrie() (ethdb.Database, *Trie, map[string][]byte) {
// Map the same data under multiple keys
key, val := common.LeftPadBytes([]byte{1, i}, 32), []byte{i}
content[string(key)] = val
- trie.Update(key, val)
+ trie.Update(key, val, nil)
key, val = common.LeftPadBytes([]byte{2, i}, 32), []byte{i}
content[string(key)] = val
- trie.Update(key, val)
+ trie.Update(key, val, nil)
// Add some other data to inflate the trie
for j := byte(3); j < 13; j++ {
key, val = common.LeftPadBytes([]byte{j, i}, 32), []byte{j, i}
content[string(key)] = val
- trie.Update(key, val)
+ trie.Update(key, val, nil)
}
}
- trie.Commit()
+
+ trie.Commit(db, Version{Major: 1}, false)
// Return the generated trie
return db, trie, content
}
func TestIterator(t *testing.T) {
- trie := newEmpty()
+ trie := new(Trie)
vals := []struct{ k, v string }{
{"do", "verb"},
{"ether", "wookiedoo"},
@@ -74,12 +72,13 @@ func TestIterator(t *testing.T) {
all := make(map[string]string)
for _, val := range vals {
all[val.k] = val.v
- trie.Update([]byte(val.k), []byte(val.v))
+ trie.Update([]byte(val.k), []byte(val.v), nil)
}
- trie.Commit()
+ db := newMemDatabase()
+ trie.Commit(db, Version{}, false)
found := make(map[string]string)
- it := NewIterator(trie.NodeIterator(nil))
+ it := NewIterator(trie.NodeIterator(nil, Version{}))
for it.Next() {
found[string(it.Key)] = string(it.Value)
}
@@ -97,19 +96,19 @@ type kv struct {
}
func TestIteratorLargeData(t *testing.T) {
- trie := newEmpty()
+ trie := new(Trie)
vals := make(map[string]*kv)
for i := byte(0); i < 255; i++ {
value := &kv{common.LeftPadBytes([]byte{i}, 32), []byte{i}, false}
value2 := &kv{common.LeftPadBytes([]byte{10, i}, 32), []byte{i}, false}
- trie.Update(value.k, value.v)
- trie.Update(value2.k, value2.v)
+ trie.Update(value.k, value.v, nil)
+ trie.Update(value2.k, value2.v, nil)
vals[string(value.k)] = value
vals[string(value2.k)] = value2
}
- it := NewIterator(trie.NodeIterator(nil))
+ it := NewIterator(trie.NodeIterator(nil, Version{}))
for it.Next() {
vals[string(it.Key)].t = true
}
@@ -134,21 +133,22 @@ func TestNodeIteratorCoverage(t *testing.T) {
// Create some arbitrary test trie to iterate
db, trie, _ := makeTestTrie()
- // Gather all the node hashes found by the iterator
- hashes := make(map[thor.Bytes32]struct{})
- for it := trie.NodeIterator(nil); it.Next(true); {
- if it.Hash() != (thor.Bytes32{}) {
- hashes[it.Hash()] = struct{}{}
+ // Gather all the node storage key found by the iterator
+ keys := make(map[string]struct{})
+ for it := trie.NodeIterator(nil, Version{}); it.Next(true); {
+ blob, ver, _ := it.Blob()
+ if len(blob) > 0 {
+ keys[string(makeKey(it.Path(), ver))] = struct{}{}
}
}
// Cross check the hashes and the database itself
- for hash := range hashes {
- if _, err := db.Get(hash.Bytes()); err != nil {
- t.Errorf("failed to retrieve reported node %x: %v", hash, err)
+ for key := range keys {
+ if _, err := db.db.Get([]byte(key)); err != nil {
+ t.Errorf("failed to retrieve reported node %x: %v", key, err)
}
}
- for _, key := range db.(*ethdb.MemDatabase).Keys() {
- if _, ok := hashes[thor.BytesToBytes32(key)]; !ok {
+ for _, key := range db.db.Keys() {
+ if _, ok := keys[string(key)]; !ok {
t.Errorf("state entry not reported %x", key)
}
}
@@ -180,25 +180,25 @@ var testdata2 = []kvs{
}
func TestIteratorSeek(t *testing.T) {
- trie := newEmpty()
+ trie := new(Trie)
for _, val := range testdata1 {
- trie.Update([]byte(val.k), []byte(val.v))
+ trie.Update([]byte(val.k), []byte(val.v), nil)
}
// Seek to the middle.
- it := NewIterator(trie.NodeIterator([]byte("fab")))
+ it := NewIterator(trie.NodeIterator([]byte("fab"), Version{}))
if err := checkIteratorOrder(testdata1[4:], it); err != nil {
t.Fatal(err)
}
// Seek to a non-existent key.
- it = NewIterator(trie.NodeIterator([]byte("barc")))
+ it = NewIterator(trie.NodeIterator([]byte("barc"), Version{}))
if err := checkIteratorOrder(testdata1[1:], it); err != nil {
t.Fatal(err)
}
// Seek beyond the end.
- it = NewIterator(trie.NodeIterator([]byte("z")))
+ it = NewIterator(trie.NodeIterator([]byte("z"), Version{}))
if err := checkIteratorOrder(nil, it); err != nil {
t.Fatal(err)
}
@@ -220,136 +220,55 @@ func checkIteratorOrder(want []kvs, it *Iterator) error {
return nil
}
-func TestDifferenceIterator(t *testing.T) {
- triea := newEmpty()
- for _, val := range testdata1 {
- triea.Update([]byte(val.k), []byte(val.v))
- }
- triea.Commit()
-
- trieb := newEmpty()
- for _, val := range testdata2 {
- trieb.Update([]byte(val.k), []byte(val.v))
- }
- trieb.Commit()
-
- found := make(map[string]string)
- di, _ := NewDifferenceIterator(triea.NodeIterator(nil), trieb.NodeIterator(nil))
- it := NewIterator(di)
- for it.Next() {
- found[string(it.Key)] = string(it.Value)
- }
-
- all := []struct{ k, v string }{
- {"aardvark", "c"},
- {"barb", "bd"},
- {"bars", "be"},
- {"jars", "d"},
- }
- for _, item := range all {
- if found[item.k] != item.v {
- t.Errorf("iterator value mismatch for %s: got %v want %v", item.k, found[item.k], item.v)
- }
- }
- if len(found) != len(all) {
- t.Errorf("iterator count mismatch: got %d values, want %d", len(found), len(all))
- }
-}
-
-func TestUnionIterator(t *testing.T) {
- triea := newEmpty()
- for _, val := range testdata1 {
- triea.Update([]byte(val.k), []byte(val.v))
- }
- triea.Commit()
-
- trieb := newEmpty()
- for _, val := range testdata2 {
- trieb.Update([]byte(val.k), []byte(val.v))
- }
- trieb.Commit()
-
- di, _ := NewUnionIterator([]NodeIterator{triea.NodeIterator(nil), trieb.NodeIterator(nil)})
- it := NewIterator(di)
-
- all := []struct{ k, v string }{
- {"aardvark", "c"},
- {"barb", "ba"},
- {"barb", "bd"},
- {"bard", "bc"},
- {"bars", "bb"},
- {"bars", "be"},
- {"bar", "b"},
- {"fab", "z"},
- {"food", "ab"},
- {"foos", "aa"},
- {"foo", "a"},
- {"jars", "d"},
- }
-
- for i, kv := range all {
- if !it.Next() {
- t.Errorf("Iterator ends prematurely at element %d", i)
- }
- if kv.k != string(it.Key) {
- t.Errorf("iterator value mismatch for element %d: got key %s want %s", i, it.Key, kv.k)
- }
- if kv.v != string(it.Value) {
- t.Errorf("iterator value mismatch for element %d: got value %s want %s", i, it.Value, kv.v)
- }
- }
- if it.Next() {
- t.Errorf("Iterator returned extra values.")
- }
-}
-
func TestIteratorNoDups(t *testing.T) {
var tr Trie
for _, val := range testdata1 {
- tr.Update([]byte(val.k), []byte(val.v))
+ tr.Update([]byte(val.k), []byte(val.v), nil)
}
- checkIteratorNoDups(t, tr.NodeIterator(nil), nil)
+ checkIteratorNoDups(t, tr.NodeIterator(nil, Version{}), nil)
}
// This test checks that nodeIterator.Next can be retried after inserting missing trie nodes.
func TestIteratorContinueAfterError(t *testing.T) {
- db := ethdb.NewMemDatabase()
- tr, _ := New(thor.Bytes32{}, db)
+ db := newMemDatabase()
+ ver := Version{}
+ tr := New(Root{}, db)
for _, val := range testdata1 {
- tr.Update([]byte(val.k), []byte(val.v))
+ tr.Update([]byte(val.k), []byte(val.v), nil)
}
- tr.Commit()
- wantNodeCount := checkIteratorNoDups(t, tr.NodeIterator(nil), nil)
- keys := db.Keys()
+ ver.Major++
+ tr.Commit(db, ver, false)
+ wantNodeCount := checkIteratorNoDups(t, tr.NodeIterator(nil, Version{}), nil)
+ keys := db.db.Keys()
t.Log("node count", wantNodeCount)
for i := 0; i < 20; i++ {
// Create trie that will load all nodes from DB.
- tr, _ := New(tr.Hash(), db)
+ tr := New(Root{tr.Hash(), ver}, db)
// Remove a random node from the database. It can't be the root node
// because that one is already loaded.
var rkey []byte
for {
//#nosec G404
- if rkey = keys[rand.N(len(keys))]; !bytes.Equal(rkey, tr.Hash().Bytes()) {
+ if rkey = keys[rand.N(len(keys))]; !bytes.Equal(rkey, makeKey(nil, ver)) {
break
}
}
- rval, _ := db.Get(rkey)
- db.Delete(rkey)
+ rval, _ := db.db.Get(rkey)
+ db.db.Delete(rkey)
// Iterate until the error is hit.
seen := make(map[string]bool)
- it := tr.NodeIterator(nil)
+ it := tr.NodeIterator(nil, Version{})
checkIteratorNoDups(t, it, seen)
missing, ok := it.Error().(*MissingNodeError)
- if !ok || !bytes.Equal(missing.NodeHash.Hash[:], rkey) {
+ if !ok || !bytes.Equal(makeKey(missing.Path, ver), rkey) {
t.Fatal("didn't hit missing node, got", it.Error())
}
// Add the node back and continue iteration.
- db.Put(rkey, rval)
+ db.db.Put(rkey, rval)
checkIteratorNoDups(t, it, seen)
if it.Error() != nil {
t.Fatal("unexpected error", it.Error())
@@ -360,41 +279,6 @@ func TestIteratorContinueAfterError(t *testing.T) {
}
}
-// Similar to the test above, this one checks that failure to create nodeIterator at a
-// certain key prefix behaves correctly when Next is called. The expectation is that Next
-// should retry seeking before returning true for the first time.
-func TestIteratorContinueAfterSeekError(t *testing.T) {
- // Commit test trie to db, then remove the node containing "bars".
- db := ethdb.NewMemDatabase()
- ctr, _ := New(thor.Bytes32{}, db)
- for _, val := range testdata1 {
- ctr.Update([]byte(val.k), []byte(val.v))
- }
- root, _ := ctr.Commit()
- barNodeHash, _ := hex.DecodeString("d32fb77ad25227d60b76d53a512d28137304c9c03556db08a1709563c7ae9c9f")
- barNode, _ := db.Get(barNodeHash[:])
- db.Delete(barNodeHash[:])
-
- // Create a new iterator that seeks to "bars". Seeking can't proceed because
- // the node is missing.
- tr, _ := New(root, db)
- it := tr.NodeIterator([]byte("bars"))
- missing, ok := it.Error().(*MissingNodeError)
- if !ok {
- t.Fatal("want MissingNodeError, got", it.Error())
- } else if !bytes.Equal(missing.NodeHash.Hash[:], barNodeHash) {
- t.Fatal("wrong node missing")
- }
-
- // Reinsert the missing node.
- db.Put(barNodeHash[:], barNode[:])
-
- // Check that iteration produces the right set of values.
- if err := checkIteratorOrder(testdata1[2:], NewIterator(it)); err != nil {
- t.Fatal(err)
- }
-}
-
func checkIteratorNoDups(t *testing.T, it NodeIterator, seen map[string]bool) int {
if seen == nil {
seen = make(map[string]bool)
@@ -409,33 +293,36 @@ func checkIteratorNoDups(t *testing.T, it NodeIterator, seen map[string]bool) in
}
func TestIteratorNodeFilter(t *testing.T) {
- db := ethdb.NewMemDatabase()
- tr := NewExtended(thor.Bytes32{}, 0, db, false)
+ db := newMemDatabase()
+ ver := Version{}
+ tr := New(Root{}, db)
for _, val := range testdata1 {
tr.Update([]byte(val.k), []byte(val.v), nil)
}
- root1, _ := tr.Commit(1)
- _ = root1
+ ver.Major++
+ tr.Commit(db, ver, false)
for _, val := range testdata2 {
tr.Update([]byte(val.k), []byte(val.v), nil)
}
- root2, _ := tr.Commit(2)
+ ver.Major++
+ tr.Commit(db, ver, false)
+ root2 := tr.Hash()
- tr = NewExtended(root2, 2, db, false)
+ tr = New(Root{root2, Version{Major: 2}}, db)
- it := tr.NodeIterator(nil, func(seq uint64) bool { return seq >= 1 })
+ it := tr.NodeIterator(nil, Version{Major: 1})
for it.Next(true) {
- if h := it.Hash(); !h.IsZero() {
- assert.True(t, it.SeqNum() >= 1)
+ if blob, ver, _ := it.Blob(); len(blob) > 0 {
+ assert.True(t, ver.Major >= 1)
}
}
- it = tr.NodeIterator(nil, func(seq uint64) bool { return seq >= 2 })
+ it = tr.NodeIterator(nil, Version{Major: 2})
for it.Next(true) {
- if h := it.Hash(); !h.IsZero() {
- assert.True(t, it.SeqNum() >= 2)
+ if blob, ver, _ := it.Blob(); len(blob) > 0 {
+ assert.True(t, ver.Major >= 2)
}
}
}
diff --git a/trie/node.go b/trie/node.go
index 77108aac3..eb295e8a0 100644
--- a/trie/node.go
+++ b/trie/node.go
@@ -17,108 +17,91 @@
package trie
import (
- "bytes"
- "errors"
+ "encoding/binary"
"fmt"
"io"
"strings"
- "github.com/ethereum/go-ethereum/rlp"
- "github.com/vechain/thor/v2/lowrlp"
- "github.com/vechain/thor/v2/thor"
+ "github.com/qianbin/drlp"
)
-var NonCryptoNodeHash = thor.BytesToBytes32(bytes.Repeat([]byte{0xff}, 32))
-var nonCryptoNodeHashPlaceholder = []byte{0}
-
var indices = []string{"0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "[17]"}
+// node kinds (lower 3 bits of node tag)
+const (
+ kindEmpty byte = iota
+ kindFull
+ kindShort
+ kindRef
+ kindValue
+)
+
+// node attributes (higher 5 bits of node tag)
+const (
+ attrHasHash = byte(1 << iota) // indicates a ref node has the hash field
+ attrHasMajor // indicates a ref node has the ver.Major field
+ attrHasMinor // indicates a ref node has the ver.Minor field
+ attrHasMeta // indicates a value node has the meta field
+ attrHasManyRef // indicates a full node contains many ref nodes
+)
+
type node interface {
+ Version() Version
fstring(string) string
- cache() (*hashNode, bool, uint16)
- seqNum() uint64
- encode(e *lowrlp.Encoder, nonCrypto bool)
- encodeTrailing(*lowrlp.Encoder)
+ cache() (ref refNode, gen uint16, dirty bool)
+ encodeConsensus(buf []byte) []byte // encode the node for computing MPT root
+ encode(buf []byte, skipHash bool) []byte
}
type (
fullNode struct {
- Children [17]node // Actual trie node data to encode/decode (needs custom encoder)
+ children [17]node
flags nodeFlag
}
shortNode struct {
- Key []byte
- Val node
+ key []byte
+ child node
flags nodeFlag
}
- hashNode struct {
- Hash thor.Bytes32
- seq uint64 // the sequence number
+ refNode struct {
+ hash []byte
+ ver Version
}
valueNode struct {
- Value []byte
- meta []byte // metadata of the value
+ val []byte
+ meta []byte // metadata of the value
}
)
-// EncodeRLP encodes a full node into the consensus RLP format.
-func (n *fullNode) EncodeRLP(w io.Writer) error {
- return rlp.Encode(w, n.Children)
-}
+func (n *fullNode) Version() Version { return n.flags.ref.ver }
+func (n *shortNode) Version() Version { return n.flags.ref.ver }
+func (n *refNode) Version() Version { return n.ver }
+func (n *valueNode) Version() Version { return Version{} }
-// EncodeRLP encodes a hash node into the consensus RLP format.
-func (n *hashNode) EncodeRLP(w io.Writer) error {
- return rlp.Encode(w, n.Hash)
-}
-
-// EncodeRLP encodes a value node into the consensus RLP format.
-func (n *valueNode) EncodeRLP(w io.Writer) error {
- return rlp.Encode(w, n.Value)
-}
-
-func (n *fullNode) copy() *fullNode { cpy := *n; return &cpy }
-func (n *shortNode) copy() *shortNode { cpy := *n; return &cpy }
+func (n *fullNode) copy() *fullNode { copy := *n; return &copy }
+func (n *shortNode) copy() *shortNode { copy := *n; return &copy }
// nodeFlag contains caching-related metadata about a node.
type nodeFlag struct {
- hash *hashNode // cached hash of the node (may be nil)
- dirty bool // whether the node has changes that must be written to the database
- gen uint16 // cache generation counter
-}
-
-func (n *fullNode) cache() (*hashNode, bool, uint16) { return n.flags.hash, n.flags.dirty, n.flags.gen }
-func (n *shortNode) cache() (*hashNode, bool, uint16) {
- return n.flags.hash, n.flags.dirty, n.flags.gen
-}
-func (n *hashNode) cache() (*hashNode, bool, uint16) { return nil, true, 0 }
-func (n *valueNode) cache() (*hashNode, bool, uint16) { return nil, true, 0 }
-
-func (n *fullNode) seqNum() uint64 {
- if n.flags.hash != nil {
- return n.flags.hash.seq
- }
- return 0
+ ref refNode // cached ref of the node
+ gen uint16 // cache generation counter
+ dirty bool // whether the node has changes that must be written to the database
}
-func (n *shortNode) seqNum() uint64 {
- if n.flags.hash != nil {
- return n.flags.hash.seq
- }
- return 0
-}
-
-func (n *hashNode) seqNum() uint64 { return n.seq }
-func (n *valueNode) seqNum() uint64 { return 0 }
+func (n *fullNode) cache() (refNode, uint16, bool) { return n.flags.ref, n.flags.gen, n.flags.dirty }
+func (n *shortNode) cache() (refNode, uint16, bool) { return n.flags.ref, n.flags.gen, n.flags.dirty }
+func (n *refNode) cache() (refNode, uint16, bool) { return *n, 0, false }
+func (n *valueNode) cache() (refNode, uint16, bool) { return refNode{}, 0, true }
// Pretty printing.
func (n *fullNode) String() string { return n.fstring("") }
func (n *shortNode) String() string { return n.fstring("") }
-func (n *hashNode) String() string { return n.fstring("") }
+func (n *refNode) String() string { return n.fstring("") }
func (n *valueNode) String() string { return n.fstring("") }
func (n *fullNode) fstring(ind string) string {
resp := fmt.Sprintf("[\n%s ", ind)
- for i, node := range n.Children {
+ for i, node := range n.children {
if node == nil {
resp += fmt.Sprintf("%s: ", indices[i])
} else {
@@ -128,194 +111,165 @@ func (n *fullNode) fstring(ind string) string {
return resp + fmt.Sprintf("\n%s] ", ind)
}
func (n *shortNode) fstring(ind string) string {
- return fmt.Sprintf("{%x: %v} ", n.Key, n.Val.fstring(ind+" "))
-}
-func (n *hashNode) fstring(_ string) string {
- return fmt.Sprintf("<%v> ", n.Hash)
-}
-func (n *valueNode) fstring(_ string) string {
- return fmt.Sprintf("%x ", n.Value)
-}
-
-// trailing is the splitted rlp list of extra data of the trie node.
-type trailing []byte
-
-func (t *trailing) next() ([]byte, error) {
- if t == nil {
- return nil, nil
- }
- if len(*t) == 0 {
- return nil, io.EOF
- }
-
- content, rest, err := rlp.SplitString(*t)
- if err != nil {
- return nil, err
- }
-
- *t = rest
- return content, nil
+ return fmt.Sprintf("{%x: %v} ", n.key, n.child.fstring(ind+" "))
}
-
-// NextSeq decodes the current list element to seq number and move to the next one.
-// It returns io.EOF if reaches end.
-func (t *trailing) NextSeq() (seq uint64, err error) {
- content, err := t.next()
- if err != nil {
- return 0, err
- }
- if len(content) > 8 {
- return 0, errors.New("encoded seq too long")
- }
-
- for _, b := range content {
- seq <<= 8
- seq |= uint64(b)
- }
- return
+func (n *refNode) fstring(ind string) string {
+ return fmt.Sprintf("<%x> #%v", n.hash, n.ver)
}
-
-// NextMeta returns the current list element as leaf metadata and move to the next one.
-// It returns io.EOF if reaches end.
-func (t *trailing) NextMeta() ([]byte, error) {
- return t.next()
+func (n *valueNode) fstring(ind string) string {
+ return fmt.Sprintf("%x - %x", n.val, n.meta)
}
-func mustDecodeNode(hash *hashNode, buf []byte, cacheGen uint16) node {
- _, _, rest, err := rlp.Split(buf)
- if err != nil {
- panic(fmt.Sprintf("node %v: %v", hash.Hash, err))
- }
- trailing := (*trailing)(&rest)
- if len(rest) == 0 {
- trailing = nil
- }
- buf = buf[:len(buf)-len(rest)]
- n, err := decodeNode(hash, buf, trailing, cacheGen)
+func mustDecodeNode(ref *refNode, buf []byte, cacheGen uint16) node {
+ n, _, err := decodeNode(ref, buf, cacheGen)
if err != nil {
- panic(fmt.Sprintf("node %v: %v", hash.Hash, err))
- }
- if trailing != nil && len(*trailing) != 0 {
- panic(fmt.Sprintf("node %v: trailing buffer not fully consumed", hash.Hash))
+ panic(fmt.Sprintf("node %v: %v", ref, err))
}
return n
}
-// decodeNode parses the RLP encoding of a trie node.
-func decodeNode(hash *hashNode, buf []byte, trailing *trailing, cacheGen uint16) (node, error) {
+// decodeNode parses a trie node in storage.
+func decodeNode(ref *refNode, buf []byte, cacheGen uint16) (node, []byte, error) {
if len(buf) == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- elems, _, err := rlp.SplitList(buf)
- if err != nil {
- return nil, fmt.Errorf("decode error: %v", err)
- }
- switch c, _ := rlp.CountValues(elems); c {
- case 2:
- n, err := decodeShort(hash, buf, elems, trailing, cacheGen)
- return n, wrapError(err, "short")
- case 17:
- n, err := decodeFull(hash, buf, elems, trailing, cacheGen)
- return n, wrapError(err, "full")
- default:
- return nil, fmt.Errorf("invalid number of list elements: %v", c)
- }
-}
-
-func decodeShort(hash *hashNode, buf, elems []byte, trailing *trailing, cacheGen uint16) (*shortNode, error) {
- kbuf, rest, err := rlp.SplitString(elems)
- if err != nil {
- return nil, err
- }
- flag := nodeFlag{hash: hash, gen: cacheGen}
- key := compactToHex(kbuf)
- if hasTerm(key) {
- // value node
- val, _, err := rlp.SplitString(rest)
+ return nil, nil, io.ErrUnexpectedEOF
+ }
+ tag := buf[0]
+ buf = buf[1:]
+ kind, attrs := tag&0x7, tag>>3
+ switch kind {
+ case kindEmpty:
+ return nil, buf, nil
+ case kindFull:
+ n, rest, err := decodeFull(ref, buf, cacheGen, attrs)
if err != nil {
- return nil, fmt.Errorf("invalid value node: %v", err)
+ return nil, nil, wrapError(err, "full")
}
- meta, err := trailing.NextMeta()
+ return n, rest, nil
+ case kindShort:
+ n, rest, err := decodeShort(ref, buf, cacheGen, attrs)
if err != nil {
- return nil, fmt.Errorf("invalid value meta: %v", err)
+ return nil, nil, wrapError(err, "short")
}
-
- vn := &valueNode{Value: append([]byte(nil), val...)}
- if len(meta) > 0 {
- vn.meta = append([]byte(nil), meta...)
+ return n, rest, nil
+ case kindRef:
+ n, rest, err := decodeRef(&refNode{}, buf, attrs)
+ if err != nil {
+ return nil, nil, wrapError(err, "ref")
+ }
+ return n, rest, nil
+ case kindValue:
+ n, rest, err := decodeValue(buf, attrs)
+ if err != nil {
+ return nil, nil, wrapError(err, "value")
}
- return &shortNode{key, vn, flag}, nil
+ return n, rest, nil
+ default:
+ return nil, nil, fmt.Errorf("invalid node kind %v", kind)
}
+}
- r, _, err := decodeRef(rest, trailing, cacheGen)
- if err != nil {
- return nil, wrapError(err, "val")
+func decodeFull(ref *refNode, buf []byte, cacheGen uint16, attrs byte) (*fullNode, []byte, error) {
+ var (
+ n = fullNode{flags: nodeFlag{gen: cacheGen}}
+ err error
+ refs []refNode // prealloced ref nodes
+ )
+ if ref != nil {
+ n.flags.ref = *ref
+ } else {
+ n.flags.dirty = true
+ }
+
+ // prealloc an array of refNode, to reduce alloc count
+ if (attrs & attrHasManyRef) != 0 {
+ refs = make([]refNode, 16)
+ }
+
+ for i := range n.children {
+ if tag := buf[0]; tag&0x7 == kindRef {
+ var ref *refNode
+ if len(refs) > 0 {
+ ref = &refs[0]
+ refs = refs[1:]
+ } else {
+ ref = &refNode{}
+ }
+ if n.children[i], buf, err = decodeRef(ref, buf[1:], tag>>3); err != nil {
+ return nil, nil, wrapError(err, fmt.Sprintf("[%d]", i))
+ }
+ } else {
+ if n.children[i], buf, err = decodeNode(nil, buf, cacheGen); err != nil {
+ return nil, nil, wrapError(err, fmt.Sprintf("[%d]", i))
+ }
+ }
}
- return &shortNode{key, r, flag}, nil
+ return &n, buf, nil
}
-func decodeFull(hash *hashNode, _, elems []byte, trailing *trailing, cacheGen uint16) (*fullNode, error) {
- n := &fullNode{flags: nodeFlag{hash: hash, gen: cacheGen}}
- for i := 0; i < 16; i++ {
- cld, rest, err := decodeRef(elems, trailing, cacheGen)
- if err != nil {
- return n, wrapError(err, fmt.Sprintf("[%d]", i))
- }
- n.Children[i], elems = cld, rest
+func decodeShort(ref *refNode, buf []byte, cacheGen uint16, attrs byte) (*shortNode, []byte, error) {
+ var (
+ n = shortNode{flags: nodeFlag{gen: cacheGen}}
+ err error
+ compactKey []byte
+ )
+ if ref != nil {
+ n.flags.ref = *ref
+ } else {
+ n.flags.dirty = true
}
- val, _, err := rlp.SplitString(elems)
- if err != nil {
- return n, err
+
+ // decode key
+ if compactKey, buf, err = vp.SplitString(buf); err != nil {
+ return nil, nil, err
}
- if len(val) > 0 {
- meta, err := trailing.NextMeta()
- if err != nil {
- return nil, fmt.Errorf("invalid value meta: %v", err)
- }
+ n.key = compactToHex(compactKey)
- vn := &valueNode{Value: append([]byte(nil), val...)}
- if len(meta) > 0 {
- vn.meta = append([]byte(nil), meta...)
- }
- n.Children[16] = vn
+ if hasTerm(n.key) {
+ // decode value
+ n.child, buf, err = decodeValue(buf, attrs)
+ } else {
+ // decode child node
+ n.child, buf, err = decodeNode(nil, buf, cacheGen)
}
- return n, nil
+ if err != nil {
+ return nil, nil, err
+ }
+ return &n, buf, nil
}
-const hashLen = len(thor.Bytes32{})
-
-func decodeRef(buf []byte, trailing *trailing, cacheGen uint16) (node, []byte, error) {
- kind, val, rest, err := rlp.Split(buf)
- if err != nil {
- return nil, buf, err
+func decodeValue(buf []byte, attrs byte) (*valueNode, []byte, error) {
+ var (
+ n valueNode
+ err error
+ )
+ // decode val
+ if n.val, buf, err = vp.SplitString(buf); err != nil {
+ return nil, nil, err
}
- if kind == rlp.List {
- // 'embedded' node reference. The encoding must be smaller
- // than a hash in order to be valid.
- if size := len(buf) - len(rest); size > hashLen {
- err := fmt.Errorf("oversized embedded node (size is %d bytes, want size < %d)", size, hashLen)
- return nil, buf, err
+
+ // decode meta
+ if (attrs & attrHasMeta) != 0 {
+ if n.meta, buf, err = vp.SplitString(buf); err != nil {
+ return nil, nil, err
}
- n, err := decodeNode(nil, buf, trailing, cacheGen)
- return n, rest, err
- }
- // string kind
- valLen := len(val)
- if valLen == 0 {
- // empty node
- return nil, rest, nil
}
- seq, err := trailing.NextSeq()
- if err != nil {
- return nil, nil, fmt.Errorf("invalid seq number: %v", err)
+ return &n, buf, nil
+}
+
+func decodeRef(n *refNode, buf []byte, attrs byte) (*refNode, []byte, error) {
+ // decode hash
+ if (attrs & attrHasHash) != 0 {
+ n.hash, buf = buf[:32], buf[32:]
}
- if valLen == 32 {
- return &hashNode{Hash: thor.BytesToBytes32(val), seq: seq}, rest, nil
+
+ // decode version
+ if (attrs & attrHasMajor) != 0 {
+ n.ver.Major, buf = binary.BigEndian.Uint32(buf), buf[4:]
}
- if valLen == 1 && val[0] == nonCryptoNodeHashPlaceholder[0] {
- return &hashNode{Hash: NonCryptoNodeHash, seq: seq}, rest, nil
+ if (attrs & attrHasMinor) != 0 {
+ n.ver.Minor, buf = binary.BigEndian.Uint32(buf), buf[4:]
}
- return nil, nil, fmt.Errorf("invalid RLP string size %d (want 0, 1 or 32)", len(val))
+ return n, buf, nil
}
// wraps a decoding error with information about the path to the
@@ -340,15 +294,160 @@ func (err *decodeError) Error() string {
return fmt.Sprintf("%v (decode path: %s)", err.what, strings.Join(err.stack, "<-"))
}
-// VerifyNodeHash verifies the hash of the node blob (trailing excluded).
-func VerifyNodeHash(blob, expectedHash []byte) (bool, error) {
- // strip the trailing
- _, _, trailing, err := rlp.Split(blob)
- if err != nil {
- return false, err
+func (n *fullNode) encode(buf []byte, skipHash bool) []byte {
+ var (
+ tagPos = len(buf)
+ nRefNode = 0
+ )
+ // encode tag
+ buf = append(buf, kindFull)
+
+ // encode children
+ for _, cn := range n.children {
+ switch cn := cn.(type) {
+ case *refNode:
+ buf = cn.encode(buf, skipHash)
+ nRefNode++
+ case nil:
+ buf = append(buf, kindEmpty)
+ default:
+ if ref, _, dirty := cn.cache(); dirty {
+ buf = cn.encode(buf, skipHash)
+ } else {
+ buf = ref.encode(buf, skipHash)
+ }
+ }
+ }
+ if nRefNode > 4 {
+ buf[tagPos] |= (attrHasManyRef << 3)
}
+ return buf
+}
+
+func (n *shortNode) encode(buf []byte, skipHash bool) []byte {
+ var (
+ attrs byte
+ tagPos = len(buf)
+ )
+ // encode tag
+ buf = append(buf, kindShort)
+
+ // encode key
+ buf = vp.AppendUint32(buf, uint32(compactLen(n.key)))
+ buf = appendHexToCompact(buf, n.key)
+
+ if hasTerm(n.key) {
+ vn := n.child.(*valueNode)
+ // encode value
+ buf = vp.AppendString(buf, vn.val)
+ // encode meta
+ if len(vn.meta) > 0 {
+ attrs |= attrHasMeta
+ buf = vp.AppendString(buf, vn.meta)
+ }
+ buf[tagPos] |= (attrs << 3)
+ } else {
+ // encode child node
+ if ref, _, dirty := n.child.cache(); dirty {
+ buf = n.child.encode(buf, skipHash)
+ } else {
+ buf = ref.encode(buf, skipHash)
+ }
+ }
+ return buf
+}
+
+func (n *valueNode) encode(buf []byte, skipHash bool) []byte {
+ var (
+ attrs byte
+ tagPos = len(buf)
+ )
+ // encode tag
+ buf = append(buf, kindValue)
+
+ // encode value
+ buf = vp.AppendString(buf, n.val)
+
+ // encode meta
+ if len(n.meta) > 0 {
+ attrs |= attrHasMeta
+ buf = vp.AppendString(buf, n.meta)
+ }
+ buf[tagPos] |= (attrs << 3)
+ return buf
+}
+
+func (n *refNode) encode(buf []byte, skipHash bool) []byte {
+ var (
+ attrs byte
+ tagPos = len(buf)
+ )
+ // encode tag
+ buf = append(buf, kindRef)
+ // encode hash
+ if !skipHash {
+ attrs |= attrHasHash
+ buf = append(buf, n.hash...)
+ }
+ // encode version
+ if n.ver.Major != 0 {
+ attrs |= attrHasMajor
+ buf = binary.BigEndian.AppendUint32(buf, n.ver.Major)
+ }
+ if n.ver.Minor != 0 {
+ attrs |= attrHasMinor
+ buf = binary.BigEndian.AppendUint32(buf, n.ver.Minor)
+ }
+ buf[tagPos] |= (attrs << 3)
+ return buf
+}
+
+//// encodeConsensus
+
+func (n *fullNode) encodeConsensus(buf []byte) []byte {
+ offset := len(buf)
+
+ for _, cn := range n.children {
+ switch cn := cn.(type) {
+ case *refNode:
+ buf = cn.encodeConsensus(buf)
+ case nil:
+ buf = drlp.AppendString(buf, nil)
+ default:
+ if ref, _, _ := cn.cache(); ref.hash != nil {
+ buf = drlp.AppendString(buf, ref.hash)
+ } else {
+ buf = cn.encodeConsensus(buf)
+ }
+ }
+ }
+ return drlp.EndList(buf, offset)
+}
+
+func (n *shortNode) encodeConsensus(buf []byte) []byte {
+ offset := len(buf)
+
+ const maxHeaderSize = 5
+ // reserve space for rlp string header
+ buf = append(buf, make([]byte, maxHeaderSize)...)
+ // compact the key just after reserved space
+ buf = appendHexToCompact(buf, n.key)
+ // encode the compact key in the right place
+ buf = drlp.AppendString(buf[:offset], buf[offset+maxHeaderSize:])
+
+ if ref, _, _ := n.child.cache(); ref.hash != nil {
+ buf = drlp.AppendString(buf, ref.hash)
+ } else {
+ buf = n.child.encodeConsensus(buf)
+ }
+
+ return drlp.EndList(buf, offset)
+}
+
+func (n *valueNode) encodeConsensus(buf []byte) []byte {
+ return drlp.AppendString(buf, n.val)
+}
- node := blob[:len(blob)-len(trailing)]
- have := thor.Blake2b(node)
- return bytes.Equal(expectedHash, have.Bytes()), nil
+func (n *refNode) encodeConsensus(buf []byte) []byte {
+ return drlp.AppendString(buf, n.hash)
}
diff --git a/trie/node_test.go b/trie/node_test.go
index 9f42b969b..a9853c7d2 100644
--- a/trie/node_test.go
+++ b/trie/node_test.go
@@ -17,75 +17,226 @@
package trie
import (
+ "io"
"testing"
"github.com/ethereum/go-ethereum/rlp"
- "github.com/vechain/thor/v2/thor"
+ "github.com/stretchr/testify/assert"
+ "github.com/vechain/thor/v2/test/datagen"
)
-// func TestCanUnload(t *testing.T) {
-// tests := []struct {
-// flag nodeFlag
-// cachegen, cachelimit uint16
-// want bool
-// }{
-// {
-// flag: nodeFlag{dirty: true, gen: 0},
-// want: false,
-// },
-// {
-// flag: nodeFlag{dirty: false, gen: 0},
-// cachegen: 0, cachelimit: 0,
-// want: true,
-// },
-// {
-// flag: nodeFlag{dirty: false, gen: 65534},
-// cachegen: 65535, cachelimit: 1,
-// want: true,
-// },
-// {
-// flag: nodeFlag{dirty: false, gen: 65534},
-// cachegen: 0, cachelimit: 1,
-// want: true,
-// },
-// {
-// flag: nodeFlag{dirty: false, gen: 1},
-// cachegen: 65535, cachelimit: 1,
-// want: true,
-// },
-// }
-
-// for _, test := range tests {
-// if got := test.flag.canUnload(test.cachegen, test.cachelimit); got != test.want {
-// t.Errorf("%+v\n got %t, want %t", test, got, test.want)
-// }
-// }
-// }
+func benchmarkEncodeFullNode(b *testing.B, consensus, skipHash bool) {
+ var (
+ f = fullNode{}
+ buf []byte
+ )
+ for i := 0; i < 16; i++ {
+ f.children[i] = &refNode{hash: datagen.RandomHash().Bytes()}
+ }
+ for i := 0; i < b.N; i++ {
+ if consensus {
+ buf = f.encodeConsensus(buf[:0])
+ } else {
+ buf = f.encode(buf[:0], skipHash)
+ }
+ }
+}
+func benchmarkEncodeShortNode(b *testing.B, consensus bool) {
+ var (
+ s = shortNode{
+ key: []byte{0x1, 0x2, 0x10},
+ child: &valueNode{val: datagen.RandBytes(32)},
+ }
+ buf []byte
+ )
+
+ for i := 0; i < b.N; i++ {
+ if consensus {
+ buf = s.encodeConsensus(buf[:0])
+ } else {
+ buf = s.encode(buf[:0], false)
+ }
+ }
+}
func BenchmarkEncodeFullNode(b *testing.B) {
- var buf sliceBuffer
- f := &fullNode{}
- for i := 0; i < len(f.Children); i++ {
- f.Children[i] = &hashNode{Hash: thor.BytesToBytes32(randBytes(32))}
+ benchmarkEncodeFullNode(b, false, false)
+}
+
+func BenchmarkEncodeFullNodeSkipHash(b *testing.B) {
+ benchmarkEncodeFullNode(b, false, true)
+}
+
+func BenchmarkEncodeFullNodeConsensus(b *testing.B) {
+ benchmarkEncodeFullNode(b, true, false)
+}
+
+func BenchmarkEncodeShortNode(b *testing.B) {
+ benchmarkEncodeShortNode(b, false)
+}
+
+func BenchmarkEncodeShortNodeConsensus(b *testing.B) {
+ benchmarkEncodeShortNode(b, true)
+}
+
+func benchmarkDecodeFullNode(b *testing.B, skipHash bool) {
+ f := fullNode{}
+ for i := 0; i < 16; i++ {
+ f.children[i] = &refNode{hash: datagen.RandomHash().Bytes()}
}
+ enc := f.encode(nil, skipHash)
for i := 0; i < b.N; i++ {
- buf.Reset()
- rlp.Encode(&buf, f)
+ mustDecodeNode(nil, enc, 0)
}
}
-func BenchmarkFastEncodeFullNode(b *testing.B) {
- f := &fullNode{}
- for i := 0; i < len(f.Children); i++ {
- f.Children[i] = &hashNode{Hash: thor.BytesToBytes32(randBytes(32))}
- }
+func BenchmarkDecodeFullNode(b *testing.B) {
+ benchmarkDecodeFullNode(b, false)
+}
+
+func BenchmarkDecodeFullNodeSkipHash(b *testing.B) {
+ benchmarkDecodeFullNode(b, true)
+}
- h := newHasher(0, 0)
+func BenchmarkDecodeShortNode(b *testing.B) {
+ s := shortNode{
+ key: []byte{0x1, 0x2, 0x10},
+ child: &valueNode{val: datagen.RandBytes(32)},
+ }
+ enc := s.encode(nil, false)
for i := 0; i < b.N; i++ {
- h.enc.Reset()
- f.encode(&h.enc, false)
- h.tmp.Reset()
- h.enc.ToWriter(&h.tmp)
+ mustDecodeNode(nil, enc, 0)
+ }
+}
+
+type fNode struct {
+ Children [17]interface{}
+}
+
+func (f *fNode) EncodeRLP(w io.Writer) error {
+ return rlp.Encode(w, f.Children)
+}
+
+type sNode struct {
+ Key []byte
+ Val interface{}
+}
+type vNode []byte
+type hNode []byte
+
+func TestRefNodeEncodeConsensus(t *testing.T) {
+ for i := 0; i < 10; i++ {
+ randHash := datagen.RandomHash()
+
+ h := hNode(randHash.Bytes())
+ ref := &refNode{hash: randHash.Bytes()}
+
+ expected, err := rlp.EncodeToBytes(h)
+ assert.Nil(t, err)
+ actual := ref.encodeConsensus(nil)
+
+ assert.Equal(t, expected, actual)
+ }
+}
+
+func TestValueNodeEncodeConsensus(t *testing.T) {
+ for i := 0; i < 10; i++ {
+ randValue := datagen.RandBytes(datagen.RandIntN(30))
+
+ v := vNode(randValue)
+ value := &valueNode{val: randValue}
+
+ expected, err := rlp.EncodeToBytes(v)
+ assert.Nil(t, err)
+ actual := value.encodeConsensus(nil)
+
+ assert.Equal(t, expected, actual)
+ }
+}
+
+func TestShortNodeEncodeConsensus(t *testing.T) {
+ for i := 0; i < 10; i++ {
+ randKey := datagen.RandBytes(datagen.RandIntN(32))
+ randValue := datagen.RandBytes(datagen.RandIntN(30))
+
+ randKey = append(randKey, 16)
+ s := &sNode{Key: hexToCompact(randKey), Val: vNode(randValue)}
+ short := &shortNode{key: randKey, child: &valueNode{val: randValue}}
+
+ expected, err := rlp.EncodeToBytes(s)
+ assert.Nil(t, err)
+ actual := short.encodeConsensus(nil)
+
+ assert.Equal(t, expected, actual)
+ }
+
+ for i := 0; i < 10; i++ {
+ randKey := datagen.RandBytes(datagen.RandIntN(32))
+ randHash := datagen.RandomHash()
+
+ s := &sNode{Key: hexToCompact(randKey), Val: hNode(randHash.Bytes())}
+ short := &shortNode{key: randKey, child: &refNode{hash: randHash.Bytes()}}
+
+ expected, err := rlp.EncodeToBytes(s)
+ assert.Nil(t, err)
+ actual := short.encodeConsensus(nil)
+
+ assert.Equal(t, expected, actual)
+ }
+}
+
+func TestFullNodeEncodeConsensus(t *testing.T) {
+ for i := 0; i < 10; i++ {
+ randValue := datagen.RandBytes(datagen.RandIntN(30))
+
+ var (
+ f fNode
+ full fullNode
+ )
+
+ for i := 0; i < 16; i++ {
+ if datagen.RandIntN(2) == 1 {
+ randHash := datagen.RandomHash()
+
+ f.Children[i] = hNode(randHash.Bytes())
+ full.children[i] = &refNode{hash: randHash.Bytes()}
+ } else {
+ f.Children[i] = vNode(nil)
+ }
+ }
+ f.Children[16] = vNode(randValue)
+ full.children[16] = &valueNode{val: randValue}
+
+ expected, err := rlp.EncodeToBytes(&f)
+ assert.Nil(t, err)
+ actual := full.encodeConsensus(nil)
+
+ assert.Equal(t, expected, actual)
+ }
+
+ for i := 0; i < 10; i++ {
+ var (
+ f fNode
+ full fullNode
+ )
+
+ for i := 0; i < 16; i++ {
+ if datagen.RandIntN(2) == 1 {
+ randHash := datagen.RandomHash()
+
+ f.Children[i] = hNode(randHash.Bytes())
+ full.children[i] = &refNode{hash: randHash.Bytes()}
+ } else {
+ f.Children[i] = vNode(nil)
+ }
+ }
+ f.Children[16] = vNode(nil)
+
+ expected, err := rlp.EncodeToBytes(&f)
+ assert.Nil(t, err)
+ actual := full.encodeConsensus(nil)
+
+ assert.Equal(t, expected, actual)
}
}
diff --git a/trie/proof.go b/trie/proof.go
deleted file mode 100644
index 735bddeb7..000000000
--- a/trie/proof.go
+++ /dev/null
@@ -1,145 +0,0 @@
-// Copyright 2015 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package trie
-
-import (
- "bytes"
- "fmt"
-
- "github.com/vechain/thor/v2/thor"
-)
-
-// Prove constructs a merkle proof for key. The result contains all
-// encoded nodes on the path to the value at key. The value itself is
-// also included in the last node and can be retrieved by verifying
-// the proof.
-//
-// If the trie does not contain a value for key, the returned proof
-// contains all nodes of the longest existing prefix of the key
-// (at least the root node), ending with the node that proves the
-// absence of the key.
-func (t *Trie) Prove(key []byte, fromLevel uint, proofDb DatabaseWriter) error {
- // Collect all nodes on the path to key.
- key = keybytesToHex(key)
- nodes := []node{}
- tn := t.root
- for len(key) > 0 && tn != nil {
- switch n := tn.(type) {
- case *shortNode:
- if len(key) < len(n.Key) || !bytes.Equal(n.Key, key[:len(n.Key)]) {
- // The trie doesn't contain the key.
- tn = nil
- } else {
- tn = n.Val
- key = key[len(n.Key):]
- }
- nodes = append(nodes, n)
- case *fullNode:
- tn = n.Children[key[0]]
- key = key[1:]
- nodes = append(nodes, n)
- case *hashNode:
- var err error
- tn, err = t.resolveHash(n, nil)
- if err != nil {
- logger.Error(fmt.Sprintf("Unhandled trie error: %v", err))
- return err
- }
- default:
- panic(fmt.Sprintf("%T: invalid node: %v", tn, tn))
- }
- }
- hasher := newHasher(0, 0)
- for i, n := range nodes {
- // Don't bother checking for errors here since hasher panics
- // if encoding doesn't work and we're not writing to any database.
- n, _, _ = hasher.hashChildren(n, nil, nil)
- hn, _ := hasher.store(n, nil, nil, false)
- if hash, ok := hn.(*hashNode); ok || i == 0 {
- // If the node's database encoding is a hash (or is the
- // root node), it becomes a proof element.
- if fromLevel > 0 {
- fromLevel--
- } else {
- hasher.enc.Reset()
- n.encode(&hasher.enc, hasher.nonCrypto)
- hasher.tmp.Reset()
- hasher.enc.ToWriter(&hasher.tmp)
- if ok {
- proofDb.Put(hash.Hash[:], hasher.tmp)
- } else {
- proofDb.Put(thor.Blake2b(hasher.tmp).Bytes(), hasher.tmp)
- }
- }
- }
- }
- return nil
-}
-
-// VerifyProof checks merkle proofs. The given proof must contain the
-// value for key in a trie with the given root hash. VerifyProof
-// returns an error if the proof contains invalid trie nodes or the
-// wrong value.
-func VerifyProof(rootHash thor.Bytes32, key []byte, proofDb DatabaseReader) (value []byte, err error, nodes int) {
- key = keybytesToHex(key)
- wantHash := rootHash
- for i := 0; ; i++ {
- buf, _ := proofDb.Get(wantHash[:])
- if buf == nil {
- return nil, fmt.Errorf("proof node %d (hash %064x) missing", i, wantHash[:]), i
- }
- n, err := decodeNode(&hashNode{Hash: wantHash}, buf, nil, 0)
- if err != nil {
- return nil, fmt.Errorf("bad proof node %d: %v", i, err), i
- }
- keyrest, cld := get(n, key)
- switch cld := cld.(type) {
- case nil:
- // The trie doesn't contain the key.
- return nil, nil, i
- case *hashNode:
- key = keyrest
- wantHash = cld.Hash
- case *valueNode:
- return cld.Value, nil, i + 1
- }
- }
-}
-
-func get(tn node, key []byte) ([]byte, node) {
- for {
- switch n := tn.(type) {
- case *shortNode:
- if len(key) < len(n.Key) || !bytes.Equal(n.Key, key[:len(n.Key)]) {
- return nil, nil
- }
- tn = n.Val
- key = key[len(n.Key):]
- case *fullNode:
- tn = n.Children[key[0]]
- key = key[1:]
- case *hashNode:
- return key, n
- case nil:
- return key, nil
- case *valueNode:
- return nil, n
- default:
- panic(fmt.Sprintf("%T: invalid node: %v", tn, tn))
- }
- }
-}
diff --git a/trie/proof_test.go b/trie/proof_test.go
deleted file mode 100644
index 40b972bf8..000000000
--- a/trie/proof_test.go
+++ /dev/null
@@ -1,159 +0,0 @@
-// Copyright 2015 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-// #nosec G404
-package trie
-
-import (
- "bytes"
- crand "crypto/rand"
- mrand "math/rand/v2"
- "testing"
-
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/ethdb"
- "github.com/vechain/thor/v2/thor"
-)
-
-func TestProof(t *testing.T) {
- trie, vals := randomTrie(500)
- root := trie.Hash()
- for _, kv := range vals {
- proofs := ethdb.NewMemDatabase()
- if trie.Prove(kv.k, 0, proofs) != nil {
- t.Fatalf("missing key %x while constructing proof", kv.k)
- }
- val, err, _ := VerifyProof(root, kv.k, proofs)
- if err != nil {
- t.Fatalf("VerifyProof error for key %x: %v\nraw proof: %v", kv.k, err, proofs)
- }
- if !bytes.Equal(val, kv.v) {
- t.Fatalf("VerifyProof returned wrong value for key %x: got %x, want %x", kv.k, val, kv.v)
- }
- }
-}
-
-func TestOneElementProof(t *testing.T) {
- trie := new(Trie)
- updateString(trie, "k", "v")
- proofs := ethdb.NewMemDatabase()
- trie.Prove([]byte("k"), 0, proofs)
- if len(proofs.Keys()) != 1 {
- t.Error("proof should have one element")
- }
- val, err, _ := VerifyProof(trie.Hash(), []byte("k"), proofs)
- if err != nil {
- t.Fatalf("VerifyProof error: %v\nproof hashes: %v", err, proofs.Keys())
- }
- if !bytes.Equal(val, []byte("v")) {
- t.Fatalf("VerifyProof returned wrong value: got %x, want 'k'", val)
- }
-}
-
-func TestVerifyBadProof(t *testing.T) {
- trie, vals := randomTrie(800)
- root := trie.Hash()
- for _, kv := range vals {
- proofs := ethdb.NewMemDatabase()
- trie.Prove(kv.k, 0, proofs)
- if len(proofs.Keys()) == 0 {
- t.Fatal("zero length proof")
- }
- keys := proofs.Keys()
- key := keys[mrand.N(len(keys))]
- node, _ := proofs.Get(key)
- proofs.Delete(key)
- mutateByte(node)
- proofs.Put(thor.Blake2b(node).Bytes(), node)
- if _, err, _ := VerifyProof(root, kv.k, proofs); err == nil {
- t.Fatalf("expected proof to fail for key %x", kv.k)
- }
- }
-}
-
-// mutateByte changes one byte in b.
-func mutateByte(b []byte) {
- for r := mrand.N(len(b)); ; {
- new := byte(mrand.N(255))
- if new != b[r] {
- b[r] = new
- break
- }
- }
-}
-
-func BenchmarkProve(b *testing.B) {
- trie, vals := randomTrie(100)
- var keys []string
- for k := range vals {
- keys = append(keys, k)
- }
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- kv := vals[keys[i%len(keys)]]
- proofs := ethdb.NewMemDatabase()
- if trie.Prove(kv.k, 0, proofs); len(proofs.Keys()) == 0 {
- b.Fatalf("zero length proof for %x", kv.k)
- }
- }
-}
-
-func BenchmarkVerifyProof(b *testing.B) {
- trie, vals := randomTrie(100)
- root := trie.Hash()
- var keys []string
- var proofs []*ethdb.MemDatabase
- for k := range vals {
- keys = append(keys, k)
- proof := ethdb.NewMemDatabase()
- trie.Prove([]byte(k), 0, proof)
- proofs = append(proofs, proof)
- }
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- im := i % len(keys)
- if _, err, _ := VerifyProof(root, []byte(keys[im]), proofs[im]); err != nil {
- b.Fatalf("key %x: %v", keys[im], err)
- }
- }
-}
-
-func randomTrie(n int) (*Trie, map[string]*kv) {
- trie := new(Trie)
- vals := make(map[string]*kv)
- for i := byte(0); i < 100; i++ {
- value := &kv{common.LeftPadBytes([]byte{i}, 32), []byte{i}, false}
- value2 := &kv{common.LeftPadBytes([]byte{i + 10}, 32), []byte{i}, false}
- trie.Update(value.k, value.v)
- trie.Update(value2.k, value2.v)
- vals[string(value.k)] = value
- vals[string(value2.k)] = value2
- }
- for i := 0; i < n; i++ {
- value := &kv{randBytes(32), randBytes(20), false}
- trie.Update(value.k, value.v)
- vals[string(value.k)] = value
- }
- return trie, vals
-}
-
-func randBytes(n int) []byte {
- r := make([]byte, n)
- crand.Read(r)
- return r
-}
diff --git a/trie/trie.go b/trie/trie.go
index 62308aa5b..bb75f1ee0 100644
--- a/trie/trie.go
+++ b/trie/trie.go
@@ -22,51 +22,63 @@ import (
"fmt"
"github.com/ethereum/go-ethereum/rlp"
- "github.com/vechain/thor/v2/log"
"github.com/vechain/thor/v2/thor"
)
var (
// This is the known root hash of an empty trie.
emptyRoot = thor.Blake2b(rlp.EmptyString)
- // This is the known hash of an empty state trie entry.
- emptyState = thor.Blake2b(nil)
-
- logger = log.WithContext("pkg", "trie")
)
-// Database must be implemented by backing stores for the trie.
-type Database interface {
- DatabaseReader
- DatabaseWriter
+// Version is the version number of a standalone trie node.
+type Version struct {
+ Major,
+ Minor uint32
+}
+
+// String pretty prints version.
+func (v Version) String() string {
+ return fmt.Sprintf("%v.%v", v.Major, v.Minor)
+}
+
+// Compare compares with b.
+// The result will be 0 if a == b, -1 if a < b, and +1 if a > b.
+func (a Version) Compare(b Version) int {
+ if a.Major > b.Major {
+ return 1
+ }
+ if a.Major < b.Major {
+ return -1
+ }
+ if a.Minor > b.Minor {
+ return 1
+ }
+ if a.Minor < b.Minor {
+ return -1
+ }
+ return 0
+}
+
+// Root wraps hash and version of the root node.
+type Root struct {
+ Hash thor.Bytes32
+ Ver Version
}
+// Node is the alias of inner node type.
+type Node = node
+
// DatabaseReader wraps the Get method of a backing store for the trie.
type DatabaseReader interface {
- Get(key []byte) (value []byte, err error)
+ Get(path []byte, ver Version) (value []byte, err error)
}
// DatabaseWriter wraps the Put method of a backing store for the trie.
type DatabaseWriter interface {
- // Put stores the mapping key->value in the database.
+ // Put stores the mapping (path, ver)->value in the database.
// Implementations must not hold onto the value bytes, the trie
// will reuse the slice across calls to Put.
- Put(key, value []byte) error
-}
-
-// DatabaseReaderTo wraps the GetTo method of backing store for the trie.
-// The purpose of this interface is to reuse read buffer and avoid allocs.
-// If the database implements this interface, DatabaseReader.Get will not be called when resolving nodes.
-type DatabaseReaderTo interface {
- // GetTo gets value for the given key and append to dst.
- GetTo(key, dst []byte) (value []byte, err error)
-}
-
-// DatabaseKeyEncoder defines the method how to produce database key.
-// If the database implements this interface, everytime before save the node, Encode is called and its
-// return-value will be the saving key instead of node hash.
-type DatabaseKeyEncoder interface {
- Encode(hash []byte, seq uint64, path []byte) []byte
+ Put(path []byte, ver Version, value []byte) error
}
// Trie is a Merkle Patricia Trie.
@@ -76,103 +88,117 @@ type DatabaseKeyEncoder interface {
// Trie is not safe for concurrent use.
type Trie struct {
root node
- db Database
+ db DatabaseReader
cacheGen uint16 // cache generation counter for next committed nodes
cacheTTL uint16 // the life time of cached nodes
}
+// SetCacheTTL sets the number of 'cache generations' to keep.
+// A cache generation is increased by a call to Commit.
+func (t *Trie) SetCacheTTL(ttl uint16) {
+ t.cacheTTL = ttl
+}
+
// newFlag returns the cache flag value for a newly created node.
func (t *Trie) newFlag() nodeFlag {
return nodeFlag{dirty: true, gen: t.cacheGen}
}
+// RootNode returns the root node.
+func (t *Trie) RootNode() Node {
+ return t.root
+}
+
// New creates a trie with an existing root node from db.
//
-// If root is the zero hash or the blake2b hash of an empty string, the
-// trie is initially empty and does not require a database. Otherwise,
-// New will panic if db is nil and returns a MissingNodeError if root does
-// not exist in the database. Accessing the trie loads nodes from db on demand.
-func New(root thor.Bytes32, db Database) (*Trie, error) {
- trie := &Trie{db: db}
- if (root != thor.Bytes32{}) && root != emptyRoot {
- if db == nil {
- panic("trie.New: cannot use existing root without a database")
- }
- rootnode, err := trie.resolveHash(&hashNode{Hash: root}, nil)
- if err != nil {
- return nil, err
- }
- trie.root = rootnode
+// If root hash is zero or the hash of an empty string, the trie is initially empty .
+// Accessing the trie loads nodes from db on demand.
+func New(root Root, db DatabaseReader) *Trie {
+ if root.Hash == emptyRoot || root.Hash.IsZero() {
+ return &Trie{db: db}
}
- return trie, nil
-}
-// NodeIterator returns an iterator that returns nodes of the trie. Iteration starts at
-// the key after the given start key.
-func (t *Trie) NodeIterator(start []byte) NodeIterator {
- return newNodeIterator(t, start, func(seq uint64) bool { return true }, false, false)
+ return &Trie{
+ root: &refNode{root.Hash.Bytes(), root.Ver},
+ db: db,
+ }
}
-// Get returns the value for key stored in the trie.
-// The value bytes must not be modified by the caller.
-func (t *Trie) Get(key []byte) []byte {
- res, err := t.TryGet(key)
- if err != nil {
- logger.Error(fmt.Sprintf("Unhandled trie error: %v", err))
+// FromRootNode creates a trie from a live root node.
+func FromRootNode(rootNode Node, db DatabaseReader) *Trie {
+ if rootNode != nil {
+ _, gen, _ := rootNode.cache()
+ return &Trie{
+ root: rootNode,
+ db: db,
+ cacheGen: gen + 1, // cacheGen is always one bigger than gen of root node
+ }
}
- return res
+ // allows nil root node
+ return &Trie{db: db}
+}
+
+// NodeIterator returns an iterator that returns nodes of the trie. Iteration starts at
+// the key after the given start key. Nodes with version smaller than minVer are filtered out.
+func (t *Trie) NodeIterator(start []byte, minVer Version) NodeIterator {
+ return newNodeIterator(t, start, minVer)
}
-// TryGet returns the value for key stored in the trie.
-// The value bytes must not be modified by the caller.
+// Get returns the value with meta for key stored in the trie.
+// The value and meta bytes must not be modified by the caller.
// If a node was not found in the database, a MissingNodeError is returned.
-func (t *Trie) TryGet(key []byte) ([]byte, error) {
- value, newroot, err := t.tryGet(t.root, keybytesToHex(key), 0)
- if t.root != newroot {
- t.root = newroot
- }
+func (t *Trie) Get(key []byte) ([]byte, []byte, error) {
+ value, newRoot, _, err := t.tryGet(t.root, keybytesToHex(key), 0)
if err != nil {
- return nil, err
+ return nil, nil, err
}
+ t.root = newRoot
if value != nil {
- return value.Value, nil
+ return value.val, value.meta, nil
}
- return nil, nil
+ return nil, nil, nil
}
-func (t *Trie) tryGet(origNode node, key []byte, pos int) (value *valueNode, newnode node, err error) {
+func (t *Trie) tryGet(origNode node, key []byte, pos int) (value *valueNode, newnode node, didResolve bool, err error) {
switch n := (origNode).(type) {
case nil:
- return nil, nil, nil
+ return nil, nil, false, nil
case *valueNode:
- return n, n, nil
+ return n, n, false, nil
case *shortNode:
- if len(key)-pos < len(n.Key) || !bytes.Equal(n.Key, key[pos:pos+len(n.Key)]) {
+ if len(key)-pos < len(n.key) || !bytes.Equal(n.key, key[pos:pos+len(n.key)]) {
// key not found in trie
- return nil, n, nil
+ return nil, n, false, nil
+ }
+ if value, newnode, didResolve, err = t.tryGet(n.child, key, pos+len(n.key)); err != nil {
+ return
}
- value, newnode, err = t.tryGet(n.Val, key, pos+len(n.Key))
- if newnode != nil && newnode != n.Val {
+ if didResolve {
n = n.copy()
- n.Val = newnode
+ n.child = newnode
+ n.flags.gen = t.cacheGen
}
- return value, n, err
+ return value, n, didResolve, nil
case *fullNode:
- child := n.Children[key[pos]]
- value, newnode, err = t.tryGet(child, key, pos+1)
- if newnode != nil && newnode != child {
+ if value, newnode, didResolve, err = t.tryGet(n.children[key[pos]], key, pos+1); err != nil {
+ return
+ }
+ if didResolve {
n = n.copy()
- n.Children[key[pos]] = newnode
+ n.flags.gen = t.cacheGen
+ n.children[key[pos]] = newnode
}
- return value, n, err
- case *hashNode:
- child, err := t.resolveHash(n, key[:pos])
- if err != nil {
- return nil, n, err
+ return value, n, didResolve, nil
+ case *refNode:
+ var child node
+ if child, err = t.resolveRef(n, key[:pos]); err != nil {
+ return
+ }
+ if value, newnode, _, err = t.tryGet(child, key, pos); err != nil {
+ return
}
- value, newnode, err := t.tryGet(child, key, pos)
- return value, newnode, err
+ return value, newnode, true, nil
default:
panic(fmt.Sprintf("%T: invalid node: %v", origNode, origNode))
}
@@ -184,24 +210,12 @@ func (t *Trie) tryGet(origNode node, key []byte, pos int) (value *valueNode, new
//
// The value bytes must not be modified by the caller while they are
// stored in the trie.
-func (t *Trie) Update(key, value []byte) {
- if err := t.TryUpdate(key, value); err != nil {
- log.Error(fmt.Sprintf("Unhandled trie error: %v", err))
- }
-}
-
-// TryUpdate associates key with value in the trie. Subsequent calls to
-// Get will return value. If value has length zero, any existing value
-// is deleted from the trie and calls to Get will return nil.
-//
-// The value bytes must not be modified by the caller while they are
-// stored in the trie.
//
// If a node was not found in the database, a MissingNodeError is returned.
-func (t *Trie) TryUpdate(key, value []byte) error {
+func (t *Trie) Update(key, value, meta []byte) error {
k := keybytesToHex(key)
if len(value) != 0 {
- _, n, err := t.insert(t.root, nil, k, &valueNode{Value: value})
+ _, n, err := t.insert(t.root, nil, k, &valueNode{value, meta})
if err != nil {
return err
}
@@ -219,32 +233,32 @@ func (t *Trie) TryUpdate(key, value []byte) error {
func (t *Trie) insert(n node, prefix, key []byte, value node) (bool, node, error) {
if len(key) == 0 {
if v, ok := n.(*valueNode); ok {
- _v := value.(*valueNode)
+ newVal := value.(*valueNode)
// dirty when value or meta is not equal
- return !bytes.Equal(v.Value, _v.Value) || !bytes.Equal(v.meta, _v.meta), value, nil
+ return !bytes.Equal(v.val, newVal.val) || !bytes.Equal(v.meta, newVal.meta), value, nil
}
return true, value, nil
}
switch n := n.(type) {
case *shortNode:
- matchlen := prefixLen(key, n.Key)
+ matchlen := prefixLen(key, n.key)
// If the whole key matches, keep this short node as is
// and only update the value.
- if matchlen == len(n.Key) {
- dirty, nn, err := t.insert(n.Val, append(prefix, key[:matchlen]...), key[matchlen:], value)
+ if matchlen == len(n.key) {
+ dirty, nn, err := t.insert(n.child, append(prefix, key[:matchlen]...), key[matchlen:], value)
if !dirty || err != nil {
return false, n, err
}
- return true, &shortNode{n.Key, nn, t.newFlag()}, nil
+ return true, &shortNode{n.key, nn, t.newFlag()}, nil
}
// Otherwise branch out at the index where they differ.
branch := &fullNode{flags: t.newFlag()}
var err error
- _, branch.Children[n.Key[matchlen]], err = t.insert(nil, append(prefix, n.Key[:matchlen+1]...), n.Key[matchlen+1:], n.Val)
+ _, branch.children[n.key[matchlen]], err = t.insert(nil, append(prefix, n.key[:matchlen+1]...), n.key[matchlen+1:], n.child)
if err != nil {
return false, nil, err
}
- _, branch.Children[key[matchlen]], err = t.insert(nil, append(prefix, key[:matchlen+1]...), key[matchlen+1:], value)
+ _, branch.children[key[matchlen]], err = t.insert(nil, append(prefix, key[:matchlen+1]...), key[matchlen+1:], value)
if err != nil {
return false, nil, err
}
@@ -256,23 +270,23 @@ func (t *Trie) insert(n node, prefix, key []byte, value node) (bool, node, error
return true, &shortNode{key[:matchlen], branch, t.newFlag()}, nil
case *fullNode:
- dirty, nn, err := t.insert(n.Children[key[0]], append(prefix, key[0]), key[1:], value)
+ dirty, nn, err := t.insert(n.children[key[0]], append(prefix, key[0]), key[1:], value)
if !dirty || err != nil {
return false, n, err
}
n = n.copy()
n.flags = t.newFlag()
- n.Children[key[0]] = nn
+ n.children[key[0]] = nn
return true, n, nil
case nil:
return true, &shortNode{key, value, t.newFlag()}, nil
- case *hashNode:
+ case *refNode:
// We've hit a part of the trie that isn't loaded yet. Load
// the node and insert into it. This leaves all child nodes on
// the path to the value in the trie.
- rn, err := t.resolveHash(n, prefix)
+ rn, err := t.resolveRef(n, prefix)
if err != nil {
return false, nil, err
}
@@ -287,33 +301,14 @@ func (t *Trie) insert(n node, prefix, key []byte, value node) (bool, node, error
}
}
-// Delete removes any existing value for key from the trie.
-func (t *Trie) Delete(key []byte) {
- if err := t.TryDelete(key); err != nil {
- log.Error(fmt.Sprintf("Unhandled trie error: %v", err))
- }
-}
-
-// TryDelete removes any existing value for key from the trie.
-// If a node was not found in the database, a MissingNodeError is returned.
-func (t *Trie) TryDelete(key []byte) error {
- k := keybytesToHex(key)
- _, n, err := t.delete(t.root, nil, k)
- if err != nil {
- return err
- }
- t.root = n
- return nil
-}
-
// delete returns the new root of the trie with key deleted.
// It reduces the trie to minimal form by simplifying
// nodes on the way up after deleting recursively.
func (t *Trie) delete(n node, prefix, key []byte) (bool, node, error) {
switch n := n.(type) {
case *shortNode:
- matchlen := prefixLen(key, n.Key)
- if matchlen < len(n.Key) {
+ matchlen := prefixLen(key, n.key)
+ if matchlen < len(n.key) {
return false, n, nil // don't replace n on mismatch
}
if matchlen == len(key) {
@@ -323,7 +318,7 @@ func (t *Trie) delete(n node, prefix, key []byte) (bool, node, error) {
// from the subtrie. Child can never be nil here since the
// subtrie must contain at least two other values with keys
// longer than n.Key.
- dirty, child, err := t.delete(n.Val, append(prefix, key[:len(n.Key)]...), key[len(n.Key):])
+ dirty, child, err := t.delete(n.child, append(prefix, key[:len(n.key)]...), key[len(n.key):])
if !dirty || err != nil {
return false, n, err
}
@@ -335,19 +330,19 @@ func (t *Trie) delete(n node, prefix, key []byte) (bool, node, error) {
// always creates a new slice) instead of append to
// avoid modifying n.Key since it might be shared with
// other nodes.
- return true, &shortNode{concat(n.Key, child.Key...), child.Val, t.newFlag()}, nil
+ return true, &shortNode{concat(n.key, child.key...), child.child, t.newFlag()}, nil
default:
- return true, &shortNode{n.Key, child, t.newFlag()}, nil
+ return true, &shortNode{n.key, child, t.newFlag()}, nil
}
case *fullNode:
- dirty, nn, err := t.delete(n.Children[key[0]], append(prefix, key[0]), key[1:])
+ dirty, nn, err := t.delete(n.children[key[0]], append(prefix, key[0]), key[1:])
if !dirty || err != nil {
return false, n, err
}
n = n.copy()
n.flags = t.newFlag()
- n.Children[key[0]] = nn
+ n.children[key[0]] = nn
// Check how many non-nil entries are left after deleting and
// reduce the full node to a short node if only one entry is
@@ -359,7 +354,7 @@ func (t *Trie) delete(n node, prefix, key []byte) (bool, node, error) {
// value that is left in n or -2 if n contains at least two
// values.
pos := -1
- for i, cld := range n.Children {
+ for i, cld := range n.children {
if cld != nil {
if pos == -1 {
pos = i
@@ -377,18 +372,18 @@ func (t *Trie) delete(n node, prefix, key []byte) (bool, node, error) {
// shortNode{..., shortNode{...}}. Since the entry
// might not be loaded yet, resolve it just for this
// check.
- cnode, err := t.resolve(n.Children[pos], append(prefix, byte(pos)))
+ cnode, err := t.resolve(n.children[pos], append(prefix, byte(pos)))
if err != nil {
return false, nil, err
}
if cnode, ok := cnode.(*shortNode); ok {
- k := append([]byte{byte(pos)}, cnode.Key...)
- return true, &shortNode{k, cnode.Val, t.newFlag()}, nil
+ k := append([]byte{byte(pos)}, cnode.key...)
+ return true, &shortNode{k, cnode.child, t.newFlag()}, nil
}
}
// Otherwise, n is replaced by a one-nibble short node
// containing the child.
- return true, &shortNode{[]byte{byte(pos)}, n.Children[pos], t.newFlag()}, nil
+ return true, &shortNode{[]byte{byte(pos)}, n.children[pos], t.newFlag()}, nil
}
// n still contains at least two values and cannot be reduced.
return true, n, nil
@@ -399,11 +394,11 @@ func (t *Trie) delete(n node, prefix, key []byte) (bool, node, error) {
case nil:
return false, nil, nil
- case *hashNode:
+ case *refNode:
// We've hit a part of the trie that isn't loaded yet. Load
// the node and delete from it. This leaves all child nodes on
// the path to the value in the trie.
- rn, err := t.resolveHash(n, prefix)
+ rn, err := t.resolveRef(n, prefix)
if err != nil {
return false, nil, err
}
@@ -426,84 +421,67 @@ func concat(s1 []byte, s2 ...byte) []byte {
}
func (t *Trie) resolve(n node, prefix []byte) (node, error) {
- if n, ok := n.(*hashNode); ok {
- node, err := t.resolveHash(n, prefix)
+ if ref, ok := n.(*refNode); ok {
+ node, err := t.resolveRef(ref, prefix)
return node, err
}
return n, nil
}
-func (t *Trie) resolveHash(n *hashNode, prefix []byte) (node node, err error) {
- key := n.Hash[:]
- if ke, ok := t.db.(DatabaseKeyEncoder); ok {
- key = ke.Encode(n.Hash[:], n.seq, prefix)
- }
-
- var blob []byte
- if r, ok := t.db.(DatabaseReaderTo); ok {
- h := newHasher(0, 0)
- defer returnHasherToPool(h)
- if blob, err = r.GetTo(key, h.tmp[:0]); err != nil {
- return nil, &MissingNodeError{NodeHash: n, Path: prefix, Err: err}
- }
- h.tmp = blob
- } else {
- if blob, err = t.db.Get(key); err != nil {
- return nil, &MissingNodeError{NodeHash: n, Path: prefix, Err: err}
- }
- }
- if len(blob) == 0 {
- return nil, &MissingNodeError{NodeHash: n, Path: prefix}
+func (t *Trie) resolveRef(ref *refNode, prefix []byte) (node, error) {
+ blob, err := t.db.Get(prefix, ref.ver)
+ if err != nil {
+ return nil, &MissingNodeError{Ref: *ref, Path: prefix, Err: err}
}
- return mustDecodeNode(n, blob, t.cacheGen), nil
+ return mustDecodeNode(ref, blob, t.cacheGen), nil
}
-// Root returns the root hash of the trie.
-// Deprecated: use Hash instead.
-func (t *Trie) Root() []byte { return t.Hash().Bytes() }
-
// Hash returns the root hash of the trie. It does not write to the
// database and can be used even if the trie doesn't have one.
func (t *Trie) Hash() thor.Bytes32 {
- hash, cached, _ := t.hashRoot(nil)
- t.root = cached
- return hash.(*hashNode).Hash
+ if t.root == nil {
+ return emptyRoot
+ }
+
+ h := hasherPool.Get().(*hasher)
+ defer hasherPool.Put(h)
+
+ hash := h.hash(t.root, true)
+ return thor.BytesToBytes32(hash)
}
// Commit writes all nodes to the trie's database.
-// Nodes are stored with their blake2b hash as the key.
//
// Committing flushes nodes from memory.
// Subsequent Get calls will load nodes from the database.
-func (t *Trie) Commit() (root thor.Bytes32, err error) {
- if t.db == nil {
- panic("Commit called on trie with nil database")
+// If skipHash is true, less disk space is taken up but crypto features of merkle trie lost.
+func (t *Trie) Commit(db DatabaseWriter, newVer Version, skipHash bool) error {
+ if t.root == nil {
+ return nil
}
- return t.CommitTo(t.db)
-}
-// CommitTo writes all nodes to the given database.
-// Nodes are stored with their blake2b hash as the key.
-//
-// Committing flushes nodes from memory. Subsequent Get calls will
-// load nodes from the trie's database. Calling code must ensure that
-// the changes made to db are written back to the trie's attached
-// database before using the trie.
-func (t *Trie) CommitTo(db DatabaseWriter) (root thor.Bytes32, err error) {
- hash, cached, err := t.hashRoot(db)
+ // the root node might be refNode, resolve it before later process.
+ resolved, err := t.resolve(t.root, nil)
if err != nil {
- return (thor.Bytes32{}), err
+ return err
}
- t.root = cached
- t.cacheGen++
- return hash.(*hashNode).Hash, nil
-}
-func (t *Trie) hashRoot(db DatabaseWriter) (node, node, error) {
- if t.root == nil {
- return &hashNode{Hash: emptyRoot}, nil, nil
+ h := hasherPool.Get().(*hasher)
+ defer hasherPool.Put(h)
+ if !skipHash {
+ // hash the resolved root node before storing
+ h.hash(resolved, true)
+ }
+
+ h.newVer = newVer
+ h.cacheTTL = t.cacheTTL
+ h.skipHash = skipHash
+
+ rn, err := h.store(resolved, db, nil)
+ if err != nil {
+ return err
}
- h := newHasher(t.cacheGen, t.cacheTTL)
- defer returnHasherToPool(h)
- return h.hash(t.root, db, nil, true)
+ t.root = rn
+ t.cacheGen++
+ return nil
}
diff --git a/trie/trie_test.go b/trie/trie_test.go
index 78c1ce7ce..bc7c284a2 100644
--- a/trie/trie_test.go
+++ b/trie/trie_test.go
@@ -22,13 +22,13 @@ import (
"fmt"
"math/big"
"math/rand"
- "os"
"reflect"
"testing"
"testing/quick"
"github.com/davecgh/go-spew/spew"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/rlp"
"github.com/stretchr/testify/assert"
@@ -40,19 +40,34 @@ func init() {
spew.Config.DisableMethods = false
}
-// Used for testing
-func newEmpty() *Trie {
- db := ethdb.NewMemDatabase()
- trie, _ := New(thor.Bytes32{}, db)
- return trie
+func makeKey(path []byte, ver Version) []byte {
+ key := binary.AppendUvarint([]byte(nil), uint64(ver.Major))
+ key = binary.AppendUvarint(key, uint64(ver.Minor))
+ return append(key, path...)
+}
+
+type memdb struct {
+ db *ethdb.MemDatabase
+}
+
+func (m *memdb) Get(path []byte, ver Version) ([]byte, error) {
+ return m.db.Get(makeKey(path, ver))
+}
+
+func (m *memdb) Put(path []byte, ver Version, value []byte) error {
+ return m.db.Put(makeKey(path, ver), value)
+}
+
+func newMemDatabase() *memdb {
+ return &memdb{ethdb.NewMemDatabase()}
}
func TestEmptyTrie(t *testing.T) {
var trie Trie
res := trie.Hash()
- exp := emptyRoot
- if res != exp {
- t.Errorf("expected %x got %x", exp, res)
+
+ if res != emptyRoot {
+ t.Errorf("expected %x got %x", emptyRoot, res)
}
}
@@ -60,125 +75,129 @@ func TestNull(t *testing.T) {
var trie Trie
key := make([]byte, 32)
value := []byte("test")
- trie.Update(key, value)
- if !bytes.Equal(trie.Get(key), value) {
+ trie.Update(key, value, nil)
+ gotVal, _, _ := trie.Get(key)
+ if !bytes.Equal(gotVal, value) {
t.Fatal("wrong value")
}
}
func TestMissingRoot(t *testing.T) {
- db := ethdb.NewMemDatabase()
- root := thor.Bytes32{1, 2, 3, 4, 5}
- trie, err := New(root, db)
- if trie != nil {
- t.Error("New returned non-nil trie for invalid root")
- }
+ db := newMemDatabase()
+ hash := thor.Bytes32{1, 2, 3, 4, 5}
+ trie := New(Root{Hash: hash}, db)
+
+ // will resolve node
+ err := trie.Commit(db, Version{}, false)
if _, ok := err.(*MissingNodeError); !ok {
t.Errorf("New returned wrong error: %v", err)
}
}
func TestMissingNode(t *testing.T) {
- db := ethdb.NewMemDatabase()
- trie, _ := New(thor.Bytes32{}, db)
+ db := newMemDatabase()
+
+ root := Root{}
+ trie := New(root, db)
updateString(trie, "120000", "qwerqwerqwerqwerqwerqwerqwerqwer")
+ updateString(trie, "120100", "qwerqwerqwerqwerqwerqwerqwerqwer")
updateString(trie, "123456", "asdfasdfasdfasdfasdfasdfasdfasdf")
- root, _ := trie.Commit()
+ root.Ver.Major++
+ trie.Commit(db, root.Ver, false)
+ root.Hash = trie.Hash()
- trie, _ = New(root, db)
- _, err := trie.TryGet([]byte("120000"))
+ trie = New(root, db)
+ _, _, err := trie.Get([]byte("120000"))
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
- trie, _ = New(root, db)
- _, err = trie.TryGet([]byte("120099"))
+ trie = New(root, db)
+ _, _, err = trie.Get([]byte("120099"))
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
- trie, _ = New(root, db)
- _, err = trie.TryGet([]byte("123456"))
+ trie = New(root, db)
+ _, _, err = trie.Get([]byte("123456"))
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
- trie, _ = New(root, db)
- err = trie.TryUpdate([]byte("120099"), []byte("zxcvzxcvzxcvzxcvzxcvzxcvzxcvzxcv"))
+ trie = New(root, db)
+ err = trie.Update([]byte("120099"), []byte("zxcvzxcvzxcvzxcvzxcvzxcvzxcvzxcv"), nil)
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
- trie, _ = New(root, db)
- err = trie.TryDelete([]byte("123456"))
+ trie = New(root, db)
+ err = trie.Update([]byte("123456"), nil, nil)
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
- db.Delete(common.FromHex("f4c6f22acf81fd2d993636c74c17d58ad0344b55343f5121bf16fb5f5ec1fc6f"))
+ db.db.Delete(makeKey([]byte{3, 1, 3, 2, 3, 0, 3}, root.Ver))
- trie, _ = New(root, db)
- _, err = trie.TryGet([]byte("120000"))
+ trie = New(root, db)
+ _, _, err = trie.Get([]byte("120000"))
if _, ok := err.(*MissingNodeError); !ok {
t.Errorf("Wrong error: %v", err)
}
- trie, _ = New(root, db)
- _, err = trie.TryGet([]byte("120099"))
+ trie = New(root, db)
+ _, _, err = trie.Get([]byte("120099"))
if _, ok := err.(*MissingNodeError); !ok {
t.Errorf("Wrong error: %v", err)
}
- trie, _ = New(root, db)
- _, err = trie.TryGet([]byte("123456"))
+ trie = New(root, db)
+ _, _, err = trie.Get([]byte("123456"))
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
- trie, _ = New(root, db)
- err = trie.TryUpdate([]byte("120099"), []byte("zxcv"))
- if _, ok := err.(*MissingNodeError); !ok {
- t.Errorf("Wrong error: %v", err)
- }
-
- trie, _ = New(root, db)
- err = trie.TryDelete([]byte("123456"))
+ trie = New(root, db)
+ err = trie.Update([]byte("120099"), []byte("zxcv"), nil)
if _, ok := err.(*MissingNodeError); !ok {
t.Errorf("Wrong error: %v", err)
}
}
func TestInsert(t *testing.T) {
- trie := newEmpty()
+ trie := new(Trie)
updateString(trie, "doe", "reindeer")
updateString(trie, "dog", "puppy")
updateString(trie, "dogglesworth", "cat")
exp, _ := thor.ParseBytes32("6ca394ff9b13d6690a51dea30b1b5c43108e52944d30b9095227c49bae03ff8b")
- root := trie.Hash()
- if root != exp {
- t.Errorf("exp %v got %v", exp, root)
+ hash := trie.Hash()
+ if hash != exp {
+ t.Errorf("exp %v got %v", exp, hash)
}
- trie = newEmpty()
+ trie = new(Trie)
updateString(trie, "A", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
exp, _ = thor.ParseBytes32("e9d7f23f40cd82fe35f5a7a6778c3503f775f3623ba7a71fb335f0eee29dac8a")
- root, err := trie.Commit()
+ db := newMemDatabase()
+
+ err := trie.Commit(db, Version{}, false)
+ hash = trie.Hash()
if err != nil {
t.Fatalf("commit error: %v", err)
}
- if root != exp {
- t.Errorf("exp %v got %v", exp, root)
+ if hash != exp {
+ t.Errorf("exp %v got %v", exp, hash)
}
}
func TestGet(t *testing.T) {
- trie := newEmpty()
+ trie := new(Trie)
updateString(trie, "doe", "reindeer")
updateString(trie, "dog", "puppy")
updateString(trie, "dogglesworth", "cat")
+ db := newMemDatabase()
for i := 0; i < 2; i++ {
res := getString(trie, "dog")
@@ -194,12 +213,12 @@ func TestGet(t *testing.T) {
if i == 1 {
return
}
- trie.Commit()
+ trie.Commit(db, Version{Major: uint32(i)}, false)
}
}
func TestDelete(t *testing.T) {
- trie := newEmpty()
+ trie := new(Trie)
vals := []struct{ k, v string }{
{"do", "verb"},
{"ether", "wookiedoo"},
@@ -226,7 +245,7 @@ func TestDelete(t *testing.T) {
}
func TestEmptyValues(t *testing.T) {
- trie := newEmpty()
+ trie := new(Trie)
vals := []struct{ k, v string }{
{"do", "verb"},
@@ -250,7 +269,8 @@ func TestEmptyValues(t *testing.T) {
}
func TestReplication(t *testing.T) {
- trie := newEmpty()
+ db := newMemDatabase()
+ trie := new(Trie)
vals := []struct{ k, v string }{
{"do", "verb"},
{"ether", "wookiedoo"},
@@ -263,27 +283,27 @@ func TestReplication(t *testing.T) {
for _, val := range vals {
updateString(trie, val.k, val.v)
}
- exp, err := trie.Commit()
- if err != nil {
+ ver := Version{}
+ if err := trie.Commit(db, ver, false); err != nil {
t.Fatalf("commit error: %v", err)
}
+ exp := trie.Hash()
// create a new trie on top of the database and check that lookups work.
- trie2, err := New(exp, trie.db)
- if err != nil {
- t.Fatalf("can't recreate trie at %x: %v", exp, err)
- }
+ trie2 := New(Root{exp, ver}, db)
+
for _, kv := range vals {
if string(getString(trie2, kv.k)) != kv.v {
t.Errorf("trie2 doesn't have %q => %q", kv.k, kv.v)
}
}
- hash, err := trie2.Commit()
- if err != nil {
+ ver.Major++
+ if err := trie2.Commit(db, ver, false); err != nil {
t.Fatalf("commit error: %v", err)
}
- if hash != exp {
- t.Errorf("root failure. expected %x got %x", exp, hash)
+ got := trie2.Hash()
+ if got != exp {
+ t.Errorf("root failure. expected %x got %x", exp, got)
}
// perform some insertions on the new trie.
@@ -307,42 +327,12 @@ func TestReplication(t *testing.T) {
}
func TestLargeValue(t *testing.T) {
- trie := newEmpty()
- trie.Update([]byte("key1"), []byte{99, 99, 99, 99})
- trie.Update([]byte("key2"), bytes.Repeat([]byte{1}, 32))
+ trie := new(Trie)
+ trie.Update([]byte("key1"), []byte{99, 99, 99, 99}, nil)
+ trie.Update([]byte("key2"), bytes.Repeat([]byte{1}, 32), nil)
trie.Hash()
}
-// TestCacheUnload checks that decoded nodes are unloaded after a
-// certain number of commit operations.
-// func TestCacheUnload(t *testing.T) {
-// // Create test trie with two branches.
-// trie := newEmpty()
-// key1 := "---------------------------------"
-// key2 := "---some other branch"
-// updateString(trie, key1, "this is the branch of key1.")
-// updateString(trie, key2, "this is the branch of key2.")
-// root, _ := trie.Commit()
-
-// // Commit the trie repeatedly and access key1.
-// // The branch containing it is loaded from DB exactly two times:
-// // in the 0th and 6th iteration.
-// db := &countingDB{Database: trie.db, gets: make(map[string]int)}
-// trie, _ = New(root, db)
-// trie.SetCacheLimit(5)
-// for i := 0; i < 12; i++ {
-// getString(trie, key1)
-// trie.Commit()
-// }
-
-// // Check that it got loaded two times.
-// for dbkey, count := range db.gets {
-// if count != 2 {
-// t.Errorf("db key %x loaded %d times, want %d times", []byte(dbkey), count, 2)
-// }
-// }
-// }
-
// randTest performs random trie operations.
// Instances of this test are created by Generate.
type randTest []randTestStep
@@ -397,45 +387,44 @@ func (randTest) Generate(r *rand.Rand, size int) reflect.Value {
}
func runRandTest(rt randTest) bool {
- db := ethdb.NewMemDatabase()
- tr, _ := New(thor.Bytes32{}, db)
+ db := newMemDatabase()
+ root := Root{}
+ tr := New(root, db)
values := make(map[string]string) // tracks content of the trie
for i, step := range rt {
switch step.op {
case opUpdate:
- tr.Update(step.key, step.value)
+ tr.Update(step.key, step.value, nil)
values[string(step.key)] = string(step.value)
case opDelete:
- tr.Delete(step.key)
+ tr.Update(step.key, nil, nil)
delete(values, string(step.key))
case opGet:
- v := tr.Get(step.key)
+ v, _, _ := tr.Get(step.key)
want := values[string(step.key)]
if string(v) != want {
rt[i].err = fmt.Errorf("mismatch for key 0x%x, got 0x%x want 0x%x", step.key, v, want)
}
case opCommit:
- _, rt[i].err = tr.Commit()
+ root.Ver.Major++
+ rt[i].err = tr.Commit(db, root.Ver, false)
case opHash:
tr.Hash()
case opReset:
- hash, err := tr.Commit()
- if err != nil {
- rt[i].err = err
- return false
- }
- newtr, err := New(hash, db)
- if err != nil {
+ root.Ver.Major++
+ if err := tr.Commit(db, root.Ver, false); err != nil {
rt[i].err = err
return false
}
+ root.Hash = tr.Hash()
+ newtr := New(root, db)
tr = newtr
case opItercheckhash:
- checktr, _ := New(thor.Bytes32{}, nil)
- it := NewIterator(tr.NodeIterator(nil))
+ checktr := new(Trie)
+ it := NewIterator(tr.NodeIterator(nil, Version{}))
for it.Next() {
- checktr.Update(it.Key, it.Value)
+ checktr.Update(it.Key, it.Value, nil)
}
if tr.Hash() != checktr.Hash() {
rt[i].err = fmt.Errorf("hash mismatch in opItercheckhash")
@@ -451,40 +440,6 @@ func runRandTest(rt randTest) bool {
return true
}
-// func checkCacheInvariant(n, parent node, parentCachegen uint16, parentDirty bool, depth int) error {
-// var children []node
-// var flag nodeFlag
-// switch n := n.(type) {
-// case *shortNode:
-// flag = n.flags
-// children = []node{n.Val}
-// case *fullNode:
-// flag = n.flags
-// children = n.Children[:]
-// default:
-// return nil
-// }
-
-// errorf := func(format string, args ...interface{}) error {
-// msg := fmt.Sprintf(format, args...)
-// msg += fmt.Sprintf("\nat depth %d node %s", depth, spew.Sdump(n))
-// msg += fmt.Sprintf("parent: %s", spew.Sdump(parent))
-// return errors.New(msg)
-// }
-// if flag.gen > parentCachegen {
-// return errorf("cache invariant violation: %d > %d\n", flag.gen, parentCachegen)
-// }
-// if depth > 0 && !parentDirty && flag.dirty {
-// return errorf("cache invariant violation: %d > %d\n", flag.gen, parentCachegen)
-// }
-// for _, child := range children {
-// if err := checkCacheInvariant(child, n, flag.gen, flag.dirty, depth+1); err != nil {
-// return err
-// }
-// }
-// return nil
-// }
-
func TestRandom(t *testing.T) {
if err := quick.Check(runRandTest, nil); err != nil {
if cerr, ok := err.(*quick.CheckError); ok {
@@ -503,18 +458,20 @@ const benchElemCount = 20000
func benchGet(b *testing.B, commit bool) {
trie := new(Trie)
+ db := newMemDatabase()
+ root := Root{}
if commit {
- _, tmpdb := tempDB()
- trie, _ = New(thor.Bytes32{}, tmpdb)
+ trie = New(root, db)
}
k := make([]byte, 32)
for i := 0; i < benchElemCount; i++ {
binary.LittleEndian.PutUint64(k, uint64(i))
- trie.Update(k, k)
+ trie.Update(k, k, nil)
}
binary.LittleEndian.PutUint64(k, benchElemCount/2)
if commit {
- trie.Commit()
+ root.Ver.Major++
+ trie.Commit(db, root.Ver, false)
}
b.ResetTimer()
@@ -522,20 +479,14 @@ func benchGet(b *testing.B, commit bool) {
trie.Get(k)
}
b.StopTimer()
-
- if commit {
- ldb := trie.db.(*ethdb.LDBDatabase)
- ldb.Close()
- os.RemoveAll(ldb.Path())
- }
}
func benchUpdate(b *testing.B, e binary.ByteOrder) *Trie {
- trie := newEmpty()
+ trie := new(Trie)
k := make([]byte, 32)
for i := 0; i < b.N; i++ {
e.PutUint64(k, uint64(i))
- trie.Update(k, k)
+ trie.Update(k, k, nil)
}
return trie
}
@@ -561,47 +512,44 @@ func BenchmarkHash(b *testing.B) {
nonce = uint64(random.Int63())
balance = new(big.Int).Rand(random, new(big.Int).Exp(common.Big2, common.Big256, nil))
root = emptyRoot
- code = thor.Keccak256(nil)
+ code = crypto.Keccak256(nil)
)
accounts[i], _ = rlp.EncodeToBytes([]interface{}{nonce, balance, root, code})
}
// Insert the accounts into the trie and hash it
- trie := newEmpty()
+ trie := new(Trie)
for i := 0; i < len(addresses); i++ {
- trie.Update(thor.Blake2b(addresses[i][:]).Bytes(), accounts[i])
+ trie.Update(thor.Blake2b(addresses[i][:]).Bytes(), accounts[i], nil)
}
b.ResetTimer()
b.ReportAllocs()
trie.Hash()
}
-func tempDB() (string, Database) {
- dir, err := os.MkdirTemp("", "trie-bench")
- if err != nil {
- panic(fmt.Sprintf("can't create temporary directory: %v", err))
- }
- db, err := ethdb.NewLDBDatabase(dir, 256, 0)
+func getString(trie *Trie, k string) []byte {
+ val, _, err := trie.Get([]byte(k))
if err != nil {
- panic(fmt.Sprintf("can't create temporary database: %v", err))
+ panic(err)
}
- return dir, db
-}
-
-func getString(trie *Trie, k string) []byte {
- return trie.Get([]byte(k))
+ return val
}
func updateString(trie *Trie, k, v string) {
- trie.Update([]byte(k), []byte(v))
+ if err := trie.Update([]byte(k), []byte(v), nil); err != nil {
+ panic(err)
+ }
}
func deleteString(trie *Trie, k string) {
- trie.Delete([]byte(k))
+ if err := trie.Update([]byte(k), nil, nil); err != nil {
+ panic(err)
+ }
}
func TestExtended(t *testing.T) {
- db := ethdb.NewMemDatabase()
- tr := NewExtended(thor.Bytes32{}, 0, db, false)
+ db := newMemDatabase()
+ ver := Version{}
+ tr := New(Root{}, db)
vals1 := []struct{ k, v string }{
{"do", "verb"},
@@ -634,20 +582,24 @@ func TestExtended(t *testing.T) {
tr.Update([]byte(v.k), []byte(v.v), thor.Blake2b([]byte(v.v)).Bytes())
}
- root1, err := tr.Commit(1)
+ ver.Major++
+ err := tr.Commit(db, ver, false)
if err != nil {
t.Errorf("commit failed %v", err)
}
+ root1 := tr.Hash()
for _, v := range vals2 {
tr.Update([]byte(v.k), []byte(v.v), thor.Blake2b([]byte(v.v)).Bytes())
}
- root2, err := tr.Commit(2)
+ ver.Major++
+ err = tr.Commit(db, ver, false)
if err != nil {
t.Errorf("commit failed %v", err)
}
+ root2 := tr.Hash()
- tr1 := NewExtended(root1, 1, db, false)
+ tr1 := New(Root{root1, Version{Major: 1}}, db)
for _, v := range vals1 {
val, meta, _ := tr1.Get([]byte(v.k))
if string(val) != v.v {
@@ -658,7 +610,7 @@ func TestExtended(t *testing.T) {
}
}
- tr2 := NewExtended(root2, 2, db, false)
+ tr2 := New(Root{root2, Version{Major: 2}}, db)
for _, v := range append(vals1, vals2...) {
val, meta, _ := tr2.Get([]byte(v.k))
if string(val) != v.v {
@@ -670,30 +622,20 @@ func TestExtended(t *testing.T) {
}
}
-type kedb struct {
- *ethdb.MemDatabase
-}
-
-func (db *kedb) Encode(_ []byte, seq uint64, path []byte) []byte {
- var k [8]byte
- binary.BigEndian.PutUint64(k[:], seq)
- return append(k[:], path...)
-}
-
-func TestNonCryptoExtended(t *testing.T) {
- db := &kedb{ethdb.NewMemDatabase()}
-
- tr := NewExtended(thor.Bytes32{}, 0, db, true)
- var root thor.Bytes32
+func TestCommitSkipHash(t *testing.T) {
+ db := newMemDatabase()
+ ver := Version{}
+ tr := New(Root{}, db)
n := uint32(100)
for i := uint32(0); i < n; i++ {
var k [4]byte
binary.BigEndian.PutUint32(k[:], i)
tr.Update(k[:], thor.Blake2b(k[:]).Bytes(), nil)
- root, _ = tr.Commit(uint64(i))
+ ver.Major++
+ tr.Commit(db, ver, true)
}
- tr = NewExtended(root, uint64(n-1), db, true)
+ tr = New(Root{thor.BytesToBytes32([]byte{1}), ver}, db)
for i := uint32(0); i < n; i++ {
var k [4]byte
binary.BigEndian.PutUint32(k[:], i)
@@ -703,9 +645,9 @@ func TestNonCryptoExtended(t *testing.T) {
}
}
-func TestExtendedCached(t *testing.T) {
- db := ethdb.NewMemDatabase()
- tr := NewExtended(thor.Bytes32{}, 0, db, false)
+func TestFromRootNode(t *testing.T) {
+ db := newMemDatabase()
+ tr := New(Root{}, db)
vals := []struct{ k, v string }{
{"do", "verb"},
@@ -719,7 +661,7 @@ func TestExtendedCached(t *testing.T) {
tr.Update([]byte(val.k), []byte(val.v), nil)
}
- tr = NewExtendedCached(tr.RootNode(), db, false)
+ tr = FromRootNode(tr.RootNode(), db)
for _, val := range vals {
v, _, _ := tr.Get([]byte(val.k))
diff --git a/trie/vp.go b/trie/vp.go
new file mode 100644
index 000000000..5444b9531
--- /dev/null
+++ b/trie/vp.go
@@ -0,0 +1,52 @@
+// Copyright (c) 2024 The VeChainThor developers
+
+// Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying
+// file LICENSE or
+
+package trie
+
+import (
+ "encoding/binary"
+ "errors"
+ "math"
+)
+
+type vpScope struct{}
+
+// vp implements varint-prefix coding.
+//
+// It's much simpler and a bit faster than RLP.
+// Trie nodes stored in database are encoded using vp.
+var vp vpScope
+
+// AppendUint32 appends vp-encoded i to buf and returns the extended buffer.
+func (vpScope) AppendUint32(buf []byte, i uint32) []byte {
+ return binary.AppendUvarint(buf, uint64(i))
+}
+
+// AppendString appends vp-encoded str to buf and returns the extended buffer.
+func (vpScope) AppendString(buf, str []byte) []byte {
+ buf = binary.AppendUvarint(buf, uint64(len(str)))
+ return append(buf, str...)
+}
+
+// SplitString extracts a string and returns rest bytes.
+// It returns a non-nil error if the uvarint length prefix is invalid.
+func (vpScope) SplitString(buf []byte) (str []byte, rest []byte, err error) {
+ i, n := binary.Uvarint(buf)
+ if n <= 0 {
+ return nil, nil, errors.New("invalid uvarint prefix")
+ }
+ buf = buf[n:]
+ return buf[:i], buf[i:], nil
+}
+
+// SplitUint32 extracts uint32 and returns rest bytes.
+// It returns a non-nil error if the uvarint prefix is invalid or the value overflows uint32.
+func (vpScope) SplitUint32(buf []byte) (i uint32, rest []byte, err error) {
+ i64, n := binary.Uvarint(buf)
+ if n <= 0 || i64 > math.MaxUint32 {
+ return 0, nil, errors.New("invalid uvarint prefix")
+ }
+ return uint32(i64), buf[n:], nil
+}
diff --git a/trie/vp_test.go b/trie/vp_test.go
new file mode 100644
index 000000000..cd066bacc
--- /dev/null
+++ b/trie/vp_test.go
@@ -0,0 +1,46 @@
+// Copyright (c) 2024 The VeChainThor developers
+
+// Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying
+// file LICENSE or
+
+package trie
+
+import (
+ "bytes"
+ "testing"
+)
+
+func TestAppendString(t *testing.T) {
+ var buf []byte
+ want := []byte("vechain")
+ buf = vp.AppendString(buf, want)
+ got, buf, err := vp.SplitString(buf)
+ if err != nil {
+ t.Error("should no err")
+ }
+
+ if !bytes.Equal(got, want) {
+ t.Errorf("want %v got %v", want, got)
+ }
+
+ if len(buf) != 0 {
+ t.Error("rest buf should be 0")
+ }
+}
+
+func TestAppendUint(t *testing.T) {
+ var buf []byte
+ const want = 1234567
+ buf = vp.AppendUint32(buf, want)
+ got, buf, err := vp.SplitUint32(buf)
+ if err != nil {
+ t.Error("should no err")
+ }
+ if got != want {
+ t.Errorf("want %v got %v", want, got)
+ }
+
+ if len(buf) != 0 {
+ t.Error("rest buf should be 0")
+ }
+}
diff --git a/txpool/tx_object_map_test.go b/txpool/tx_object_map_test.go
index 9a0b38629..084bd8a78 100644
--- a/txpool/tx_object_map_test.go
+++ b/txpool/tx_object_map_test.go
@@ -19,8 +19,7 @@ import (
)
func TestGetByID(t *testing.T) {
- db := muxdb.NewMem()
- repo := newChainRepo(db)
+ repo := newChainRepo(muxdb.NewMem())
// Creating transactions
tx1 := newTx(repo.ChainTag(), nil, 21000, tx.BlockRef{}, 100, nil, tx.Features(0), genesis.DevAccounts()[0])
@@ -49,8 +48,7 @@ func TestGetByID(t *testing.T) {
}
func TestFill(t *testing.T) {
- db := muxdb.NewMem()
- repo := newChainRepo(db)
+ repo := newChainRepo(muxdb.NewMem())
// Creating transactions
tx1 := newTx(repo.ChainTag(), nil, 21000, tx.BlockRef{}, 100, nil, tx.Features(0), genesis.DevAccounts()[0])
@@ -83,8 +81,7 @@ func TestFill(t *testing.T) {
}
func TestTxObjMap(t *testing.T) {
- db := muxdb.NewMem()
- repo := newChainRepo(db)
+ repo := newChainRepo(muxdb.NewMem())
tx1 := newTx(repo.ChainTag(), nil, 21000, tx.BlockRef{}, 100, nil, tx.Features(0), genesis.DevAccounts()[0])
tx2 := newTx(repo.ChainTag(), nil, 21000, tx.BlockRef{}, 100, nil, tx.Features(0), genesis.DevAccounts()[0])
@@ -120,8 +117,7 @@ func TestTxObjMap(t *testing.T) {
}
func TestLimitByDelegator(t *testing.T) {
- db := muxdb.NewMem()
- repo := newChainRepo(db)
+ repo := newChainRepo(muxdb.NewMem())
tx1 := newTx(repo.ChainTag(), nil, 21000, tx.BlockRef{}, 100, nil, tx.Features(0), genesis.DevAccounts()[0])
tx2 := newDelegatedTx(repo.ChainTag(), nil, 21000, tx.BlockRef{}, 100, nil, genesis.DevAccounts()[0], genesis.DevAccounts()[1])
@@ -158,7 +154,7 @@ func TestPendingCost(t *testing.T) {
chain := repo.NewBestChain()
best := repo.BestBlockSummary()
- state := stater.NewState(best.Header.StateRoot(), best.Header.Number(), best.Conflicts, best.SteadyNum)
+ state := stater.NewState(best.Root())
var err error
txObj1.executable, err = txObj1.Executable(chain, state, best.Header)
diff --git a/txpool/tx_object_test.go b/txpool/tx_object_test.go
index 8358f1a6d..764b22b07 100644
--- a/txpool/tx_object_test.go
+++ b/txpool/tx_object_test.go
@@ -18,6 +18,7 @@ import (
"github.com/vechain/thor/v2/muxdb"
"github.com/vechain/thor/v2/state"
"github.com/vechain/thor/v2/thor"
+ "github.com/vechain/thor/v2/trie"
"github.com/vechain/thor/v2/tx"
)
@@ -78,8 +79,8 @@ func SetupTest() (genesis.DevAccount, *chain.Repository, *block.Block, *state.St
repo := newChainRepo(db)
b0 := repo.GenesisBlock()
b1 := new(block.Builder).ParentID(b0.Header().ID()).GasLimit(10000000).TotalScore(100).Build()
- repo.AddBlock(b1, nil, 0)
- st := state.New(db, repo.GenesisBlock().Header().StateRoot(), 0, 0, 0)
+ repo.AddBlock(b1, nil, 0, false)
+ st := state.New(db, trie.Root{Hash: repo.GenesisBlock().Header().StateRoot()})
return acc, repo, b1, st
}
@@ -137,7 +138,14 @@ func TestResolve(t *testing.T) {
}
func TestExecutable(t *testing.T) {
- acc, repo, b1, st := SetupTest()
+ acc := genesis.DevAccounts()[0]
+
+ db := muxdb.NewMem()
+ repo := newChainRepo(db)
+ b0 := repo.GenesisBlock()
+ b1 := new(block.Builder).ParentID(b0.Header().ID()).GasLimit(10000000).TotalScore(100).Build()
+ repo.AddBlock(b1, nil, 0, false)
+ st := state.New(db, trie.Root{Hash: repo.GenesisBlock().Header().StateRoot()})
tests := []struct {
tx *tx.Transaction
diff --git a/txpool/tx_pool.go b/txpool/tx_pool.go
index 928751676..c339c8b5b 100644
--- a/txpool/tx_pool.go
+++ b/txpool/tx_pool.go
@@ -245,7 +245,7 @@ func (p *TxPool) add(newTx *tx.Transaction, rejectNonExecutable bool, localSubmi
}
}
- state := p.stater.NewState(headSummary.Header.StateRoot(), headSummary.Header.Number(), headSummary.Conflicts, headSummary.SteadyNum)
+ state := p.stater.NewState(headSummary.Root())
executable, err := txObj.Executable(p.repo.NewChain(headSummary.Header.ID()), state, headSummary.Header)
if err != nil {
return txRejectedError{err.Error()}
@@ -391,7 +391,7 @@ func (p *TxPool) wash(headSummary *chain.BlockSummary) (executables tx.Transacti
// recreate state every time to avoid high RAM usage when the pool at hight water-mark.
newState := func() *state.State {
- return p.stater.NewState(headSummary.Header.StateRoot(), headSummary.Header.Number(), headSummary.Conflicts, headSummary.SteadyNum)
+ return p.stater.NewState(headSummary.Root())
}
baseGasPrice, err := builtin.Params.Native(newState()).Get(thor.KeyBaseGasPrice)
if err != nil {
diff --git a/txpool/tx_pool_test.go b/txpool/tx_pool_test.go
index 73fe7db0a..1f281e0bc 100644
--- a/txpool/tx_pool_test.go
+++ b/txpool/tx_pool_test.go
@@ -26,6 +26,7 @@ import (
"github.com/vechain/thor/v2/muxdb"
"github.com/vechain/thor/v2/state"
"github.com/vechain/thor/v2/thor"
+ "github.com/vechain/thor/v2/trie"
"github.com/vechain/thor/v2/tx"
Tx "github.com/vechain/thor/v2/tx"
)
@@ -215,8 +216,8 @@ func TestSubscribeNewTx(t *testing.T) {
pool := newPool(LIMIT, LIMIT_PER_ACCOUNT)
defer pool.Close()
- st := pool.stater.NewState(pool.repo.GenesisBlock().Header().StateRoot(), 0, 0, 0)
- stage, _ := st.Stage(1, 0)
+ st := pool.stater.NewState(trie.Root{Hash: pool.repo.GenesisBlock().Header().StateRoot()})
+ stage, _ := st.Stage(trie.Version{Major: 1})
root1, _ := stage.Commit()
var sig [65]byte
@@ -229,10 +230,9 @@ func TestSubscribeNewTx(t *testing.T) {
GasLimit(10000000).
StateRoot(root1).
Build().WithSignature(sig[:])
- if err := pool.repo.AddBlock(b1, nil, 0); err != nil {
+ if err := pool.repo.AddBlock(b1, nil, 0, true); err != nil {
t.Fatal(err)
}
- pool.repo.SetBestBlockID(b1.Header().ID())
txCh := make(chan *TxEvent)
@@ -261,8 +261,8 @@ func TestWashTxs(t *testing.T) {
assert.Nil(t, err)
assert.Equal(t, Tx.Transactions{tx1}, txs)
- st := pool.stater.NewState(pool.repo.GenesisBlock().Header().StateRoot(), 0, 0, 0)
- stage, _ := st.Stage(1, 0)
+ st := pool.stater.NewState(trie.Root{Hash: pool.repo.GenesisBlock().Header().StateRoot()})
+ stage, _ := st.Stage(trie.Version{Major: 1})
root1, _ := stage.Commit()
b1 := new(block.Builder).
ParentID(pool.repo.GenesisBlock().Header().ID()).
@@ -271,7 +271,7 @@ func TestWashTxs(t *testing.T) {
GasLimit(10000000).
StateRoot(root1).
Build()
- pool.repo.AddBlock(b1, nil, 0)
+ pool.repo.AddBlock(b1, nil, 0, false)
txs, _, err = pool.wash(pool.repo.BestBlockSummary())
assert.Nil(t, err)
@@ -324,8 +324,8 @@ func TestFillPool(t *testing.T) {
func TestAdd(t *testing.T) {
pool := newPool(LIMIT, LIMIT_PER_ACCOUNT)
defer pool.Close()
- st := pool.stater.NewState(pool.repo.GenesisBlock().Header().StateRoot(), 0, 0, 0)
- stage, _ := st.Stage(1, 0)
+ st := pool.stater.NewState(trie.Root{Hash: pool.repo.GenesisBlock().Header().StateRoot()})
+ stage, _ := st.Stage(trie.Version{Major: 1})
root1, _ := stage.Commit()
var sig [65]byte
@@ -337,8 +337,7 @@ func TestAdd(t *testing.T) {
GasLimit(10000000).
StateRoot(root1).
Build().WithSignature(sig[:])
- pool.repo.AddBlock(b1, nil, 0)
- pool.repo.SetBestBlockID(b1.Header().ID())
+ pool.repo.AddBlock(b1, nil, 0, true)
acc := devAccounts[0]
dupTx := newTx(pool.repo.ChainTag(), nil, 21000, tx.BlockRef{}, 100, nil, tx.Features(0), acc)
@@ -614,8 +613,8 @@ func TestAddOverPendingCost(t *testing.T) {
b0, _, _, err := builder.Build(state.NewStater(db))
assert.Nil(t, err)
- st := state.New(db, b0.Header().StateRoot(), 0, 0, 0)
- stage, err := st.Stage(1, 0)
+ st := state.New(db, trie.Root{Hash: b0.Header().StateRoot()})
+ stage, err := st.Stage(trie.Version{Major: 1})
assert.Nil(t, err)
root, err := stage.Commit()
assert.Nil(t, err)
@@ -631,8 +630,7 @@ func TestAddOverPendingCost(t *testing.T) {
TransactionFeatures(feat).Build()
repo, _ := chain.NewRepository(db, b0)
- repo.AddBlock(b1, tx.Receipts{}, 0)
- repo.SetBestBlockID(b1.Header().ID())
+ repo.AddBlock(b1, tx.Receipts{}, 0, true)
pool := New(repo, state.NewStater(db), Options{
Limit: LIMIT,
LimitPerAccount: LIMIT,
From fc1c56181c094dd29496cd794c71c78ef2885cba Mon Sep 17 00:00:00 2001
From: Pedro Gomes
Date: Mon, 9 Dec 2024 16:03:32 +0000
Subject: [PATCH 09/25] Pedro/merge/feat/db (#914)
* fix(documentation): use absolute links in markdown (#889)
* Add benchmark test to node block process (#892)
* Add benchmark test to node block process
* added file-based storage
* use tempdir
* update dependency go-ethereum (#895)
* chore: update API metrics bucket and endpoint names (#893)
* chore: update API metrics bucket and endpoint names
* fix: typo & tests
* fix: lint
* chore: add websocket total counter
* fix: txs endpoints names & ws subject
* fix: unit tests
* chore: standardise naming convention
* chore: add websocket duration & http code
* chore: add websocket duration & http code
* fix: lint issues
* fix: sync issues with metrics
* chore: update websocket durations bucket
* fix: PR comments - use sync.Once
* chore: update builtin generation (#896)
* chore: update builtin generation
* fix: update GHA
* getreceipts metrics + lint (#902)
* chore: add flag to enable/disable deprecated APIs (#897)
* chore: add flag to enable/disable deprecated APIs
* chore: update for PR comments
* chore: update for PR comments
* fix: update e2e commit sha
* fix: update e2e commit sha
* fix: update flag name
* fix: solo start flags (#906)
* chore: make thorclient configurable + fix type error (#908)
* chore: make thorclient configurable
* fix: subscriptions block type
* fix: compile errors
* fix: remove test with lint error
* add 'raw' query parameter to the blocks (#899)
* add 'raw' query parameter to the blocks
* summary -> summary.Header
Co-authored-by: libotony
* change variable name
* make expanded and raw mutually exclusive
* add unit tests
* fix linting
---------
Co-authored-by: libotony
* Adding Health endpoint (#836)
* Adding Health endpoint
* pr comments + 503 if not healthy
* refactored admin server and api + health endpoint tests
* fix health condition
* fix admin routing
* added comments + changed from ChainSync to ChainBootstrapStatus
* Adding healthcheck for solo mode
* adding solo + tests
* fix log_level handler funcs
* refactor health package + add p2p count
* remove solo methods
* moving health service to api pkg
* added defaults + api health query
* pr comments
* pr comments
* pr comments
* Update cmd/thor/main.go
* Darren/admin api log toggler (#877)
* Adding Health endpoint
* pr comments + 503 if not healthy
* refactored admin server and api + health endpoint tests
* fix health condition
* fix admin routing
* added comments + changed from ChainSync to ChainBootstrapStatus
* Adding healthcheck for solo mode
* adding solo + tests
* fix log_level handler funcs
* feat(admin): toggle api logs via admin API
* feat(admin): add license headers
* refactor health package + add p2p count
* remove solo methods
* moving health service to api pkg
* added defaults + api health query
* pr comments
* pr comments
---------
Co-authored-by: otherview
* Darren/chore/backport metrics (#909)
* chore(muxdb): backport muxdb cache metrics
* chore(muxdb): backport muxdb cache metrics
* chore(metrics): backport disk IO
* chore(metrics): fix lint
* chore(chain): add repo cache metrics
* fix(chain): fix cache return value
* refactor(chain): cache hit miss
* chore(thor): update version (#912)
* chore(thor): update version
* chore(openapi): version
* feat(api/debug): support debug trace without blockId (#905)
* api/debug: support debug with txhash
Signed-off-by: jsvisa
api/debug: blockId should use tx's instead
Signed-off-by: jsvisa
fix tests
Signed-off-by: jsvisa
* debug: add test
Signed-off-by: jsvisa
* improve parseTarget
Signed-off-by: jsvisa
* update doc
Signed-off-by: jsvisa
* fix tests
Signed-off-by: jsvisa
---------
Signed-off-by: jsvisa
Co-authored-by: tony
* version
---------
Signed-off-by: jsvisa
Co-authored-by: Darren Kelly <107671032+darrenvechain@users.noreply.github.com>
Co-authored-by: libotony
Co-authored-by: YeahNotSewerSide <47860375+YeahNotSewerSide@users.noreply.github.com>
Co-authored-by: Delweng
---
.github/workflows/test-e2e.yaml | 4 +-
api/accounts/accounts.go | 16 ++-
api/accounts/accounts_test.go | 21 ++-
api/admin.go | 62 --------
api/admin/admin.go | 32 +++++
api/admin/apilogs/api_logs.go | 70 +++++++++
api/admin/apilogs/api_logs_test.go | 91 ++++++++++++
api/admin/health/health.go | 84 +++++++++++
api/admin/health/health_api.go | 68 +++++++++
api/admin/health/health_api_test.go | 59 ++++++++
api/admin/health/health_test.go | 71 ++++++++++
api/admin/loglevel/log_level.go | 76 ++++++++++
.../loglevel/log_level_test.go} | 10 +-
api/admin/loglevel/types.go | 14 ++
api/admin_server.go | 38 ++---
api/api.go | 50 ++++---
api/blocks/blocks.go | 29 +++-
api/blocks/blocks_test.go | 55 +++++++
api/blocks/types.go | 4 +
api/debug/debug.go | 134 +++++++++++-------
api/debug/debug_test.go | 30 +++-
api/doc/thor.yaml | 24 +++-
api/events/events_test.go | 2 +-
api/events/types.go | 2 +-
api/events/types_test.go | 22 +--
api/metrics_test.go | 4 +-
api/node/node_test.go | 3 +-
api/request_logger.go | 7 +-
api/request_logger_test.go | 5 +-
api/subscriptions/subscriptions.go | 27 ++--
api/subscriptions/subscriptions_test.go | 19 ++-
api/utils/http.go | 13 ++
chain/repository.go | 1 -
cmd/thor/flags.go | 4 +
cmd/thor/main.go | 98 +++++++------
cmd/thor/utils.go | 19 +++
comm/communicator.go | 5 +-
metrics/noop.go | 3 -
metrics/prometheus.go | 52 +++----
thorclient/api_test.go | 2 +-
thorclient/httpclient/client.go | 6 +-
thorclient/thorclient.go | 10 +-
thorclient/wsclient/client.go | 5 +-
thorclient/wsclient/client_test.go | 14 +-
44 files changed, 1051 insertions(+), 314 deletions(-)
delete mode 100644 api/admin.go
create mode 100644 api/admin/admin.go
create mode 100644 api/admin/apilogs/api_logs.go
create mode 100644 api/admin/apilogs/api_logs_test.go
create mode 100644 api/admin/health/health.go
create mode 100644 api/admin/health/health_api.go
create mode 100644 api/admin/health/health_api_test.go
create mode 100644 api/admin/health/health_test.go
create mode 100644 api/admin/loglevel/log_level.go
rename api/{admin_test.go => admin/loglevel/log_level_test.go} (93%)
create mode 100644 api/admin/loglevel/types.go
diff --git a/.github/workflows/test-e2e.yaml b/.github/workflows/test-e2e.yaml
index babbb3a39..55c82689e 100644
--- a/.github/workflows/test-e2e.yaml
+++ b/.github/workflows/test-e2e.yaml
@@ -43,8 +43,8 @@ jobs:
uses: actions/checkout@v4
with:
repository: vechain/thor-e2e-tests
- # https://github.com/vechain/thor-e2e-tests/tree/209f6ea9a81a98dc2d5e42bf036d2878c5837036
- ref: 209f6ea9a81a98dc2d5e42bf036d2878c5837036
+ # https://github.com/vechain/thor-e2e-tests/tree/8b72bedff11c9e8873d88b6e2dba356d43b56779
+ ref: 8b72bedff11c9e8873d88b6e2dba356d43b56779
- name: Download artifact
uses: actions/download-artifact@v4
diff --git a/api/accounts/accounts.go b/api/accounts/accounts.go
index 54058a160..22698bdbd 100644
--- a/api/accounts/accounts.go
+++ b/api/accounts/accounts.go
@@ -27,11 +27,12 @@ import (
)
type Accounts struct {
- repo *chain.Repository
- stater *state.Stater
- callGasLimit uint64
- forkConfig thor.ForkConfig
- bft bft.Committer
+ repo *chain.Repository
+ stater *state.Stater
+ callGasLimit uint64
+ forkConfig thor.ForkConfig
+ bft bft.Committer
+ enabledDeprecated bool
}
func New(
@@ -40,6 +41,7 @@ func New(
callGasLimit uint64,
forkConfig thor.ForkConfig,
bft bft.Committer,
+ enabledDeprecated bool,
) *Accounts {
return &Accounts{
repo,
@@ -47,6 +49,7 @@ func New(
callGasLimit,
forkConfig,
bft,
+ enabledDeprecated,
}
}
@@ -168,6 +171,9 @@ func (a *Accounts) handleGetStorage(w http.ResponseWriter, req *http.Request) er
}
func (a *Accounts) handleCallContract(w http.ResponseWriter, req *http.Request) error {
+ if !a.enabledDeprecated {
+ return utils.HTTPError(nil, http.StatusGone)
+ }
callData := &CallData{}
if err := utils.ParseJSON(req.Body, &callData); err != nil {
return utils.BadRequest(errors.WithMessage(err, "body"))
diff --git a/api/accounts/accounts_test.go b/api/accounts/accounts_test.go
index 9294723eb..8630bea4b 100644
--- a/api/accounts/accounts_test.go
+++ b/api/accounts/accounts_test.go
@@ -103,7 +103,7 @@ var (
)
func TestAccount(t *testing.T) {
- initAccountServer(t)
+ initAccountServer(t, true)
defer ts.Close()
tclient = thorclient.New(ts.URL)
@@ -126,6 +126,21 @@ func TestAccount(t *testing.T) {
}
}
+func TestDeprecated(t *testing.T) {
+ initAccountServer(t, false)
+ defer ts.Close()
+
+ tclient = thorclient.New(ts.URL)
+
+ body := &accounts.CallData{}
+
+ _, statusCode, _ := tclient.RawHTTPClient().RawHTTPPost("/accounts", body)
+ assert.Equal(t, http.StatusGone, statusCode, "deprecated endpoint should return 410 Gone")
+
+ _, statusCode, _ = tclient.RawHTTPClient().RawHTTPPost("/accounts/"+contractAddr.String(), body)
+ assert.Equal(t, http.StatusGone, statusCode, "deprecated endpoint should return 410 Gone")
+}
+
func getAccount(t *testing.T) {
_, statusCode, err := tclient.RawHTTPClient().RawHTTPGet("/accounts/" + invalidAddr)
require.NoError(t, err)
@@ -264,7 +279,7 @@ func getStorageWithNonExistingRevision(t *testing.T) {
assert.Equal(t, "revision: leveldb: not found\n", string(res), "revision not found")
}
-func initAccountServer(t *testing.T) {
+func initAccountServer(t *testing.T, enabledDeprecated bool) {
thorChain, err := testchain.NewIntegrationTestChain()
require.NoError(t, err)
@@ -291,7 +306,7 @@ func initAccountServer(t *testing.T) {
)
router := mux.NewRouter()
- accounts.New(thorChain.Repo(), thorChain.Stater(), uint64(gasLimit), thor.NoFork, thorChain.Engine()).
+ accounts.New(thorChain.Repo(), thorChain.Stater(), uint64(gasLimit), thor.NoFork, thorChain.Engine(), enabledDeprecated).
Mount(router, "/accounts")
ts = httptest.NewServer(router)
diff --git a/api/admin.go b/api/admin.go
deleted file mode 100644
index afd299cfa..000000000
--- a/api/admin.go
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright (c) 2024 The VeChainThor developers
-
-// Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying
-// file LICENSE or
-
-package api
-
-import (
- "log/slog"
- "net/http"
-
- "github.com/pkg/errors"
- "github.com/vechain/thor/v2/api/utils"
- "github.com/vechain/thor/v2/log"
-)
-
-type logLevelRequest struct {
- Level string `json:"level"`
-}
-
-type logLevelResponse struct {
- CurrentLevel string `json:"currentLevel"`
-}
-
-func getLogLevelHandler(logLevel *slog.LevelVar) utils.HandlerFunc {
- return func(w http.ResponseWriter, r *http.Request) error {
- return utils.WriteJSON(w, logLevelResponse{
- CurrentLevel: logLevel.Level().String(),
- })
- }
-}
-
-func postLogLevelHandler(logLevel *slog.LevelVar) utils.HandlerFunc {
- return func(w http.ResponseWriter, r *http.Request) error {
- var req logLevelRequest
-
- if err := utils.ParseJSON(r.Body, &req); err != nil {
- return utils.BadRequest(errors.WithMessage(err, "Invalid request body"))
- }
-
- switch req.Level {
- case "debug":
- logLevel.Set(log.LevelDebug)
- case "info":
- logLevel.Set(log.LevelInfo)
- case "warn":
- logLevel.Set(log.LevelWarn)
- case "error":
- logLevel.Set(log.LevelError)
- case "trace":
- logLevel.Set(log.LevelTrace)
- case "crit":
- logLevel.Set(log.LevelCrit)
- default:
- return utils.BadRequest(errors.New("Invalid verbosity level"))
- }
-
- return utils.WriteJSON(w, logLevelResponse{
- CurrentLevel: logLevel.Level().String(),
- })
- }
-}
diff --git a/api/admin/admin.go b/api/admin/admin.go
new file mode 100644
index 000000000..9b819c875
--- /dev/null
+++ b/api/admin/admin.go
@@ -0,0 +1,32 @@
+// Copyright (c) 2024 The VeChainThor developers
+
+// Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying
+// file LICENSE or
+
+package admin
+
+import (
+ "log/slog"
+ "net/http"
+ "sync/atomic"
+
+ "github.com/gorilla/handlers"
+ "github.com/gorilla/mux"
+ "github.com/vechain/thor/v2/api/admin/apilogs"
+ "github.com/vechain/thor/v2/api/admin/loglevel"
+
+ healthAPI "github.com/vechain/thor/v2/api/admin/health"
+)
+
+func New(logLevel *slog.LevelVar, health *healthAPI.Health, apiLogsToggle *atomic.Bool) http.HandlerFunc {
+ router := mux.NewRouter()
+ subRouter := router.PathPrefix("/admin").Subrouter()
+
+ loglevel.New(logLevel).Mount(subRouter, "/loglevel")
+ healthAPI.NewAPI(health).Mount(subRouter, "/health")
+ apilogs.New(apiLogsToggle).Mount(subRouter, "/apilogs")
+
+ handler := handlers.CompressHandler(router)
+
+ return handler.ServeHTTP
+}
diff --git a/api/admin/apilogs/api_logs.go b/api/admin/apilogs/api_logs.go
new file mode 100644
index 000000000..0f815d579
--- /dev/null
+++ b/api/admin/apilogs/api_logs.go
@@ -0,0 +1,70 @@
+// Copyright (c) 2024 The VeChainThor developers
+//
+// Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying
+// file LICENSE or
+
+package apilogs
+
+import (
+ "net/http"
+ "sync"
+ "sync/atomic"
+
+ "github.com/gorilla/mux"
+ "github.com/vechain/thor/v2/api/utils"
+ "github.com/vechain/thor/v2/log"
+)
+
+type APILogs struct {
+ enabled *atomic.Bool
+ mu sync.Mutex
+}
+
+type Status struct {
+ Enabled bool `json:"enabled"`
+}
+
+func New(enabled *atomic.Bool) *APILogs {
+ return &APILogs{
+ enabled: enabled,
+ }
+}
+
+func (a *APILogs) Mount(root *mux.Router, pathPrefix string) {
+ sub := root.PathPrefix(pathPrefix).Subrouter()
+ sub.Path("").
+ Methods(http.MethodGet).
+ Name("get-api-logs-enabled").
+ HandlerFunc(utils.WrapHandlerFunc(a.areAPILogsEnabled))
+
+ sub.Path("").
+ Methods(http.MethodPost).
+ Name("post-api-logs-enabled").
+ HandlerFunc(utils.WrapHandlerFunc(a.setAPILogsEnabled))
+}
+
+func (a *APILogs) areAPILogsEnabled(w http.ResponseWriter, _ *http.Request) error {
+ a.mu.Lock()
+ defer a.mu.Unlock()
+
+ return utils.WriteJSON(w, Status{
+ Enabled: a.enabled.Load(),
+ })
+}
+
+func (a *APILogs) setAPILogsEnabled(w http.ResponseWriter, r *http.Request) error {
+ a.mu.Lock()
+ defer a.mu.Unlock()
+
+ var req Status
+ if err := utils.ParseJSON(r.Body, &req); err != nil {
+ return utils.BadRequest(err)
+ }
+ a.enabled.Store(req.Enabled)
+
+ log.Info("api logs updated", "pkg", "apilogs", "enabled", req.Enabled)
+
+ return utils.WriteJSON(w, Status{
+ Enabled: a.enabled.Load(),
+ })
+}
diff --git a/api/admin/apilogs/api_logs_test.go b/api/admin/apilogs/api_logs_test.go
new file mode 100644
index 000000000..95cf2c6ac
--- /dev/null
+++ b/api/admin/apilogs/api_logs_test.go
@@ -0,0 +1,91 @@
+// Copyright (c) 2024 The VeChainThor developers
+//
+// Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying
+// file LICENSE or
+
+package apilogs
+
+import (
+ "bytes"
+ "encoding/json"
+ "net/http"
+ "net/http/httptest"
+ "sync/atomic"
+ "testing"
+
+ "github.com/gorilla/mux"
+ "github.com/stretchr/testify/assert"
+)
+
+type TestCase struct {
+ name string
+ method string
+ expectedHTTP int
+ startValue bool
+ expectedEndValue bool
+ requestBody bool
+}
+
+func marshalBody(tt TestCase, t *testing.T) []byte {
+ var reqBody []byte
+ var err error
+ if tt.method == "POST" {
+ reqBody, err = json.Marshal(Status{Enabled: tt.requestBody})
+ if err != nil {
+ t.Fatalf("could not marshal request body: %v", err)
+ }
+ }
+ return reqBody
+}
+
+func TestAPILogsHandler(t *testing.T) {
+ tests := []TestCase{
+ {
+ name: "Valid POST input - set logs to enabled",
+ method: "POST",
+ expectedHTTP: http.StatusOK,
+ startValue: false,
+ requestBody: true,
+ expectedEndValue: true,
+ },
+ {
+ name: "Valid POST input - set logs to disabled",
+ method: "POST",
+ expectedHTTP: http.StatusOK,
+ startValue: true,
+ requestBody: false,
+ expectedEndValue: false,
+ },
+ {
+ name: "GET request - get current level INFO",
+ method: "GET",
+ expectedHTTP: http.StatusOK,
+ startValue: true,
+ expectedEndValue: true,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ apiLogsEnabled := atomic.Bool{}
+ apiLogsEnabled.Store(tt.startValue)
+
+ reqBodyBytes := marshalBody(tt, t)
+
+ req, err := http.NewRequest(tt.method, "/admin/apilogs", bytes.NewBuffer(reqBodyBytes))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ rr := httptest.NewRecorder()
+ router := mux.NewRouter()
+ New(&apiLogsEnabled).Mount(router, "/admin/apilogs")
+ router.ServeHTTP(rr, req)
+
+ assert.Equal(t, tt.expectedHTTP, rr.Code)
+ responseBody := Status{}
+ assert.NoError(t, json.Unmarshal(rr.Body.Bytes(), &responseBody))
+ assert.Equal(t, tt.expectedEndValue, responseBody.Enabled)
+ })
+ }
+}
diff --git a/api/admin/health/health.go b/api/admin/health/health.go
new file mode 100644
index 000000000..41522e32d
--- /dev/null
+++ b/api/admin/health/health.go
@@ -0,0 +1,84 @@
+// Copyright (c) 2024 The VeChainThor developers
+
+// Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying
+// file LICENSE or
+
+package health
+
+import (
+ "time"
+
+ "github.com/vechain/thor/v2/chain"
+ "github.com/vechain/thor/v2/comm"
+ "github.com/vechain/thor/v2/thor"
+)
+
+type BlockIngestion struct {
+ ID *thor.Bytes32 `json:"id"`
+ Timestamp *time.Time `json:"timestamp"`
+}
+
+type Status struct {
+ Healthy bool `json:"healthy"`
+ BestBlockTime *time.Time `json:"bestBlockTime"`
+ PeerCount int `json:"peerCount"`
+ IsNetworkProgressing bool `json:"isNetworkProgressing"`
+}
+
+type Health struct {
+ repo *chain.Repository
+ p2p *comm.Communicator
+}
+
+const (
+ defaultBlockTolerance = time.Duration(2*thor.BlockInterval) * time.Second // 2 blocks tolerance
+ defaultMinPeerCount = 2
+)
+
+func New(repo *chain.Repository, p2p *comm.Communicator) *Health {
+ return &Health{
+ repo: repo,
+ p2p: p2p,
+ }
+}
+
+// isNetworkProgressing checks if the network is producing new blocks within the allowed interval.
+func (h *Health) isNetworkProgressing(now time.Time, bestBlockTimestamp time.Time, blockTolerance time.Duration) bool {
+ return now.Sub(bestBlockTimestamp) <= blockTolerance
+}
+
+// isNodeConnectedP2P checks if the node is connected to peers
+func (h *Health) isNodeConnectedP2P(peerCount int, minPeerCount int) bool {
+ return peerCount >= minPeerCount
+}
+
+func (h *Health) Status(blockTolerance time.Duration, minPeerCount int) (*Status, error) {
+ // Fetch the best block details
+ bestBlock := h.repo.BestBlockSummary()
+ bestBlockTimestamp := time.Unix(int64(bestBlock.Header.Timestamp()), 0)
+
+ // Fetch the current connected peers
+ var connectedPeerCount int
+ if h.p2p == nil {
+ connectedPeerCount = minPeerCount // ignore peers in solo mode
+ } else {
+ connectedPeerCount = h.p2p.PeerCount()
+ }
+
+ now := time.Now()
+
+ // Perform the checks
+ networkProgressing := h.isNetworkProgressing(now, bestBlockTimestamp, blockTolerance)
+ nodeConnected := h.isNodeConnectedP2P(connectedPeerCount, minPeerCount)
+
+ // Calculate overall health status
+ healthy := networkProgressing && nodeConnected
+
+ // Return the current status
+ return &Status{
+ Healthy: healthy,
+ BestBlockTime: &bestBlockTimestamp,
+ IsNetworkProgressing: networkProgressing,
+ PeerCount: connectedPeerCount,
+ }, nil
+}
diff --git a/api/admin/health/health_api.go b/api/admin/health/health_api.go
new file mode 100644
index 000000000..3bad13f07
--- /dev/null
+++ b/api/admin/health/health_api.go
@@ -0,0 +1,68 @@
+// Copyright (c) 2024 The VeChainThor developers
+
+// Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying
+// file LICENSE or
+
+package health
+
+import (
+ "net/http"
+ "strconv"
+ "time"
+
+ "github.com/gorilla/mux"
+ "github.com/vechain/thor/v2/api/utils"
+)
+
+type API struct {
+ healthStatus *Health
+}
+
+func NewAPI(healthStatus *Health) *API {
+ return &API{
+ healthStatus: healthStatus,
+ }
+}
+
+func (h *API) handleGetHealth(w http.ResponseWriter, r *http.Request) error {
+ // Parse query parameters
+ query := r.URL.Query()
+
+ // Default to constants if query parameters are not provided
+ blockTolerance := defaultBlockTolerance
+ minPeerCount := defaultMinPeerCount
+
+ // Override with query parameters if they exist
+ if queryBlockTolerance := query.Get("blockTolerance"); queryBlockTolerance != "" {
+ if parsed, err := time.ParseDuration(queryBlockTolerance); err == nil {
+ blockTolerance = parsed
+ }
+ }
+
+ if queryMinPeerCount := query.Get("minPeerCount"); queryMinPeerCount != "" {
+ if parsed, err := strconv.Atoi(queryMinPeerCount); err == nil {
+ minPeerCount = parsed
+ }
+ }
+
+ acc, err := h.healthStatus.Status(blockTolerance, minPeerCount)
+ if err != nil {
+ return err
+ }
+
+ if !acc.Healthy {
+ w.WriteHeader(http.StatusServiceUnavailable) // Set the status to 503
+ } else {
+ w.WriteHeader(http.StatusOK) // Set the status to 200
+ }
+ return utils.WriteJSON(w, acc)
+}
+
+func (h *API) Mount(root *mux.Router, pathPrefix string) {
+ sub := root.PathPrefix(pathPrefix).Subrouter()
+
+ sub.Path("").
+ Methods(http.MethodGet).
+ Name("health").
+ HandlerFunc(utils.WrapHandlerFunc(h.handleGetHealth))
+}
diff --git a/api/admin/health/health_api_test.go b/api/admin/health/health_api_test.go
new file mode 100644
index 000000000..e50af0398
--- /dev/null
+++ b/api/admin/health/health_api_test.go
@@ -0,0 +1,59 @@
+// Copyright (c) 2024 The VeChainThor developers
+
+// Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying
+// file LICENSE or
+
+package health
+
+import (
+ "encoding/json"
+ "io"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+
+ "github.com/gorilla/mux"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "github.com/vechain/thor/v2/comm"
+ "github.com/vechain/thor/v2/test/testchain"
+ "github.com/vechain/thor/v2/txpool"
+)
+
+var ts *httptest.Server
+
+func TestHealth(t *testing.T) {
+ initAPIServer(t)
+
+ var healthStatus Status
+ respBody, statusCode := httpGet(t, ts.URL+"/health")
+ require.NoError(t, json.Unmarshal(respBody, &healthStatus))
+ assert.False(t, healthStatus.Healthy)
+ assert.Equal(t, http.StatusServiceUnavailable, statusCode)
+}
+
+func initAPIServer(t *testing.T) {
+ thorChain, err := testchain.NewIntegrationTestChain()
+ require.NoError(t, err)
+
+ router := mux.NewRouter()
+ NewAPI(
+ New(thorChain.Repo(), comm.New(thorChain.Repo(), txpool.New(thorChain.Repo(), nil, txpool.Options{}))),
+ ).Mount(router, "/health")
+
+ ts = httptest.NewServer(router)
+}
+
+func httpGet(t *testing.T, url string) ([]byte, int) {
+ res, err := http.Get(url) //#nosec G107
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer res.Body.Close()
+
+ r, err := io.ReadAll(res.Body)
+ if err != nil {
+ t.Fatal(err)
+ }
+ return r, res.StatusCode
+}
diff --git a/api/admin/health/health_test.go b/api/admin/health/health_test.go
new file mode 100644
index 000000000..60f9a3dcd
--- /dev/null
+++ b/api/admin/health/health_test.go
@@ -0,0 +1,71 @@
+// Copyright (c) 2024 The VeChainThor developers
+
+// Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying
+// file LICENSE or
+
+package health
+
+import (
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestHealth_isNetworkProgressing(t *testing.T) {
+ h := &Health{}
+
+ now := time.Now()
+
+ tests := []struct {
+ name string
+ bestBlockTimestamp time.Time
+ expectedProgressing bool
+ }{
+ {
+ name: "Progressing - block within timeBetweenBlocks",
+ bestBlockTimestamp: now.Add(-5 * time.Second),
+ expectedProgressing: true,
+ },
+ {
+ name: "Not Progressing - block outside timeBetweenBlocks",
+ bestBlockTimestamp: now.Add(-25 * time.Second),
+ expectedProgressing: false,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ isProgressing := h.isNetworkProgressing(now, tt.bestBlockTimestamp, defaultBlockTolerance)
+ assert.Equal(t, tt.expectedProgressing, isProgressing, "isNetworkProgressing result mismatch")
+ })
+ }
+}
+
+func TestHealth_isNodeConnectedP2P(t *testing.T) {
+ h := &Health{}
+
+ tests := []struct {
+ name string
+ peerCount int
+ expectedConnected bool
+ }{
+ {
+ name: "Connected - more than one peer",
+ peerCount: 3,
+ expectedConnected: true,
+ },
+ {
+ name: "Not Connected - one or fewer peers",
+ peerCount: 1,
+ expectedConnected: false,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ isConnected := h.isNodeConnectedP2P(tt.peerCount, defaultMinPeerCount)
+ assert.Equal(t, tt.expectedConnected, isConnected, "isNodeConnectedP2P result mismatch")
+ })
+ }
+}
diff --git a/api/admin/loglevel/log_level.go b/api/admin/loglevel/log_level.go
new file mode 100644
index 000000000..c91702d2d
--- /dev/null
+++ b/api/admin/loglevel/log_level.go
@@ -0,0 +1,76 @@
+// Copyright (c) 2024 The VeChainThor developers
+
+// Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying
+// file LICENSE or
+
+package loglevel
+
+import (
+ "log/slog"
+ "net/http"
+
+ "github.com/gorilla/mux"
+ "github.com/pkg/errors"
+ "github.com/vechain/thor/v2/api/utils"
+ "github.com/vechain/thor/v2/log"
+)
+
+type LogLevel struct {
+ logLevel *slog.LevelVar
+}
+
+func New(logLevel *slog.LevelVar) *LogLevel {
+ return &LogLevel{
+ logLevel: logLevel,
+ }
+}
+
+func (l *LogLevel) Mount(root *mux.Router, pathPrefix string) {
+ sub := root.PathPrefix(pathPrefix).Subrouter()
+ sub.Path("").
+ Methods(http.MethodGet).
+ Name("get-log-level").
+ HandlerFunc(utils.WrapHandlerFunc(l.getLogLevelHandler))
+
+ sub.Path("").
+ Methods(http.MethodPost).
+ Name("post-log-level").
+ HandlerFunc(utils.WrapHandlerFunc(l.postLogLevelHandler))
+}
+
+func (l *LogLevel) getLogLevelHandler(w http.ResponseWriter, _ *http.Request) error {
+ return utils.WriteJSON(w, Response{
+ CurrentLevel: l.logLevel.Level().String(),
+ })
+}
+
+func (l *LogLevel) postLogLevelHandler(w http.ResponseWriter, r *http.Request) error {
+ var req Request
+
+ if err := utils.ParseJSON(r.Body, &req); err != nil {
+ return utils.BadRequest(errors.WithMessage(err, "Invalid request body"))
+ }
+
+ switch req.Level {
+ case "debug":
+ l.logLevel.Set(log.LevelDebug)
+ case "info":
+ l.logLevel.Set(log.LevelInfo)
+ case "warn":
+ l.logLevel.Set(log.LevelWarn)
+ case "error":
+ l.logLevel.Set(log.LevelError)
+ case "trace":
+ l.logLevel.Set(log.LevelTrace)
+ case "crit":
+ l.logLevel.Set(log.LevelCrit)
+ default:
+ return utils.BadRequest(errors.New("Invalid verbosity level"))
+ }
+
+ log.Info("log level changed", "pkg", "loglevel", "level", l.logLevel.Level().String())
+
+ return utils.WriteJSON(w, Response{
+ CurrentLevel: l.logLevel.Level().String(),
+ })
+}
diff --git a/api/admin_test.go b/api/admin/loglevel/log_level_test.go
similarity index 93%
rename from api/admin_test.go
rename to api/admin/loglevel/log_level_test.go
index be2847cbf..3d1a8a960 100644
--- a/api/admin_test.go
+++ b/api/admin/loglevel/log_level_test.go
@@ -3,7 +3,7 @@
// Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying
// file LICENSE or
-package api
+package loglevel
import (
"bytes"
@@ -14,6 +14,7 @@ import (
"strings"
"testing"
+ "github.com/gorilla/mux"
"github.com/stretchr/testify/assert"
)
@@ -76,15 +77,16 @@ func TestLogLevelHandler(t *testing.T) {
}
rr := httptest.NewRecorder()
- handler := http.HandlerFunc(HTTPHandler(&logLevel).ServeHTTP)
- handler.ServeHTTP(rr, req)
+ router := mux.NewRouter()
+ New(&logLevel).Mount(router, "/admin/loglevel")
+ router.ServeHTTP(rr, req)
if status := rr.Code; status != tt.expectedStatus {
t.Errorf("handler returned wrong status code: got %v want %v", status, tt.expectedStatus)
}
if tt.expectedLevel != "" {
- var response logLevelResponse
+ var response Response
if err := json.NewDecoder(rr.Body).Decode(&response); err != nil {
t.Fatalf("could not decode response: %v", err)
}
diff --git a/api/admin/loglevel/types.go b/api/admin/loglevel/types.go
new file mode 100644
index 000000000..ce57187b1
--- /dev/null
+++ b/api/admin/loglevel/types.go
@@ -0,0 +1,14 @@
+// Copyright (c) 2024 The VeChainThor developers
+
+// Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying
+// file LICENSE or
+
+package loglevel
+
+type Request struct {
+ Level string `json:"level"`
+}
+
+type Response struct {
+ CurrentLevel string `json:"currentLevel"`
+}
diff --git a/api/admin_server.go b/api/admin_server.go
index 26054e908..dca428b36 100644
--- a/api/admin_server.go
+++ b/api/admin_server.go
@@ -9,42 +9,32 @@ import (
"log/slog"
"net"
"net/http"
+ "sync/atomic"
"time"
- "github.com/gorilla/handlers"
- "github.com/gorilla/mux"
"github.com/pkg/errors"
- "github.com/vechain/thor/v2/api/utils"
+ "github.com/vechain/thor/v2/api/admin"
+ "github.com/vechain/thor/v2/api/admin/health"
+ "github.com/vechain/thor/v2/chain"
"github.com/vechain/thor/v2/co"
+ "github.com/vechain/thor/v2/comm"
)
-func HTTPHandler(logLevel *slog.LevelVar) http.Handler {
- router := mux.NewRouter()
- sub := router.PathPrefix("/admin").Subrouter()
- sub.Path("/loglevel").
- Methods(http.MethodGet).
- Name("get-log-level").
- HandlerFunc(utils.WrapHandlerFunc(getLogLevelHandler(logLevel)))
-
- sub.Path("/loglevel").
- Methods(http.MethodPost).
- Name("post-log-level").
- HandlerFunc(utils.WrapHandlerFunc(postLogLevelHandler(logLevel)))
-
- return handlers.CompressHandler(router)
-}
-
-func StartAdminServer(addr string, logLevel *slog.LevelVar) (string, func(), error) {
+func StartAdminServer(
+ addr string,
+ logLevel *slog.LevelVar,
+ repo *chain.Repository,
+ p2p *comm.Communicator,
+ apiLogs *atomic.Bool,
+) (string, func(), error) {
listener, err := net.Listen("tcp", addr)
if err != nil {
return "", nil, errors.Wrapf(err, "listen admin API addr [%v]", addr)
}
- router := mux.NewRouter()
- router.PathPrefix("/admin").Handler(HTTPHandler(logLevel))
- handler := handlers.CompressHandler(router)
+ adminHandler := admin.New(logLevel, health.New(repo, p2p), apiLogs)
- srv := &http.Server{Handler: handler, ReadHeaderTimeout: time.Second, ReadTimeout: 5 * time.Second}
+ srv := &http.Server{Handler: adminHandler, ReadHeaderTimeout: time.Second, ReadTimeout: 5 * time.Second}
var goes co.Goes
goes.Go(func() {
srv.Serve(listener)
diff --git a/api/api.go b/api/api.go
index 38b412a97..c57e2a957 100644
--- a/api/api.go
+++ b/api/api.go
@@ -9,6 +9,7 @@ import (
"net/http"
"net/http/pprof"
"strings"
+ "sync/atomic"
"github.com/gorilla/handlers"
"github.com/gorilla/mux"
@@ -32,6 +33,21 @@ import (
var logger = log.WithContext("pkg", "api")
+type Config struct {
+ AllowedOrigins string
+ BacktraceLimit uint32
+ CallGasLimit uint64
+ PprofOn bool
+ SkipLogs bool
+ AllowCustomTracer bool
+ EnableReqLogger *atomic.Bool
+ EnableMetrics bool
+ LogsLimit uint64
+ AllowedTracers []string
+ SoloMode bool
+ EnableDeprecated bool
+}
+
// New return api router
func New(
repo *chain.Repository,
@@ -41,19 +57,9 @@ func New(
bft bft.Committer,
nw node.Network,
forkConfig thor.ForkConfig,
- allowedOrigins string,
- backtraceLimit uint32,
- callGasLimit uint64,
- pprofOn bool,
- skipLogs bool,
- allowCustomTracer bool,
- enableReqLogger bool,
- enableMetrics bool,
- logsLimit uint64,
- allowedTracers []string,
- soloMode bool,
+ config Config,
) (http.HandlerFunc, func()) {
- origins := strings.Split(strings.TrimSpace(allowedOrigins), ",")
+ origins := strings.Split(strings.TrimSpace(config.AllowedOrigins), ",")
for i, o := range origins {
origins[i] = strings.ToLower(strings.TrimSpace(o))
}
@@ -71,27 +77,27 @@ func New(
http.Redirect(w, req, "doc/stoplight-ui/", http.StatusTemporaryRedirect)
})
- accounts.New(repo, stater, callGasLimit, forkConfig, bft).
+ accounts.New(repo, stater, config.CallGasLimit, forkConfig, bft, config.EnableDeprecated).
Mount(router, "/accounts")
- if !skipLogs {
- events.New(repo, logDB, logsLimit).
+ if !config.SkipLogs {
+ events.New(repo, logDB, config.LogsLimit).
Mount(router, "/logs/event")
- transfers.New(repo, logDB, logsLimit).
+ transfers.New(repo, logDB, config.LogsLimit).
Mount(router, "/logs/transfer")
}
blocks.New(repo, bft).
Mount(router, "/blocks")
transactions.New(repo, txPool).
Mount(router, "/transactions")
- debug.New(repo, stater, forkConfig, callGasLimit, allowCustomTracer, bft, allowedTracers, soloMode).
+ debug.New(repo, stater, forkConfig, config.CallGasLimit, config.AllowCustomTracer, bft, config.AllowedTracers, config.SoloMode).
Mount(router, "/debug")
node.New(nw).
Mount(router, "/node")
- subs := subscriptions.New(repo, origins, backtraceLimit, txPool)
+ subs := subscriptions.New(repo, origins, config.BacktraceLimit, txPool, config.EnableDeprecated)
subs.Mount(router, "/subscriptions")
- if pprofOn {
+ if config.PprofOn {
router.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline)
router.HandleFunc("/debug/pprof/profile", pprof.Profile)
router.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
@@ -99,7 +105,7 @@ func New(
router.PathPrefix("/debug/pprof/").HandlerFunc(pprof.Index)
}
- if enableMetrics {
+ if config.EnableMetrics {
router.Use(metricsMiddleware)
}
@@ -110,9 +116,7 @@ func New(
handlers.ExposedHeaders([]string{"x-genesis-id", "x-thorest-ver"}),
)(handler)
- if enableReqLogger {
- handler = RequestLoggerHandler(handler, logger)
- }
+ handler = RequestLoggerHandler(handler, logger, config.EnableReqLogger)
return handler.ServeHTTP, subs.Close // subscriptions handles hijacked conns, which need to be closed
}
diff --git a/api/blocks/blocks.go b/api/blocks/blocks.go
index ff86e02e6..a8e072a02 100644
--- a/api/blocks/blocks.go
+++ b/api/blocks/blocks.go
@@ -6,8 +6,11 @@
package blocks
import (
+ "encoding/hex"
+ "fmt"
"net/http"
+ "github.com/ethereum/go-ethereum/rlp"
"github.com/gorilla/mux"
"github.com/pkg/errors"
"github.com/vechain/thor/v2/api/utils"
@@ -34,9 +37,17 @@ func (b *Blocks) handleGetBlock(w http.ResponseWriter, req *http.Request) error
if err != nil {
return utils.BadRequest(errors.WithMessage(err, "revision"))
}
- expanded := req.URL.Query().Get("expanded")
- if expanded != "" && expanded != "false" && expanded != "true" {
- return utils.BadRequest(errors.WithMessage(errors.New("should be boolean"), "expanded"))
+ raw, err := utils.StringToBoolean(req.URL.Query().Get("raw"), false)
+ if err != nil {
+ return utils.BadRequest(errors.WithMessage(err, "raw"))
+ }
+ expanded, err := utils.StringToBoolean(req.URL.Query().Get("expanded"), false)
+ if err != nil {
+ return utils.BadRequest(errors.WithMessage(err, "expanded"))
+ }
+
+ if raw && expanded {
+ return utils.BadRequest(errors.WithMessage(errors.New("Raw and Expanded are mutually exclusive"), "raw&expanded"))
}
summary, err := utils.GetSummary(revision, b.repo, b.bft)
@@ -47,6 +58,16 @@ func (b *Blocks) handleGetBlock(w http.ResponseWriter, req *http.Request) error
return err
}
+ if raw {
+ rlpEncoded, err := rlp.EncodeToBytes(summary.Header)
+ if err != nil {
+ return err
+ }
+ return utils.WriteJSON(w, &JSONRawBlockSummary{
+ fmt.Sprintf("0x%s", hex.EncodeToString(rlpEncoded)),
+ })
+ }
+
isTrunk, err := b.isTrunk(summary.Header.ID(), summary.Header.Number())
if err != nil {
return err
@@ -61,7 +82,7 @@ func (b *Blocks) handleGetBlock(w http.ResponseWriter, req *http.Request) error
}
jSummary := buildJSONBlockSummary(summary, isTrunk, isFinalized)
- if expanded == "true" {
+ if expanded {
txs, err := b.repo.GetBlockTransactions(summary.Header.ID())
if err != nil {
return err
diff --git a/api/blocks/blocks_test.go b/api/blocks/blocks_test.go
index dcb6c4e94..8c0439e59 100644
--- a/api/blocks/blocks_test.go
+++ b/api/blocks/blocks_test.go
@@ -6,6 +6,7 @@
package blocks_test
import (
+ "encoding/hex"
"encoding/json"
"math"
"math/big"
@@ -15,6 +16,7 @@ import (
"strings"
"testing"
+ "github.com/ethereum/go-ethereum/rlp"
"github.com/gorilla/mux"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -55,6 +57,8 @@ func TestBlock(t *testing.T) {
"testGetFinalizedBlock": testGetFinalizedBlock,
"testGetJustifiedBlock": testGetJustifiedBlock,
"testGetBlockWithRevisionNumberTooHigh": testGetBlockWithRevisionNumberTooHigh,
+ "testMutuallyExclusiveQueries": testMutuallyExclusiveQueries,
+ "testGetRawBlock": testGetRawBlock,
} {
t.Run(name, tt)
}
@@ -67,6 +71,22 @@ func testBadQueryParams(t *testing.T) {
assert.Equal(t, http.StatusBadRequest, statusCode)
assert.Equal(t, "expanded: should be boolean", strings.TrimSpace(string(res)))
+
+ badQueryParams = "?raw=1"
+ res, statusCode, err = tclient.RawHTTPClient().RawHTTPGet("/blocks/best" + badQueryParams)
+ require.NoError(t, err)
+
+ assert.Equal(t, http.StatusBadRequest, statusCode)
+ assert.Equal(t, "raw: should be boolean", strings.TrimSpace(string(res)))
+}
+
+func testMutuallyExclusiveQueries(t *testing.T) {
+ badQueryParams := "?expanded=true&raw=true"
+ res, statusCode, err := tclient.RawHTTPClient().RawHTTPGet("/blocks/best" + badQueryParams)
+ require.NoError(t, err)
+
+ assert.Equal(t, http.StatusBadRequest, statusCode)
+ assert.Equal(t, "raw&expanded: Raw and Expanded are mutually exclusive", strings.TrimSpace(string(res)))
}
func testGetBestBlock(t *testing.T) {
@@ -80,6 +100,41 @@ func testGetBestBlock(t *testing.T) {
assert.Equal(t, http.StatusOK, statusCode)
}
+func testGetRawBlock(t *testing.T) {
+ res, statusCode, err := tclient.RawHTTPClient().RawHTTPGet("/blocks/best?raw=true")
+ require.NoError(t, err)
+ rawBlock := new(blocks.JSONRawBlockSummary)
+ if err := json.Unmarshal(res, &rawBlock); err != nil {
+ t.Fatal(err)
+ }
+
+ blockBytes, err := hex.DecodeString(rawBlock.Raw[2:])
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ header := block.Header{}
+ err = rlp.DecodeBytes(blockBytes, &header)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ expHeader := blk.Header()
+ assert.Equal(t, expHeader.Number(), header.Number(), "Number should be equal")
+ assert.Equal(t, expHeader.ID(), header.ID(), "Hash should be equal")
+ assert.Equal(t, expHeader.ParentID(), header.ParentID(), "ParentID should be equal")
+ assert.Equal(t, expHeader.Timestamp(), header.Timestamp(), "Timestamp should be equal")
+ assert.Equal(t, expHeader.TotalScore(), header.TotalScore(), "TotalScore should be equal")
+ assert.Equal(t, expHeader.GasLimit(), header.GasLimit(), "GasLimit should be equal")
+ assert.Equal(t, expHeader.GasUsed(), header.GasUsed(), "GasUsed should be equal")
+ assert.Equal(t, expHeader.Beneficiary(), header.Beneficiary(), "Beneficiary should be equal")
+ assert.Equal(t, expHeader.TxsRoot(), header.TxsRoot(), "TxsRoot should be equal")
+ assert.Equal(t, expHeader.StateRoot(), header.StateRoot(), "StateRoot should be equal")
+ assert.Equal(t, expHeader.ReceiptsRoot(), header.ReceiptsRoot(), "ReceiptsRoot should be equal")
+
+ assert.Equal(t, http.StatusOK, statusCode)
+}
+
func testGetBlockByHeight(t *testing.T) {
res, statusCode, err := tclient.RawHTTPClient().RawHTTPGet("/blocks/1")
require.NoError(t, err)
diff --git a/api/blocks/types.go b/api/blocks/types.go
index 38261b2e5..989b63041 100644
--- a/api/blocks/types.go
+++ b/api/blocks/types.go
@@ -33,6 +33,10 @@ type JSONBlockSummary struct {
IsFinalized bool `json:"isFinalized"`
}
+type JSONRawBlockSummary struct {
+ Raw string `json:"raw"`
+}
+
type JSONCollapsedBlock struct {
*JSONBlockSummary
Transactions []thor.Bytes32 `json:"transactions"`
diff --git a/api/debug/debug.go b/api/debug/debug.go
index 5ff54f1dc..667ff2302 100644
--- a/api/debug/debug.go
+++ b/api/debug/debug.go
@@ -75,22 +75,8 @@ func New(
}
}
-func (d *Debug) prepareClauseEnv(ctx context.Context, blockID thor.Bytes32, txIndex uint64, clauseIndex uint32) (*runtime.Runtime, *runtime.TransactionExecutor, thor.Bytes32, error) {
- block, err := d.repo.GetBlock(blockID)
- if err != nil {
- if d.repo.IsNotFound(err) {
- return nil, nil, thor.Bytes32{}, utils.Forbidden(errors.New("block not found"))
- }
- return nil, nil, thor.Bytes32{}, err
- }
- txs := block.Transactions()
- if txIndex >= uint64(len(txs)) {
- return nil, nil, thor.Bytes32{}, utils.Forbidden(errors.New("tx index out of range"))
- }
- txID := txs[txIndex].ID()
- if clauseIndex >= uint32(len(txs[txIndex].Clauses())) {
- return nil, nil, thor.Bytes32{}, utils.Forbidden(errors.New("clause index out of range"))
- }
+// prepareClauseEnv prepares the runtime environment for the specified clause.
+func (d *Debug) prepareClauseEnv(ctx context.Context, block *block.Block, txID thor.Bytes32, clauseIndex uint32) (*runtime.Runtime, *runtime.TransactionExecutor, thor.Bytes32, error) {
rt, err := consensus.New(
d.repo,
d.stater,
@@ -99,17 +85,29 @@ func (d *Debug) prepareClauseEnv(ctx context.Context, blockID thor.Bytes32, txIn
if err != nil {
return nil, nil, thor.Bytes32{}, err
}
- for i, tx := range txs {
- if uint64(i) > txIndex {
- break
+
+ var found bool
+ txs := block.Transactions()
+ for _, tx := range txs {
+ if txID == tx.ID() {
+ found = true
+ if clauseIndex >= uint32(len(tx.Clauses())) {
+ return nil, nil, thor.Bytes32{}, utils.Forbidden(errors.New("clause index out of range"))
+ }
}
+ }
+ if !found {
+ return nil, nil, thor.Bytes32{}, utils.Forbidden(errors.New("transaction not found"))
+ }
+
+ for _, tx := range block.Transactions() {
txExec, err := rt.PrepareTransaction(tx)
if err != nil {
return nil, nil, thor.Bytes32{}, err
}
clauseCounter := uint32(0)
for txExec.HasNextClause() {
- if txIndex == uint64(i) && clauseIndex == clauseCounter {
+ if tx.ID() == txID && clauseIndex == clauseCounter {
return rt, txExec, txID, nil
}
exec, _ := txExec.PrepareNext()
@@ -127,18 +125,27 @@ func (d *Debug) prepareClauseEnv(ctx context.Context, blockID thor.Bytes32, txIn
default:
}
}
+
+ // no env created, that means tx was reverted at an early clause
return nil, nil, thor.Bytes32{}, utils.Forbidden(errors.New("early reverted"))
}
// trace an existed clause
-func (d *Debug) traceClause(ctx context.Context, tracer tracers.Tracer, blockID thor.Bytes32, txIndex uint64, clauseIndex uint32) (interface{}, error) {
- rt, txExec, txID, err := d.prepareClauseEnv(ctx, blockID, txIndex, clauseIndex)
+func (d *Debug) traceClause(ctx context.Context, tracer tracers.Tracer, block *block.Block, txID thor.Bytes32, clauseIndex uint32) (interface{}, error) {
+ rt, txExec, txID, err := d.prepareClauseEnv(ctx, block, txID, clauseIndex)
if err != nil {
return nil, err
}
+ var txIndex uint64 = math.MaxUint64
+ for i, tx := range block.Transactions() {
+ if tx.ID() == txID {
+ txIndex = uint64(i)
+ break
+ }
+ }
tracer.SetContext(&tracers.Context{
- BlockID: blockID,
+ BlockID: block.Header().ID(),
BlockTime: rt.Context().Time,
TxID: txID,
TxIndex: txIndex,
@@ -178,11 +185,11 @@ func (d *Debug) handleTraceClause(w http.ResponseWriter, req *http.Request) erro
return utils.Forbidden(err)
}
- blockID, txIndex, clauseIndex, err := d.parseTarget(opt.Target)
+ block, txID, clauseIndex, err := d.parseTarget(opt.Target)
if err != nil {
return err
}
- res, err := d.traceClause(req.Context(), tracer, blockID, txIndex, clauseIndex)
+ res, err := d.traceClause(req.Context(), tracer, block, txID, clauseIndex)
if err != nil {
return err
}
@@ -291,8 +298,8 @@ func (d *Debug) traceCall(ctx context.Context, tracer tracers.Tracer, header *bl
return tracer.GetResult()
}
-func (d *Debug) debugStorage(ctx context.Context, contractAddress thor.Address, blockID thor.Bytes32, txIndex uint64, clauseIndex uint32, keyStart []byte, maxResult int) (*StorageRangeResult, error) {
- rt, _, _, err := d.prepareClauseEnv(ctx, blockID, txIndex, clauseIndex)
+func (d *Debug) debugStorage(ctx context.Context, contractAddress thor.Address, block *block.Block, txID thor.Bytes32, clauseIndex uint32, keyStart []byte, maxResult int) (*StorageRangeResult, error) {
+ rt, _, _, err := d.prepareClauseEnv(ctx, block, txID, clauseIndex)
if err != nil {
return nil, err
}
@@ -357,41 +364,72 @@ func (d *Debug) handleDebugStorage(w http.ResponseWriter, req *http.Request) err
return utils.WriteJSON(w, res)
}
-func (d *Debug) parseTarget(target string) (blockID thor.Bytes32, txIndex uint64, clauseIndex uint32, err error) {
+func (d *Debug) parseTarget(target string) (block *block.Block, txID thor.Bytes32, clauseIndex uint32, err error) {
+ // target can be `${blockID}/${txID|txIndex}/${clauseIndex}` or `${txID}/${clauseIndex}`
parts := strings.Split(target, "/")
- if len(parts) != 3 {
- return thor.Bytes32{}, 0, 0, utils.BadRequest(errors.New("target:" + target + " unsupported"))
+ if len(parts) != 3 && len(parts) != 2 {
+ return nil, thor.Bytes32{}, 0, utils.BadRequest(errors.New("target:" + target + " unsupported"))
}
- blockID, err = thor.ParseBytes32(parts[0])
- if err != nil {
- return thor.Bytes32{}, 0, 0, utils.BadRequest(errors.WithMessage(err, "target[0]"))
- }
- if len(parts[1]) == 64 || len(parts[1]) == 66 {
- txID, err := thor.ParseBytes32(parts[1])
+
+ if len(parts) == 2 {
+ txID, err = thor.ParseBytes32(parts[0])
if err != nil {
- return thor.Bytes32{}, 0, 0, utils.BadRequest(errors.WithMessage(err, "target[1]"))
            return nil, thor.Bytes32{}, 0, utils.BadRequest(errors.WithMessage(err, "target[0]"))
}
-
- txMeta, err := d.repo.NewChain(blockID).GetTransactionMeta(txID)
+ bestChain := d.repo.NewBestChain()
+ txMeta, err := bestChain.GetTransactionMeta(txID)
if err != nil {
if d.repo.IsNotFound(err) {
- return thor.Bytes32{}, 0, 0, utils.Forbidden(errors.New("transaction not found"))
+ return nil, thor.Bytes32{}, 0, utils.Forbidden(errors.New("transaction not found"))
}
- return thor.Bytes32{}, 0, 0, err
+ return nil, thor.Bytes32{}, 0, err
+ }
+ block, err = bestChain.GetBlock(txMeta.BlockNum)
+ if err != nil {
+ return nil, thor.Bytes32{}, 0, err
}
- txIndex = txMeta.Index
} else {
- i, err := strconv.ParseUint(parts[1], 0, 0)
+ blockID, err := thor.ParseBytes32(parts[0])
+ if err != nil {
+ return nil, thor.Bytes32{}, 0, utils.BadRequest(errors.WithMessage(err, "target[0]"))
+ }
+ block, err = d.repo.GetBlock(blockID)
if err != nil {
- return thor.Bytes32{}, 0, 0, utils.BadRequest(errors.WithMessage(err, "target[1]"))
+ return nil, thor.Bytes32{}, 0, err
+ }
+ if len(parts[1]) == 64 || len(parts[1]) == 66 {
+ txID, err = thor.ParseBytes32(parts[1])
+ if err != nil {
+ return nil, thor.Bytes32{}, 0, utils.BadRequest(errors.WithMessage(err, "target[1]"))
+ }
+
+ var found bool
+ for _, tx := range block.Transactions() {
+ if tx.ID() == txID {
+ found = true
+ break
+ }
+ }
+ if !found {
+ return nil, thor.Bytes32{}, 0, utils.Forbidden(errors.New("transaction not found"))
+ }
+ } else {
+ i, err := strconv.ParseUint(parts[1], 0, 0)
+ if err != nil {
+ return nil, thor.Bytes32{}, 0, utils.BadRequest(errors.WithMessage(err, "target[1]"))
+ }
+ if i >= uint64(len(block.Transactions())) {
+ return nil, thor.Bytes32{}, 0, utils.Forbidden(errors.New("tx index out of range"))
+ }
+ txID = block.Transactions()[i].ID()
}
- txIndex = i
}
- i, err := strconv.ParseUint(parts[2], 0, 0)
+
+ i, err := strconv.ParseUint(parts[len(parts)-1], 0, 0)
if err != nil {
- return thor.Bytes32{}, 0, 0, utils.BadRequest(errors.WithMessage(err, "target[2]"))
+ return nil, thor.Bytes32{}, 0, utils.BadRequest(errors.WithMessage(err, fmt.Sprintf("target[%d]", len(parts)-1)))
} else if i > math.MaxUint32 {
- return thor.Bytes32{}, 0, 0, utils.BadRequest(errors.New("invalid target[2]"))
+ return nil, thor.Bytes32{}, 0, utils.BadRequest(fmt.Errorf("invalid target[%d]", len(parts)-1))
}
clauseIndex = uint32(i)
return
diff --git a/api/debug/debug_test.go b/api/debug/debug_test.go
index 0b0b2d3b3..478fe4a42 100644
--- a/api/debug/debug_test.go
+++ b/api/debug/debug_test.go
@@ -6,7 +6,6 @@
package debug
import (
- "context"
"encoding/json"
"fmt"
"math/big"
@@ -63,6 +62,7 @@ func TestDebug(t *testing.T) {
"testTraceClauseWithClauseIndexOutOfBound": testTraceClauseWithClauseIndexOutOfBound,
"testTraceClauseWithCustomTracer": testTraceClauseWithCustomTracer,
"testTraceClause": testTraceClause,
+ "testTraceClauseWithoutBlockID": testTraceClauseWithoutBlockID,
} {
t.Run(name, tt)
}
@@ -175,9 +175,11 @@ func testTraceClauseWithBadBlockID(t *testing.T) {
}
func testTraceClauseWithNonExistingBlockID(t *testing.T) {
- _, _, _, err := debug.prepareClauseEnv(context.Background(), datagen.RandomHash(), 1, 1)
-
- assert.Error(t, err)
+ traceClauseOption := &TraceClauseOption{
+ Name: "structLogger",
+ Target: fmt.Sprintf("%s/x/x", datagen.RandomHash()),
+ }
+ httpPostAndCheckResponseStatus(t, "/debug/tracers", traceClauseOption, 500)
}
func testTraceClauseWithBadTxID(t *testing.T) {
@@ -264,6 +266,26 @@ func testTraceClause(t *testing.T) {
assert.Equal(t, expectedExecutionResult, parsedExecutionRes)
}
+func testTraceClauseWithoutBlockID(t *testing.T) {
+ traceClauseOption := &TraceClauseOption{
+ Name: "structLogger",
+ Target: fmt.Sprintf("%s/1", transaction.ID()),
+ }
+ expectedExecutionResult := &logger.ExecutionResult{
+ Gas: 0,
+ Failed: false,
+ ReturnValue: "",
+ StructLogs: make([]logger.StructLogRes, 0),
+ }
+ res := httpPostAndCheckResponseStatus(t, "/debug/tracers", traceClauseOption, 200)
+
+ var parsedExecutionRes *logger.ExecutionResult
+ if err := json.Unmarshal([]byte(res), &parsedExecutionRes); err != nil {
+ t.Fatal(err)
+ }
+ assert.Equal(t, expectedExecutionResult, parsedExecutionRes)
+}
+
func testTraceClauseWithTxIndexOutOfBound(t *testing.T) {
traceClauseOption := &TraceClauseOption{
Name: "structLogger",
diff --git a/api/doc/thor.yaml b/api/doc/thor.yaml
index 2a0d30b9e..99416a039 100644
--- a/api/doc/thor.yaml
+++ b/api/doc/thor.yaml
@@ -12,7 +12,7 @@ info:
license:
name: LGPL 3.0
url: https://www.gnu.org/licenses/lgpl-3.0.en.html
- version: 2.1.4
+ version: 2.2.0
servers:
- url: /
description: Current Node
@@ -292,6 +292,7 @@ paths:
parameters:
- $ref: '#/components/parameters/RevisionInPath'
- $ref: '#/components/parameters/ExpandedInQuery'
+ - $ref: '#/components/parameters/RawBlockInQuery'
tags:
- Blocks
summary: Retrieve a block
@@ -2139,11 +2140,12 @@ components:
The unified path of the target to be traced. Currently, only the clause is supported.
Format:
- `blockID/(txIndex|txId)/clauseIndex`
+ `blockID/(txIndex|txId)/clauseIndex` or `txID/clauseIndex`
+
example: '0x010709463c1f0c9aa66a31182fb36d1977d99bfb6526bae0564a0eac4006c31a/0/0'
nullable: false
- pattern: '^0x[0-9a-fA-F]{64}\/(0x[0-9a-fA-F]{64}|\d+)\/[0-9]+$'
+ pattern: '^0x[0-9a-fA-F]{64}(\/(0x[0-9a-fA-F]{64}|\d+))?\/[0-9]+$'
example:
target: '0x010709463c1f0c9aa66a31182fb36d1977d99bfb6526bae0564a0eac4006c31a/0/0'
@@ -2209,9 +2211,9 @@ components:
The unified path of the transaction clause.
Format:
- `blockID/(txIndex|txId)/clauseIndex`
+ `blockID/(txIndex|txId)/clauseIndex` or `txID/clauseIndex`
nullable: false
- pattern: '^0x[0-9a-fA-F]{64}\/(0x[0-9a-fA-F]{64}|\d+)\/[0-9]+$'
+ pattern: '^0x[0-9a-fA-F]{64}(\/(0x[0-9a-fA-F]{64}|\d+))?\/[0-9]+$'
StorageRange:
type: object
@@ -2389,6 +2391,18 @@ components:
type: boolean
example: false
+ RawBlockInQuery:
+ name: raw
+ in: query
+ required: false
+ description: |
+ Whether the block should be returned in RLP encoding or not.
+ - `true` returns `block` as an RLP encoded object
+ - `false` returns `block` as a structured JSON object
+ schema:
+ type: boolean
+ example: false
+
PendingInQuery:
name: pending
in: query
diff --git a/api/events/events_test.go b/api/events/events_test.go
index 1054266fe..ffc247843 100644
--- a/api/events/events_test.go
+++ b/api/events/events_test.go
@@ -152,7 +152,7 @@ func TestOption(t *testing.T) {
}
func TestZeroFrom(t *testing.T) {
- thorChain := initEventServer(t, 5)
+ thorChain := initEventServer(t, 100)
defer ts.Close()
insertBlocks(t, thorChain, 5)
diff --git a/api/events/types.go b/api/events/types.go
index bfb032095..78d0cf712 100644
--- a/api/events/types.go
+++ b/api/events/types.go
@@ -179,7 +179,7 @@ func ConvertRange(chain *chain.Chain, r *Range) (*logdb.Range, error) {
}, nil
}
- // Units are block numbers - numbers will have a max ceiling at logdb.MaxBlockNumbe
+ // Units are block numbers - numbers will have a max ceiling at logdb.MaxBlockNumber
if r.From != nil && *r.From > logdb.MaxBlockNumber {
return &emptyRange, nil
}
diff --git a/api/events/types_test.go b/api/events/types_test.go
index 7b911b453..6d16ca4d8 100644
--- a/api/events/types_test.go
+++ b/api/events/types_test.go
@@ -6,7 +6,6 @@
package events
import (
- "math"
"testing"
"github.com/ethereum/go-ethereum/common/hexutil"
@@ -49,26 +48,13 @@ func testConvertRangeWithBlockRangeType(t *testing.T, chain *testchain.Chain) {
assert.NoError(t, err)
assert.Equal(t, uint32(*rng.From), convertedRng.From)
assert.Equal(t, uint32(*rng.To), convertedRng.To)
-
- // ensure wild block numbers have a max ceiling of chain.head
- rng = newRange(BlockRangeType, 100, 2200)
-
- convertedRng, err = ConvertRange(chain.Repo().NewBestChain(), rng)
- require.NoError(t, err)
-
- bestBlock, err := chain.BestBlock()
- require.NoError(t, err)
-
- assert.NoError(t, err)
- assert.Equal(t, bestBlock.Header().Number(), convertedRng.From)
- assert.Equal(t, bestBlock.Header().Number(), convertedRng.To)
}
func testConvertRangeWithTimeRangeTypeLessThenGenesis(t *testing.T, chain *testchain.Chain) {
rng := newRange(TimeRangeType, 100, 2200)
expectedEmptyRange := &logdb.Range{
- From: math.MaxUint32,
- To: math.MaxUint32,
+ From: logdb.MaxBlockNumber,
+ To: logdb.MaxBlockNumber,
}
convRng, err := ConvertRange(chain.Repo().NewBestChain(), rng)
@@ -97,8 +83,8 @@ func testConvertRangeWithFromGreaterThanGenesis(t *testing.T, chain *testchain.C
rng := newRange(TimeRangeType, genesis.Timestamp()+1_000, genesis.Timestamp()+10_000)
expectedEmptyRange := &logdb.Range{
- From: math.MaxUint32,
- To: math.MaxUint32,
+ From: logdb.MaxBlockNumber,
+ To: logdb.MaxBlockNumber,
}
convRng, err := ConvertRange(chain.Repo().NewBestChain(), rng)
diff --git a/api/metrics_test.go b/api/metrics_test.go
index b7b1b7e0d..c76533162 100644
--- a/api/metrics_test.go
+++ b/api/metrics_test.go
@@ -48,7 +48,7 @@ func TestMetricsMiddleware(t *testing.T) {
assert.NotNil(t, err)
router := mux.NewRouter()
- acc := accounts.New(thorChain.Repo(), thorChain.Stater(), math.MaxUint64, thor.NoFork, thorChain.Engine())
+ acc := accounts.New(thorChain.Repo(), thorChain.Stater(), math.MaxUint64, thor.NoFork, thorChain.Engine(), true)
acc.Mount(router, "/accounts")
router.PathPrefix("/metrics").Handler(metrics.HTTPHandler())
router.Use(metricsMiddleware)
@@ -103,7 +103,7 @@ func TestWebsocketMetrics(t *testing.T) {
require.NoError(t, err)
router := mux.NewRouter()
- sub := subscriptions.New(thorChain.Repo(), []string{"*"}, 10, txpool.New(thorChain.Repo(), thorChain.Stater(), txpool.Options{}))
+ sub := subscriptions.New(thorChain.Repo(), []string{"*"}, 10, txpool.New(thorChain.Repo(), thorChain.Stater(), txpool.Options{}), true)
sub.Mount(router, "/subscriptions")
router.PathPrefix("/metrics").Handler(metrics.HTTPHandler())
router.Use(metricsMiddleware)
diff --git a/api/node/node_test.go b/api/node/node_test.go
index 3dd2e96ee..873ad29ad 100644
--- a/api/node/node_test.go
+++ b/api/node/node_test.go
@@ -40,7 +40,8 @@ func initCommServer(t *testing.T) {
Limit: 10000,
LimitPerAccount: 16,
MaxLifetime: 10 * time.Minute,
- }))
+ }),
+ )
router := mux.NewRouter()
node.New(communicator).Mount(router, "/node")
diff --git a/api/request_logger.go b/api/request_logger.go
index 3d48a2d36..451059814 100644
--- a/api/request_logger.go
+++ b/api/request_logger.go
@@ -9,14 +9,19 @@ import (
"bytes"
"io"
"net/http"
+ "sync/atomic"
"time"
"github.com/vechain/thor/v2/log"
)
// RequestLoggerHandler returns a http handler to ensure requests are syphoned into the writer
-func RequestLoggerHandler(handler http.Handler, logger log.Logger) http.Handler {
+func RequestLoggerHandler(handler http.Handler, logger log.Logger, enabled *atomic.Bool) http.Handler {
fn := func(w http.ResponseWriter, r *http.Request) {
+ if !enabled.Load() {
+ handler.ServeHTTP(w, r)
+ return
+ }
// Read and log the body (note: this can only be done once)
// Ensure you don't disrupt the request body for handlers that need to read it
var bodyBytes []byte
diff --git a/api/request_logger_test.go b/api/request_logger_test.go
index 6b8ddcd91..3368e6fc8 100644
--- a/api/request_logger_test.go
+++ b/api/request_logger_test.go
@@ -10,6 +10,7 @@ import (
"net/http"
"net/http/httptest"
"strings"
+ "sync/atomic"
"testing"
"github.com/stretchr/testify/assert"
@@ -59,6 +60,8 @@ func (m *mockLogger) GetLoggedData() []interface{} {
func TestRequestLoggerHandler(t *testing.T) {
mockLog := &mockLogger{}
+ enabled := atomic.Bool{}
+ enabled.Store(true)
// Define a test handler to wrap
testHandler := http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
@@ -67,7 +70,7 @@ func TestRequestLoggerHandler(t *testing.T) {
})
// Create the RequestLoggerHandler
- loggerHandler := RequestLoggerHandler(testHandler, mockLog)
+ loggerHandler := RequestLoggerHandler(testHandler, mockLog, &enabled)
// Create a test HTTP request
reqBody := "test body"
diff --git a/api/subscriptions/subscriptions.go b/api/subscriptions/subscriptions.go
index 7582da5bb..715a71308 100644
--- a/api/subscriptions/subscriptions.go
+++ b/api/subscriptions/subscriptions.go
@@ -25,14 +25,15 @@ import (
const txQueueSize = 20
type Subscriptions struct {
- backtraceLimit uint32
- repo *chain.Repository
- upgrader *websocket.Upgrader
- pendingTx *pendingTx
- done chan struct{}
- wg sync.WaitGroup
- beat2Cache *messageCache[Beat2Message]
- beatCache *messageCache[BeatMessage]
+ backtraceLimit uint32
+ enabledDeprecated bool
+ repo *chain.Repository
+ upgrader *websocket.Upgrader
+ pendingTx *pendingTx
+ done chan struct{}
+ wg sync.WaitGroup
+ beat2Cache *messageCache[Beat2Message]
+ beatCache *messageCache[BeatMessage]
}
type msgReader interface {
@@ -50,10 +51,11 @@ const (
pingPeriod = (pongWait * 7) / 10
)
-func New(repo *chain.Repository, allowedOrigins []string, backtraceLimit uint32, txpool *txpool.TxPool) *Subscriptions {
+func New(repo *chain.Repository, allowedOrigins []string, backtraceLimit uint32, txpool *txpool.TxPool, enabledDeprecated bool) *Subscriptions {
sub := &Subscriptions{
- backtraceLimit: backtraceLimit,
- repo: repo,
+ backtraceLimit: backtraceLimit,
+ repo: repo,
+ enabledDeprecated: enabledDeprecated,
upgrader: &websocket.Upgrader{
EnableCompression: true,
CheckOrigin: func(r *http.Request) bool {
@@ -195,6 +197,9 @@ func (s *Subscriptions) handleSubject(w http.ResponseWriter, req *http.Request)
return err
}
case "beat":
+ if !s.enabledDeprecated {
+ return utils.HTTPError(nil, http.StatusGone)
+ }
if reader, err = s.handleBeatReader(w, req); err != nil {
return err
}
diff --git a/api/subscriptions/subscriptions_test.go b/api/subscriptions/subscriptions_test.go
index 0c0bffe3a..8cfb55f7f 100644
--- a/api/subscriptions/subscriptions_test.go
+++ b/api/subscriptions/subscriptions_test.go
@@ -36,7 +36,7 @@ var ts *httptest.Server
var blocks []*block.Block
func TestSubscriptions(t *testing.T) {
- initSubscriptionsServer(t)
+ initSubscriptionsServer(t, true)
defer ts.Close()
for name, tt := range map[string]func(*testing.T){
@@ -51,6 +51,17 @@ func TestSubscriptions(t *testing.T) {
}
}
+func TestDeprecatedSubscriptions(t *testing.T) {
+ initSubscriptionsServer(t, false)
+ defer ts.Close()
+
+ u := url.URL{Scheme: "ws", Host: strings.TrimPrefix(ts.URL, "http://"), Path: "/subscriptions/beat"}
+
+ _, resp, err := websocket.DefaultDialer.Dial(u.String(), nil)
+ assert.Error(t, err)
+ assert.Equal(t, http.StatusGone, resp.StatusCode)
+}
+
func testHandleSubjectWithBlock(t *testing.T) {
genesisBlock := blocks[0]
queryArg := fmt.Sprintf("pos=%s", genesisBlock.Header().ID().String())
@@ -216,7 +227,7 @@ func TestParseAddress(t *testing.T) {
assert.Equal(t, expectedAddr, *result)
}
-func initSubscriptionsServer(t *testing.T) {
+func initSubscriptionsServer(t *testing.T, enabledDeprecated bool) {
thorChain, err := testchain.NewIntegrationTestChain()
require.NoError(t, err)
@@ -263,7 +274,7 @@ func initSubscriptionsServer(t *testing.T) {
require.NoError(t, err)
router := mux.NewRouter()
- New(thorChain.Repo(), []string{}, 5, txPool).
+ New(thorChain.Repo(), []string{}, 5, txPool, enabledDeprecated).
Mount(router, "/subscriptions")
ts = httptest.NewServer(router)
}
@@ -319,7 +330,7 @@ func TestSubscriptionsBacktrace(t *testing.T) {
require.NoError(t, err)
router := mux.NewRouter()
- New(thorChain.Repo(), []string{}, 5, txPool).Mount(router, "/subscriptions")
+ New(thorChain.Repo(), []string{}, 5, txPool, true).Mount(router, "/subscriptions")
ts = httptest.NewServer(router)
defer ts.Close()
diff --git a/api/utils/http.go b/api/utils/http.go
index 652c3e408..2235797de 100644
--- a/api/utils/http.go
+++ b/api/utils/http.go
@@ -9,6 +9,8 @@ import (
"encoding/json"
"io"
"net/http"
+
+ "github.com/pkg/errors"
)
type httpError struct {
@@ -36,6 +38,17 @@ func BadRequest(cause error) error {
}
}
+func StringToBoolean(boolStr string, defaultVal bool) (bool, error) {
+ if boolStr == "" {
+ return defaultVal, nil
+ } else if boolStr == "false" {
+ return false, nil
+ } else if boolStr == "true" {
+ return true, nil
+ }
+ return false, errors.New("should be boolean")
+}
+
// Forbidden convenience method to create http forbidden error.
func Forbidden(cause error) error {
return &httpError{
diff --git a/chain/repository.go b/chain/repository.go
index 5b69d88e2..df622bd30 100644
--- a/chain/repository.go
+++ b/chain/repository.go
@@ -352,7 +352,6 @@ func (r *Repository) GetBlock(id thor.Bytes32) (*block.Block, error) {
return block.Compose(summary.Header, txs), nil
}
-
func (r *Repository) getReceipt(key []byte) (*tx.Receipt, error) {
result := "hit"
receipt, err := r.caches.receipts.GetOrLoad(string(key), func() (interface{}, error) {
diff --git a/cmd/thor/flags.go b/cmd/thor/flags.go
index 4b18f22ad..2ce97516e 100644
--- a/cmd/thor/flags.go
+++ b/cmd/thor/flags.go
@@ -69,6 +69,10 @@ var (
Value: 1000,
Usage: "limit the number of logs returned by /logs API",
}
+ apiEnableDeprecatedFlag = cli.BoolFlag{
+ Name: "api-enable-deprecated",
+		Usage: "enable deprecated API endpoints (POST /accounts/{address}, POST /accounts, WS /subscriptions/beat)",
+ }
enableAPILogsFlag = cli.BoolFlag{
Name: "enable-api-logs",
Usage: "enables API requests logging",
diff --git a/cmd/thor/main.go b/cmd/thor/main.go
index ce8d1f965..bb5036472 100644
--- a/cmd/thor/main.go
+++ b/cmd/thor/main.go
@@ -11,7 +11,7 @@ import (
"io"
"os"
"path/filepath"
- "strings"
+ "sync/atomic"
"time"
"github.com/ethereum/go-ethereum/accounts/keystore"
@@ -80,6 +80,7 @@ func main() {
apiCallGasLimitFlag,
apiBacktraceLimitFlag,
apiAllowCustomTracerFlag,
+ apiEnableDeprecatedFlag,
enableAPILogsFlag,
apiLogsLimitFlag,
verbosityFlag,
@@ -115,6 +116,7 @@ func main() {
apiCallGasLimitFlag,
apiBacktraceLimitFlag,
apiAllowCustomTracerFlag,
+ apiEnableDeprecatedFlag,
enableAPILogsFlag,
apiLogsLimitFlag,
onDemandFlag,
@@ -179,16 +181,6 @@ func defaultAction(ctx *cli.Context) error {
defer func() { log.Info("stopping metrics server..."); closeFunc() }()
}
- adminURL := ""
- if ctx.Bool(enableAdminFlag.Name) {
- url, closeFunc, err := api.StartAdminServer(ctx.String(adminAddrFlag.Name), logLevel)
- if err != nil {
- return fmt.Errorf("unable to start admin server - %w", err)
- }
- adminURL = url
- defer func() { log.Info("stopping admin server..."); closeFunc() }()
- }
-
gene, forkConfig, err := selectGenesis(ctx)
if err != nil {
return err
@@ -242,6 +234,24 @@ func defaultAction(ctx *cli.Context) error {
return err
}
+ adminURL := ""
+ logAPIRequests := &atomic.Bool{}
+ logAPIRequests.Store(ctx.Bool(enableAPILogsFlag.Name))
+ if ctx.Bool(enableAdminFlag.Name) {
+ url, closeFunc, err := api.StartAdminServer(
+ ctx.String(adminAddrFlag.Name),
+ logLevel,
+ repo,
+ p2pCommunicator.Communicator(),
+ logAPIRequests,
+ )
+ if err != nil {
+ return fmt.Errorf("unable to start admin server - %w", err)
+ }
+ adminURL = url
+ defer func() { log.Info("stopping admin server..."); closeFunc() }()
+ }
+
bftEngine, err := bft.NewEngine(repo, mainDB, forkConfig, master.Address())
if err != nil {
return errors.Wrap(err, "init bft engine")
@@ -255,17 +265,7 @@ func defaultAction(ctx *cli.Context) error {
bftEngine,
p2pCommunicator.Communicator(),
forkConfig,
- ctx.String(apiCorsFlag.Name),
- uint32(ctx.Uint64(apiBacktraceLimitFlag.Name)),
- ctx.Uint64(apiCallGasLimitFlag.Name),
- ctx.Bool(pprofFlag.Name),
- skipLogs,
- ctx.Bool(apiAllowCustomTracerFlag.Name),
- ctx.Bool(enableAPILogsFlag.Name),
- ctx.Bool(enableMetricsFlag.Name),
- ctx.Uint64(apiLogsLimitFlag.Name),
- parseTracerList(strings.TrimSpace(ctx.String(allowedTracersFlag.Name))),
- false,
+ makeAPIConfig(ctx, logAPIRequests, false),
)
defer func() { log.Info("closing API..."); apiCloser() }()
@@ -298,7 +298,8 @@ func defaultAction(ctx *cli.Context) error {
p2pCommunicator.Communicator(),
ctx.Uint64(targetGasLimitFlag.Name),
skipLogs,
- forkConfig).Run(exitSignal)
+ forkConfig,
+ ).Run(exitSignal)
}
func soloAction(ctx *cli.Context) error {
@@ -312,6 +313,12 @@ func soloAction(ctx *cli.Context) error {
logLevel := initLogger(lvl, ctx.Bool(jsonLogsFlag.Name))
+ onDemandBlockProduction := ctx.Bool(onDemandFlag.Name)
+ blockProductionInterval := ctx.Uint64(blockInterval.Name)
+ if blockProductionInterval == 0 {
+ return errors.New("block-interval cannot be zero")
+ }
+
// enable metrics as soon as possible
metricsURL := ""
if ctx.Bool(enableMetricsFlag.Name) {
@@ -324,16 +331,6 @@ func soloAction(ctx *cli.Context) error {
defer func() { log.Info("stopping metrics server..."); closeFunc() }()
}
- adminURL := ""
- if ctx.Bool(enableAdminFlag.Name) {
- url, closeFunc, err := api.StartAdminServer(ctx.String(adminAddrFlag.Name), logLevel)
- if err != nil {
- return fmt.Errorf("unable to start admin server - %w", err)
- }
- adminURL = url
- defer func() { log.Info("stopping admin server..."); closeFunc() }()
- }
-
var (
gene *genesis.Genesis
forkConfig thor.ForkConfig
@@ -378,6 +375,24 @@ func soloAction(ctx *cli.Context) error {
return err
}
+ adminURL := ""
+ logAPIRequests := &atomic.Bool{}
+ logAPIRequests.Store(ctx.Bool(enableAPILogsFlag.Name))
+ if ctx.Bool(enableAdminFlag.Name) {
+ url, closeFunc, err := api.StartAdminServer(
+ ctx.String(adminAddrFlag.Name),
+ logLevel,
+ repo,
+ nil,
+ logAPIRequests,
+ )
+ if err != nil {
+ return fmt.Errorf("unable to start admin server - %w", err)
+ }
+ adminURL = url
+ defer func() { log.Info("stopping admin server..."); closeFunc() }()
+ }
+
printStartupMessage1(gene, repo, nil, instanceDir, forkConfig)
skipLogs := ctx.Bool(skipLogsFlag.Name)
@@ -401,6 +416,7 @@ func soloAction(ctx *cli.Context) error {
defer func() { log.Info("closing tx pool..."); txPool.Close() }()
bftEngine := solo.NewBFTEngine(repo)
+
apiHandler, apiCloser := api.New(
repo,
state.NewStater(mainDB),
@@ -409,17 +425,7 @@ func soloAction(ctx *cli.Context) error {
bftEngine,
&solo.Communicator{},
forkConfig,
- ctx.String(apiCorsFlag.Name),
- uint32(ctx.Uint64(apiBacktraceLimitFlag.Name)),
- ctx.Uint64(apiCallGasLimitFlag.Name),
- ctx.Bool(pprofFlag.Name),
- skipLogs,
- ctx.Bool(apiAllowCustomTracerFlag.Name),
- ctx.Bool(enableAPILogsFlag.Name),
- ctx.Bool(enableMetricsFlag.Name),
- ctx.Uint64(apiLogsLimitFlag.Name),
- parseTracerList(strings.TrimSpace(ctx.String(allowedTracersFlag.Name))),
- true,
+ makeAPIConfig(ctx, logAPIRequests, true),
)
defer func() { log.Info("closing API..."); apiCloser() }()
@@ -449,9 +455,9 @@ func soloAction(ctx *cli.Context) error {
logDB,
txPool,
ctx.Uint64(gasLimitFlag.Name),
- ctx.Bool(onDemandFlag.Name),
+ onDemandBlockProduction,
skipLogs,
- blockInterval,
+ blockProductionInterval,
forkConfig).Run(exitSignal)
}
diff --git a/cmd/thor/utils.go b/cmd/thor/utils.go
index 396b153ae..3be6d4186 100644
--- a/cmd/thor/utils.go
+++ b/cmd/thor/utils.go
@@ -23,6 +23,7 @@ import (
"runtime"
"runtime/debug"
"strings"
+ "sync/atomic"
"syscall"
"time"
@@ -37,6 +38,7 @@ import (
"github.com/mattn/go-isatty"
"github.com/mattn/go-tty"
"github.com/pkg/errors"
+ "github.com/vechain/thor/v2/api"
"github.com/vechain/thor/v2/api/doc"
"github.com/vechain/thor/v2/chain"
"github.com/vechain/thor/v2/cmd/thor/node"
@@ -274,6 +276,23 @@ func parseGenesisFile(filePath string) (*genesis.Genesis, thor.ForkConfig, error
return customGen, forkConfig, nil
}
+func makeAPIConfig(ctx *cli.Context, logAPIRequests *atomic.Bool, soloMode bool) api.Config {
+ return api.Config{
+ AllowedOrigins: ctx.String(apiCorsFlag.Name),
+ BacktraceLimit: uint32(ctx.Uint64(apiBacktraceLimitFlag.Name)),
+ CallGasLimit: ctx.Uint64(apiCallGasLimitFlag.Name),
+ PprofOn: ctx.Bool(pprofFlag.Name),
+ SkipLogs: ctx.Bool(skipLogsFlag.Name),
+ AllowCustomTracer: ctx.Bool(apiAllowCustomTracerFlag.Name),
+ EnableReqLogger: logAPIRequests,
+ EnableMetrics: ctx.Bool(enableMetricsFlag.Name),
+ LogsLimit: ctx.Uint64(apiLogsLimitFlag.Name),
+ AllowedTracers: parseTracerList(strings.TrimSpace(ctx.String(allowedTracersFlag.Name))),
+ EnableDeprecated: ctx.Bool(apiEnableDeprecatedFlag.Name),
+ SoloMode: soloMode,
+ }
+}
+
func makeConfigDir(ctx *cli.Context) (string, error) {
dir := ctx.String(configDirFlag.Name)
if dir == "" {
diff --git a/comm/communicator.go b/comm/communicator.go
index 9d0a5a530..48419779a 100644
--- a/comm/communicator.go
+++ b/comm/communicator.go
@@ -72,7 +72,7 @@ func (c *Communicator) Sync(ctx context.Context, handler HandleBlockStream) {
delay := initSyncInterval
syncCount := 0
- shouldSynced := func() bool {
+ isSynced := func() bool {
bestBlockTime := c.repo.BestBlockSummary().Header.Timestamp()
now := uint64(time.Now().Unix())
if bestBlockTime+thor.BlockInterval >= now {
@@ -115,9 +115,10 @@ func (c *Communicator) Sync(ctx context.Context, handler HandleBlockStream) {
}
syncCount++
- if shouldSynced() {
+ if isSynced() {
delay = syncInterval
c.onceSynced.Do(func() {
+ // once off - after a bootstrap the syncedCh trigger the peers.syncTxs
close(c.syncedCh)
})
}
diff --git a/metrics/noop.go b/metrics/noop.go
index b804486b6..a9e24ab2c 100644
--- a/metrics/noop.go
+++ b/metrics/noop.go
@@ -7,7 +7,6 @@ package metrics
import (
"net/http"
- "time"
)
// noopMetrics implements a no operations metrics service
@@ -51,5 +50,3 @@ func (n noopMeters) Set(int64) {}
func (n noopMeters) Observe(int64) {}
func (n *noopMetrics) ObserveWithLabels(int64, map[string]string) {}
-
-func (n *noopMetrics) collectDiskIO(time.Duration) {}
diff --git a/metrics/prometheus.go b/metrics/prometheus.go
index c1a21a345..15447f6dc 100644
--- a/metrics/prometheus.go
+++ b/metrics/prometheus.go
@@ -10,6 +10,7 @@ import (
"fmt"
"net/http"
"os"
+ "runtime"
"strconv"
"strings"
"sync"
@@ -131,30 +132,6 @@ func (o *prometheusMetrics) GetOrCreateGaugeVecMeter(name string, labels []strin
return meter
}
-func (o *prometheusMetrics) newHistogramMeter(name string, buckets []int64) HistogramMeter {
- var floatBuckets []float64
- for _, bucket := range buckets {
- floatBuckets = append(floatBuckets, float64(bucket))
- }
-
- meter := prometheus.NewHistogram(
- prometheus.HistogramOpts{
- Namespace: namespace,
- Name: name,
- Buckets: floatBuckets,
- },
- )
-
- err := prometheus.Register(meter)
- if err != nil {
- logger.Warn("unable to register metric", "err", err)
- }
-
- return &promHistogramMeter{
- histogram: meter,
- }
-}
-
func getIOLineValue(line string) int64 {
fields := strings.Fields(line)
if len(fields) != 2 {
@@ -194,6 +171,9 @@ func getDiskIOData() (int64, int64, error) {
}
func (o *prometheusMetrics) collectDiskIO(refresh time.Duration) {
+ if runtime.GOOS != "linux" {
+ return
+ }
for {
reads, writes, err := getDiskIOData()
if err == nil {
@@ -208,6 +188,30 @@ func (o *prometheusMetrics) collectDiskIO(refresh time.Duration) {
}
}
+func (o *prometheusMetrics) newHistogramMeter(name string, buckets []int64) HistogramMeter {
+ var floatBuckets []float64
+ for _, bucket := range buckets {
+ floatBuckets = append(floatBuckets, float64(bucket))
+ }
+
+ meter := prometheus.NewHistogram(
+ prometheus.HistogramOpts{
+ Namespace: namespace,
+ Name: name,
+ Buckets: floatBuckets,
+ },
+ )
+
+ err := prometheus.Register(meter)
+ if err != nil {
+ logger.Warn("unable to register metric", "err", err)
+ }
+
+ return &promHistogramMeter{
+ histogram: meter,
+ }
+}
+
type promHistogramMeter struct {
histogram prometheus.Histogram
}
diff --git a/thorclient/api_test.go b/thorclient/api_test.go
index e6a0e43be..e8ae49a8a 100644
--- a/thorclient/api_test.go
+++ b/thorclient/api_test.go
@@ -50,7 +50,7 @@ func initAPIServer(t *testing.T) (*testchain.Chain, *httptest.Server) {
router := mux.NewRouter()
- accounts.New(thorChain.Repo(), thorChain.Stater(), uint64(gasLimit), thor.NoFork, thorChain.Engine()).
+ accounts.New(thorChain.Repo(), thorChain.Stater(), uint64(gasLimit), thor.NoFork, thorChain.Engine(), true).
Mount(router, "/accounts")
blocks.New(thorChain.Repo(), thorChain.Engine()).Mount(router, "/blocks")
diff --git a/thorclient/httpclient/client.go b/thorclient/httpclient/client.go
index 8f88783f5..ce05bf17f 100644
--- a/thorclient/httpclient/client.go
+++ b/thorclient/httpclient/client.go
@@ -33,9 +33,13 @@ type Client struct {
// New creates a new Client with the provided URL.
func New(url string) *Client {
+ return NewWithHTTP(url, http.DefaultClient)
+}
+
+func NewWithHTTP(url string, c *http.Client) *Client {
return &Client{
url: url,
- c: &http.Client{},
+ c: c,
}
}
diff --git a/thorclient/thorclient.go b/thorclient/thorclient.go
index 8458a0ae4..0b7939f51 100644
--- a/thorclient/thorclient.go
+++ b/thorclient/thorclient.go
@@ -11,6 +11,7 @@ package thorclient
import (
"fmt"
+ "net/http"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/common/math"
@@ -44,6 +45,13 @@ func New(url string) *Client {
}
}
+// NewWithHTTP creates a new Client using the provided HTTP URL and HTTP client.
+func NewWithHTTP(url string, c *http.Client) *Client {
+ return &Client{
+ httpConn: httpclient.NewWithHTTP(url, c),
+ }
+}
+
// NewWithWS creates a new Client using the provided HTTP and WebSocket URLs.
// Returns an error if the WebSocket connection fails.
func NewWithWS(url string) (*Client, error) {
@@ -202,7 +210,7 @@ func (c *Client) ChainTag() (byte, error) {
}
// SubscribeBlocks subscribes to block updates over WebSocket.
-func (c *Client) SubscribeBlocks(pos string) (*common.Subscription[*blocks.JSONCollapsedBlock], error) {
+func (c *Client) SubscribeBlocks(pos string) (*common.Subscription[*subscriptions.BlockMessage], error) {
if c.wsConn == nil {
return nil, fmt.Errorf("not a websocket typed client")
}
diff --git a/thorclient/wsclient/client.go b/thorclient/wsclient/client.go
index 057d5aa48..9eb1519ab 100644
--- a/thorclient/wsclient/client.go
+++ b/thorclient/wsclient/client.go
@@ -16,7 +16,6 @@ import (
"github.com/vechain/thor/v2/thor"
"github.com/gorilla/websocket"
- "github.com/vechain/thor/v2/api/blocks"
"github.com/vechain/thor/v2/api/subscriptions"
"github.com/vechain/thor/v2/thorclient/common"
)
@@ -89,7 +88,7 @@ func (c *Client) SubscribeEvents(pos string, filter *subscriptions.EventFilter)
// SubscribeBlocks subscribes to block updates based on the provided query.
// It returns a Subscription that streams block messages or an error if the connection fails.
-func (c *Client) SubscribeBlocks(pos string) (*common.Subscription[*blocks.JSONCollapsedBlock], error) {
+func (c *Client) SubscribeBlocks(pos string) (*common.Subscription[*subscriptions.BlockMessage], error) {
queryValues := &url.Values{}
queryValues.Add("pos", pos)
conn, err := c.connect("/subscriptions/block", queryValues)
@@ -97,7 +96,7 @@ func (c *Client) SubscribeBlocks(pos string) (*common.Subscription[*blocks.JSONC
return nil, fmt.Errorf("unable to connect - %w", err)
}
- return subscribe[blocks.JSONCollapsedBlock](conn), nil
+ return subscribe[subscriptions.BlockMessage](conn), nil
}
// SubscribeTransfers subscribes to transfer events based on the provided query.
diff --git a/thorclient/wsclient/client_test.go b/thorclient/wsclient/client_test.go
index 483ae7233..19dd1b395 100644
--- a/thorclient/wsclient/client_test.go
+++ b/thorclient/wsclient/client_test.go
@@ -13,13 +13,11 @@ import (
"testing"
"time"
- "github.com/vechain/thor/v2/test/datagen"
- "github.com/vechain/thor/v2/thor"
-
"github.com/gorilla/websocket"
"github.com/stretchr/testify/assert"
- "github.com/vechain/thor/v2/api/blocks"
"github.com/vechain/thor/v2/api/subscriptions"
+ "github.com/vechain/thor/v2/test/datagen"
+ "github.com/vechain/thor/v2/thor"
"github.com/vechain/thor/v2/thorclient/common"
)
@@ -50,7 +48,7 @@ func TestClient_SubscribeEvents(t *testing.T) {
func TestClient_SubscribeBlocks(t *testing.T) {
pos := "best"
- expectedBlock := &blocks.JSONCollapsedBlock{}
+ expectedBlock := &subscriptions.BlockMessage{}
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
assert.Equal(t, "/subscriptions/block", r.URL.Path)
@@ -288,7 +286,7 @@ func TestClient_SubscribeBlocks_ServerError(t *testing.T) {
func TestClient_SubscribeBlocks_ServerShutdown(t *testing.T) {
pos := "best"
- expectedBlock := &blocks.JSONCollapsedBlock{}
+ expectedBlock := &subscriptions.BlockMessage{}
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
assert.Equal(t, "/subscriptions/block", r.URL.Path)
@@ -325,7 +323,7 @@ func TestClient_SubscribeBlocks_ServerShutdown(t *testing.T) {
func TestClient_SubscribeBlocks_ClientShutdown(t *testing.T) {
pos := "best"
- expectedBlock := &blocks.JSONCollapsedBlock{}
+ expectedBlock := &subscriptions.BlockMessage{}
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
assert.Equal(t, "/subscriptions/block", r.URL.Path)
@@ -377,7 +375,7 @@ func TestClient_SubscribeBlocks_ClientShutdown(t *testing.T) {
func TestClient_SubscribeBlocks_ClientShutdown_LongBlocks(t *testing.T) {
pos := "best"
- expectedBlock := &blocks.JSONCollapsedBlock{}
+ expectedBlock := &subscriptions.BlockMessage{}
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
assert.Equal(t, "/subscriptions/block", r.URL.Path)
From 822eb972b429bce58acdfba5e0d85e9aec9c88ce Mon Sep 17 00:00:00 2001
From: Miguel Angel Rojo Fernandez
Date: Mon, 9 Dec 2024 14:28:35 +0000
Subject: [PATCH 10/25] chore: fixed vulnerability
---
chain/repository.go | 1 +
1 file changed, 1 insertion(+)
diff --git a/chain/repository.go b/chain/repository.go
index df622bd30..631591f8c 100644
--- a/chain/repository.go
+++ b/chain/repository.go
@@ -177,6 +177,7 @@ func (r *Repository) saveBlock(block *block.Block, receipts tx.Receipts, conflic
}
r.caches.txs.Add(string(keyBuf), tx)
}
+ metricTransactionRepositoryCounter().AddWithLabel(int64(len(txs)), map[string]string{"type": "write", "target": "db"})
// save receipts
for i, receipt := range receipts {
From a751385061f12ce9f2c43a556b7319b579194c4e Mon Sep 17 00:00:00 2001
From: Miguel Angel Rojo Fernandez
Date: Mon, 9 Dec 2024 14:31:18 +0000
Subject: [PATCH 11/25] chore: fixed vulnerability
---
state/state.go | 2 ++
1 file changed, 2 insertions(+)
diff --git a/state/state.go b/state/state.go
index 5312e297c..ef23db60a 100644
--- a/state/state.go
+++ b/state/state.go
@@ -538,6 +538,8 @@ func (s *State) Stage(newVer trie.Version) (*Stage, error) {
return err
}
}
+ // Just once for the account trie.
+ metricAccountCounter().AddWithLabel(int64(len(changes)), map[string]string{"type": "write", "target": "trie"})
return nil
},
}, nil
From e5df4aaa415944b1227ac581e061cb0016ea7ec8 Mon Sep 17 00:00:00 2001
From: Miguel Angel Rojo Fernandez
Date: Tue, 10 Dec 2024 11:00:19 +0000
Subject: [PATCH 12/25] first commit
---
chain/cache.go | 1 +
chain/chain.go | 2 ++
chain/metrics.go | 3 +++
chain/persist.go | 1 +
chain/repository.go | 1 +
metrics/prometheus.go | 12 ++++++------
state/metrics.go | 10 ++++++++++
7 files changed, 24 insertions(+), 6 deletions(-)
create mode 100644 state/metrics.go
diff --git a/chain/cache.go b/chain/cache.go
index c1e9aae06..2b995491b 100644
--- a/chain/cache.go
+++ b/chain/cache.go
@@ -20,6 +20,7 @@ func newCache(maxSize int) *cache {
func (c *cache) GetOrLoad(key interface{}, load func() (interface{}, error)) (interface{}, error) {
if value, ok := c.Get(key); ok {
+ metricBlockRepositoryCounter().AddWithLabel(1, map[string]string{"type": "read", "target": "cache"})
return value, nil
}
value, err := load()
diff --git a/chain/chain.go b/chain/chain.go
index c7e30d9e9..107665cd8 100644
--- a/chain/chain.go
+++ b/chain/chain.go
@@ -241,6 +241,7 @@ func (c *Chain) GetTransaction(id thor.Bytes32) (*tx.Transaction, *TxMeta, error
if err != nil {
return nil, nil, err
}
+ metricTransactionRepositoryCounter().AddWithLabel(1, map[string]string{"type": "read"})
return tx, txMeta, nil
}
@@ -256,6 +257,7 @@ func (c *Chain) GetTransactionReceipt(txID thor.Bytes32) (*tx.Receipt, error) {
if err != nil {
return nil, err
}
+ metricReceiptRepositoryCounter().AddWithLabel(1, map[string]string{"type": "read"})
return receipt, nil
}
diff --git a/chain/metrics.go b/chain/metrics.go
index 8c9a764d4..2e2a45d4c 100644
--- a/chain/metrics.go
+++ b/chain/metrics.go
@@ -9,4 +9,7 @@ import "github.com/vechain/thor/v2/metrics"
var (
metricCacheHitMiss = metrics.LazyLoadCounterVec("repo_cache_hit_miss_count", []string{"type", "event"})
+ metricBlockRepositoryCounter = metrics.LazyLoadCounterVec("block_repository_count", []string{"type", "target"})
+ metricTransactionRepositoryCounter = metrics.LazyLoadCounterVec("transaction_repository_count", []string{"type", "target"})
+ metricReceiptRepositoryCounter = metrics.LazyLoadCounterVec("receipt_repository_count", []string{"type", "target"})
)
diff --git a/chain/persist.go b/chain/persist.go
index 0a73b98ac..7b9e8e75c 100644
--- a/chain/persist.go
+++ b/chain/persist.go
@@ -90,5 +90,6 @@ func loadBlockSummary(r kv.Getter, id thor.Bytes32) (*BlockSummary, error) {
if err := loadRLP(r, id[:], &summary); err != nil {
return nil, err
}
+ metricBlockRepositoryCounter().AddWithLabel(1, map[string]string{"type": "read", "target": "db"})
return &summary, nil
}
diff --git a/chain/repository.go b/chain/repository.go
index 631591f8c..cd0997fd3 100644
--- a/chain/repository.go
+++ b/chain/repository.go
@@ -187,6 +187,7 @@ func (r *Repository) saveBlock(block *block.Block, receipts tx.Receipts, conflic
}
r.caches.receipts.Add(string(keyBuf), receipt)
}
+ metricReceiptRepositoryCounter().AddWithLabel(int64(len(receipts)), map[string]string{"type": "write", "target": "db"})
}
if err := indexChainHead(headPutter, header); err != nil {
return nil, err
diff --git a/metrics/prometheus.go b/metrics/prometheus.go
index 15447f6dc..b048189ea 100644
--- a/metrics/prometheus.go
+++ b/metrics/prometheus.go
@@ -204,7 +204,7 @@ func (o *prometheusMetrics) newHistogramMeter(name string, buckets []int64) Hist
err := prometheus.Register(meter)
if err != nil {
- logger.Warn("unable to register metric", "err", err)
+ logger.Warn(fmt.Sprintf("unable to register metric %s", name), "err", err)
}
return &promHistogramMeter{
@@ -237,7 +237,7 @@ func (o *prometheusMetrics) newHistogramVecMeter(name string, labels []string, b
err := prometheus.Register(meter)
if err != nil {
- logger.Warn("unable to register metric", "err", err)
+ logger.Warn(fmt.Sprintf("unable to register metric %s", name), "err", err)
}
return &promHistogramVecMeter{
@@ -263,7 +263,7 @@ func (o *prometheusMetrics) newCountMeter(name string) CountMeter {
err := prometheus.Register(meter)
if err != nil {
- logger.Warn("unable to register metric", "err", err)
+ logger.Warn(fmt.Sprintf("unable to register metric %s", name), "err", err)
}
return &promCountMeter{
counter: meter,
@@ -281,7 +281,7 @@ func (o *prometheusMetrics) newCountVecMeter(name string, labels []string) Count
err := prometheus.Register(meter)
if err != nil {
- logger.Warn("unable to register metric", "err", err)
+ logger.Warn(fmt.Sprintf("unable to register metric %s", name), "err", err)
}
return &promCountVecMeter{
counter: meter,
@@ -298,7 +298,7 @@ func (o *prometheusMetrics) newGaugeMeter(name string) GaugeMeter {
err := prometheus.Register(meter)
if err != nil {
- logger.Warn("unable to register metric", "err", err)
+ logger.Warn(fmt.Sprintf("unable to register metric %s", name), "err", err)
}
return &promGaugeMeter{
gauge: meter,
@@ -316,7 +316,7 @@ func (o *prometheusMetrics) newGaugeVecMeter(name string, labels []string) Gauge
err := prometheus.Register(meter)
if err != nil {
- logger.Warn("unable to register metric", "err", err)
+ logger.Warn(fmt.Sprintf("unable to register metric %s", name), "err", err)
}
return &promGaugeVecMeter{
gauge: meter,
diff --git a/state/metrics.go b/state/metrics.go
new file mode 100644
index 000000000..6ddfd4af7
--- /dev/null
+++ b/state/metrics.go
@@ -0,0 +1,10 @@
+// Copyright (c) 2024 The VeChainThor developers
+//
+// Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying
+// file LICENSE or
+
+package state
+
+import "github.com/vechain/thor/v2/metrics"
+
+var metricAccountCounter = metrics.LazyLoadCounterVec("account_state_count", []string{"type", "target"})
\ No newline at end of file
From d1ddff0a7e19c55c044fce9ee2297f9d80b51dd5 Mon Sep 17 00:00:00 2001
From: Miguel Angel Rojo Fernandez
Date: Tue, 10 Dec 2024 11:02:39 +0000
Subject: [PATCH 13/25] second commit
---
chain/repository.go | 1 +
1 file changed, 1 insertion(+)
diff --git a/chain/repository.go b/chain/repository.go
index cd0997fd3..c8a1bce14 100644
--- a/chain/repository.go
+++ b/chain/repository.go
@@ -336,6 +336,7 @@ func (r *Repository) GetBlockTransactions(id thor.Bytes32) (tx.Transactions, err
return nil, err
}
}
+ metricTransactionRepositoryCounter().AddWithLabel(int64(n), map[string]string{"type": "read"})
return txs, nil
}
return nil, nil
From f801be24c2d43edf6e3abd96d2fcb8ac5d7b8e5e Mon Sep 17 00:00:00 2001
From: Miguel Angel Rojo Fernandez
Date: Tue, 10 Dec 2024 11:05:22 +0000
Subject: [PATCH 14/25] added receipt
---
chain/repository.go | 1 +
1 file changed, 1 insertion(+)
diff --git a/chain/repository.go b/chain/repository.go
index c8a1bce14..62b440fd2 100644
--- a/chain/repository.go
+++ b/chain/repository.go
@@ -393,6 +393,7 @@ func (r *Repository) GetBlockReceipts(id thor.Bytes32) (tx.Receipts, error) {
return nil, err
}
}
+ metricReceiptRepositoryCounter().AddWithLabel(int64(n), map[string]string{"type": "read", "target": "db"})
return receipts, nil
}
return nil, nil
From 4bb278abcb4395bb070a1b7d27c72e8cfaceeb50 Mon Sep 17 00:00:00 2001
From: Miguel Angel Rojo Fernandez
Date: Tue, 10 Dec 2024 11:06:51 +0000
Subject: [PATCH 15/25] added receipt
---
state/state.go | 1 +
1 file changed, 1 insertion(+)
diff --git a/state/state.go b/state/state.go
index ef23db60a..e63a67154 100644
--- a/state/state.go
+++ b/state/state.go
@@ -120,6 +120,7 @@ func (s *State) getCachedObject(addr thor.Address) (*cachedObject, error) {
if err != nil {
return nil, err
}
+ metricAccountCounter().AddWithLabel(1, map[string]string{"type": "read", "target": "trie"})
co := newCachedObject(s.db, addr, a, am)
s.cache[addr] = co
return co, nil
From 8d1488a67df14894be02802b365b061faf8bcea0 Mon Sep 17 00:00:00 2001
From: Miguel Angel Rojo Fernandez
Date: Tue, 10 Dec 2024 11:07:23 +0000
Subject: [PATCH 16/25] state commit
---
state/state.go | 1 +
1 file changed, 1 insertion(+)
diff --git a/state/state.go b/state/state.go
index e63a67154..ef81f6a16 100644
--- a/state/state.go
+++ b/state/state.go
@@ -132,6 +132,7 @@ func (s *State) getAccount(addr thor.Address) (*Account, error) {
if err != nil {
return nil, err
}
+ metricAccountCounter().AddWithLabel(1, map[string]string{"type": "read", "target": "stackedmap"})
return v.(*Account), nil
}
From 64b9219f66c91e1c1b3d71fe73c22592935066b8 Mon Sep 17 00:00:00 2001
From: Miguel Angel Rojo
Date: Tue, 10 Dec 2024 09:30:54 +0000
Subject: [PATCH 17/25] fix: Get Devnet ID after the Thor flags are set (#915)
* first commit
* using bytes32 method
---
cmd/thor/utils.go | 11 +++++++++--
1 file changed, 9 insertions(+), 2 deletions(-)
diff --git a/cmd/thor/utils.go b/cmd/thor/utils.go
index 3be6d4186..561b1276a 100644
--- a/cmd/thor/utils.go
+++ b/cmd/thor/utils.go
@@ -57,7 +57,7 @@ import (
"gopkg.in/urfave/cli.v1"
)
-var devNetGenesisID = genesis.NewDevnet().ID()
+var devNetGenesisID thor.Bytes32
func initLogger(lvl int, jsonLogs bool) *slog.LevelVar {
logLevel := log.FromLegacyLevel(lvl)
@@ -630,6 +630,13 @@ func printStartupMessage1(
)
}
+func getOrCreateDevnetID() thor.Bytes32 {
+ if devNetGenesisID.IsZero() {
+ devNetGenesisID = genesis.NewDevnet().ID()
+ }
+ return devNetGenesisID
+}
+
func printStartupMessage2(
gene *genesis.Genesis,
apiURL string,
@@ -668,7 +675,7 @@ func printStartupMessage2(
}(),
func() string {
// print default dev net's dev accounts info
- if gene.ID() == devNetGenesisID {
+ if gene.ID() == getOrCreateDevnetID() {
return `
┌──────────────────┬───────────────────────────────────────────────────────────────────────────────┐
│ Mnemonic Words │ denial kitchen pet squirrel other broom bar gas better priority spoil cross │
From 98228a590455a0122a223591e4504320db9b9380 Mon Sep 17 00:00:00 2001
From: Miguel Angel Rojo Fernandez
Date: Tue, 10 Dec 2024 11:22:42 +0000
Subject: [PATCH 18/25] added more dashboard
---
chain/repository.go | 1 +
1 file changed, 1 insertion(+)
diff --git a/chain/repository.go b/chain/repository.go
index 62b440fd2..ed959faae 100644
--- a/chain/repository.go
+++ b/chain/repository.go
@@ -231,6 +231,7 @@ func (r *Repository) AddBlock(newBlock *block.Block, receipts tx.Receipts, confl
if _, err := r.saveBlock(newBlock, receipts, conflicts, asBest); err != nil {
return err
}
+ metricBlockRepositoryCounter().AddWithLabel(1, map[string]string{"type": "write", "target": "db"})
return nil
}
From 2b7e90988e9b6fe68ae327649f7af80376778ae5 Mon Sep 17 00:00:00 2001
From: Miguel Angel Rojo Fernandez
Date: Tue, 10 Dec 2024 11:34:51 +0000
Subject: [PATCH 19/25] added target
---
chain/chain.go | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/chain/chain.go b/chain/chain.go
index 107665cd8..52a8bc0cd 100644
--- a/chain/chain.go
+++ b/chain/chain.go
@@ -241,7 +241,7 @@ func (c *Chain) GetTransaction(id thor.Bytes32) (*tx.Transaction, *TxMeta, error
if err != nil {
return nil, nil, err
}
- metricTransactionRepositoryCounter().AddWithLabel(1, map[string]string{"type": "read"})
+ metricTransactionRepositoryCounter().AddWithLabel(1, map[string]string{"type": "read", "target": "db"})
return tx, txMeta, nil
}
@@ -257,7 +257,7 @@ func (c *Chain) GetTransactionReceipt(txID thor.Bytes32) (*tx.Receipt, error) {
if err != nil {
return nil, err
}
- metricReceiptRepositoryCounter().AddWithLabel(1, map[string]string{"type": "read"})
+ metricReceiptRepositoryCounter().AddWithLabel(1, map[string]string{"type": "read", "target": "db"})
return receipt, nil
}
From f26c23a96cf8fb2021a9b83f4251c26802780e64 Mon Sep 17 00:00:00 2001
From: Darren Kelly
Date: Wed, 18 Dec 2024 09:07:49 +0000
Subject: [PATCH 20/25] fix: compile errors
---
muxdb/cache.go | 8 ++++----
thorclient/api_test.go | 2 +-
2 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/muxdb/cache.go b/muxdb/cache.go
index ea73b47f7..23a0b08d3 100644
--- a/muxdb/cache.go
+++ b/muxdb/cache.go
@@ -69,10 +69,10 @@ func (c *cache) log() {
}
// metrics will reported every 20 seconds
- metricCacheHitMissGaugeVec().SetWithLabel(hitRoot, map[string]string{"type": "root", "event": "hit"})
- metricCacheHitMissGaugeVec().SetWithLabel(missRoot, map[string]string{"type": "root", "event": "miss"})
- metricCacheHitMissGaugeVec().SetWithLabel(hitNode, map[string]string{"type": "node", "event": "hit"})
- metricCacheHitMissGaugeVec().SetWithLabel(missNode, map[string]string{"type": "node", "event": "miss"})
+ metricCacheHitMiss().SetWithLabel(hitRoot, map[string]string{"type": "root", "event": "hit"})
+ metricCacheHitMiss().SetWithLabel(missRoot, map[string]string{"type": "root", "event": "miss"})
+ metricCacheHitMiss().SetWithLabel(hitNode, map[string]string{"type": "node", "event": "hit"})
+ metricCacheHitMiss().SetWithLabel(missNode, map[string]string{"type": "node", "event": "miss"})
} else {
c.lastLogTime.CompareAndSwap(now, last)
}
diff --git a/thorclient/api_test.go b/thorclient/api_test.go
index df72b1296..2ff52fb28 100644
--- a/thorclient/api_test.go
+++ b/thorclient/api_test.go
@@ -375,7 +375,7 @@ func testEventsEndpoint(t *testing.T, _ *testchain.Chain, ts *httptest.Server) {
},
},
Range: nil,
- Options: &logdb.Options{
+ Options: &events.Options{
Offset: 0,
Limit: 10,
},
From 6dd54f7d42082b99f6d74db0495fa33f5300f35e Mon Sep 17 00:00:00 2001
From: Miguel Angel Rojo Fernandez
Date: Mon, 13 Jan 2025 15:46:25 +0000
Subject: [PATCH 21/25] linter
---
chain/metrics.go | 6 +++---
state/metrics.go | 2 +-
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/chain/metrics.go b/chain/metrics.go
index f0684e073..6b17fd913 100644
--- a/chain/metrics.go
+++ b/chain/metrics.go
@@ -8,8 +8,8 @@ package chain
import "github.com/vechain/thor/v2/metrics"
var (
- metricCacheHitMiss = metrics.LazyLoadGaugeVec("repo_cache_hit_miss_count", []string{"type", "event"})
- metricBlockRepositoryCounter = metrics.LazyLoadCounterVec("block_repository_count", []string{"type", "target"})
+ metricCacheHitMiss = metrics.LazyLoadGaugeVec("repo_cache_hit_miss_count", []string{"type", "event"})
+ metricBlockRepositoryCounter = metrics.LazyLoadCounterVec("block_repository_count", []string{"type", "target"})
metricTransactionRepositoryCounter = metrics.LazyLoadCounterVec("transaction_repository_count", []string{"type", "target"})
- metricReceiptRepositoryCounter = metrics.LazyLoadCounterVec("receipt_repository_count", []string{"type", "target"})
+ metricReceiptRepositoryCounter = metrics.LazyLoadCounterVec("receipt_repository_count", []string{"type", "target"})
)
diff --git a/state/metrics.go b/state/metrics.go
index 6ddfd4af7..b40edd98c 100644
--- a/state/metrics.go
+++ b/state/metrics.go
@@ -7,4 +7,4 @@ package state
import "github.com/vechain/thor/v2/metrics"
-var metricAccountCounter = metrics.LazyLoadCounterVec("account_state_count", []string{"type", "target"})
\ No newline at end of file
+var metricAccountCounter = metrics.LazyLoadCounterVec("account_state_count", []string{"type", "target"})
From 1d1f13967d626c2771d1fa82a1814d0c0d8f2247 Mon Sep 17 00:00:00 2001
From: Darren Kelly <107671032+darrenvechain@users.noreply.github.com>
Date: Fri, 17 Jan 2025 08:56:01 +0000
Subject: [PATCH 22/25] chore(maindbv4): add a migration guide (#944)
---
docs/v2-2-0-migration-guide.md | 181 +++++++++++++++++++++++++++++++++
1 file changed, 181 insertions(+)
create mode 100644 docs/v2-2-0-migration-guide.md
diff --git a/docs/v2-2-0-migration-guide.md b/docs/v2-2-0-migration-guide.md
new file mode 100644
index 000000000..3f2ecbed4
--- /dev/null
+++ b/docs/v2-2-0-migration-guide.md
@@ -0,0 +1,181 @@
+# MainDB v4 Migration Paths
+
+## Introduction
+
+The `v2.2.0` release introduces database and SQLite changes to improve performance and storage. This document outlines the possible
+migration paths.
+
+**Note:** The examples below assume you are operating a node on mainnet.
+
+## Table of Contents
+
+- [Blue / Green Deployment](#blue--green-deployment)
+- [Sync in Parallel](#sync-in-parallel)
+ - [1. Docker Migration](#1-docker-migration)
+ - [2. Manual Migration](#2-manual-migration)
+- [Install Latest Version](#install-latest-version)
+ - [Using Docker](#using-docker)
+ - [Install From Source](#install-from-source)
+
+## Blue / Green Deployment
+
+- For environments implementing a blue/green deployment strategy, starting a new node with the updated image and allowing it to
+  sync before switching traffic is a seamless approach. Once synced, traffic can be directed to the new node, and the
+  old node can be stopped.
+
+## Sync in Parallel
+
+- Syncing in parallel minimizes downtime but requires additional CPU, RAM and storage resources.
+
+### 1. Docker Migration
+
+For setups where Docker volumes are mapped to a location on the host machine.
+
+**Note**: The examples assume the default data directory within the container is used. If a custom directory is configured,
+adjustments to the examples are required.
+
+For an existing node with a host instance directory of `/path/to/thor`:
+
+```html
+docker run -d \
+ -v /path/to/thor:/home/thor/.org.vechain.thor
+ -p 8669:8669 \
+ -p 11235:11235 \
+ --name \
+ vechain/thor:v2.1.4 --network main
+```
+
+Start a new container with `v2.2.0`, without exposing the ports:
+
+```html
+docker run -d \
+ -v /path/to/thor:/home/thor/.org.vechain.thor
+ --name node-new \
+ vechain/thor:v2.2.0 --network main
+```
+
+- The `v2.1.4` node will continue to operate and write data to the directory `/path/to/thor/instance-39627e6be7ec1b4a-v3`, while the
+ `v2.2.0` node will write the new databases to `/path/to/thor/instance-39627e6be7ec1b4a-v4`.
+- Allow some time for the new node to sync.
+- You can inspect the logs using `docker logs --tail 25 node-new`.
+- After the new node is fully synced, stop both nodes and restart the original container with the updated image.
+
+```html
+docker stop node-new
+docker rm node-new
+docker stop
+docker rm
+
+docker run -d \
+ -v /path/to/thor:/home/thor/.org.vechain.thor
+ -p 8669:8669 \
+ -p 11235:11235 \
+ --name \
+ vechain/thor:v2.2.0 --network main
+```
+
+- Confirm that the node is functioning as expected, before cleaning up the old databases:
+
+```bash
+rm -rf /path/to/thor/instance-39627e6be7ec1b4a-v3
+```
+
+### 2. Manual Migration
+
+For nodes that were installed from source, follow the steps below:
+
+- Assuming the old node was started with:
+
+```html
+/previous/executable/thor --network main
+```
+
+- Build the new `thor` binary as outlined in [Install From Source](#install-from-source)
+
+- Start the new node with different API, Metrics, Admin and P2P ports:
+
+```html
+./bin/thor --network main \
+ --api-addr localhost:8668 \
+ --metrics-addr localhost:2102 \
+ --admin-addr localhost:2103 \
+ --p2p-port 11222 \
+
+```
+
+- The `v2.1.4` node will continue to operate and write data to the data directory under `/data/dir/instance-39627e6be7ec1b4a-v3`, while
+  the `v2.2.0` node writes to `/data/dir/instance-39627e6be7ec1b4a-v4`.
+- Allow the new node to sync before switching traffic.
+
+#### Stopping and Switching Nodes
+
+##### 1. Get the PID of the new node:
+
+```html
+lsof -n -i:8668
+```
+
+##### 2. Stop the new node:
+
+```html
+kill
+```
+
+##### 3. Get the PID of the old node:
+
+```html
+lsof -n -i:8669
+```
+
+##### 4. Stop the old node:
+
+```html
+kill
+```
+
+##### 5. Restart the original node command with the new binary:
+
+```html
+/new/executable/thor --network main
+```
+
+##### 6. Remove the old databases:
+
+```bash
+rm -rf /data/dir/instance-39627e6be7ec1b4a-v3
+```
+
+## Install Latest Version
+
+### Using Docker
+
+```bash
+docker pull vechain/thor:v2.2.0
+```
+
+### Install From Source
+
+- Clone the repository and checkout the `v2.2.0` tag:
+
+```bash
+git clone https://github.com/vechain/thor.git --branch v2.2.0 --depth 1
+```
+
+- Build the `thor` binary:
+
+```bash
+cd thor
+make thor
+```
+
+- Verify the binary:
+
+```bash
+./bin/thor --version
+```
+
+- (Optional) Copy the binary to a location in your `$PATH`:
+
+```bash
+sudo cp ./bin/thor /usr/local/bin
+```
From ab157f3d4e2eb5919846c85bc26b4920f4f5e55e Mon Sep 17 00:00:00 2001
From: Miguel Angel Rojo
Date: Mon, 20 Jan 2025 14:14:03 +0000
Subject: [PATCH 23/25] Update metrics/prometheus.go
Co-authored-by: Pedro Gomes
---
metrics/prometheus.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/metrics/prometheus.go b/metrics/prometheus.go
index b048189ea..8c498fd6f 100644
--- a/metrics/prometheus.go
+++ b/metrics/prometheus.go
@@ -204,7 +204,7 @@ func (o *prometheusMetrics) newHistogramMeter(name string, buckets []int64) Hist
err := prometheus.Register(meter)
if err != nil {
- logger.Warn(fmt.Sprintf("unable to register metric %s", name), "err", err)
+ logger.Warn("unable to register metric", "metric", name, "err", err)
}
return &promHistogramMeter{
From 0dc3ce88d7d9291ea7ff61da0cfd21b146ad628b Mon Sep 17 00:00:00 2001
From: Miguel Angel Rojo Fernandez
Date: Mon, 20 Jan 2025 15:53:26 +0000
Subject: [PATCH 24/25] modifications after call with pedro
---
chain/cache.go | 1 -
chain/chain.go | 2 --
chain/metrics.go | 5 +----
chain/persist.go | 1 -
chain/repository.go | 5 -----
metrics/prometheus.go | 12 ++++++------
state/metrics.go | 2 +-
state/state.go | 5 ++---
8 files changed, 10 insertions(+), 23 deletions(-)
diff --git a/chain/cache.go b/chain/cache.go
index 3923f25a2..48ba24d23 100644
--- a/chain/cache.go
+++ b/chain/cache.go
@@ -20,7 +20,6 @@ func newCache(maxSize int) *cache {
func (c *cache) GetOrLoad(key interface{}, load func() (interface{}, error)) (interface{}, bool, error) {
if value, ok := c.Get(key); ok {
- metricBlockRepositoryCounter().AddWithLabel(1, map[string]string{"type": "read", "target": "cache"})
return value, true, nil
}
value, err := load()
diff --git a/chain/chain.go b/chain/chain.go
index 52a8bc0cd..c7e30d9e9 100644
--- a/chain/chain.go
+++ b/chain/chain.go
@@ -241,7 +241,6 @@ func (c *Chain) GetTransaction(id thor.Bytes32) (*tx.Transaction, *TxMeta, error
if err != nil {
return nil, nil, err
}
- metricTransactionRepositoryCounter().AddWithLabel(1, map[string]string{"type": "read", "target": "db"})
return tx, txMeta, nil
}
@@ -257,7 +256,6 @@ func (c *Chain) GetTransactionReceipt(txID thor.Bytes32) (*tx.Receipt, error) {
if err != nil {
return nil, err
}
- metricReceiptRepositoryCounter().AddWithLabel(1, map[string]string{"type": "read", "target": "db"})
return receipt, nil
}
diff --git a/chain/metrics.go b/chain/metrics.go
index 6b17fd913..7532aaa0e 100644
--- a/chain/metrics.go
+++ b/chain/metrics.go
@@ -8,8 +8,5 @@ package chain
import "github.com/vechain/thor/v2/metrics"
var (
- metricCacheHitMiss = metrics.LazyLoadGaugeVec("repo_cache_hit_miss_count", []string{"type", "event"})
- metricBlockRepositoryCounter = metrics.LazyLoadCounterVec("block_repository_count", []string{"type", "target"})
- metricTransactionRepositoryCounter = metrics.LazyLoadCounterVec("transaction_repository_count", []string{"type", "target"})
- metricReceiptRepositoryCounter = metrics.LazyLoadCounterVec("receipt_repository_count", []string{"type", "target"})
+ metricCacheHitMiss = metrics.LazyLoadGaugeVec("repo_cache_hit_miss_count", []string{"type", "event"})
)
diff --git a/chain/persist.go b/chain/persist.go
index 7b9e8e75c..0a73b98ac 100644
--- a/chain/persist.go
+++ b/chain/persist.go
@@ -90,6 +90,5 @@ func loadBlockSummary(r kv.Getter, id thor.Bytes32) (*BlockSummary, error) {
if err := loadRLP(r, id[:], &summary); err != nil {
return nil, err
}
- metricBlockRepositoryCounter().AddWithLabel(1, map[string]string{"type": "read", "target": "db"})
return &summary, nil
}
diff --git a/chain/repository.go b/chain/repository.go
index 87b71a6ab..58ce6ae17 100644
--- a/chain/repository.go
+++ b/chain/repository.go
@@ -184,7 +184,6 @@ func (r *Repository) saveBlock(block *block.Block, receipts tx.Receipts, conflic
}
r.caches.txs.Add(string(keyBuf), tx)
}
- metricTransactionRepositoryCounter().AddWithLabel(int64(len(txs)), map[string]string{"type": "write", "target": "db"})
// save receipts
for i, receipt := range receipts {
@@ -194,7 +193,6 @@ func (r *Repository) saveBlock(block *block.Block, receipts tx.Receipts, conflic
}
r.caches.receipts.Add(string(keyBuf), receipt)
}
- metricReceiptRepositoryCounter().AddWithLabel(int64(len(receipts)), map[string]string{"type": "write", "target": "db"})
}
if err := indexChainHead(headPutter, header); err != nil {
return nil, err
@@ -238,7 +236,6 @@ func (r *Repository) AddBlock(newBlock *block.Block, receipts tx.Receipts, confl
if _, err := r.saveBlock(newBlock, receipts, conflicts, asBest); err != nil {
return err
}
- metricBlockRepositoryCounter().AddWithLabel(1, map[string]string{"type": "write", "target": "db"})
return nil
}
@@ -358,7 +355,6 @@ func (r *Repository) GetBlockTransactions(id thor.Bytes32) (tx.Transactions, err
return nil, err
}
}
- metricTransactionRepositoryCounter().AddWithLabel(int64(n), map[string]string{"type": "read"})
return txs, nil
}
return nil, nil
@@ -421,7 +417,6 @@ func (r *Repository) GetBlockReceipts(id thor.Bytes32) (tx.Receipts, error) {
return nil, err
}
}
- metricReceiptRepositoryCounter().AddWithLabel(int64(n), map[string]string{"type": "read", "target": "db"})
return receipts, nil
}
return nil, nil
diff --git a/metrics/prometheus.go b/metrics/prometheus.go
index 8c498fd6f..15447f6dc 100644
--- a/metrics/prometheus.go
+++ b/metrics/prometheus.go
@@ -204,7 +204,7 @@ func (o *prometheusMetrics) newHistogramMeter(name string, buckets []int64) Hist
err := prometheus.Register(meter)
if err != nil {
- logger.Warn("unable to register metric", "metric", name, "err", err)
+ logger.Warn("unable to register metric", "err", err)
}
return &promHistogramMeter{
@@ -237,7 +237,7 @@ func (o *prometheusMetrics) newHistogramVecMeter(name string, labels []string, b
err := prometheus.Register(meter)
if err != nil {
- logger.Warn(fmt.Sprintf("unable to register metric %s", name), "err", err)
+ logger.Warn("unable to register metric", "err", err)
}
return &promHistogramVecMeter{
@@ -263,7 +263,7 @@ func (o *prometheusMetrics) newCountMeter(name string) CountMeter {
err := prometheus.Register(meter)
if err != nil {
- logger.Warn(fmt.Sprintf("unable to register metric %s", name), "err", err)
+ logger.Warn("unable to register metric", "err", err)
}
return &promCountMeter{
counter: meter,
@@ -281,7 +281,7 @@ func (o *prometheusMetrics) newCountVecMeter(name string, labels []string) Count
err := prometheus.Register(meter)
if err != nil {
- logger.Warn(fmt.Sprintf("unable to register metric %s", name), "err", err)
+ logger.Warn("unable to register metric", "err", err)
}
return &promCountVecMeter{
counter: meter,
@@ -298,7 +298,7 @@ func (o *prometheusMetrics) newGaugeMeter(name string) GaugeMeter {
err := prometheus.Register(meter)
if err != nil {
- logger.Warn(fmt.Sprintf("unable to register metric %s", name), "err", err)
+ logger.Warn("unable to register metric", "err", err)
}
return &promGaugeMeter{
gauge: meter,
@@ -316,7 +316,7 @@ func (o *prometheusMetrics) newGaugeVecMeter(name string, labels []string) Gauge
err := prometheus.Register(meter)
if err != nil {
- logger.Warn(fmt.Sprintf("unable to register metric %s", name), "err", err)
+ logger.Warn("unable to register metric", "err", err)
}
return &promGaugeVecMeter{
gauge: meter,
diff --git a/state/metrics.go b/state/metrics.go
index b40edd98c..1793f80a7 100644
--- a/state/metrics.go
+++ b/state/metrics.go
@@ -7,4 +7,4 @@ package state
import "github.com/vechain/thor/v2/metrics"
-var metricAccountCounter = metrics.LazyLoadCounterVec("account_state_count", []string{"type", "target"})
+var metricAccountCounter = metrics.LazyLoadCounterVec("account_state_count", []string{"type"})
diff --git a/state/state.go b/state/state.go
index ef81f6a16..e1cda903a 100644
--- a/state/state.go
+++ b/state/state.go
@@ -120,7 +120,7 @@ func (s *State) getCachedObject(addr thor.Address) (*cachedObject, error) {
if err != nil {
return nil, err
}
- metricAccountCounter().AddWithLabel(1, map[string]string{"type": "read", "target": "trie"})
+ metricAccountCounter().AddWithLabel(1, map[string]string{"type": "read"})
co := newCachedObject(s.db, addr, a, am)
s.cache[addr] = co
return co, nil
@@ -132,7 +132,6 @@ func (s *State) getAccount(addr thor.Address) (*Account, error) {
if err != nil {
return nil, err
}
- metricAccountCounter().AddWithLabel(1, map[string]string{"type": "read", "target": "stackedmap"})
return v.(*Account), nil
}
@@ -541,7 +540,7 @@ func (s *State) Stage(newVer trie.Version) (*Stage, error) {
}
}
// Just once for the account trie.
- metricAccountCounter().AddWithLabel(int64(len(changes)), map[string]string{"type": "write", "target": "trie"})
+ metricAccountCounter().AddWithLabel(int64(len(changes)), map[string]string{"type": "write"})
return nil
},
}, nil
From bc20e7ff173df7a8a0955d5d9c86bedc744821c9 Mon Sep 17 00:00:00 2001
From: Miguel Angel Rojo Fernandez
Date: Mon, 20 Jan 2025 18:06:43 +0000
Subject: [PATCH 25/25] state: report only account state changes in metrics
---
state/metrics.go | 2 +-
state/state.go | 3 +--
2 files changed, 2 insertions(+), 3 deletions(-)
diff --git a/state/metrics.go b/state/metrics.go
index 1793f80a7..e1667668a 100644
--- a/state/metrics.go
+++ b/state/metrics.go
@@ -7,4 +7,4 @@ package state
import "github.com/vechain/thor/v2/metrics"
-var metricAccountCounter = metrics.LazyLoadCounterVec("account_state_count", []string{"type"})
+var metricAccountChanges = metrics.LazyLoadCounter("account_state_changes_count")
diff --git a/state/state.go b/state/state.go
index e1cda903a..119546d3e 100644
--- a/state/state.go
+++ b/state/state.go
@@ -120,7 +120,6 @@ func (s *State) getCachedObject(addr thor.Address) (*cachedObject, error) {
if err != nil {
return nil, err
}
- metricAccountCounter().AddWithLabel(1, map[string]string{"type": "read"})
co := newCachedObject(s.db, addr, a, am)
s.cache[addr] = co
return co, nil
@@ -540,7 +539,7 @@ func (s *State) Stage(newVer trie.Version) (*Stage, error) {
}
}
// Just once for the account trie.
- metricAccountCounter().AddWithLabel(int64(len(changes)), map[string]string{"type": "write"})
+ metricAccountChanges().Add(int64(len(changes)))
return nil
},
}, nil