Skip to content

Commit

Permalink
Merge branch 'main' into implement_eth_get_proof
Browse files Browse the repository at this point in the history
  • Loading branch information
shotasilagadze authored Jan 14, 2025
2 parents 064242e + 85844e6 commit 06f3e13
Show file tree
Hide file tree
Showing 107 changed files with 3,444 additions and 842 deletions.
1 change: 0 additions & 1 deletion .github/workflows/ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -106,7 +106,6 @@ jobs:
- name: Install dependencies
run: |
choco upgrade mingw -y --no-progress --version 13.2.0
choco install cmake -y --no-progress --version 3.27.8
- name: Build
run: .\wmake.ps1 all
Expand Down
18 changes: 15 additions & 3 deletions .github/workflows/qa-tip-tracking-polygon.yml
Original file line number Diff line number Diff line change
Expand Up @@ -97,9 +97,21 @@ jobs:
uses: actions/upload-artifact@v4
with:
name: test-results
path: |
${{ github.workspace }}/result-${{ env.CHAIN }}.json
${{ env.ERIGON_TESTBED_DATA_DIR }}/logs/erigon.log
path: ${{ github.workspace }}/result-${{ env.CHAIN }}.json

- name: Upload erigon log
if: steps.test_step.outputs.test_executed == 'true'
uses: actions/upload-artifact@v4
with:
name: erigon-log
path: ${{ env.ERIGON_TESTBED_DATA_DIR }}/logs/erigon.log

- name: Upload metric plots
if: steps.test_step.outputs.test_executed == 'true'
uses: actions/upload-artifact@v4
with:
name: metric-plots
path: ${{ github.workspace }}/metrics-${{ env.CHAIN }}-plots*

- name: Delete Erigon Testbed Data Directory
if: always()
Expand Down
18 changes: 15 additions & 3 deletions .github/workflows/qa-tip-tracking.yml
Original file line number Diff line number Diff line change
Expand Up @@ -97,9 +97,21 @@ jobs:
uses: actions/upload-artifact@v4
with:
name: test-results
path: |
${{ github.workspace }}/result-${{ env.CHAIN }}.json
${{ env.ERIGON_TESTBED_DATA_DIR }}/logs/erigon.log
path: ${{ github.workspace }}/result-${{ env.CHAIN }}.json

- name: Upload erigon log
if: steps.test_step.outputs.test_executed == 'true'
uses: actions/upload-artifact@v4
with:
name: erigon-log
path: ${{ env.ERIGON_TESTBED_DATA_DIR }}/logs/erigon.log

- name: Upload metric plots
if: steps.test_step.outputs.test_executed == 'true'
uses: actions/upload-artifact@v4
with:
name: metric-plots
path: ${{ github.workspace }}/metrics-${{ env.CHAIN }}-plots*

- name: Delete Erigon Testbed Data Directory
if: always()
Expand Down
1 change: 0 additions & 1 deletion .github/workflows/test-integration-caplin.yml
Original file line number Diff line number Diff line change
Expand Up @@ -66,7 +66,6 @@ jobs:
- name: Install dependencies
run: |
choco upgrade mingw -y --no-progress --version 13.2.0
choco install cmake -y --no-progress --version 3.27.8
- name: test-integration-caplin
run: cd ./cl/spectest/ && .\wmake.ps1 Tests Mainnet
1 change: 0 additions & 1 deletion .github/workflows/test-integration-erigon.yml
Original file line number Diff line number Diff line change
Expand Up @@ -66,7 +66,6 @@ jobs:
- name: Install dependencies
run: |
choco upgrade mingw -y --no-progress --version 13.2.0
choco install cmake -y --no-progress --version 3.27.8
- name: test-integration
run: .\wmake.ps1 test-integration
Expand Down
13 changes: 13 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@ frontier.

- [Erigon](#erigon)
- [System Requirements](#system-requirements)
- [Sync Times](#sync-times)
- [Usage](#usage)
- [Getting Started](#getting-started)
- [Datadir structure](#datadir-structure)
Expand Down Expand Up @@ -97,6 +98,18 @@ on [cloud-network-drives](https://github.com/erigontech/erigon?tab=readme-ov-fil

🔬 More details on what type of data stored [here](https://ledgerwatch.github.io/turbo_geth_release.html#Disk-space)

Sync Times
==========

These are the approximate sync times syncing from scratch to the tip of the chain (results may vary depending on hardware and bandwidth).


| Chain | Archive | Full | Minimal |
|------------|-----------------|----------------|----------------|
| Ethereum | 7 Hours, 55 Minutes | 4 Hours, 23 Minutes | 1 Hour, 41 Minutes |
| Gnosis | 2 Hours, 10 Minutes | 1 Hour, 5 Minutes | 33 Minutes |
| Polygon | 1 Day, 21 Hours | 21 Hours, 41 Minutes | 11 Hours, 54 Minutes |

Usage
=====

Expand Down
8 changes: 8 additions & 0 deletions RELEASE_INSTRUCTIONS.md
Original file line number Diff line number Diff line change
Expand Up @@ -31,3 +31,11 @@ In most cases, it is enough to bump minor version.
In the file `ethdb/remote/remotedbserver/server.go` there is variable `KvServiceAPIVersion` that needs to be updated if there are any changes in the remote KV interface, or
database schema, leading to data migrations.
In most cases, it is enough to bump minor version. It is best to change both DB schema version and remove KV version together.

## Purify the state domains if a regeneration is done

If a regeneration is done, the state domains need to be purified. This can be done by running the following command:
````
make integration
./build/bin/integration purify_domains --datadir=<path to datadir> --replace-in-datadir
````
71 changes: 52 additions & 19 deletions cl/antiquary/beacon_states_collector.go
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,9 @@ import (
"bytes"
"context"
"io"
"sync"

"github.com/c2h5oh/datasize"
"github.com/klauspost/compress/zstd"

libcommon "github.com/erigontech/erigon-lib/common"
Expand All @@ -36,7 +38,20 @@ import (
"github.com/erigontech/erigon/cl/transition/impl/eth2"
)

var stateAntiquaryBufSz = etl.BufferOptimalSize / 8 // 18 collectors * 256mb / 8 = 512mb in worst case
// stateAntiquaryBufSz caps each ETL collector's in-memory sort buffer.
// The divisor bounds total worst-case memory across all collectors in this file.
var stateAntiquaryBufSz = etl.BufferOptimalSize / 16 // 18 collectors * 256mb / 16 = 256mb in worst case

// EnabledPreAllocate toggles up-front allocation of pooled buffers so they do
// not grow incrementally on first use.
const EnabledPreAllocate = true

// etlBufferPool recycles sortable ETL buffers between collector lifecycles,
// avoiding repeated large allocations. Callers are expected to Reset a buffer
// taken from the pool before use and Put it back when done — presumably in
// the collector's close path; verify against the call sites.
var etlBufferPool = &sync.Pool{
New: func() interface{} {
buf := etl.NewSortableBuffer(stateAntiquaryBufSz)
// preallocate 20_000 items with a 2MB overflow buffer
if EnabledPreAllocate {
buf.Prealloc(20_000, int(stateAntiquaryBufSz+2*datasize.MB))
}
return buf
},
}

// RATIONALE: MDBX locks the entire database when writing to it, so we need to minimize the time spent in the write lock.
// so instead of writing the historical states on write transactions, we accumulate them in memory and write them in a single write transaction.
Expand All @@ -63,6 +78,8 @@ type beaconStatesCollector struct {
balancesDumpsCollector *etl.Collector
effectiveBalancesDumpCollector *etl.Collector

buffers []etl.Buffer

buf *bytes.Buffer
compressor *zstd.Encoder

Expand All @@ -76,30 +93,40 @@ func newBeaconStatesCollector(beaconCfg *clparams.BeaconChainConfig, tmpdir stri
if err != nil {
panic(err)
}

var buffers []etl.Buffer
makeETLBuffer := func() etl.Buffer {
b := etlBufferPool.Get().(etl.Buffer)
b.Reset()
buffers = append(buffers, b)
return b
}

return &beaconStatesCollector{
effectiveBalanceCollector: etl.NewCollector(kv.ValidatorEffectiveBalance, tmpdir, etl.NewSortableBuffer(stateAntiquaryBufSz), logger).LogLvl(log.LvlTrace),
balancesCollector: etl.NewCollector(kv.ValidatorBalance, tmpdir, etl.NewSortableBuffer(stateAntiquaryBufSz), logger).LogLvl(log.LvlTrace),
randaoMixesCollector: etl.NewCollector(kv.RandaoMixes, tmpdir, etl.NewSortableBuffer(stateAntiquaryBufSz), logger).LogLvl(log.LvlTrace),
intraRandaoMixesCollector: etl.NewCollector(kv.IntraRandaoMixes, tmpdir, etl.NewSortableBuffer(stateAntiquaryBufSz), logger).LogLvl(log.LvlTrace),
proposersCollector: etl.NewCollector(kv.Proposers, tmpdir, etl.NewSortableBuffer(stateAntiquaryBufSz), logger).LogLvl(log.LvlTrace),
slashingsCollector: etl.NewCollector(kv.ValidatorSlashings, tmpdir, etl.NewSortableBuffer(stateAntiquaryBufSz), logger).LogLvl(log.LvlTrace),
blockRootsCollector: etl.NewCollector(kv.BlockRoot, tmpdir, etl.NewSortableBuffer(stateAntiquaryBufSz), logger).LogLvl(log.LvlTrace),
stateRootsCollector: etl.NewCollector(kv.StateRoot, tmpdir, etl.NewSortableBuffer(stateAntiquaryBufSz), logger).LogLvl(log.LvlTrace),
slotDataCollector: etl.NewCollector(kv.SlotData, tmpdir, etl.NewSortableBuffer(stateAntiquaryBufSz), logger).LogLvl(log.LvlTrace),
epochDataCollector: etl.NewCollector(kv.EpochData, tmpdir, etl.NewSortableBuffer(stateAntiquaryBufSz), logger).LogLvl(log.LvlTrace),
inactivityScoresCollector: etl.NewCollector(kv.InactivityScores, tmpdir, etl.NewSortableBuffer(stateAntiquaryBufSz), logger).LogLvl(log.LvlTrace),
nextSyncCommitteeCollector: etl.NewCollector(kv.NextSyncCommittee, tmpdir, etl.NewSortableBuffer(stateAntiquaryBufSz), logger).LogLvl(log.LvlTrace),
currentSyncCommitteeCollector: etl.NewCollector(kv.CurrentSyncCommittee, tmpdir, etl.NewSortableBuffer(stateAntiquaryBufSz), logger).LogLvl(log.LvlTrace),
eth1DataVotesCollector: etl.NewCollector(kv.Eth1DataVotes, tmpdir, etl.NewSortableBuffer(stateAntiquaryBufSz), logger).LogLvl(log.LvlTrace),
stateEventsCollector: etl.NewCollector(kv.StateEvents, tmpdir, etl.NewSortableBuffer(stateAntiquaryBufSz), logger).LogLvl(log.LvlTrace),
activeValidatorIndiciesCollector: etl.NewCollector(kv.ActiveValidatorIndicies, tmpdir, etl.NewSortableBuffer(stateAntiquaryBufSz), logger).LogLvl(log.LvlTrace),
balancesDumpsCollector: etl.NewCollector(kv.BalancesDump, tmpdir, etl.NewSortableBuffer(stateAntiquaryBufSz), logger).LogLvl(log.LvlTrace),
effectiveBalancesDumpCollector: etl.NewCollector(kv.EffectiveBalancesDump, tmpdir, etl.NewSortableBuffer(stateAntiquaryBufSz), logger).LogLvl(log.LvlTrace),
effectiveBalanceCollector: etl.NewCollector(kv.ValidatorEffectiveBalance, tmpdir, makeETLBuffer(), logger).LogLvl(log.LvlTrace),
balancesCollector: etl.NewCollector(kv.ValidatorBalance, tmpdir, makeETLBuffer(), logger).LogLvl(log.LvlTrace),
randaoMixesCollector: etl.NewCollector(kv.RandaoMixes, tmpdir, makeETLBuffer(), logger).LogLvl(log.LvlTrace),
intraRandaoMixesCollector: etl.NewCollector(kv.IntraRandaoMixes, tmpdir, makeETLBuffer(), logger).LogLvl(log.LvlTrace),
proposersCollector: etl.NewCollector(kv.Proposers, tmpdir, makeETLBuffer(), logger).LogLvl(log.LvlTrace),
slashingsCollector: etl.NewCollector(kv.ValidatorSlashings, tmpdir, makeETLBuffer(), logger).LogLvl(log.LvlTrace),
blockRootsCollector: etl.NewCollector(kv.BlockRoot, tmpdir, makeETLBuffer(), logger).LogLvl(log.LvlTrace),
stateRootsCollector: etl.NewCollector(kv.StateRoot, tmpdir, makeETLBuffer(), logger).LogLvl(log.LvlTrace),
slotDataCollector: etl.NewCollector(kv.SlotData, tmpdir, makeETLBuffer(), logger).LogLvl(log.LvlTrace),
epochDataCollector: etl.NewCollector(kv.EpochData, tmpdir, makeETLBuffer(), logger).LogLvl(log.LvlTrace),
inactivityScoresCollector: etl.NewCollector(kv.InactivityScores, tmpdir, makeETLBuffer(), logger).LogLvl(log.LvlTrace),
nextSyncCommitteeCollector: etl.NewCollector(kv.NextSyncCommittee, tmpdir, makeETLBuffer(), logger).LogLvl(log.LvlTrace),
currentSyncCommitteeCollector: etl.NewCollector(kv.CurrentSyncCommittee, tmpdir, makeETLBuffer(), logger).LogLvl(log.LvlTrace),
eth1DataVotesCollector: etl.NewCollector(kv.Eth1DataVotes, tmpdir, makeETLBuffer(), logger).LogLvl(log.LvlTrace),
stateEventsCollector: etl.NewCollector(kv.StateEvents, tmpdir, makeETLBuffer(), logger).LogLvl(log.LvlTrace),
activeValidatorIndiciesCollector: etl.NewCollector(kv.ActiveValidatorIndicies, tmpdir, makeETLBuffer(), logger).LogLvl(log.LvlTrace),
balancesDumpsCollector: etl.NewCollector(kv.BalancesDump, tmpdir, makeETLBuffer(), logger).LogLvl(log.LvlTrace),
effectiveBalancesDumpCollector: etl.NewCollector(kv.EffectiveBalancesDump, tmpdir, makeETLBuffer(), logger).LogLvl(log.LvlTrace),
logger: logger,
beaconCfg: beaconCfg,

buf: buf,
compressor: compressor,
buffers: buffers,
}
}

Expand Down Expand Up @@ -361,6 +388,12 @@ func (i *beaconStatesCollector) close() {
i.activeValidatorIndiciesCollector.Close()
i.balancesDumpsCollector.Close()
i.effectiveBalancesDumpCollector.Close()
for _, b := range i.buffers {
b.Reset()
}
for _, b := range i.buffers {
etlBufferPool.Put(b)
}
}

// antiquateFullUint64List goes on mdbx as it is full of common repeated patterns and thus fits well with 16KB pages.
Expand Down
14 changes: 11 additions & 3 deletions cl/beacon/beaconhttp/api.go
Original file line number Diff line number Diff line change
Expand Up @@ -103,13 +103,21 @@ func HandleEndpoint[T any](h EndpointHandler[T]) http.HandlerFunc {
ans, err := h.Handle(w, r)
if err != nil {
var endpointError *EndpointError
var e *EndpointError
if errors.As(err, &e) {
if e, ok := err.(*EndpointError); ok {
// Directly use the error if it's already an *EndpointError
endpointError = e
} else {
// Wrap the error in an EndpointError otherwise
endpointError = WrapEndpointError(err)
}
endpointError.WriteTo(w)

// Write the endpoint error to the response writer
if endpointError != nil {
endpointError.WriteTo(w)
} else {
// Failsafe: If the error is nil, write a generic 500 error
http.Error(w, "Internal Server Error", http.StatusInternalServerError)
}
return
}
// TODO: potentially add a context option to buffer these
Expand Down
6 changes: 2 additions & 4 deletions cl/beacon/handler/block_production.go
Original file line number Diff line number Diff line change
Expand Up @@ -1144,8 +1144,7 @@ func (a *ApiHandler) broadcastBlock(ctx context.Context, blk *cltypes.SignedBeac
Name: gossip.TopicNameBeaconBlock,
Data: blkSSZ,
}); err != nil {
log.Error("Failed to publish block", "err", err)
return err
a.logger.Error("Failed to publish block", "err", err)
}
for idx, blob := range blobsSidecarsBytes {
idx64 := uint64(idx)
Expand All @@ -1154,8 +1153,7 @@ func (a *ApiHandler) broadcastBlock(ctx context.Context, blk *cltypes.SignedBeac
Data: blob,
SubnetId: &idx64,
}); err != nil {
log.Error("Failed to publish blob sidecar", "err", err)
return err
a.logger.Error("Failed to publish blob sidecar", "err", err)
}
}
return nil
Expand Down
4 changes: 4 additions & 0 deletions cl/beacon/handler/duties_attester.go
Original file line number Diff line number Diff line change
Expand Up @@ -48,6 +48,10 @@ func (a *ApiHandler) getDependentRoot(epoch uint64, attester bool) (libcommon.Ha
if attester {
dependentRootSlot = ((epoch - 1) * a.beaconChainCfg.SlotsPerEpoch) - 1
}
if !a.syncedData.Syncing() && dependentRootSlot == a.syncedData.HeadSlot() {
dependentRoot = a.syncedData.HeadRoot()
return nil
}
maxIterations := 2048
for i := 0; i < maxIterations; i++ {
if dependentRootSlot > epoch*a.beaconChainCfg.SlotsPerEpoch {
Expand Down
Loading

0 comments on commit 06f3e13

Please sign in to comment.