diff --git a/.github/workflows/qa-clean-exit-block-downloading.yml b/.github/workflows/qa-clean-exit-block-downloading.yml
index 5ef2c7ddfec..d5135e1116a 100644
--- a/.github/workflows/qa-clean-exit-block-downloading.yml
+++ b/.github/workflows/qa-clean-exit-block-downloading.yml
@@ -8,6 +8,10 @@ on:
# - cron: '0 8 * * 1-6' # Run every day at 08:00 AM UTC except Sunday
workflow_dispatch: # Run manually
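+# allow only one run per workflow and branch; a new run cancels any in-progress one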
+concurrency:
+ group: ${{ github.workflow }}-${{ github.ref }}
+ cancel-in-progress: true
+
jobs:
clean-exit-bd-test:
runs-on: [self-hosted, Erigon3]
diff --git a/.github/workflows/qa-clean-exit-snapshot-downloading.yml b/.github/workflows/qa-clean-exit-snapshot-downloading.yml
index 1178de63c24..9a93016fe04 100644
--- a/.github/workflows/qa-clean-exit-snapshot-downloading.yml
+++ b/.github/workflows/qa-clean-exit-snapshot-downloading.yml
@@ -13,6 +13,10 @@ on:
# - ready_for_review
workflow_dispatch: # Run manually
+concurrency:
+ group: ${{ github.workflow }}-${{ github.ref }}
+ cancel-in-progress: true
+
jobs:
clean-exit-sd-test:
runs-on: self-hosted
diff --git a/.github/workflows/qa-rpc-integration-tests.yml b/.github/workflows/qa-rpc-integration-tests.yml
index 769e91b9cf8..b9d65913ae8 100644
--- a/.github/workflows/qa-rpc-integration-tests.yml
+++ b/.github/workflows/qa-rpc-integration-tests.yml
@@ -1,7 +1,6 @@
name: QA - RPC Integration Tests
on:
- workflow_dispatch: # Run manually
# push:
# branches:
# - main
@@ -11,10 +10,8 @@ on:
# - main
# - 'release/3.*'
# types:
-# - opened
-# - reopened
-# - synchronize
# - ready_for_review
+# workflow_dispatch: # Run manually
jobs:
integration-test-suite:
@@ -155,4 +152,3 @@ jobs:
run: |
echo "::error::Error detected during tests"
exit 1
-
diff --git a/.github/workflows/qa-snap-download.yml b/.github/workflows/qa-snap-download.yml
index d685d9d713a..c26bb673b3b 100644
--- a/.github/workflows/qa-snap-download.yml
+++ b/.github/workflows/qa-snap-download.yml
@@ -8,6 +8,10 @@ on:
# - cron: '0 20 * * 1-6' # Run every night at 20:00 (08:00 PM) UTC except Sunday
workflow_dispatch: # Run manually
+concurrency:
+ group: ${{ github.workflow }}-${{ github.ref }}
+ cancel-in-progress: true
+
jobs:
snap-download-test:
runs-on: self-hosted
diff --git a/.github/workflows/qa-sync-from-scratch-minimal-node.yml b/.github/workflows/qa-sync-from-scratch-minimal-node.yml
index 5c373f16691..851b41bbea8 100644
--- a/.github/workflows/qa-sync-from-scratch-minimal-node.yml
+++ b/.github/workflows/qa-sync-from-scratch-minimal-node.yml
@@ -8,6 +8,10 @@ on:
# - cron: '0 0 * * *' # Run every night at 00:00 AM UTC
workflow_dispatch: # Run manually
+concurrency:
+ group: ${{ github.workflow }}-${{ github.ref }}
+ cancel-in-progress: false
+
jobs:
minimal-node-sync-from-scratch-test:
runs-on: self-hosted
diff --git a/.github/workflows/qa-sync-with-externalcl.yml b/.github/workflows/qa-sync-with-externalcl.yml
index b137a66644e..e67c1e0cdc6 100644
--- a/.github/workflows/qa-sync-with-externalcl.yml
+++ b/.github/workflows/qa-sync-with-externalcl.yml
@@ -1,20 +1,34 @@
name: QA - Sync with external CL
on:
- # schedule:
- # - cron: '0 0 * * *' # Run every night at 00:00 AM UTC
+# push:
+# branches:
+# - 'release/3.*'
+# schedule:
+# - cron: '0 8 * * 0' # Run on Sunday at 08:00 AM UTC
workflow_dispatch: # Run manually
+concurrency:
+ group: ${{ github.workflow }}-${{ github.ref }}
+ cancel-in-progress: false
+
jobs:
- prysm-minimal-node-sync-from-scratch-test:
- runs-on: self-hosted
- timeout-minutes: 360 # 6 hours
+ sync-with-externalcl:
+ runs-on: [self-hosted, linux, X64]
+ timeout-minutes: 500 # 8+ hours
+ strategy:
+ matrix:
+ client: [lighthouse, prysm]
+ chain: [mainnet, gnosis]
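+ # prysm does not support gnosis, so that combination is excluded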
+ exclude:
+ - client: prysm
+ chain: gnosis
env:
ERIGON_DATA_DIR: ${{ github.workspace }}/erigon_data
+ CL_DATA_DIR: ${{ github.workspace }}/consensus
ERIGON_QA_PATH: /home/qarunner/erigon-qa
- TRACKING_TIME_SECONDS: 7200 # 2 hours
- TOTAL_TIME_SECONDS: 18000 # 5 hours
- CHAIN: mainnet
+ TRACKING_TIME_SECONDS: 3600 # 1 hour
+ TOTAL_TIME_SECONDS: 25200 # 7 hours
steps:
- name: Check out repository
@@ -25,6 +39,19 @@ jobs:
make clean
rm -rf $ERIGON_DATA_DIR
+ - name: Install ${{ matrix.client }} and generate JWT secret
+ run: |
+ mkdir -p $CL_DATA_DIR
+ if [ "${{ matrix.client }}" == "lighthouse" ]; then
+ curl -LO https://github.com/sigp/lighthouse/releases/download/v7.0.0-beta.0/lighthouse-v7.0.0-beta.0-x86_64-unknown-linux-gnu.tar.gz
+ tar -xvf lighthouse-v7.0.0-beta.0-x86_64-unknown-linux-gnu.tar.gz -C $CL_DATA_DIR
+ rm lighthouse-v7.0.0-beta.0-x86_64-unknown-linux-gnu.tar.gz
+ elif [ "${{ matrix.client }}" == "prysm" ]; then
+ curl -L https://raw.githubusercontent.com/prysmaticlabs/prysm/master/prysm.sh -o $CL_DATA_DIR/prysm.sh
+ chmod +x $CL_DATA_DIR/prysm.sh
+ fi
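+ # generate the JWT secret shared by Erigon and the CL client for Engine API authentication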
+ openssl rand -hex 32 > $CL_DATA_DIR/jwt.hex
+
- name: Build Erigon
run: |
make erigon
@@ -41,7 +68,7 @@ jobs:
# Run Erigon, wait sync and check ability to maintain sync
python3 $ERIGON_QA_PATH/test_system/qa-tests/tip-tracking/run_and_check_tip_tracking.py \
- ${{ github.workspace }}/build/bin $ERIGON_DATA_DIR $TRACKING_TIME_SECONDS $TOTAL_TIME_SECONDS Erigon3 $CHAIN minimal_node no_statistics prysm
+ ${{ github.workspace }}/build/bin $ERIGON_DATA_DIR $TRACKING_TIME_SECONDS $TOTAL_TIME_SECONDS Erigon3 ${{ matrix.chain }} minimal_node no_statistics ${{ matrix.client }} $CL_DATA_DIR
# Capture monitoring script exit status
test_exit_status=$?
@@ -67,26 +94,33 @@ jobs:
--repo erigon \
--commit $(git rev-parse HEAD) \
--branch ${{ github.ref_name }} \
- --test_name sync-from-scratch-prysm-minimal-node \
- --chain $CHAIN \
+ --test_name sync-from-scratch-${{ matrix.client }}-minimal-node \
+ --chain ${{ matrix.chain }} \
--runner ${{ runner.name }} \
--outcome $TEST_RESULT \
- --result_file ${{ github.workspace }}/result-$CHAIN.json
+ --result_file ${{ github.workspace }}/result-${{ matrix.chain }}.json
- name: Upload test results
if: steps.test_step.outputs.test_executed == 'true'
uses: actions/upload-artifact@v4
with:
- name: test-results
+ name: test-results-${{ matrix.client }}-${{ matrix.chain }}
path: |
- ${{ github.workspace }}/result-${{ env.CHAIN }}.json
+ ${{ github.workspace }}/result-${{ matrix.chain }}.json
${{ github.workspace }}/erigon_data/logs/erigon.log
+ ${{ matrix.client == 'lighthouse' && 'consensus/data/beacon/logs/beacon.log' || '' }}
+ ${{ matrix.client == 'prysm' && 'consensus/data/beacon.log' || '' }}
- name: Clean up Erigon data directory
if: always()
run: |
rm -rf $ERIGON_DATA_DIR
+ - name: Clean up consensus client directory
+ if: always()
+ run: |
+ rm -rf $CL_DATA_DIR
+
- name: Resume the Erigon instance dedicated to db maintenance
run: |
python3 $ERIGON_QA_PATH/test_system/db-producer/resume_production.py || true
@@ -100,3 +134,4 @@ jobs:
run: |
echo "::error::Error detected during tests"
exit 1
+
diff --git a/.github/workflows/qa-tip-tracking-gnosis.yml b/.github/workflows/qa-tip-tracking-gnosis.yml
index 86519396bd5..2b909216169 100644
--- a/.github/workflows/qa-tip-tracking-gnosis.yml
+++ b/.github/workflows/qa-tip-tracking-gnosis.yml
@@ -8,6 +8,10 @@ on:
# - cron: '0 0 * * 1-6' # Run every night at 00:00 AM UTC except Sunday
workflow_dispatch: # Run manually
+concurrency:
+ group: ${{ github.workflow }}-${{ github.ref }}
+ cancel-in-progress: false
+
jobs:
tip-tracking-test:
runs-on: [self-hosted, Gnosis]
diff --git a/.github/workflows/qa-tip-tracking-polygon.yml b/.github/workflows/qa-tip-tracking-polygon.yml
index 90804298bec..8c0139020c6 100644
--- a/.github/workflows/qa-tip-tracking-polygon.yml
+++ b/.github/workflows/qa-tip-tracking-polygon.yml
@@ -8,10 +8,14 @@ on:
# - cron: '0 0 * * 1-6' # Run every night at 00:00 AM UTC except Sunday
workflow_dispatch: # Run manually
+concurrency:
+ group: ${{ github.workflow }}-${{ github.ref }}
+ cancel-in-progress: false
+
jobs:
tip-tracking-test:
runs-on: [self-hosted, Polygon]
- timeout-minutes: 600
+ timeout-minutes: 800
env:
ERIGON_REFERENCE_DATA_DIR: /opt/erigon-versions/reference-version/datadir
ERIGON_TESTBED_DATA_DIR: /opt/erigon-testbed/datadir
diff --git a/.github/workflows/qa-tip-tracking.yml b/.github/workflows/qa-tip-tracking.yml
index 15c22d5f372..a4b5d459462 100644
--- a/.github/workflows/qa-tip-tracking.yml
+++ b/.github/workflows/qa-tip-tracking.yml
@@ -8,12 +8,17 @@ on:
# - cron: '0 20 * * 1-6' # Run every night at 08:00 PM UTC except Sunday
workflow_dispatch: # Run manually
+concurrency:
+ group: ${{ github.workflow }}-${{ github.ref }}
+ cancel-in-progress: false
+
jobs:
tip-tracking-test:
runs-on: [self-hosted, Erigon3]
timeout-minutes: 600
env:
ERIGON_REFERENCE_DATA_DIR: /opt/erigon-versions/reference-version/datadir
+ ERIGON_TESTBED_AREA: /opt/erigon-testbed
ERIGON_QA_PATH: /home/qarunner/erigon-qa
TRACKING_TIME_SECONDS: 14400 # 4 hours
TOTAL_TIME_SECONDS: 28800 # 8 hours
@@ -36,9 +41,10 @@ jobs:
run: |
python3 $ERIGON_QA_PATH/test_system/db-producer/pause_production.py || true
- - name: Clean Erigon Chaindata Directory
+ - name: Save Erigon Chaindata Directory
+ id: save_chaindata_step
run: |
- rm -rf $ERIGON_REFERENCE_DATA_DIR/chaindata
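+ # move the reference chaindata aside so the restore step below can put it back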
+ mv $ERIGON_REFERENCE_DATA_DIR/chaindata $ERIGON_TESTBED_AREA/chaindata-prev
- name: Run Erigon, wait sync and check ability to maintain sync
id: test_step
@@ -115,12 +121,16 @@ jobs:
name: metric-plots
path: ${{ github.workspace }}/metrics-${{ env.CHAIN }}-plots*
- - name: Clean Erigon Chaindata Directory
- if: always()
+ - name: Restore Erigon Chaindata Directory
+ if: ${{ always() }}
run: |
- rm -rf $ERIGON_REFERENCE_DATA_DIR/chaindata
+ if [ -d "$ERIGON_TESTBED_AREA/chaindata-prev" ]; then
+ rm -rf $ERIGON_REFERENCE_DATA_DIR/chaindata
+ mv $ERIGON_TESTBED_AREA/chaindata-prev $ERIGON_REFERENCE_DATA_DIR/chaindata
+ fi
- name: Resume the Erigon instance dedicated to db maintenance
+ if: ${{ always() }}
run: |
python3 $ERIGON_QA_PATH/test_system/db-producer/resume_production.py || true
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index a08021342f9..fa79a0072e8 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -7,7 +7,7 @@ env:
TEST_TRACKING_TIME_SECONDS: 7200 # 2 hours
TEST_TOTAL_TIME_SECONDS: 432000 # 5 days
TEST_CHAIN: "bsc"
- BUILDER_IMAGE: "golang:1.22-bookworm"
+ BUILDER_IMAGE: "golang:1.23-bookworm"
DOCKER_BASE_IMAGE: "debian:12.8-slim"
APP_REPO: "node-real/bsc-erigon"
PACKAGE: "github.com/erigontech/erigon"
diff --git a/.github/workflows/scripts/run_rpc_tests.sh b/.github/workflows/scripts/run_rpc_tests.sh
index 8dd21aca744..83bb5e32da1 100755
--- a/.github/workflows/scripts/run_rpc_tests.sh
+++ b/.github/workflows/scripts/run_rpc_tests.sh
@@ -4,6 +4,8 @@ set +e # Disable exit on error
# Array of disabled tests
disabled_tests=(
+ # Failing after PR https://github.com/erigontech/erigon/pull/13903 - the only diff is an error message in the result
+ eth_estimateGas/test_14.json
# Failing after the PR https://github.com/erigontech/erigon/pull/13617 that fixed this incompatibility
# issues https://hive.pectra-devnet-5.ethpandaops.io/suite.html?suiteid=1738266984-51ae1a2f376e5de5e9ba68f034f80e32.json&suitename=rpc-compat
net_listening/test_1.json
diff --git a/ChangeLog.md b/ChangeLog.md
index cea63061f1d..96f761ff221 100644
--- a/ChangeLog.md
+++ b/ChangeLog.md
@@ -1,7 +1,32 @@
ChangeLog
---------
-## v3.0.0-beta2 (in development)
+## v3.0.0 (in development)
+
+**Improvements:**
+
+- Schedule Pectra for Chiado by @yperbasis in https://github.com/erigontech/erigon/pull/13898
+- stagedsync: dbg option to log receipts on receipts hash mismatch (#13905) by @taratorio in https://github.com/erigontech/erigon/pull/13940
+- Introduces a new method for estimating transaction gas that targets the maximum gas a contract could use (#13913). Fixes eth_estimateGas for historical blocks (#13903) by @somnathb1 in https://github.com/erigontech/erigon/pull/13916
+
+**Bugfixes:**
+
+- rpcdaemon: Show state sync transactions in eth_getLogs (#13924) by @shohamc1 in https://github.com/erigontech/erigon/pull/13951
+- polygon/heimdall: fix snapshot store last entity to check in snapshots too (#13845) by @taratorio in https://github.com/erigontech/erigon/pull/13938
+- Implemented wait if heimdall is not synced to the chain (#13807) by @taratorio in https://github.com/erigontech/erigon/pull/13939
+
+**Known Issues:**
+
+- polygon: `eth_getLogs` doesn't return state-sync events when searching by filters (state-sync events are not indexed yet); without filters state-sync events are visible, and they also appear in `eth_getReceipts`. Fixed files [will](https://github.com/erigontech/erigon/issues/14003) be released in E3.1
+- polygon: state-sync events returned by `eth_getLogs` have an incorrect `index` field. Fixed files [will](https://github.com/erigontech/erigon/issues/14003) be released in E3.1
+
+### TODO
+
+- milestones:
+  - https://github.com/erigontech/erigon/milestone/34
+  - https://github.com/erigontech/erigon/milestone/30
+
+## v3.0.0-beta2
### Breaking changes
- Reverts Optimize gas by default in eth_createAccessList #8337
diff --git a/cl/aggregation/pool_impl.go b/cl/aggregation/pool_impl.go
index df55512b9fc..55ca8654332 100644
--- a/cl/aggregation/pool_impl.go
+++ b/cl/aggregation/pool_impl.go
@@ -19,12 +19,12 @@ package aggregation
import (
"context"
"errors"
- "fmt"
"sync"
"time"
"github.com/Giulio2002/bls"
"github.com/erigontech/erigon-lib/common"
+ "github.com/erigontech/erigon-lib/log/v3"
"github.com/erigontech/erigon/cl/clparams"
"github.com/erigontech/erigon/cl/cltypes/solid"
"github.com/erigontech/erigon/cl/phase1/core/state/lru"
@@ -44,7 +44,7 @@ type aggregationPoolImpl struct {
netConfig *clparams.NetworkConfig
ethClock eth_clock.EthereumClock
aggregatesLock sync.RWMutex
- aggregates map[common.Hash]*solid.Attestation
+ aggregates map[common.Hash]*solid.Attestation // only used before the electra upgrade; not needed afterwards
// aggregationInCommittee is a cache for aggregation in committee, which is used after electra upgrade
aggregatesInCommittee *lru.CacheWithTTL[keyAggrInCommittee, *solid.Attestation]
}
@@ -78,68 +78,48 @@ func (p *aggregationPoolImpl) AddAttestation(inAtt *solid.Attestation) error {
if err != nil {
return err
}
- p.aggregatesLock.Lock()
- defer p.aggregatesLock.Unlock()
- att, ok := p.aggregates[hashRoot]
- if !ok {
- p.aggregates[hashRoot] = inAtt.Copy()
- return nil
- }
-
- if utils.IsOverlappingSSZBitlist(att.AggregationBits.Bytes(), inAtt.AggregationBits.Bytes()) {
- // the on bit is already set, so ignore
- return ErrIsSuperset
- }
- // merge signature
- baseSig := att.Signature
- inSig := inAtt.Signature
- merged, err := blsAggregate([][]byte{baseSig[:], inSig[:]})
- if err != nil {
- return err
- }
- if len(merged) != 96 {
- return errors.New("merged signature is too long")
- }
- var mergedSig [96]byte
- copy(mergedSig[:], merged)
-
- epoch := p.ethClock.GetEpochAtSlot(att.Data.Slot)
+ epoch := p.ethClock.GetEpochAtSlot(inAtt.Data.Slot)
clversion := p.ethClock.StateVersionByEpoch(epoch)
if clversion.BeforeOrEqual(clparams.DenebVersion) {
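+ // pre-electra: aggregate attestations keyed by their data root, under the pool lock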
- // merge aggregation bits
- mergedBits, err := att.AggregationBits.Union(inAtt.AggregationBits)
- if err != nil {
- return err
+ p.aggregatesLock.Lock()
+ defer p.aggregatesLock.Unlock()
+ att, ok := p.aggregates[hashRoot]
+ if !ok {
+ p.aggregates[hashRoot] = inAtt.Copy()
+ return nil
}
- // update attestation
- p.aggregates[hashRoot] = &solid.Attestation{
- AggregationBits: mergedBits,
- Data: att.Data,
- Signature: mergedSig,
+
+ if utils.IsOverlappingSSZBitlist(att.AggregationBits.Bytes(), inAtt.AggregationBits.Bytes()) {
+ // the overlapping bits are already set, so ignore
+ return ErrIsSuperset
}
- } else {
- // Electra and after case
- aggrBitSize := p.beaconConfig.MaxCommitteesPerSlot * p.beaconConfig.MaxValidatorsPerCommittee
- mergedAggrBits, err := att.AggregationBits.Union(inAtt.AggregationBits)
+ // merge signature
+ baseSig := att.Signature
+ inSig := inAtt.Signature
+ merged, err := blsAggregate([][]byte{baseSig[:], inSig[:]})
if err != nil {
return err
}
- if mergedAggrBits.Cap() != int(aggrBitSize) {
- return fmt.Errorf("incorrect aggregation bits size: %d", mergedAggrBits.Cap())
+ if len(merged) != 96 {
+ return errors.New("merged signature is too long")
}
- mergedCommitteeBits, err := att.CommitteeBits.Union(inAtt.CommitteeBits)
+ var mergedSig [96]byte
+ copy(mergedSig[:], merged)
+
+ // merge aggregation bits
+ mergedBits, err := att.AggregationBits.Merge(inAtt.AggregationBits)
if err != nil {
return err
}
+ // update attestation
p.aggregates[hashRoot] = &solid.Attestation{
- AggregationBits: mergedAggrBits,
- CommitteeBits: mergedCommitteeBits,
+ AggregationBits: mergedBits,
Data: att.Data,
Signature: mergedSig,
}
-
- // aggregate by committee
+ } else {
+ // Electra and later: aggregate by committee
p.aggregateByCommittee(inAtt)
}
return nil
@@ -166,9 +146,15 @@ func (p *aggregationPoolImpl) aggregateByCommittee(inAtt *solid.Attestation) err
return nil
}
- // merge aggregation bits and signature
- mergedAggrBits, err := att.AggregationBits.Union(inAtt.AggregationBits)
+ if utils.IsOverlappingSSZBitlist(att.AggregationBits.Bytes(), inAtt.AggregationBits.Bytes()) {
+ // the overlapping bits are already set, so ignore
+ return ErrIsSuperset
+ }
+
+ // It's fine to merge aggregation bits directly here, because the attestations are from the same committee
+ mergedAggrBits, err := att.AggregationBits.Merge(inAtt.AggregationBits)
if err != nil {
+ log.Debug("failed to merge aggregation bits", "err", err)
return err
}
merged, err := blsAggregate([][]byte{att.Signature[:], inAtt.Signature[:]})
diff --git a/cl/aggregation/pool_test.go b/cl/aggregation/pool_test.go
index 8dd22dbbd13..6b294b8cd93 100644
--- a/cl/aggregation/pool_test.go
+++ b/cl/aggregation/pool_test.go
@@ -40,22 +40,22 @@ var (
},
}
att1_1 = &solid.Attestation{
- AggregationBits: solid.BitlistFromBytes([]byte{0b00000001, 0, 0, 0}, 2048),
+ AggregationBits: solid.BitlistFromBytes([]byte{0b00000011}, 2048),
Data: attData1,
Signature: [96]byte{'a', 'b', 'c', 'd', 'e', 'f'},
}
att1_2 = &solid.Attestation{
- AggregationBits: solid.BitlistFromBytes([]byte{0b00000001, 0, 0, 0}, 2048),
+ AggregationBits: solid.BitlistFromBytes([]byte{0b00000011}, 2048),
Data: attData1,
Signature: [96]byte{'d', 'e', 'f', 'g', 'h', 'i'},
}
att1_3 = &solid.Attestation{
- AggregationBits: solid.BitlistFromBytes([]byte{0b00000100, 0, 0, 0}, 2048),
+ AggregationBits: solid.BitlistFromBytes([]byte{0b00001100}, 2048),
Data: attData1,
Signature: [96]byte{'g', 'h', 'i', 'j', 'k', 'l'},
}
att1_4 = &solid.Attestation{
- AggregationBits: solid.BitlistFromBytes([]byte{0b00100000, 0, 0, 0}, 2048),
+ AggregationBits: solid.BitlistFromBytes([]byte{0b01100000}, 2048),
Data: attData1,
Signature: [96]byte{'m', 'n', 'o', 'p', 'q', 'r'},
}
@@ -72,7 +72,7 @@ var (
},
}
att2_1 = &solid.Attestation{
- AggregationBits: solid.BitlistFromBytes([]byte{0b00000001, 0, 0, 0}, 2048),
+ AggregationBits: solid.BitlistFromBytes([]byte{0b00000001}, 2048),
Data: attData2,
Signature: [96]byte{'t', 'e', 's', 't', 'i', 'n'},
}
@@ -107,21 +107,21 @@ func (t *PoolTestSuite) TearDownTest() {
func (t *PoolTestSuite) TestAddAttestationElectra() {
cBits1 := solid.NewBitVector(64)
- cBits1.SetBitAt(0, true)
+ cBits1.SetBitAt(10, true)
cBits2 := solid.NewBitVector(64)
cBits2.SetBitAt(10, true)
expectedCommitteeBits := solid.NewBitVector(64)
- expectedCommitteeBits.SetBitAt(0, true)
+ expectedCommitteeBits.SetBitAt(10, true)
expectedCommitteeBits.SetBitAt(10, true)
att1 := &solid.Attestation{
- AggregationBits: solid.BitlistFromBytes([]byte{0b00000001, 0, 0, 0}, 2048*64),
+ AggregationBits: solid.BitlistFromBytes([]byte{0b00000011}, 2048*64),
Data: attData1,
Signature: [96]byte{'a', 'b', 'c', 'd', 'e', 'f'},
CommitteeBits: cBits1,
}
att2 := &solid.Attestation{
- AggregationBits: solid.BitlistFromBytes([]byte{0b00000000, 0b00001000, 0, 0}, 2048*64),
+ AggregationBits: solid.BitlistFromBytes([]byte{0b00001100}, 2048*64),
Data: attData1,
Signature: [96]byte{'d', 'e', 'f', 'g', 'h', 'i'},
CommitteeBits: cBits2,
@@ -141,11 +141,11 @@ func (t *PoolTestSuite) TestAddAttestationElectra() {
},
hashRoot: attData1Root,
mockFunc: func() {
- t.mockEthClock.EXPECT().GetEpochAtSlot(gomock.Any()).Return(uint64(1)).Times(1)
- t.mockEthClock.EXPECT().StateVersionByEpoch(gomock.Any()).Return(clparams.ElectraVersion).Times(1)
+ t.mockEthClock.EXPECT().GetEpochAtSlot(gomock.Any()).Return(uint64(1)).Times(2)
+ t.mockEthClock.EXPECT().StateVersionByEpoch(gomock.Any()).Return(clparams.ElectraVersion).Times(2)
},
expect: &solid.Attestation{
- AggregationBits: solid.BitlistFromBytes([]byte{0b0000001, 0b00001000, 0, 0}, 2048*64),
+ AggregationBits: solid.BitlistFromBytes([]byte{0b00001101}, 2048*64),
Data: attData1,
Signature: mockAggrResult,
CommitteeBits: expectedCommitteeBits,
@@ -162,9 +162,7 @@ func (t *PoolTestSuite) TestAddAttestationElectra() {
for i := range tc.atts {
pool.AddAttestation(tc.atts[i])
}
- att := pool.GetAggregatationByRoot(tc.hashRoot)
- //h1, _ := tc.expect.HashSSZ()
- //h2, _ := att.HashSSZ()
+ att := pool.GetAggregatationByRootAndCommittee(tc.hashRoot, 10)
t.Equal(tc.expect, att, tc.name)
}
}
@@ -184,7 +182,11 @@ func (t *PoolTestSuite) TestAddAttestation() {
att2_1,
},
hashRoot: attData1Root,
- expect: att1_1,
+ mockFunc: func() {
+ t.mockEthClock.EXPECT().GetEpochAtSlot(gomock.Any()).Return(uint64(1)).AnyTimes()
+ t.mockEthClock.EXPECT().StateVersionByEpoch(gomock.Any()).Return(clparams.DenebVersion).AnyTimes()
+ },
+ expect: att1_1,
},
{
name: "att1_2 is a super set of att1_1. skip att1_1",
@@ -194,7 +196,11 @@ func (t *PoolTestSuite) TestAddAttestation() {
att2_1, // none of its business
},
hashRoot: attData1Root,
- expect: att1_2,
+ mockFunc: func() {
+ t.mockEthClock.EXPECT().GetEpochAtSlot(gomock.Any()).Return(uint64(1)).AnyTimes()
+ t.mockEthClock.EXPECT().StateVersionByEpoch(gomock.Any()).Return(clparams.DenebVersion).AnyTimes()
+ },
+ expect: att1_2,
},
{
name: "merge att1_2, att1_3, att1_4",
@@ -209,7 +215,7 @@ func (t *PoolTestSuite) TestAddAttestation() {
t.mockEthClock.EXPECT().StateVersionByEpoch(gomock.Any()).Return(clparams.DenebVersion).AnyTimes()
},
expect: &solid.Attestation{
- AggregationBits: solid.BitlistFromBytes([]byte{0b00100101, 0, 0, 0}, 2048),
+ AggregationBits: solid.BitlistFromBytes([]byte{0b01100101}, 2048),
Data: attData1,
Signature: mockAggrResult,
},
@@ -226,8 +232,6 @@ func (t *PoolTestSuite) TestAddAttestation() {
pool.AddAttestation(tc.atts[i])
}
att := pool.GetAggregatationByRoot(tc.hashRoot)
- //h1, _ := tc.expect.HashSSZ()
- //h2, _ := att.HashSSZ()
t.Equal(tc.expect, att, tc.name)
}
}
diff --git a/cl/beacon/handler/block_production.go b/cl/beacon/handler/block_production.go
index a1a43c00bd8..fbf778b0e0f 100644
--- a/cl/beacon/handler/block_production.go
+++ b/cl/beacon/handler/block_production.go
@@ -352,6 +352,7 @@ func (a *ApiHandler) GetEthV3ValidatorBlock(
"proposerIndex", block.ProposerIndex,
"slot", targetSlot,
"state_root", block.StateRoot,
+ "attestations", block.BeaconBody.Attestations.Len(),
"execution_value", block.GetExecutionValue().Uint64(),
"version", block.Version(),
"blinded", block.IsBlinded(),
@@ -624,11 +625,11 @@ func (a *ApiHandler) produceBeaconBody(
retryTime := 10 * time.Millisecond
secsDiff := (targetSlot - baseBlock.Slot) * a.beaconChainCfg.SecondsPerSlot
feeRecipient, _ := a.validatorParams.GetFeeRecipient(proposerIndex)
- var withdrawals []*types.Withdrawal
clWithdrawals, _ := state.ExpectedWithdrawals(
baseState,
targetSlot/a.beaconChainCfg.SlotsPerEpoch,
)
+ withdrawals := []*types.Withdrawal{}
for _, w := range clWithdrawals {
withdrawals = append(withdrawals, &types.Withdrawal{
Index: w.Index,
@@ -1067,7 +1068,6 @@ func (a *ApiHandler) parseRequestBeaconBlock(
if err := json.NewDecoder(r.Body).Decode(block); err != nil {
return nil, err
}
- block.SignedBlock.Block.SetVersion(version)
return block, nil
case "application/octet-stream":
octect, err := io.ReadAll(r.Body)
@@ -1077,7 +1077,6 @@ func (a *ApiHandler) parseRequestBeaconBlock(
if err := block.DecodeSSZ(octect, int(version)); err != nil {
return nil, err
}
- block.SignedBlock.Block.SetVersion(version)
return block, nil
}
return nil, errors.New("invalid content type")
@@ -1216,12 +1215,7 @@ type attestationCandidate struct {
func (a *ApiHandler) findBestAttestationsForBlockProduction(
s abstract.BeaconState,
) *solid.ListSSZ[*solid.Attestation] {
- currentVersion := s.Version()
- aggBitsSize := int(a.beaconChainCfg.MaxValidatorsPerCommittee)
- if currentVersion.AfterOrEqual(clparams.ElectraVersion) {
- aggBitsSize = int(a.beaconChainCfg.MaxValidatorsPerCommittee *
- a.beaconChainCfg.MaxCommitteesPerSlot)
- }
+ stateVersion := s.Version()
// Group attestations by their data root
hashToAtts := make(map[libcommon.Hash][]*solid.Attestation)
for _, candidate := range a.operationsPool.AttestationsPool.Raw() {
@@ -1230,7 +1224,7 @@ func (a *ApiHandler) findBestAttestationsForBlockProduction(
}
attVersion := a.beaconChainCfg.GetCurrentStateVersion(candidate.Data.Slot / a.beaconChainCfg.SlotsPerEpoch)
- if currentVersion.AfterOrEqual(clparams.ElectraVersion) &&
+ if stateVersion.AfterOrEqual(clparams.ElectraVersion) &&
attVersion.Before(clparams.ElectraVersion) {
// Because the on chain Attestation container changes, attestations from the prior fork can’t be included
// into post-electra blocks. Therefore the first block after the fork may have zero attestations.
@@ -1252,7 +1246,8 @@ func (a *ApiHandler) findBestAttestationsForBlockProduction(
candidateAggregationBits := candidate.AggregationBits.Bytes()
for _, curAtt := range hashToAtts[dataRoot] {
currAggregationBitsBytes := curAtt.AggregationBits.Bytes()
- if !utils.IsOverlappingSSZBitlist(currAggregationBitsBytes, candidateAggregationBits) {
+ if stateVersion <= clparams.DenebVersion &&
+ !utils.IsOverlappingSSZBitlist(currAggregationBitsBytes, candidateAggregationBits) {
// merge signatures
candidateSig := candidate.Signature
curSig := curAtt.Signature
@@ -1262,24 +1257,38 @@ func (a *ApiHandler) findBestAttestationsForBlockProduction(
continue
}
// merge aggregation bits
- mergedAggBits := solid.NewBitList(0, aggBitsSize)
- for i := 0; i < len(currAggregationBitsBytes); i++ {
- mergedAggBits.Append(currAggregationBitsBytes[i] | candidateAggregationBits[i])
+ mergedAggBits, err := curAtt.AggregationBits.Merge(candidate.AggregationBits)
+ if err != nil {
+ log.Warn("[Block Production] Cannot merge aggregation bits", "err", err)
+ continue
}
var buf [96]byte
copy(buf[:], mergeSig)
curAtt.Signature = buf
curAtt.AggregationBits = mergedAggBits
- if attVersion.AfterOrEqual(clparams.ElectraVersion) {
- // merge committee_bits for electra
- mergedCommitteeBits, err := curAtt.CommitteeBits.Union(candidate.CommitteeBits)
- if err != nil {
- log.Warn("[Block Production] Cannot merge committee bits", "err", err)
- continue
- }
- curAtt.CommitteeBits = mergedCommitteeBits
+ mergeAny = true
+ }
+ if stateVersion >= clparams.ElectraVersion {
+ // merge in electra way
+ mergedAggrBits, ok := a.tryMergeAggregationBits(s, curAtt, candidate)
+ if !ok {
+ continue
}
-
+ mergedCommitteeBits, err := curAtt.CommitteeBits.Union(candidate.CommitteeBits)
+ if err != nil {
+ continue
+ }
+ // merge signatures
+ candidateSig := candidate.Signature
+ curSig := curAtt.Signature
+ mergeSig, err := bls.AggregateSignatures([][]byte{candidateSig[:], curSig[:]})
+ if err != nil {
+ log.Warn("[Block Production] Cannot merge signatures", "err", err)
+ continue
+ }
+ curAtt.AggregationBits = mergedAggrBits
+ curAtt.CommitteeBits = mergedCommitteeBits
+ copy(curAtt.Signature[:], mergeSig)
mergeAny = true
}
}
@@ -1327,6 +1336,74 @@ func (a *ApiHandler) findBestAttestationsForBlockProduction(
return ret
}
+func (a *ApiHandler) tryMergeAggregationBits(state abstract.BeaconState, att1, att2 *solid.Attestation) (*solid.BitList, bool) {
+ // after the electra fork, aggregation_bits contains only the attester bitmaps of the committees appearing in committee_bits
+ // ref: https://github.com/ethereum/consensus-specs/blob/dev/specs/electra/validator.md#attestations
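+ // the merged bitlist is the concatenation of per-committee bit segments, ordered by ascending committee index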
+ slot := att1.Data.Slot
+ committees1 := att1.CommitteeBits.GetOnIndices()
+ committees2 := att2.CommitteeBits.GetOnIndices()
+ bitSlice := solid.NewBitSlice()
+ index1, index2 := 0, 0
+ committeeOffset1, committeeOffset2 := 0, 0
+
+ // appendBits is a helper func to append the aggregation bits of the committee to the bitSlice
+ appendBits := func(bitSlice *solid.BitSlice, committeeIndex int, att *solid.Attestation, offset int) (*solid.BitSlice, int) {
+ members, err := state.GetBeaconCommitee(slot, uint64(committeeIndex))
+ if err != nil {
+ log.Warn("[Block Production] Cannot get committee members", "err", err)
+ return nil, 0
+ }
+ for i := range members {
+ bitSlice.AppendBit(att.AggregationBits.GetBitAt(offset + i))
+ }
+ return bitSlice, offset + len(members)
+ }
+
+ // two-pointer walk over both sorted committee lists, similar to merge sort
+ for index1 < len(committees1) || index2 < len(committees2) {
+ if index1 < len(committees1) && index2 < len(committees2) {
+ if committees1[index1] < committees2[index2] {
+ bitSlice, committeeOffset1 = appendBits(bitSlice, committees1[index1], att1, committeeOffset1)
+ index1++
+ } else if committees1[index1] > committees2[index2] {
+ bitSlice, committeeOffset2 = appendBits(bitSlice, committees2[index2], att2, committeeOffset2)
+ index2++
+ } else {
+ // check overlapping when the committee is the same
+ members, err := state.GetBeaconCommitee(slot, uint64(committees1[index1]))
+ if err != nil {
+ log.Warn("[Block Production] Cannot get committee members", "err", err)
+ return nil, false
+ }
+ bits1 := att1.AggregationBits
+ bits2 := att2.AggregationBits
+ for i := range members {
+ if bits1.GetBitAt(committeeOffset1+i) && bits2.GetBitAt(committeeOffset2+i) {
+ // overlapping
+ return nil, false
+ } else {
+ bitSlice.AppendBit(bits1.GetBitAt(committeeOffset1+i) || bits2.GetBitAt(committeeOffset2+i))
+ }
+ }
+ committeeOffset1 += len(members)
+ committeeOffset2 += len(members)
+ index1++
+ index2++
+ }
+ } else if index1 < len(committees1) {
+ bitSlice, committeeOffset1 = appendBits(bitSlice, committees1[index1], att1, committeeOffset1)
+ index1++
+ } else {
+ bitSlice, committeeOffset2 = appendBits(bitSlice, committees2[index2], att2, committeeOffset2)
+ index2++
+ }
+ }
+
+ bitSlice.AppendBit(true) // mark the end of the bitlist
+ mergedAggregationBits := solid.BitlistFromBytes(bitSlice.Bytes(), int(a.beaconChainCfg.MaxCommitteesPerSlot)*int(a.beaconChainCfg.MaxValidatorsPerCommittee))
+ return mergedAggregationBits, true
+}
+
// computeAttestationReward computes the reward for a specific attestation.
func computeAttestationReward(
s abstract.BeaconState,
diff --git a/cl/beacon/handler/committees.go b/cl/beacon/handler/committees.go
index 2a2b324b546..3c27eb16ac3 100644
--- a/cl/beacon/handler/committees.go
+++ b/cl/beacon/handler/committees.go
@@ -91,8 +91,8 @@ func (a *ApiHandler) getCommittees(w http.ResponseWriter, r *http.Request) (*bea
if a.forkchoiceStore.LowestAvailableSlot() <= slot {
// non-finality case
if err := a.syncedData.ViewHeadState(func(s *state.CachingBeaconState) error {
- if epoch > state.Epoch(s)+1 {
- return beaconhttp.NewEndpointError(http.StatusBadRequest, fmt.Errorf("epoch %d is too far in the future", epoch))
+ if epoch > state.Epoch(s)+maxEpochsLookaheadForDuties {
+ return beaconhttp.NewEndpointError(http.StatusBadRequest, fmt.Errorf("committees: epoch %d is too far in the future", epoch))
}
// get active validator indicies
committeeCount := s.CommitteeCount(epoch)
diff --git a/cl/beacon/handler/duties_attester.go b/cl/beacon/handler/duties_attester.go
index 84a47232a84..a68377640e6 100644
--- a/cl/beacon/handler/duties_attester.go
+++ b/cl/beacon/handler/duties_attester.go
@@ -28,6 +28,8 @@ import (
"github.com/erigontech/erigon/cl/phase1/core/state"
)
+const maxEpochsLookaheadForDuties = 32 // maximum number of epochs ahead of the head for which duties may be requested
+
type attesterDutyResponse struct {
Pubkey libcommon.Bytes48 `json:"pubkey"`
ValidatorIndex uint64 `json:"validator_index,string"`
@@ -52,7 +54,7 @@ func (a *ApiHandler) getDependentRoot(epoch uint64, attester bool) (libcommon.Ha
dependentRoot = a.syncedData.HeadRoot()
return nil
}
- maxIterations := 2048
+ maxIterations := int(maxEpochsLookaheadForDuties * 2 * a.beaconChainCfg.SlotsPerEpoch)
for i := 0; i < maxIterations; i++ {
if dependentRootSlot > epoch*a.beaconChainCfg.SlotsPerEpoch {
return nil
@@ -107,8 +109,8 @@ func (a *ApiHandler) getAttesterDuties(w http.ResponseWriter, r *http.Request) (
if a.forkchoiceStore.LowestAvailableSlot() <= epoch*a.beaconChainCfg.SlotsPerEpoch {
// non-finality case
if err := a.syncedData.ViewHeadState(func(s *state.CachingBeaconState) error {
- if epoch > state.Epoch(s)+3 {
- return beaconhttp.NewEndpointError(http.StatusBadRequest, fmt.Errorf("epoch %d is too far in the future", epoch))
+ if epoch > state.Epoch(s)+maxEpochsLookaheadForDuties {
+ return beaconhttp.NewEndpointError(http.StatusBadRequest, fmt.Errorf("attestation duties: epoch %d is too far in the future", epoch))
}
// get active validator indicies
@@ -160,7 +162,7 @@ func (a *ApiHandler) getAttesterDuties(w http.ResponseWriter, r *http.Request) (
return nil, err
}
if (epoch)*a.beaconChainCfg.SlotsPerEpoch >= stageStateProgress {
- return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, fmt.Errorf("epoch %d is too far in the future", epoch))
+ return nil, beaconhttp.NewEndpointError(http.StatusBadRequest, fmt.Errorf("attestation duties: epoch %d is not yet reconstructed", epoch))
}
snRoTx := a.caplinStateSnapshots.View()
diff --git a/cl/beacon/handler/handler.go b/cl/beacon/handler/handler.go
index 34164a9c857..01f5d048b51 100644
--- a/cl/beacon/handler/handler.go
+++ b/cl/beacon/handler/handler.go
@@ -36,7 +36,6 @@ import (
"github.com/erigontech/erigon/cl/clparams"
"github.com/erigontech/erigon/cl/cltypes"
"github.com/erigontech/erigon/cl/cltypes/solid"
- "github.com/erigontech/erigon/cl/monitor"
"github.com/erigontech/erigon/cl/persistence/blob_storage"
"github.com/erigontech/erigon/cl/persistence/state/historical_states_reader"
"github.com/erigontech/erigon/cl/phase1/core/state/lru"
@@ -110,7 +109,6 @@ type ApiHandler struct {
blsToExecutionChangeService services.BLSToExecutionChangeService
proposerSlashingService services.ProposerSlashingService
builderClient builder.BuilderClient
- validatorsMonitor monitor.ValidatorMonitor
enableMemoizedHeadState bool
}
@@ -145,7 +143,6 @@ func NewApiHandler(
blsToExecutionChangeService services.BLSToExecutionChangeService,
proposerSlashingService services.ProposerSlashingService,
builderClient builder.BuilderClient,
- validatorMonitor monitor.ValidatorMonitor,
caplinStateSnapshots *snapshotsync.CaplinStateSnapshots,
enableMemoizedHeadState bool,
) *ApiHandler {
@@ -195,7 +192,6 @@ func NewApiHandler(
blsToExecutionChangeService: blsToExecutionChangeService,
proposerSlashingService: proposerSlashingService,
builderClient: builderClient,
- validatorsMonitor: validatorMonitor,
enableMemoizedHeadState: enableMemoizedHeadState,
}
}
diff --git a/cl/beacon/handler/utils_test.go b/cl/beacon/handler/utils_test.go
index 6198ab3cd61..1399d04fc66 100644
--- a/cl/beacon/handler/utils_test.go
+++ b/cl/beacon/handler/utils_test.go
@@ -39,7 +39,6 @@ import (
"github.com/erigontech/erigon/cl/clparams/initial_state"
"github.com/erigontech/erigon/cl/cltypes"
"github.com/erigontech/erigon/cl/cltypes/solid"
- mockMonitor "github.com/erigontech/erigon/cl/monitor/mock_services"
"github.com/erigontech/erigon/cl/persistence/blob_storage"
state_accessors "github.com/erigontech/erigon/cl/persistence/state"
"github.com/erigontech/erigon/cl/persistence/state/historical_states_reader"
@@ -117,7 +116,6 @@ func setupTestingHandler(t *testing.T, v clparams.StateVersion, logger log.Logge
voluntaryExitService := mock_services.NewMockVoluntaryExitService(ctrl)
blsToExecutionChangeService := mock_services.NewMockBLSToExecutionChangeService(ctrl)
proposerSlashingService := mock_services.NewMockProposerSlashingService(ctrl)
- mockValidatorMonitor := mockMonitor.NewMockValidatorMonitor(ctrl)
// ctx context.Context, subnetID *uint64, msg *cltypes.SyncCommitteeMessage) error
syncCommitteeMessagesService.EXPECT().ProcessMessage(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn(func(ctx context.Context, subnetID *uint64, msg *services.SyncCommitteeMessageForGossip) error {
@@ -143,7 +141,6 @@ func setupTestingHandler(t *testing.T, v clparams.StateVersion, logger log.Logge
opPool.ProposerSlashingsPool.Insert(pool.ComputeKeyForProposerSlashing(msg), msg)
return nil
}).AnyTimes()
- mockValidatorMonitor.EXPECT().ObserveValidator(gomock.Any()).AnyTimes()
vp = validator_params.NewValidatorParams()
h = NewApiHandler(
@@ -176,7 +173,6 @@ func setupTestingHandler(t *testing.T, v clparams.StateVersion, logger log.Logge
blsToExecutionChangeService,
proposerSlashingService,
nil,
- mockValidatorMonitor,
nil,
false,
) // TODO: add tests
diff --git a/cl/beacon/handler/validator_registration.go b/cl/beacon/handler/validator_registration.go
index 163439f0fec..28bd190e624 100644
--- a/cl/beacon/handler/validator_registration.go
+++ b/cl/beacon/handler/validator_registration.go
@@ -38,7 +38,6 @@ func (a *ApiHandler) PostEthV1ValidatorPrepareBeaconProposal(w http.ResponseWrit
for _, v := range req {
a.logger.Trace("[Caplin] Registered new proposer", "index", v.ValidatorIndex, "fee_recipient", v.FeeRecipient.String())
a.validatorParams.SetFeeRecipient(v.ValidatorIndex, v.FeeRecipient)
- a.validatorsMonitor.ObserveValidator(v.ValidatorIndex)
}
w.WriteHeader(http.StatusOK)
}
diff --git a/cl/beacon/handler/validator_test.go b/cl/beacon/handler/validator_test.go
index 0a8bcb31f54..638c4d51873 100644
--- a/cl/beacon/handler/validator_test.go
+++ b/cl/beacon/handler/validator_test.go
@@ -75,7 +75,6 @@ func (t *validatorTestSuite) SetupTest() {
nil,
nil,
nil,
- nil,
false,
)
t.gomockCtrl = gomockCtrl
diff --git a/cl/clparams/config.go b/cl/clparams/config.go
index 715e3c152ef..ef4ec905347 100644
--- a/cl/clparams/config.go
+++ b/cl/clparams/config.go
@@ -197,7 +197,6 @@ type NetworkConfig struct {
AttestationSubnetCount uint64 `yaml:"ATTESTATION_SUBNET_COUNT" json:"ATTESTATION_SUBNET_COUNT,string"` // The number of attestation subnets used in the gossipsub protocol.
AttestationPropagationSlotRange uint64 `yaml:"ATTESTATION_PROPAGATION_SLOT_RANGE" json:"ATTESTATION_PROPAGATION_SLOT_RANGE,string"` // The maximum number of slots during which an attestation can be propagated.
AttestationSubnetPrefixBits uint64 `yaml:"ATTESTATION_SUBNET_PREFIX_BITS" json:"ATTESTATION_SUBNET_PREFIX_BITS,string"` // The number of bits in the subnet prefix.
- BlobSidecarSubnetCount uint64 `yaml:"BLOB_SIDECAR_SUBNET_COUNT" json:"BLOB_SIDECAR_SUBNET_COUNT,string"` // The number of blob sidecar subnets used in the gossipsub protocol.
MessageDomainInvalidSnappy ConfigHex4Bytes `yaml:"-" json:"MESSAGE_DOMAIN_INVALID_SNAPPY"` // 4-byte domain for gossip message-id isolation of invalid snappy messages
MessageDomainValidSnappy ConfigHex4Bytes `yaml:"-" json:"MESSAGE_DOMAIN_VALID_SNAPPY"` // 4-byte domain for gossip message-id isolation of valid snappy messages
MaximumGossipClockDisparity ConfigDurationMSec `yaml:"-" json:"MAXIMUM_GOSSIP_CLOCK_DISPARITY_MILLIS"` // The maximum milliseconds of clock disparity assumed between honest nodes.
@@ -210,8 +209,8 @@ type NetworkConfig struct {
SyncCommsSubnetKey string `yaml:"-" json:"-"` // SyncCommsSubnetKey is the ENR key of the sync committee subnet bitfield in the enr.
MinimumPeersInSubnetSearch uint64 `yaml:"-" json:"-"` // PeersInSubnetSearch is the required amount of peers that we need to be able to lookup in a subnet search.
- BootNodes []string
- StaticPeers []string
+ BootNodes []string `yaml:"-" json:"-"`
+ StaticPeers []string `yaml:"-" json:"-"`
}
var NetworkConfigs map[NetworkType]NetworkConfig = map[NetworkType]NetworkConfig{
@@ -222,7 +221,6 @@ var NetworkConfigs map[NetworkType]NetworkConfig = map[NetworkType]NetworkConfig
AttestationSubnetCount: 64,
AttestationPropagationSlotRange: 32,
AttestationSubnetPrefixBits: 6,
- BlobSidecarSubnetCount: 6,
TtfbTimeout: ConfigDurationSec(ReqTimeout),
RespTimeout: ConfigDurationSec(RespTimeout),
MaximumGossipClockDisparity: ConfigDurationMSec(500 * time.Millisecond),
@@ -242,7 +240,6 @@ var NetworkConfigs map[NetworkType]NetworkConfig = map[NetworkType]NetworkConfig
AttestationSubnetCount: 64,
AttestationPropagationSlotRange: 32,
AttestationSubnetPrefixBits: 6,
- BlobSidecarSubnetCount: 6,
TtfbTimeout: ConfigDurationSec(ReqTimeout),
RespTimeout: ConfigDurationSec(RespTimeout),
MaximumGossipClockDisparity: ConfigDurationMSec(500 * time.Millisecond),
@@ -262,7 +259,6 @@ var NetworkConfigs map[NetworkType]NetworkConfig = map[NetworkType]NetworkConfig
AttestationSubnetCount: 64,
AttestationPropagationSlotRange: 32,
AttestationSubnetPrefixBits: 6,
- BlobSidecarSubnetCount: 6,
TtfbTimeout: ConfigDurationSec(ReqTimeout),
RespTimeout: ConfigDurationSec(RespTimeout),
MaximumGossipClockDisparity: ConfigDurationMSec(500 * time.Millisecond),
@@ -282,7 +278,6 @@ var NetworkConfigs map[NetworkType]NetworkConfig = map[NetworkType]NetworkConfig
AttestationSubnetCount: 64,
AttestationPropagationSlotRange: 32,
AttestationSubnetPrefixBits: 6,
- BlobSidecarSubnetCount: 6,
TtfbTimeout: ConfigDurationSec(ReqTimeout),
RespTimeout: ConfigDurationSec(RespTimeout),
MaximumGossipClockDisparity: ConfigDurationMSec(500 * time.Millisecond),
@@ -302,7 +297,6 @@ var NetworkConfigs map[NetworkType]NetworkConfig = map[NetworkType]NetworkConfig
AttestationSubnetCount: 64,
AttestationPropagationSlotRange: 32,
AttestationSubnetPrefixBits: 6,
- BlobSidecarSubnetCount: 6,
TtfbTimeout: ConfigDurationSec(ReqTimeout),
RespTimeout: ConfigDurationSec(RespTimeout),
MaximumGossipClockDisparity: ConfigDurationMSec(500 * time.Millisecond),
@@ -533,6 +527,8 @@ type BeaconChainConfig struct {
DenebForkEpoch uint64 `yaml:"DENEB_FORK_EPOCH" spec:"true" json:"DENEB_FORK_EPOCH,string"` // DenebForkEpoch is used to represent the assigned fork epoch for Deneb.
ElectraForkVersion ConfigForkVersion `yaml:"ELECTRA_FORK_VERSION" spec:"true" json:"ELECTRA_FORK_VERSION"` // ElectraForkVersion is used to represent the fork version for Electra.
ElectraForkEpoch uint64 `yaml:"ELECTRA_FORK_EPOCH" spec:"true" json:"ELECTRA_FORK_EPOCH,string"` // ElectraForkEpoch is used to represent the assigned fork epoch for Electra.
+ FuluForkVersion ConfigForkVersion `yaml:"FULU_FORK_VERSION" spec:"true" json:"FULU_FORK_VERSION"` // FuluForkVersion is used to represent the fork version for Fulu.
+ FuluForkEpoch uint64 `yaml:"FULU_FORK_EPOCH" spec:"true" json:"FULU_FORK_EPOCH,string"` // FuluForkEpoch is used to represent the assigned fork epoch for Fulu.
ForkVersionSchedule map[libcommon.Bytes4]VersionScheduleEntry `json:"-"` // Schedule of fork epochs by version.
@@ -583,8 +579,10 @@ type BeaconChainConfig struct {
MaxBuilderConsecutiveMissedSlots uint64 `json:"-"` // MaxBuilderConsecutiveMissedSlots defines the number of consecutive skip slot to fallback from using relay/builder to local execution engine for block construction.
MaxBuilderEpochMissedSlots uint64 `json:"-"` // MaxBuilderEpochMissedSlots is defines the number of total skip slot (per epoch rolling windows) to fallback from using relay/builder to local execution engine for block construction.
- MaxBlobGasPerBlock uint64 `yaml:"MAX_BLOB_GAS_PER_BLOCK" json:"MAX_BLOB_GAS_PER_BLOCK,string"` // MaxBlobGasPerBlock defines the maximum gas limit for blob sidecar per block.
- MaxBlobsPerBlock uint64 `yaml:"MAX_BLOBS_PER_BLOCK" json:"MAX_BLOBS_PER_BLOCK,string"` // MaxBlobsPerBlock defines the maximum number of blobs per block.
+ MaxBlobGasPerBlock uint64 `yaml:"MAX_BLOB_GAS_PER_BLOCK" json:"MAX_BLOB_GAS_PER_BLOCK,string"` // MaxBlobGasPerBlock defines the maximum gas limit for blob sidecar per block.
+ MaxBlobsPerBlock uint64 `yaml:"MAX_BLOBS_PER_BLOCK" json:"MAX_BLOBS_PER_BLOCK,string"` // MaxBlobsPerBlock defines the maximum number of blobs per block.
+ BlobSidecarSubnetCount uint64 `yaml:"BLOB_SIDECAR_SUBNET_COUNT" json:"BLOB_SIDECAR_SUBNET_COUNT,string"` // BlobSidecarSubnetCount defines the number of blob sidecar subnets.
+
// Whisk
WhiskEpochsPerShufflingPhase uint64 `yaml:"WHISK_EPOCHS_PER_SHUFFLING_PHASE" spec:"true" json:"WHISK_EPOCHS_PER_SHUFFLING_PHASE,string"` // WhiskEpochsPerShufflingPhase defines the number of epochs per shuffling phase.
WhiskProposerSelectionGap uint64 `yaml:"WHISK_PROPOSER_SELECTION_GAP" spec:"true" json:"WHISK_PROPOSER_SELECTION_GAP,string"` // WhiskProposerSelectionGap defines the proposer selection gap.
@@ -612,6 +610,8 @@ type BeaconChainConfig struct {
PendingPartialWithdrawalsLimit uint64 `yaml:"PENDING_PARTIAL_WITHDRAWALS_LIMIT" spec:"true" json:"PENDING_PARTIAL_WITHDRAWALS_LIMIT,string"` // PendingPartialWithdrawalsLimit defines the maximum number of pending partial withdrawals.
PendingConsolidationsLimit uint64 `yaml:"PENDING_CONSOLIDATIONS_LIMIT" spec:"true" json:"PENDING_CONSOLIDATIONS_LIMIT,string"` // PendingConsolidationsLimit defines the maximum number of pending consolidations.
MaxBlobsPerBlockElectra uint64 `yaml:"MAX_BLOBS_PER_BLOCK_ELECTRA" spec:"true" json:"MAX_BLOBS_PER_BLOCK_ELECTRA,string"` // MaxBlobsPerBlockElectra defines the maximum number of blobs per block for Electra.
+ BlobSidecarSubnetCountElectra uint64 `yaml:"BLOB_SIDECAR_SUBNET_COUNT_ELECTRA" spec:"true" json:"BLOB_SIDECAR_SUBNET_COUNT_ELECTRA,string"` // BlobSidecarSubnetCountElectra defines the number of blob sidecar subnets for Electra.
+
// Constants for the Electra fork.
UnsetDepositRequestsStartIndex uint64 `yaml:"UNSET_DEPOSIT_REQUESTS_START_INDEX" spec:"true" json:"UNSET_DEPOSIT_REQUESTS_START_INDEX,string"` // UnsetDepositRequestsStartIndex defines the start index for unset deposit requests.
FullExitRequestAmount uint64 `yaml:"FULL_EXIT_REQUEST_AMOUNT" spec:"true" json:"FULL_EXIT_REQUEST_AMOUNT,string"` // FullExitRequestAmount defines the amount for a full exit request.
@@ -823,6 +823,8 @@ var MainnetBeaconConfig BeaconChainConfig = BeaconChainConfig{
DenebForkEpoch: 269568,
ElectraForkVersion: 0x05000000,
ElectraForkEpoch: math.MaxUint64,
+ FuluForkVersion: 0x06000000,
+ FuluForkEpoch: math.MaxUint64,
// New values introduced in Altair hard fork 1.
// Participation flag indices.
@@ -870,8 +872,9 @@ var MainnetBeaconConfig BeaconChainConfig = BeaconChainConfig{
MaxBuilderConsecutiveMissedSlots: 3,
MaxBuilderEpochMissedSlots: 8,
- MaxBlobGasPerBlock: 786432,
- MaxBlobsPerBlock: 6,
+ MaxBlobGasPerBlock: 786432,
+ MaxBlobsPerBlock: 6,
+ BlobSidecarSubnetCount: 6,
WhiskEpochsPerShufflingPhase: 256,
WhiskProposerSelectionGap: 2,
@@ -898,6 +901,7 @@ var MainnetBeaconConfig BeaconChainConfig = BeaconChainConfig{
PendingPartialWithdrawalsLimit: 1 << 27,
PendingConsolidationsLimit: 1 << 18,
MaxBlobsPerBlockElectra: 9,
+ BlobSidecarSubnetCountElectra: 9,
// Electra constants.
UnsetDepositRequestsStartIndex: ^uint64(0), // 2**64 - 1
FullExitRequestAmount: 0,
@@ -1021,20 +1025,23 @@ func gnosisConfig() BeaconChainConfig {
cfg.BellatrixForkVersion = 0x02000064
cfg.CapellaForkEpoch = 648704
cfg.CapellaForkVersion = 0x03000064
+ cfg.DenebForkEpoch = 889856
+ cfg.DenebForkVersion = 0x04000064
cfg.TerminalTotalDifficulty = "8626000000000000000000058750000000000000000000"
cfg.DepositContractAddress = "0x0B98057eA310F4d31F2a452B414647007d1645d9"
cfg.BaseRewardFactor = 25
cfg.SlotsPerEpoch = 16
cfg.EpochsPerSyncCommitteePeriod = 512
- cfg.DenebForkEpoch = 889856
- cfg.DenebForkVersion = 0x04000064
cfg.InactivityScoreRecoveryRate = 16
cfg.InactivityScoreBias = 4
cfg.MaxWithdrawalsPerPayload = 8
cfg.MaxValidatorsPerWithdrawalsSweep = 8192
cfg.MaxBlobsPerBlock = 2
+ cfg.MaxBlobsPerBlockElectra = 2
+ cfg.BlobSidecarSubnetCountElectra = 2
cfg.MinEpochsForBlobSidecarsRequests = 16384
cfg.MaxPerEpochActivationChurnLimit = 2
+ cfg.MaxPerEpochActivationExitChurnLimit = 64_000_000_000
cfg.InitializeForkSchedule()
return cfg
}
@@ -1061,6 +1068,8 @@ func chiadoConfig() BeaconChainConfig {
cfg.CapellaForkVersion = 0x0300006f
cfg.DenebForkEpoch = 516608
cfg.DenebForkVersion = 0x0400006f
+ cfg.ElectraForkEpoch = 948224
+ cfg.ElectraForkVersion = 0x0500006f
cfg.TerminalTotalDifficulty = "231707791542740786049188744689299064356246512"
cfg.DepositContractAddress = "0xb97036A26259B7147018913bD58a774cf91acf25"
cfg.BaseRewardFactor = 25
@@ -1069,8 +1078,11 @@ func chiadoConfig() BeaconChainConfig {
cfg.MaxWithdrawalsPerPayload = 8
cfg.MaxValidatorsPerWithdrawalsSweep = 8192
cfg.MaxBlobsPerBlock = 2
+ cfg.MaxBlobsPerBlockElectra = 2
+ cfg.BlobSidecarSubnetCountElectra = 2
cfg.MinEpochsForBlobSidecarsRequests = 16384
cfg.MaxPerEpochActivationChurnLimit = 2
+ cfg.MaxPerEpochActivationExitChurnLimit = 64_000_000_000
cfg.InitializeForkSchedule()
return cfg
}
@@ -1183,6 +1195,16 @@ func (b *BeaconChainConfig) MaxBlobsPerBlockByVersion(v StateVersion) uint64 {
panic("invalid version")
}
+func (b *BeaconChainConfig) BlobSidecarSubnetCountByVersion(v StateVersion) uint64 {
+ switch v {
+ case Phase0Version, AltairVersion, BellatrixVersion, CapellaVersion, DenebVersion:
+ return b.BlobSidecarSubnetCount
+ case ElectraVersion:
+ return b.BlobSidecarSubnetCountElectra
+ }
+ panic("invalid version")
+}
+
func (b *BeaconChainConfig) GetForkVersionByVersion(v StateVersion) uint32 {
switch v {
case Phase0Version:
diff --git a/cl/cltypes/beacon_block.go b/cl/cltypes/beacon_block.go
index 5d4e930ef79..86ed448fb5d 100644
--- a/cl/cltypes/beacon_block.go
+++ b/cl/cltypes/beacon_block.go
@@ -162,10 +162,6 @@ func (b *BeaconBlock) Version() clparams.StateVersion {
return b.Body.Version
}
-func (b *BeaconBlock) SetVersion(version clparams.StateVersion) {
- b.Body.SetVersion(version)
-}
-
func (b *BeaconBlock) EncodeSSZ(buf []byte) (dst []byte, err error) {
return ssz2.MarshalSSZ(buf, b.Slot, b.ProposerIndex, b.ParentRoot[:], b.StateRoot[:], b.Body)
}
@@ -247,8 +243,8 @@ func NewBeaconBody(beaconCfg *clparams.BeaconChainConfig, version clparams.State
)
if version.AfterOrEqual(clparams.ElectraVersion) {
// upgrade to electra
- maxAttSlashing = MaxAttesterSlashingsElectra
- maxAttestation = MaxAttestationsElectra
+ maxAttSlashing = int(beaconCfg.MaxAttesterSlashingsElectra)
+ maxAttestation = int(beaconCfg.MaxAttestationsElectra)
executionRequests = NewExecutionRequests(beaconCfg)
}
@@ -267,15 +263,6 @@ func NewBeaconBody(beaconCfg *clparams.BeaconChainConfig, version clparams.State
Version: version,
}
}
-func (b *BeaconBody) SetVersion(version clparams.StateVersion) {
- b.Version = version
- b.ExecutionPayload.SetVersion(version)
- if version.AfterOrEqual(clparams.ElectraVersion) {
- b.AttesterSlashings = solid.NewDynamicListSSZ[*AttesterSlashing](MaxAttesterSlashingsElectra)
- b.Attestations = solid.NewDynamicListSSZ[*solid.Attestation](MaxAttestationsElectra)
- b.ExecutionRequests = NewExecutionRequests(b.beaconCfg)
- }
-}
func (b *BeaconBody) EncodeSSZ(dst []byte) ([]byte, error) {
return ssz2.MarshalSSZ(dst, b.getSchema(false)...)
@@ -616,7 +603,7 @@ func (b *DenebBeaconBlock) GetParentRoot() libcommon.Hash {
}
func (b *DenebBeaconBlock) GetBody() GenericBeaconBody {
- return b.Block.GetBody()
+ return b.Block.Body
}
type DenebSignedBeaconBlock struct {
diff --git a/cl/cltypes/block_production.go b/cl/cltypes/block_production.go
index 197b5ef9150..d611164be21 100644
--- a/cl/cltypes/block_production.go
+++ b/cl/cltypes/block_production.go
@@ -69,7 +69,6 @@ func (b *BlindOrExecutionBeaconBlock) ToExecution() *DenebBeaconBlock {
}
DenebBeaconBlock := NewDenebBeaconBlock(b.Cfg, b.Version())
DenebBeaconBlock.Block = beaconBlock
- DenebBeaconBlock.Block.SetVersion(b.Version())
for _, kzgProof := range b.KzgProofs {
proof := KZGProof{}
copy(proof[:], kzgProof[:])
diff --git a/cl/cltypes/bls_to_execution_test.go b/cl/cltypes/bls_to_execution_test.go
index 9ba554eb1d3..bd55ff7e171 100644
--- a/cl/cltypes/bls_to_execution_test.go
+++ b/cl/cltypes/bls_to_execution_test.go
@@ -32,7 +32,7 @@ var (
)
func TestBLSToEL(t *testing.T) {
- decompressed, _ := utils.DecompressSnappy(serializedBlsToELSnappy)
+ decompressed, _ := utils.DecompressSnappy(serializedBlsToELSnappy, false)
obj := &cltypes.SignedBLSToExecutionChange{}
require.NoError(t, obj.DecodeSSZ(decompressed, 1))
root, err := obj.HashSSZ()
diff --git a/cl/cltypes/eth1_block.go b/cl/cltypes/eth1_block.go
index 5b4997684c1..111405cf4fd 100644
--- a/cl/cltypes/eth1_block.go
+++ b/cl/cltypes/eth1_block.go
@@ -110,10 +110,6 @@ func NewEth1BlockFromHeaderAndBody(header *types.Header, body *types.RawBody, be
return block
}
-func (b *Eth1Block) SetVersion(version clparams.StateVersion) {
- b.version = version
-}
-
func (*Eth1Block) Static() bool {
return false
}
diff --git a/cl/cltypes/historical_summary_test.go b/cl/cltypes/historical_summary_test.go
index 7a0b571b106..bf74c91fdb2 100644
--- a/cl/cltypes/historical_summary_test.go
+++ b/cl/cltypes/historical_summary_test.go
@@ -32,7 +32,7 @@ var (
)
func TestHistoricalSummary(t *testing.T) {
- decompressed, _ := utils.DecompressSnappy(serializedHistoricalSummarySnappy)
+ decompressed, _ := utils.DecompressSnappy(serializedHistoricalSummarySnappy, false)
obj := &cltypes.HistoricalSummary{}
require.NoError(t, obj.DecodeSSZ(decompressed, 0))
root, err := obj.HashSSZ()
diff --git a/cl/cltypes/solid/attestation.go b/cl/cltypes/solid/attestation.go
index 2205143b91a..aa366182f20 100644
--- a/cl/cltypes/solid/attestation.go
+++ b/cl/cltypes/solid/attestation.go
@@ -173,8 +173,8 @@ func (a *Attestation) UnmarshalJSON(data []byte) error {
// data: AttestationData
// signature: BLSSignature
type SingleAttestation struct {
- CommitteeIndex uint64 `json:"committee_index"`
- AttesterIndex uint64 `json:"attester_index"`
+ CommitteeIndex uint64 `json:"committee_index,string"`
+ AttesterIndex uint64 `json:"attester_index,string"`
Data *AttestationData `json:"data"`
Signature libcommon.Bytes96 `json:"signature"`
}
diff --git a/cl/cltypes/solid/bitlist.go b/cl/cltypes/solid/bitlist.go
index 33d10ee4f44..2bf1392f8ff 100644
--- a/cl/cltypes/solid/bitlist.go
+++ b/cl/cltypes/solid/bitlist.go
@@ -35,8 +35,6 @@ type BitList struct {
c int
// current length of the bitlist
l int
-
- hashBuf
}
// NewBitList creates a brand new BitList, just like when Zordon created the Power Rangers!
@@ -128,6 +126,7 @@ func (u *BitList) Set(index int, v byte) {
u.u[index] = v
}
+// removeMsb removes the most significant bit from the list, but doesn't change the length l.
func (u *BitList) removeMsb() {
for i := len(u.u) - 1; i >= 0; i-- {
if u.u[i] != 0 {
@@ -138,21 +137,26 @@ func (u *BitList) removeMsb() {
}
}
-func (u *BitList) addMsb() {
+// addMsb adds a most significant bit to the list and returns the byte length of the used part; it doesn't change the length l.
+func (u *BitList) addMsb() int {
+ byteLen := len(u.u)
for i := len(u.u) - 1; i >= 0; i-- {
if u.u[i] != 0 {
msb := bits.Len8(u.u[i])
- if msb == 7 {
+ if msb == 8 {
if i == len(u.u)-1 {
u.u = append(u.u, 0)
}
+ byteLen++
u.u[i+1] |= 1
} else {
- u.u[i] |= 1 << uint(msb+1)
+ u.u[i] |= 1 << uint(msb)
}
break
}
+ byteLen--
}
+ return byteLen
}
func (u *BitList) SetOnBit(bitIndex int) {
@@ -168,8 +172,8 @@ func (u *BitList) SetOnBit(bitIndex int) {
// set the bit
u.u[bitIndex/8] |= 1 << uint(bitIndex%8)
// set last bit
- u.addMsb()
- u.l = len(u.u)
+ byteLen := u.addMsb()
+ u.l = byteLen
}
// Length gives us the length of the bitlist, just like a roll call tells us how many Rangers there are.
@@ -219,7 +223,15 @@ func (u *BitList) Bits() int {
return 0
}
// The most significant bit is present in the last byte in the array.
- last := u.u[u.l-1]
+ var last byte
+ var byteLen int
+ for i := len(u.u) - 1; i >= 0; i-- {
+ if u.u[i] != 0 {
+ last = u.u[i]
+ byteLen = i + 1
+ break
+ }
+ }
// Determine the position of the most significant bit.
msb := bits.Len8(last)
@@ -230,7 +242,7 @@ func (u *BitList) Bits() int {
// The absolute position of the most significant bit will be the number of
// bits in the preceding bytes plus the position of the most significant
// bit. Subtract this value by 1 to determine the length of the bitlist.
- return 8*(u.l-1) + msb - 1
+ return 8*(byteLen-1) + msb - 1
}
func (u *BitList) MarshalJSON() ([]byte, error) {
@@ -249,7 +261,7 @@ func (u *BitList) UnmarshalJSON(input []byte) error {
return u.DecodeSSZ(hex, 0)
}
-func (u *BitList) Union(other *BitList) (*BitList, error) {
+func (u *BitList) Merge(other *BitList) (*BitList, error) {
if u.c != other.c {
return nil, errors.New("bitlist union: different capacity")
}
@@ -263,8 +275,48 @@ func (u *BitList) Union(other *BitList) (*BitList, error) {
unionFrom = other
}
// union
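+ // drop the trailing length (sentinel) bits before OR-ing, then restore them afterwards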
+ unionFrom.removeMsb()
+ ret.removeMsb()
for i := 0; i < unionFrom.l; i++ {
ret.u[i] |= unionFrom.u[i]
}
+ unionFrom.addMsb()
+ byteLen := ret.addMsb()
+ ret.l = byteLen
return ret, nil
}
+
+// BitSlice maintains a slice of bits with underlying byte slice.
+// It is just an auxiliary struct for merging BitLists.
+type BitSlice struct {
+ container []byte
+ length int
+}
+
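+// NewBitSlice creates an empty BitSlice.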
+func NewBitSlice() *BitSlice {
+ return &BitSlice{
+ container: make([]byte, 0),
+ length: 0,
+ }
+}
+
+// AppendBit appends one bit to the BitSlice.
+func (b *BitSlice) AppendBit(bit bool) {
+ if b.length%8 == 0 {
+ b.container = append(b.container, 0)
+ }
+ if bit {
+ b.container[b.length/8] |= 1 << uint(b.length%8)
+ }
+ b.length++
+}
+
+// Bytes returns the underlying byte slice of the BitSlice.
+func (b *BitSlice) Bytes() []byte {
+ return b.container
+}
+
+// Length returns the length of the BitSlice.
+func (b *BitSlice) Length() int {
+ return b.length
+}
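
Note: SSZ bitlists carry a sentinel bit one position above the payload; the highest set bit encodes the list length rather than data. Merge therefore strips both sentinels, ORs the payload bytes, and re-adds a single sentinel, which is what the removeMsb/addMsb pair above implements (and why the `msb == 8` fix matters: bits.Len8 returns 8, not 7, when bit 7 is set). A minimal single-byte sketch mirroring the updated test values, not a general implementation:

    package main

    import (
        "fmt"
        "math/bits"
    )

    // stripSentinel clears the highest set bit (the SSZ length marker).
    func stripSentinel(b []byte) []byte {
        out := append([]byte(nil), b...)
        for i := len(out) - 1; i >= 0; i-- {
            if out[i] != 0 {
                out[i] &^= 1 << uint(bits.Len8(out[i])-1)
                break
            }
        }
        return out
    }

    func main() {
        b1, b2 := []byte{0b11010000}, []byte{0b00001101}
        merged := stripSentinel(b1)[0] | stripSentinel(b2)[0]
        merged |= 1 << uint(bits.Len8(merged)) // re-add a sentinel above the payload
        fmt.Printf("%08b\n", merged)           // 11010101, matching the updated test
    }
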
diff --git a/cl/cltypes/solid/bitlist_test.go b/cl/cltypes/solid/bitlist_test.go
index 82344b320f0..24f438dea01 100644
--- a/cl/cltypes/solid/bitlist_test.go
+++ b/cl/cltypes/solid/bitlist_test.go
@@ -125,19 +125,32 @@ func TestBitListCap(t *testing.T) {
// Add more tests as needed for other functions in the BitList struct.
-func TestBitlistUnion(t *testing.T) {
+func TestBitlistMerge(t *testing.T) {
require := require.New(t)
- b1 := solid.NewBitList(5, 10)
- b2 := solid.NewBitList(5, 10)
+ b1 := solid.BitlistFromBytes([]byte{0b11010000}, 10)
+ b2 := solid.BitlistFromBytes([]byte{0b00001101}, 10)
- b1.Set(0, byte(0b11010000))
- b2.Set(0, byte(0b00001101))
-
- merged, err := b1.Union(b2)
+ merged, err := b1.Merge(b2)
require.NoError(err)
- require.Equal(5, merged.Length(), "BitList Union did not return the expected length")
- require.Equal(byte(0b11011101), merged.Get(0), "BitList Union did not return the expected value")
- require.Equal(byte(0b00000000), merged.Get(1), "BitList Union did not return the expected value")
+ require.Equal(7, merged.Bits(), "BitList Merge did not return the expected number of bits")
+ require.Equal(1, merged.Length(), "BitList Merge did not return the expected length")
+ require.Equal(byte(0b11010101), merged.Get(0), "BitList Merge did not return the expected value")
+}
+
+func TestBitSlice(t *testing.T) {
+ require := require.New(t)
+
+ bs := solid.NewBitSlice()
+
+ bs.AppendBit(true)
+ bs.AppendBit(false)
+ bs.AppendBit(true)
+ bs.AppendBit(false)
+
+ bytes := bs.Bytes()
+
+ require.Equal([]byte{0b00000101}, bytes, "BitSlice AppendBit did not append the bits correctly")
+ require.Equal(4, bs.Length(), "BitSlice AppendBit did not increment the length correctly")
}
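
A small usage sketch of the new BitSlice, assuming the solid and fmt imports of the surrounding test package (hypothetical values, not from this PR's tests):

    func ExampleBitSlice() {
        votes := []bool{true, false, true, true}
        bs := solid.NewBitSlice()
        for _, v := range votes {
            bs.AppendBit(v) // bits fill little-endian within each byte
        }
        fmt.Printf("%08b %d\n", bs.Bytes()[0], bs.Length())
        // Output: 00001101 4
    }
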
diff --git a/cl/cltypes/solid/bitvector.go b/cl/cltypes/solid/bitvector.go
index d30e180d456..7ea93e71b16 100644
--- a/cl/cltypes/solid/bitvector.go
+++ b/cl/cltypes/solid/bitvector.go
@@ -168,3 +168,13 @@ func (b *BitVector) Union(other *BitVector) (*BitVector, error) {
}
return new, nil
}
+
+func (b *BitVector) IsOverlap(other *BitVector) bool {
+ // compare byte-by-byte: any shared set bit means the vectors overlap
+ for i := 0; i < len(b.container) && i < len(other.container); i++ {
+ if b.container[i]&other.container[i] != 0 {
+ return true
+ }
+ }
+ return false
+}
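
Note: IsOverlap needs no per-bit loop: two vectors share a set bit iff some byte position ANDs to non-zero, and bytes beyond the shorter container cannot overlap. A standalone sketch of the same idea:

    package main

    import "fmt"

    // bytesOverlap reports whether two bit containers share any set bit.
    func bytesOverlap(a, b []byte) bool {
        n := len(a)
        if len(b) < n {
            n = len(b)
        }
        for i := 0; i < n; i++ {
            if a[i]&b[i] != 0 {
                return true
            }
        }
        return false
    }

    func main() {
        fmt.Println(bytesOverlap([]byte{0b0011}, []byte{0b0110})) // true (bit 1 shared)
        fmt.Println(bytesOverlap([]byte{0b0001}, []byte{0b0110})) // false
    }
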
diff --git a/cl/cltypes/validator_test.go b/cl/cltypes/validator_test.go
index 55285e7e05f..7b60be0f437 100644
--- a/cl/cltypes/validator_test.go
+++ b/cl/cltypes/validator_test.go
@@ -127,7 +127,7 @@ func TestValidatorSlashed(t *testing.T) {
}
func TestValidatorNonSlashed(t *testing.T) {
- encoded, _ := utils.DecompressSnappy(testValidator2Snappified)
+ encoded, _ := utils.DecompressSnappy(testValidator2Snappified, false)
decodedValidator := solid.NewValidator()
require.NoError(t, decodedValidator.DecodeSSZ(encoded, 0))
encoded2, _ := decodedValidator.EncodeSSZ(nil)
diff --git a/cl/monitor/interface.go b/cl/monitor/interface.go
deleted file mode 100644
index dfa89a53b5f..00000000000
--- a/cl/monitor/interface.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package monitor
-
-import (
- "github.com/erigontech/erigon/cl/cltypes"
- "github.com/erigontech/erigon/cl/phase1/core/state"
-)
-
-//go:generate mockgen -typed=true -destination=mock_services/validator_monitor_mock.go -package=mock_services . ValidatorMonitor
-type ValidatorMonitor interface {
- ObserveValidator(vid uint64)
- RemoveValidator(vid uint64)
- OnNewBlock(state *state.CachingBeaconState, block *cltypes.BeaconBlock) error
-}
-
-type dummyValdatorMonitor struct{}
-
-func (d *dummyValdatorMonitor) ObserveValidator(vid uint64) {}
-
-func (d *dummyValdatorMonitor) RemoveValidator(vid uint64) {}
-
-func (d *dummyValdatorMonitor) OnNewBlock(_ *state.CachingBeaconState, _ *cltypes.BeaconBlock) error {
- return nil
-}
diff --git a/cl/monitor/mock_services/validator_monitor_mock.go b/cl/monitor/mock_services/validator_monitor_mock.go
deleted file mode 100644
index cbe9008b104..00000000000
--- a/cl/monitor/mock_services/validator_monitor_mock.go
+++ /dev/null
@@ -1,152 +0,0 @@
-// Code generated by MockGen. DO NOT EDIT.
-// Source: github.com/erigontech/erigon/cl/monitor (interfaces: ValidatorMonitor)
-//
-// Generated by this command:
-//
-// mockgen -typed=true -destination=mock_services/validator_monitor_mock.go -package=mock_services . ValidatorMonitor
-//
-
-// Package mock_services is a generated GoMock package.
-package mock_services
-
-import (
- reflect "reflect"
-
- cltypes "github.com/erigontech/erigon/cl/cltypes"
- state "github.com/erigontech/erigon/cl/phase1/core/state"
- gomock "go.uber.org/mock/gomock"
-)
-
-// MockValidatorMonitor is a mock of ValidatorMonitor interface.
-type MockValidatorMonitor struct {
- ctrl *gomock.Controller
- recorder *MockValidatorMonitorMockRecorder
- isgomock struct{}
-}
-
-// MockValidatorMonitorMockRecorder is the mock recorder for MockValidatorMonitor.
-type MockValidatorMonitorMockRecorder struct {
- mock *MockValidatorMonitor
-}
-
-// NewMockValidatorMonitor creates a new mock instance.
-func NewMockValidatorMonitor(ctrl *gomock.Controller) *MockValidatorMonitor {
- mock := &MockValidatorMonitor{ctrl: ctrl}
- mock.recorder = &MockValidatorMonitorMockRecorder{mock}
- return mock
-}
-
-// EXPECT returns an object that allows the caller to indicate expected use.
-func (m *MockValidatorMonitor) EXPECT() *MockValidatorMonitorMockRecorder {
- return m.recorder
-}
-
-// ObserveValidator mocks base method.
-func (m *MockValidatorMonitor) ObserveValidator(vid uint64) {
- m.ctrl.T.Helper()
- m.ctrl.Call(m, "ObserveValidator", vid)
-}
-
-// ObserveValidator indicates an expected call of ObserveValidator.
-func (mr *MockValidatorMonitorMockRecorder) ObserveValidator(vid any) *MockValidatorMonitorObserveValidatorCall {
- mr.mock.ctrl.T.Helper()
- call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ObserveValidator", reflect.TypeOf((*MockValidatorMonitor)(nil).ObserveValidator), vid)
- return &MockValidatorMonitorObserveValidatorCall{Call: call}
-}
-
-// MockValidatorMonitorObserveValidatorCall wrap *gomock.Call
-type MockValidatorMonitorObserveValidatorCall struct {
- *gomock.Call
-}
-
-// Return rewrite *gomock.Call.Return
-func (c *MockValidatorMonitorObserveValidatorCall) Return() *MockValidatorMonitorObserveValidatorCall {
- c.Call = c.Call.Return()
- return c
-}
-
-// Do rewrite *gomock.Call.Do
-func (c *MockValidatorMonitorObserveValidatorCall) Do(f func(uint64)) *MockValidatorMonitorObserveValidatorCall {
- c.Call = c.Call.Do(f)
- return c
-}
-
-// DoAndReturn rewrite *gomock.Call.DoAndReturn
-func (c *MockValidatorMonitorObserveValidatorCall) DoAndReturn(f func(uint64)) *MockValidatorMonitorObserveValidatorCall {
- c.Call = c.Call.DoAndReturn(f)
- return c
-}
-
-// OnNewBlock mocks base method.
-func (m *MockValidatorMonitor) OnNewBlock(state *state.CachingBeaconState, block *cltypes.BeaconBlock) error {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "OnNewBlock", state, block)
- ret0, _ := ret[0].(error)
- return ret0
-}
-
-// OnNewBlock indicates an expected call of OnNewBlock.
-func (mr *MockValidatorMonitorMockRecorder) OnNewBlock(state, block any) *MockValidatorMonitorOnNewBlockCall {
- mr.mock.ctrl.T.Helper()
- call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OnNewBlock", reflect.TypeOf((*MockValidatorMonitor)(nil).OnNewBlock), state, block)
- return &MockValidatorMonitorOnNewBlockCall{Call: call}
-}
-
-// MockValidatorMonitorOnNewBlockCall wrap *gomock.Call
-type MockValidatorMonitorOnNewBlockCall struct {
- *gomock.Call
-}
-
-// Return rewrite *gomock.Call.Return
-func (c *MockValidatorMonitorOnNewBlockCall) Return(arg0 error) *MockValidatorMonitorOnNewBlockCall {
- c.Call = c.Call.Return(arg0)
- return c
-}
-
-// Do rewrite *gomock.Call.Do
-func (c *MockValidatorMonitorOnNewBlockCall) Do(f func(*state.CachingBeaconState, *cltypes.BeaconBlock) error) *MockValidatorMonitorOnNewBlockCall {
- c.Call = c.Call.Do(f)
- return c
-}
-
-// DoAndReturn rewrite *gomock.Call.DoAndReturn
-func (c *MockValidatorMonitorOnNewBlockCall) DoAndReturn(f func(*state.CachingBeaconState, *cltypes.BeaconBlock) error) *MockValidatorMonitorOnNewBlockCall {
- c.Call = c.Call.DoAndReturn(f)
- return c
-}
-
-// RemoveValidator mocks base method.
-func (m *MockValidatorMonitor) RemoveValidator(vid uint64) {
- m.ctrl.T.Helper()
- m.ctrl.Call(m, "RemoveValidator", vid)
-}
-
-// RemoveValidator indicates an expected call of RemoveValidator.
-func (mr *MockValidatorMonitorMockRecorder) RemoveValidator(vid any) *MockValidatorMonitorRemoveValidatorCall {
- mr.mock.ctrl.T.Helper()
- call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveValidator", reflect.TypeOf((*MockValidatorMonitor)(nil).RemoveValidator), vid)
- return &MockValidatorMonitorRemoveValidatorCall{Call: call}
-}
-
-// MockValidatorMonitorRemoveValidatorCall wrap *gomock.Call
-type MockValidatorMonitorRemoveValidatorCall struct {
- *gomock.Call
-}
-
-// Return rewrite *gomock.Call.Return
-func (c *MockValidatorMonitorRemoveValidatorCall) Return() *MockValidatorMonitorRemoveValidatorCall {
- c.Call = c.Call.Return()
- return c
-}
-
-// Do rewrite *gomock.Call.Do
-func (c *MockValidatorMonitorRemoveValidatorCall) Do(f func(uint64)) *MockValidatorMonitorRemoveValidatorCall {
- c.Call = c.Call.Do(f)
- return c
-}
-
-// DoAndReturn rewrite *gomock.Call.DoAndReturn
-func (c *MockValidatorMonitorRemoveValidatorCall) DoAndReturn(f func(uint64)) *MockValidatorMonitorRemoveValidatorCall {
- c.Call = c.Call.DoAndReturn(f)
- return c
-}
diff --git a/cl/monitor/validator.go b/cl/monitor/validator.go
deleted file mode 100644
index eb7f9631cd9..00000000000
--- a/cl/monitor/validator.go
+++ /dev/null
@@ -1,219 +0,0 @@
-package monitor
-
-import (
- "sync"
- "time"
-
- mapset "github.com/deckarep/golang-set/v2"
- "github.com/erigontech/erigon-lib/common"
- "github.com/erigontech/erigon-lib/log/v3"
- "github.com/erigontech/erigon/cl/beacon/synced_data"
- "github.com/erigontech/erigon/cl/clparams"
- "github.com/erigontech/erigon/cl/cltypes"
- "github.com/erigontech/erigon/cl/cltypes/solid"
- "github.com/erigontech/erigon/cl/phase1/core/state"
- "github.com/erigontech/erigon/cl/utils/eth_clock"
-)
-
-type validatorMonitorImpl struct {
- syncedData *synced_data.SyncedDataManager
- ethClock eth_clock.EthereumClock
- beaconCfg *clparams.BeaconChainConfig
- vaidatorStatuses *validatorStatuses // map validatorID -> epoch -> validatorStatus
-}
-
-func NewValidatorMonitor(
- enableMonitor bool,
- ethClock eth_clock.EthereumClock,
- beaconConfig *clparams.BeaconChainConfig,
- syncedData *synced_data.SyncedDataManager,
-) ValidatorMonitor {
- if !enableMonitor {
- return &dummyValdatorMonitor{}
- }
-
- m := &validatorMonitorImpl{
- ethClock: ethClock,
- beaconCfg: beaconConfig,
- syncedData: syncedData,
- vaidatorStatuses: newValidatorStatuses(),
- }
- go m.runReportAttesterStatus()
- go m.runReportProposerStatus()
- return m
-}
-
-func (m *validatorMonitorImpl) ObserveValidator(vid uint64) {
- m.vaidatorStatuses.addValidator(vid)
-}
-
-func (m *validatorMonitorImpl) RemoveValidator(vid uint64) {
- m.vaidatorStatuses.removeValidator(vid)
-}
-
-func (m *validatorMonitorImpl) OnNewBlock(state *state.CachingBeaconState, block *cltypes.BeaconBlock) error {
- var (
- atts = block.Body.Attestations
- blockEpoch = m.ethClock.GetEpochAtSlot(block.Slot)
- currentEpoch = m.ethClock.GetCurrentEpoch()
- )
- if blockEpoch+2 < currentEpoch {
- // skip old blocks
- return nil
- }
-
- // todo: maybe launch a goroutine to update attester status
- // update attester status
- atts.Range(func(i int, att *solid.Attestation, length int) bool {
- indicies, err := state.GetAttestingIndicies(att, true)
- if err != nil {
- log.Warn("failed to get attesting indicies", "err", err, "slot", block.Slot, "stateRoot", block.StateRoot)
- return false
- }
- slot := att.Data.Slot
- attEpoch := m.ethClock.GetEpochAtSlot(slot)
- for _, vidx := range indicies {
- status := m.vaidatorStatuses.getValidatorStatus(vidx, attEpoch)
- if status == nil {
- continue
- }
- status.updateAttesterStatus(att)
- }
- return true
- })
- // update proposer status
- pIndex := block.ProposerIndex
- if status := m.vaidatorStatuses.getValidatorStatus(pIndex, blockEpoch); status != nil {
- status.proposeSlots.Add(block.Slot)
- }
-
- return nil
-}
-
-func (m *validatorMonitorImpl) runReportAttesterStatus() {
- // every epoch seconds
- epochDuration := time.Duration(m.beaconCfg.SlotsPerEpoch) * time.Duration(m.beaconCfg.SecondsPerSlot) * time.Second
- ticker := time.NewTicker(epochDuration)
- for range ticker.C {
- currentEpoch := m.ethClock.GetCurrentEpoch()
- // report attester status for current_epoch - 2
- epoch := currentEpoch - 2
- hitCount := 0
- missCount := 0
- m.vaidatorStatuses.iterate(func(vindex uint64, epochStatuses map[uint64]*validatorStatus) {
- if status, ok := epochStatuses[epoch]; ok {
- successAtt := status.attestedBlockRoots.Cardinality()
- metricAttestHit.AddInt(successAtt)
- hitCount += successAtt
- delete(epochStatuses, epoch)
- log.Debug("[monitor] report attester status hit", "epoch", epoch, "vindex", vindex, "countAttestedBlock", status.attestedBlockRoots.Cardinality())
- } else {
- metricAttestMiss.AddInt(1)
- missCount++
- log.Debug("[monitor] report attester status miss", "epoch", epoch, "vindex", vindex, "countAttestedBlock", 0)
- }
- })
- log.Info("[monitor] report attester hit/miss", "epoch", epoch, "hitCount", hitCount, "missCount", missCount, "cur_epoch", currentEpoch)
- }
-
-}
-
-func (m *validatorMonitorImpl) runReportProposerStatus() {
- // check proposer in previous slot every slot duration
- ticker := time.NewTicker(time.Duration(m.beaconCfg.SecondsPerSlot) * time.Second)
- defer ticker.Stop()
- for range ticker.C {
- prevSlot := m.ethClock.GetCurrentSlot() - 1
-
- var proposerIndex uint64
- if err := m.syncedData.ViewHeadState(func(headState *state.CachingBeaconState) (err error) {
- proposerIndex, err = headState.GetBeaconProposerIndexForSlot(prevSlot)
- if err != nil {
- return err
- }
- return nil
- }); err != nil {
- log.Warn("failed to get proposer index", "err", err, "slot", prevSlot)
- continue
- }
- // check proposer in previous slot
-
- if status := m.vaidatorStatuses.getValidatorStatus(proposerIndex, prevSlot/m.beaconCfg.SlotsPerEpoch); status != nil {
- if status.proposeSlots.Contains(prevSlot) {
- metricProposerHit.AddInt(1)
- log.Warn("[monitor] proposer hit", "slot", prevSlot, "proposerIndex", proposerIndex)
- } else {
- metricProposerMiss.AddInt(1)
- log.Warn("[monitor] proposer miss", "slot", prevSlot, "proposerIndex", proposerIndex)
- }
- }
- }
-}
-
-type validatorStatus struct {
- // attestedBlockRoots is the set of block roots that the validator has successfully attested during one epoch.
- attestedBlockRoots mapset.Set[common.Hash]
- // proposeSlots is the set of slots that the proposer has successfully proposed blocks during one epoch.
- proposeSlots mapset.Set[uint64]
-}
-
-func (s *validatorStatus) updateAttesterStatus(att *solid.Attestation) {
- data := att.Data
- s.attestedBlockRoots.Add(data.BeaconBlockRoot)
-}
-
-type validatorStatuses struct {
- statuses map[uint64]map[uint64]*validatorStatus
- vStatusMutex sync.RWMutex
-}
-
-func newValidatorStatuses() *validatorStatuses {
- return &validatorStatuses{
- statuses: make(map[uint64]map[uint64]*validatorStatus),
- }
-}
-
-// getValidatorStatus returns the validator status for the given validator index and epoch.
-// returns nil if validator is not observed.
-func (s *validatorStatuses) getValidatorStatus(vid uint64, epoch uint64) *validatorStatus {
- s.vStatusMutex.Lock()
- defer s.vStatusMutex.Unlock()
- statusByEpoch, ok := s.statuses[vid]
- if !ok {
- return nil
- }
- if _, ok := statusByEpoch[epoch]; !ok {
- statusByEpoch[epoch] = &validatorStatus{
- attestedBlockRoots: mapset.NewSet[common.Hash](),
- proposeSlots: mapset.NewSet[uint64](),
- }
- }
-
- return statusByEpoch[epoch]
-}
-
-func (s *validatorStatuses) addValidator(vid uint64) {
- s.vStatusMutex.Lock()
- defer s.vStatusMutex.Unlock()
- if _, ok := s.statuses[vid]; !ok {
- s.statuses[vid] = make(map[uint64]*validatorStatus)
- log.Trace("[monitor] add validator", "vid", vid)
- }
-}
-
-func (s *validatorStatuses) removeValidator(vid uint64) {
- s.vStatusMutex.Lock()
- defer s.vStatusMutex.Unlock()
- if _, ok := s.statuses[vid]; ok {
- delete(s.statuses, vid)
- log.Trace("[monitor] remove validator", "vid", vid)
- }
-}
-
-func (s *validatorStatuses) iterate(run func(vid uint64, statuses map[uint64]*validatorStatus)) {
- s.vStatusMutex.Lock()
- defer s.vStatusMutex.Unlock()
- for vid, statuses := range s.statuses {
- run(vid, statuses)
- }
-}
diff --git a/cl/persistence/genesisdb/genesis_db.go b/cl/persistence/genesisdb/genesis_db.go
index b25b4d8588d..ec7483386a7 100644
--- a/cl/persistence/genesisdb/genesis_db.go
+++ b/cl/persistence/genesisdb/genesis_db.go
@@ -54,7 +54,7 @@ func (g *genesisDB) ReadGenesisState() (*state.CachingBeaconState, error) {
return nil, err
}
- decompressedEnc, err := utils.DecompressSnappy(enc)
+ decompressedEnc, err := utils.DecompressSnappy(enc, false)
if err != nil {
return nil, err
}
diff --git a/cl/phase1/core/checkpoint_sync/local_checkpoint_syncer.go b/cl/phase1/core/checkpoint_sync/local_checkpoint_syncer.go
index 41c716a3479..7182d2bfdae 100644
--- a/cl/phase1/core/checkpoint_sync/local_checkpoint_syncer.go
+++ b/cl/phase1/core/checkpoint_sync/local_checkpoint_syncer.go
@@ -32,7 +32,7 @@ func (l *LocalCheckpointSyncer) GetLatestBeaconState(ctx context.Context) (*stat
log.Warn("Could not read local state, starting sync from genesis.")
return l.genesisState.Copy()
}
- decompressedSnappy, err := utils.DecompressSnappy(snappyEncoded)
+ decompressedSnappy, err := utils.DecompressSnappy(snappyEncoded, false)
if err != nil {
return nil, fmt.Errorf("local state is corrupt: %s", err)
}
diff --git a/cl/phase1/core/state/ssz_test.go b/cl/phase1/core/state/ssz_test.go
index a9b53077751..6ee9e350966 100644
--- a/cl/phase1/core/state/ssz_test.go
+++ b/cl/phase1/core/state/ssz_test.go
@@ -35,7 +35,7 @@ var phase0BeaconSnappyTest []byte
func TestBeaconStateCapellaEncodingDecoding(t *testing.T) {
state := New(&clparams.MainnetBeaconConfig)
- decodedSSZ, err := utils.DecompressSnappy(capellaBeaconSnappyTest)
+ decodedSSZ, err := utils.DecompressSnappy(capellaBeaconSnappyTest, true)
require.NoError(t, err)
require.NoError(t, state.DecodeSSZ(decodedSSZ, int(clparams.CapellaVersion)))
root, err := state.HashSSZ()
@@ -46,7 +46,7 @@ func TestBeaconStateCapellaEncodingDecoding(t *testing.T) {
func TestBeaconStatePhase0EncodingDecoding(t *testing.T) {
state := New(&clparams.MainnetBeaconConfig)
- decodedSSZ, err := utils.DecompressSnappy(phase0BeaconSnappyTest)
+ decodedSSZ, err := utils.DecompressSnappy(phase0BeaconSnappyTest, true)
require.NoError(t, err)
state.DecodeSSZ(decodedSSZ, int(clparams.Phase0Version))
root, err := state.HashSSZ()
diff --git a/cl/phase1/core/state/upgrade.go b/cl/phase1/core/state/upgrade.go
index cf47b18e207..97b34f353be 100644
--- a/cl/phase1/core/state/upgrade.go
+++ b/cl/phase1/core/state/upgrade.go
@@ -21,6 +21,7 @@ import (
"github.com/Giulio2002/bls"
libcommon "github.com/erigontech/erigon-lib/common"
+ "github.com/erigontech/erigon-lib/log/v3"
"github.com/erigontech/erigon/cl/clparams"
"github.com/erigontech/erigon/cl/cltypes"
"github.com/erigontech/erigon/cl/cltypes/solid"
@@ -240,5 +241,6 @@ func (b *CachingBeaconState) UpgradeToElectra() error {
}
return true
})
+ log.Info("Upgrade to Electra complete")
return nil
}
diff --git a/cl/phase1/execution_client/block_collector/block_collector.go b/cl/phase1/execution_client/block_collector/block_collector.go
index 9f57fd6cca9..536306fe02b 100644
--- a/cl/phase1/execution_client/block_collector/block_collector.go
+++ b/cl/phase1/execution_client/block_collector/block_collector.go
@@ -98,7 +98,7 @@ func (b *blockCollector) Flush(ctx context.Context) error {
if len(v) == 0 {
return nil
}
- v, err = utils.DecompressSnappy(v)
+ v, err = utils.DecompressSnappy(v, false)
if err != nil {
return err
}
diff --git a/cl/phase1/forkchoice/fork_choice_test.go b/cl/phase1/forkchoice/fork_choice_test.go
index cdd59e1e06e..72e0a63c80a 100644
--- a/cl/phase1/forkchoice/fork_choice_test.go
+++ b/cl/phase1/forkchoice/fork_choice_test.go
@@ -29,7 +29,6 @@ import (
"github.com/erigontech/erigon/cl/beacon/beaconevents"
"github.com/erigontech/erigon/cl/beacon/synced_data"
"github.com/erigontech/erigon/cl/cltypes/solid"
- "github.com/erigontech/erigon/cl/monitor"
"github.com/erigontech/erigon/cl/phase1/core/state"
"github.com/erigontech/erigon/cl/phase1/forkchoice"
"github.com/erigontech/erigon/cl/phase1/forkchoice/fork_graph"
@@ -84,8 +83,7 @@ func TestForkChoiceBasic(t *testing.T) {
require.NoError(t, utils.DecodeSSZSnappy(anchorState, anchorStateEncoded, int(clparams.AltairVersion)))
pool := pool.NewOperationsPool(&clparams.MainnetBeaconConfig)
emitters := beaconevents.NewEventEmitter()
- validatorMonitor := monitor.NewValidatorMonitor(false, nil, nil, nil)
- store, err := forkchoice.NewForkChoiceStore(nil, anchorState, nil, pool, fork_graph.NewForkGraphDisk(anchorState, nil, afero.NewMemMapFs(), beacon_router_configuration.RouterConfiguration{}, emitters), emitters, sd, nil, validatorMonitor, public_keys_registry.NewInMemoryPublicKeysRegistry(), false)
+ store, err := forkchoice.NewForkChoiceStore(nil, anchorState, nil, pool, fork_graph.NewForkGraphDisk(anchorState, nil, afero.NewMemMapFs(), beacon_router_configuration.RouterConfiguration{}, emitters), emitters, sd, nil, public_keys_registry.NewInMemoryPublicKeysRegistry(), false)
require.NoError(t, err)
// first steps
store.OnTick(0)
@@ -151,7 +149,7 @@ func TestForkChoiceChainBellatrix(t *testing.T) {
sd := synced_data.NewSyncedDataManager(&clparams.MainnetBeaconConfig, true)
store, err := forkchoice.NewForkChoiceStore(nil, anchorState, nil, pool, fork_graph.NewForkGraphDisk(anchorState, nil, afero.NewMemMapFs(), beacon_router_configuration.RouterConfiguration{
Beacon: true,
- }, emitters), emitters, sd, nil, nil, public_keys_registry.NewInMemoryPublicKeysRegistry(), false)
+ }, emitters), emitters, sd, nil, public_keys_registry.NewInMemoryPublicKeysRegistry(), false)
store.OnTick(2000)
require.NoError(t, err)
for _, block := range blocks {
diff --git a/cl/phase1/forkchoice/fork_graph/fork_graph_disk.go b/cl/phase1/forkchoice/fork_graph/fork_graph_disk.go
index 14011fec259..d50df8f7d1f 100644
--- a/cl/phase1/forkchoice/fork_graph/fork_graph_disk.go
+++ b/cl/phase1/forkchoice/fork_graph/fork_graph_disk.go
@@ -26,7 +26,6 @@ import (
"github.com/spf13/afero"
libcommon "github.com/erigontech/erigon-lib/common"
- "github.com/erigontech/erigon-lib/common/dbg"
"github.com/erigontech/erigon-lib/log/v3"
"github.com/erigontech/erigon/cl/beacon/beacon_router_configuration"
"github.com/erigontech/erigon/cl/beacon/beaconevents"
@@ -421,11 +420,8 @@ func (f *forkGraphDisk) getState(blockRoot libcommon.Hash, alwaysCopy bool, addC
blocksInTheWay := []*cltypes.SignedBeaconBlock{}
// Use the parent root as a reverse iterator.
currentIteratorRoot := blockRoot
- var copyReferencedState, outState *state.CachingBeaconState
+ var copyReferencedState *state.CachingBeaconState
var err error
- if addChainSegment && dbg.CaplinEfficientReorg {
- outState = f.currentState
- }
// try and find the point of reconnection
for copyReferencedState == nil {
@@ -434,7 +430,7 @@ func (f *forkGraphDisk) getState(blockRoot libcommon.Hash, alwaysCopy bool, addC
// check if it is in the header
bHeader, ok := f.GetHeader(currentIteratorRoot)
if ok && bHeader.Slot%dumpSlotFrequency == 0 {
- copyReferencedState, err = f.readBeaconStateFromDisk(currentIteratorRoot, outState)
+ copyReferencedState, err = f.readBeaconStateFromDisk(currentIteratorRoot)
if err != nil {
log.Trace("Could not retrieve state: Missing header", "missing", currentIteratorRoot, "err", err)
copyReferencedState = nil
@@ -445,7 +441,7 @@ func (f *forkGraphDisk) getState(blockRoot libcommon.Hash, alwaysCopy bool, addC
return nil, nil
}
if block.Block.Slot%dumpSlotFrequency == 0 {
- copyReferencedState, err = f.readBeaconStateFromDisk(currentIteratorRoot, outState)
+ copyReferencedState, err = f.readBeaconStateFromDisk(currentIteratorRoot)
if err != nil {
log.Trace("Could not retrieve state: Missing header", "missing", currentIteratorRoot, "err", err)
}
diff --git a/cl/phase1/forkchoice/fork_graph/fork_graph_disk_fs.go b/cl/phase1/forkchoice/fork_graph/fork_graph_disk_fs.go
index c670623fd8c..3ca261ef20e 100644
--- a/cl/phase1/forkchoice/fork_graph/fork_graph_disk_fs.go
+++ b/cl/phase1/forkchoice/fork_graph/fork_graph_disk_fs.go
@@ -34,7 +34,7 @@ func getBeaconStateFilename(blockRoot libcommon.Hash) string {
return fmt.Sprintf("%x.snappy_ssz", blockRoot)
}
-func (f *forkGraphDisk) readBeaconStateFromDisk(blockRoot libcommon.Hash, out *state.CachingBeaconState) (bs *state.CachingBeaconState, err error) {
+func (f *forkGraphDisk) readBeaconStateFromDisk(blockRoot libcommon.Hash) (bs *state.CachingBeaconState, err error) {
var file afero.File
f.stateDumpLock.Lock()
defer f.stateDumpLock.Unlock()
@@ -72,11 +72,7 @@ func (f *forkGraphDisk) readBeaconStateFromDisk(blockRoot libcommon.Hash, out *s
return nil, fmt.Errorf("failed to read snappy buffer: %w, root: %x", err, blockRoot)
}
f.sszBuffer = f.sszBuffer[:n]
- if out == nil {
- bs = state.New(f.beaconCfg)
- } else {
- bs = out
- }
+ bs = state.New(f.beaconCfg)
if err = bs.DecodeSSZ(f.sszBuffer, int(v[0])); err != nil {
return nil, fmt.Errorf("failed to decode beacon state: %w, root: %x, len: %d, decLen: %d, bs: %+v", err, blockRoot, n, len(f.sszBuffer), bs)
diff --git a/cl/phase1/forkchoice/forkchoice.go b/cl/phase1/forkchoice/forkchoice.go
index fd2b27b9fcc..9be59c7d204 100644
--- a/cl/phase1/forkchoice/forkchoice.go
+++ b/cl/phase1/forkchoice/forkchoice.go
@@ -27,7 +27,6 @@ import (
"github.com/erigontech/erigon/cl/clparams"
"github.com/erigontech/erigon/cl/cltypes"
"github.com/erigontech/erigon/cl/cltypes/solid"
- "github.com/erigontech/erigon/cl/monitor"
"github.com/erigontech/erigon/cl/persistence/blob_storage"
"github.com/erigontech/erigon/cl/phase1/core/state"
state2 "github.com/erigontech/erigon/cl/phase1/core/state"
@@ -136,7 +135,6 @@ type ForkChoiceStore struct {
ethClock eth_clock.EthereumClock
optimisticStore optimistic.OptimisticStore
- validatorMonitor monitor.ValidatorMonitor
probabilisticHeadGetter bool
}
@@ -160,7 +158,6 @@ func NewForkChoiceStore(
emitters *beaconevents.EventEmitter,
syncedDataManager *synced_data.SyncedDataManager,
blobStorage blob_storage.BlobStorage,
- validatorMonitor monitor.ValidatorMonitor,
publicKeysRegistry public_keys_registry.PublicKeyRegistry,
probabilisticHeadGetter bool,
) (*ForkChoiceStore, error) {
@@ -257,7 +254,6 @@ func NewForkChoiceStore(
blobStorage: blobStorage,
ethClock: ethClock,
optimisticStore: optimistic.NewOptimisticStore(),
- validatorMonitor: validatorMonitor,
probabilisticHeadGetter: probabilisticHeadGetter,
publicKeysRegistry: publicKeysRegistry,
verifiedExecutionPayload: verifiedExecutionPayload,
diff --git a/cl/phase1/forkchoice/on_block.go b/cl/phase1/forkchoice/on_block.go
index 9a06fa65d45..e2704871ce1 100644
--- a/cl/phase1/forkchoice/on_block.go
+++ b/cl/phase1/forkchoice/on_block.go
@@ -269,9 +269,7 @@ func (f *ForkChoiceStore) OnBlock(ctx context.Context, block *cltypes.SignedBeac
Block: blockRoot,
ExecutionOptimistic: f.optimisticStore.IsOptimistic(blockRoot),
})
- if f.validatorMonitor != nil {
- f.validatorMonitor.OnNewBlock(lastProcessedState, block.Block)
- }
+
if !isVerifiedExecutionPayload {
log.Debug("OnBlock", "elapsed", time.Since(start), "slot", block.Block.Slot)
}
diff --git a/cl/phase1/network/services/blob_sidecar_service.go b/cl/phase1/network/services/blob_sidecar_service.go
index 9f90ae32466..ad8d679a099 100644
--- a/cl/phase1/network/services/blob_sidecar_service.go
+++ b/cl/phase1/network/services/blob_sidecar_service.go
@@ -85,13 +85,14 @@ func (b *blobSidecarService) ProcessMessage(ctx context.Context, subnetId *uint6
return b.verifyAndStoreBlobSidecar(msg)
}
+ sidecarVersion := b.beaconCfg.GetCurrentStateVersion(msg.SignedBlockHeader.Header.Slot / b.beaconCfg.SlotsPerEpoch)
// [REJECT] The sidecar's index is consistent with MAX_BLOBS_PER_BLOCK -- i.e. blob_sidecar.index < MAX_BLOBS_PER_BLOCK.
- blockVersion := b.beaconCfg.GetCurrentStateVersion(msg.SignedBlockHeader.Header.Slot / b.beaconCfg.SlotsPerEpoch)
- maxBlobsPerBlock := b.beaconCfg.MaxBlobsPerBlockByVersion(blockVersion)
+ maxBlobsPerBlock := b.beaconCfg.MaxBlobsPerBlockByVersion(sidecarVersion)
if msg.Index >= maxBlobsPerBlock {
return errors.New("blob index out of range")
}
- sidecarSubnetIndex := msg.Index % maxBlobsPerBlock
+ // [REJECT] The sidecar is for the correct subnet -- i.e. compute_subnet_for_blob_sidecar(blob_sidecar.index) == subnet_id
+ sidecarSubnetIndex := msg.Index % b.beaconCfg.BlobSidecarSubnetCountByVersion(sidecarVersion)
if sidecarSubnetIndex != *subnetId {
return ErrBlobIndexOutOfRange
}
@@ -103,6 +104,7 @@ func (b *blobSidecarService) ProcessMessage(ctx context.Context, subnetId *uint6
return ErrIgnore
}
+ // [IGNORE] The sidecar is from a slot greater than the latest finalized slot -- i.e. validate that block_header.slot > compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)
if b.forkchoiceStore.FinalizedSlot() >= sidecarSlot {
return ErrIgnore
}
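
Note: the subnet check now follows the spec rule compute_subnet_for_blob_sidecar(index) = index % BLOB_SIDECAR_SUBNET_COUNT, with the count resolved per fork version via beaconCfg.BlobSidecarSubnetCountByVersion. The previous code used maxBlobsPerBlock as the modulus, which only coincides with the subnet count on forks where the two constants happen to be equal. The rule in isolation, as a sketch:

    // computeSubnetForBlobSidecar mirrors the spec rule cited above;
    // subnetCount is the per-fork BLOB_SIDECAR_SUBNET_COUNT.
    func computeSubnetForBlobSidecar(index, subnetCount uint64) uint64 {
        return index % subnetCount
    }
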
diff --git a/cl/phase1/stages/clstages.go b/cl/phase1/stages/clstages.go
index dbcd73642c7..f31cbddc8f3 100644
--- a/cl/phase1/stages/clstages.go
+++ b/cl/phase1/stages/clstages.go
@@ -28,7 +28,6 @@ import (
"github.com/erigontech/erigon/cl/clparams"
"github.com/erigontech/erigon/cl/clstages"
"github.com/erigontech/erigon/cl/cltypes"
- "github.com/erigontech/erigon/cl/monitor"
"github.com/erigontech/erigon/cl/persistence/beacon_indicies"
"github.com/erigontech/erigon/cl/persistence/blob_storage"
"github.com/erigontech/erigon/cl/phase1/core/state"
@@ -63,7 +62,6 @@ type Cfg struct {
sn *freezeblocks.CaplinSnapshots
blobStore blob_storage.BlobStorage
attestationDataProducer attestation_producer.AttestationDataProducer
- validatorMonitor monitor.ValidatorMonitor
caplinConfig clparams.CaplinConfig
hasDownloaded bool
}
@@ -96,7 +94,6 @@ func ClStagesCfg(
emitters *beaconevents.EventEmitter,
blobStore blob_storage.BlobStorage,
attestationDataProducer attestation_producer.AttestationDataProducer,
- validatorMonitor monitor.ValidatorMonitor,
) *Cfg {
return &Cfg{
rpc: rpc,
@@ -118,7 +115,6 @@ func ClStagesCfg(
blobStore: blobStore,
blockCollector: block_collector.NewBlockCollector(log.Root(), executionClient, beaconCfg, syncBackLoopLimit, dirs.Tmp),
attestationDataProducer: attestationDataProducer,
- validatorMonitor: validatorMonitor,
}
}
diff --git a/cl/phase1/stages/forward_sync.go b/cl/phase1/stages/forward_sync.go
index 7bde4554c59..68ac8cba87e 100644
--- a/cl/phase1/stages/forward_sync.go
+++ b/cl/phase1/stages/forward_sync.go
@@ -101,30 +101,6 @@ func downloadAndProcessEip4844DA(ctx context.Context, logger log.Logger, cfg *Cf
return highestProcessed - 1, err
}
-func filterUnneededBlocks(ctx context.Context, blocks []*cltypes.SignedBeaconBlock, cfg *Cfg) []*cltypes.SignedBeaconBlock {
- filtered := make([]*cltypes.SignedBeaconBlock, 0, len(blocks))
- // Find the latest block in the list
- for _, block := range blocks {
- blockRoot, err := block.Block.HashSSZ()
- if err != nil {
- panic(err)
- }
- _, hasInFcu := cfg.forkChoice.GetHeader(blockRoot)
-
- var hasSignedHeaderInDB bool
- if err = cfg.indiciesDB.View(ctx, func(tx kv.Tx) error {
- _, hasSignedHeaderInDB, err = beacon_indicies.ReadSignedHeaderByBlockRoot(ctx, tx, blockRoot)
- return err
- }); err != nil {
- panic(err)
- }
- if !hasInFcu || !hasSignedHeaderInDB {
- filtered = append(filtered, block)
- }
- }
- return filtered
-}
-
// processDownloadedBlockBatches processes a batch of downloaded blocks.
// It takes the highest block processed, a flag to determine if insertion is needed, and a list of signed beacon blocks as input.
// It returns the new highest block processed and an error if any.
@@ -134,12 +110,6 @@ func processDownloadedBlockBatches(ctx context.Context, logger log.Logger, cfg *
return blocks[i].Block.Slot < blocks[j].Block.Slot
})
- // Filter out blocks that are already in the FCU or have a signed header in the DB
- blocks = filterUnneededBlocks(ctx, blocks, cfg)
- if len(blocks) == 0 {
- return highestBlockProcessed, nil
- }
-
var (
blockRoot common.Hash
st *state.CachingBeaconState
@@ -228,16 +198,16 @@ func processDownloadedBlockBatches(ctx context.Context, logger log.Logger, cfg *
// forwardSync (MAIN ROUTINE FOR ForwardSync) performs the forward synchronization of beacon blocks.
func forwardSync(ctx context.Context, logger log.Logger, cfg *Cfg, args Args) error {
var (
- shouldInsert = cfg.executionClient != nil && cfg.executionClient.SupportInsertion() // Check if the execution client supports insertion
- finalizedCheckpoint = cfg.forkChoice.FinalizedCheckpoint() // Get the finalized checkpoint from fork choice
- secsPerLog = 30 // Interval in seconds for logging progress
- logTicker = time.NewTicker(time.Duration(secsPerLog) * time.Second) // Ticker for logging progress
- downloader = network2.NewForwardBeaconDownloader(ctx, cfg.rpc) // Initialize a new forward beacon downloader
- currentSlot atomic.Uint64 // Atomic variable to track the current slot
+ shouldInsert = cfg.executionClient != nil && cfg.executionClient.SupportInsertion() // Check if the execution client supports insertion
+ startSlot = cfg.forkChoice.HighestSeen() - 8 // Start forwardsync a little bit behind the highest seen slot (account for potential reorgs)
+ secsPerLog = 30 // Interval in seconds for logging progress
+ logTicker = time.NewTicker(time.Duration(secsPerLog) * time.Second) // Ticker for logging progress
+ downloader = network2.NewForwardBeaconDownloader(ctx, cfg.rpc) // Initialize a new forward beacon downloader
+ currentSlot atomic.Uint64 // Atomic variable to track the current slot
)
// Initialize the slot to download from (a little behind the highest seen slot)
- currentSlot.Store(finalizedCheckpoint.Epoch * cfg.beaconCfg.SlotsPerEpoch)
+ currentSlot.Store(startSlot)
// Start slightly behind the highest seen slot to tolerate short reorgs
downloader.SetHighestProcessedSlot(currentSlot.Load())
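
Note: forward sync now starts a few slots behind the highest seen slot instead of at the finalized-checkpoint slot, so recently reorged blocks are re-downloaded without replaying the whole unfinalized range. A sketch of the selection with an explicit underflow guard (the hunk above assumes HighestSeen() >= 8):

    // reorgSlackSlots is the number of slots re-downloaded to cover short reorgs.
    const reorgSlackSlots = 8

    // forwardSyncStartSlot returns the slot forward sync should resume from.
    func forwardSyncStartSlot(highestSeen uint64) uint64 {
        if highestSeen < reorgSlackSlots {
            return 0
        }
        return highestSeen - reorgSlackSlots
    }
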
diff --git a/cl/sentinel/discovery.go b/cl/sentinel/discovery.go
index 384c752dc9a..53a840eb08b 100644
--- a/cl/sentinel/discovery.go
+++ b/cl/sentinel/discovery.go
@@ -114,8 +114,8 @@ func (s *Sentinel) listenForPeers() {
}
node := iterator.Node()
- needsPeersForSubnets := s.isPeerUsefulForAnySubnet(node)
- if s.HasTooManyPeers() && !needsPeersForSubnets {
+ // needsPeersForSubnets := s.isPeerUsefulForAnySubnet(node)
+ if s.HasTooManyPeers() {
log.Trace("[Sentinel] Not looking for peers, at peer limit")
time.Sleep(100 * time.Millisecond)
continue
@@ -179,6 +179,12 @@ func (s *Sentinel) setupENR(
func (s *Sentinel) onConnection(net network.Network, conn network.Conn) {
go func() {
peerId := conn.RemotePeer()
+ if s.HasTooManyPeers() {
+ log.Trace("[Sentinel] dropping incoming connection: at peer limit")
+ s.host.Peerstore().RemovePeer(peerId)
+ s.host.Network().ClosePeer(peerId)
+ s.peers.RemovePeer(peerId)
+ return // no point validating a peer we just disconnected
+ }
valid, err := s.handshaker.ValidatePeer(peerId)
if err != nil {
log.Trace("[sentinel] failed to validate peer:", "err", err)
diff --git a/cl/sentinel/msg_id.go b/cl/sentinel/msg_id.go
index ed241a10b99..12e543a0ac0 100644
--- a/cl/sentinel/msg_id.go
+++ b/cl/sentinel/msg_id.go
@@ -40,7 +40,7 @@ func (s *Sentinel) msgId(pmsg *pubsubpb.Message) string {
// beyond Bellatrix epoch, allow 10 Mib gossip data size
gossipPubSubSize := s.cfg.NetworkConfig.GossipMaxSizeBellatrix
- decodedData, err := utils.DecompressSnappy(pmsg.Data)
+ decodedData, err := utils.DecompressSnappy(pmsg.Data, true)
if err != nil || uint64(len(decodedData)) > gossipPubSubSize {
totalLength :=
len(s.cfg.NetworkConfig.MessageDomainValidSnappy) +
diff --git a/cl/sentinel/sentinel.go b/cl/sentinel/sentinel.go
index 729becd1dca..447f216d293 100644
--- a/cl/sentinel/sentinel.go
+++ b/cl/sentinel/sentinel.go
@@ -23,7 +23,6 @@ import (
"net"
"net/http"
"os/signal"
- "strconv"
"strings"
"sync"
"syscall"
@@ -31,7 +30,6 @@ import (
"github.com/c2h5oh/datasize"
"github.com/go-chi/chi/v5"
- "github.com/prysmaticlabs/go-bitfield"
"github.com/libp2p/go-libp2p"
pubsub "github.com/libp2p/go-libp2p-pubsub"
@@ -45,7 +43,6 @@ import (
"github.com/erigontech/erigon-lib/kv"
"github.com/erigontech/erigon-lib/log/v3"
"github.com/erigontech/erigon/cl/cltypes"
- "github.com/erigontech/erigon/cl/gossip"
"github.com/erigontech/erigon/cl/monitor"
"github.com/erigontech/erigon/cl/persistence/blob_storage"
"github.com/erigontech/erigon/cl/phase1/forkchoice"
@@ -386,89 +383,89 @@ func (s *Sentinel) HasTooManyPeers() bool {
return active >= int(s.cfg.MaxPeerCount)
}
-func (s *Sentinel) isPeerUsefulForAnySubnet(node *enode.Node) bool {
- ret := false
-
- nodeAttnets := bitfield.NewBitvector64()
- nodeSyncnets := bitfield.NewBitvector4()
- if err := node.Load(enr.WithEntry(s.cfg.NetworkConfig.AttSubnetKey, &nodeAttnets)); err != nil {
- log.Trace("Could not load att subnet", "err", err)
- return false
- }
- if err := node.Load(enr.WithEntry(s.cfg.NetworkConfig.SyncCommsSubnetKey, &nodeSyncnets)); err != nil {
- log.Trace("Could not load sync subnet", "err", err)
- return false
- }
-
- s.subManager.subscriptions.Range(func(key, value any) bool {
- sub := value.(*GossipSubscription)
- sub.lock.Lock()
- defer sub.lock.Unlock()
- if sub.sub == nil {
- return true
- }
-
- if !sub.subscribed.Load() {
- return true
- }
-
- if len(sub.topic.ListPeers()) > peerSubnetTarget {
- return true
- }
- if gossip.IsTopicBeaconAttestation(sub.sub.Topic()) {
- ret = s.isPeerUsefulForAttNet(sub, nodeAttnets)
- return !ret
- }
-
- if gossip.IsTopicSyncCommittee(sub.sub.Topic()) {
- ret = s.isPeerUsefulForSyncNet(sub, nodeSyncnets)
- return !ret
- }
-
- return true
- })
- return ret
-}
-
-func (s *Sentinel) isPeerUsefulForAttNet(sub *GossipSubscription, nodeAttnets bitfield.Bitvector64) bool {
- splitTopic := strings.Split(sub.sub.Topic(), "/")
- if len(splitTopic) < 4 {
- return false
- }
- subnetIdStr, found := strings.CutPrefix(splitTopic[3], "beacon_attestation_")
- if !found {
- return false
- }
- subnetId, err := strconv.Atoi(subnetIdStr)
- if err != nil {
- log.Warn("Could not parse subnet id", "subnet", subnetIdStr, "err", err)
- return false
- }
- // check if subnetIdth bit is set in nodeAttnets
- return nodeAttnets.BitAt(uint64(subnetId))
-
-}
-
-func (s *Sentinel) isPeerUsefulForSyncNet(sub *GossipSubscription, nodeSyncnets bitfield.Bitvector4) bool {
- splitTopic := strings.Split(sub.sub.Topic(), "/")
- if len(splitTopic) < 4 {
- return false
- }
- syncnetIdStr, found := strings.CutPrefix(splitTopic[3], "sync_committee_")
- if !found {
- return false
- }
- syncnetId, err := strconv.Atoi(syncnetIdStr)
- if err != nil {
- log.Warn("Could not parse syncnet id", "syncnet", syncnetIdStr, "err", err)
- return false
- }
- // check if syncnetIdth bit is set in nodeSyncnets
- if nodeSyncnets.BitAt(uint64(syncnetId)) {
- return true
- }
- return false
-}
+// func (s *Sentinel) isPeerUsefulForAnySubnet(node *enode.Node) bool {
+// ret := false
+
+// nodeAttnets := bitfield.NewBitvector64()
+// nodeSyncnets := bitfield.NewBitvector4()
+// if err := node.Load(enr.WithEntry(s.cfg.NetworkConfig.AttSubnetKey, &nodeAttnets)); err != nil {
+// log.Trace("Could not load att subnet", "err", err)
+// return false
+// }
+// if err := node.Load(enr.WithEntry(s.cfg.NetworkConfig.SyncCommsSubnetKey, &nodeSyncnets)); err != nil {
+// log.Trace("Could not load sync subnet", "err", err)
+// return false
+// }
+
+// s.subManager.subscriptions.Range(func(key, value any) bool {
+// sub := value.(*GossipSubscription)
+// sub.lock.Lock()
+// defer sub.lock.Unlock()
+// if sub.sub == nil {
+// return true
+// }
+
+// if !sub.subscribed.Load() {
+// return true
+// }
+
+// if len(sub.topic.ListPeers()) > peerSubnetTarget {
+// return true
+// }
+// if gossip.IsTopicBeaconAttestation(sub.sub.Topic()) {
+// ret = s.isPeerUsefulForAttNet(sub, nodeAttnets)
+// return !ret
+// }
+
+// if gossip.IsTopicSyncCommittee(sub.sub.Topic()) {
+// ret = s.isPeerUsefulForSyncNet(sub, nodeSyncnets)
+// return !ret
+// }
+
+// return true
+// })
+// return ret
+// }
+
+// func (s *Sentinel) isPeerUsefulForAttNet(sub *GossipSubscription, nodeAttnets bitfield.Bitvector64) bool {
+// splitTopic := strings.Split(sub.sub.Topic(), "/")
+// if len(splitTopic) < 4 {
+// return false
+// }
+// subnetIdStr, found := strings.CutPrefix(splitTopic[3], "beacon_attestation_")
+// if !found {
+// return false
+// }
+// subnetId, err := strconv.Atoi(subnetIdStr)
+// if err != nil {
+// log.Warn("Could not parse subnet id", "subnet", subnetIdStr, "err", err)
+// return false
+// }
+// // check if subnetIdth bit is set in nodeAttnets
+// return nodeAttnets.BitAt(uint64(subnetId))
+
+// }
+
+// func (s *Sentinel) isPeerUsefulForSyncNet(sub *GossipSubscription, nodeSyncnets bitfield.Bitvector4) bool {
+// splitTopic := strings.Split(sub.sub.Topic(), "/")
+// if len(splitTopic) < 4 {
+// return false
+// }
+// syncnetIdStr, found := strings.CutPrefix(splitTopic[3], "sync_committee_")
+// if !found {
+// return false
+// }
+// syncnetId, err := strconv.Atoi(syncnetIdStr)
+// if err != nil {
+// log.Warn("Could not parse syncnet id", "syncnet", syncnetIdStr, "err", err)
+// return false
+// }
+// // check if syncnetIdth bit is set in nodeSyncnets
+// if nodeSyncnets.BitAt(uint64(syncnetId)) {
+// return true
+// }
+// return false
+// }
func (s *Sentinel) GetPeersCount() (active int, connected int, disconnected int) {
peers := s.host.Network().Peers()
diff --git a/cl/sentinel/sentinel_gossip_test.go b/cl/sentinel/sentinel_gossip_test.go
index 9773e956e34..c9890d2b837 100644
--- a/cl/sentinel/sentinel_gossip_test.go
+++ b/cl/sentinel/sentinel_gossip_test.go
@@ -63,6 +63,7 @@ func TestSentinelGossipOnHardFork(t *testing.T) {
IpAddr: listenAddrHost,
Port: 7070,
EnableBlocks: true,
+ MaxPeerCount: 9999999,
}, ethClock, reader, nil, db, log.New(), &mock_services.ForkChoiceStorageMock{})
require.NoError(t, err)
defer sentinel1.Stop()
@@ -77,6 +78,7 @@ func TestSentinelGossipOnHardFork(t *testing.T) {
Port: 7077,
EnableBlocks: true,
TCPPort: 9123,
+ MaxPeerCount: 9999999,
}, ethClock, reader, nil, db, log.New(), &mock_services.ForkChoiceStorageMock{})
require.NoError(t, err)
defer sentinel2.Stop()
diff --git a/cl/sentinel/sentinel_requests_test.go b/cl/sentinel/sentinel_requests_test.go
index c2eac7403c7..cedf3aec840 100644
--- a/cl/sentinel/sentinel_requests_test.go
+++ b/cl/sentinel/sentinel_requests_test.go
@@ -77,6 +77,7 @@ func TestSentinelBlocksByRange(t *testing.T) {
IpAddr: listenAddrHost,
Port: 7070,
EnableBlocks: true,
+ MaxPeerCount: 8883,
}, ethClock, reader, nil, db, log.New(), &mock_services.ForkChoiceStorageMock{})
require.NoError(t, err)
defer sentinel.Stop()
@@ -181,6 +182,7 @@ func TestSentinelBlocksByRoots(t *testing.T) {
IpAddr: listenAddrHost,
Port: 7070,
EnableBlocks: true,
+ MaxPeerCount: 8883,
}, ethClock, reader, nil, db, log.New(), &mock_services.ForkChoiceStorageMock{})
require.NoError(t, err)
defer sentinel.Stop()
@@ -290,6 +292,7 @@ func TestSentinelStatusRequest(t *testing.T) {
IpAddr: listenAddrHost,
Port: 7070,
EnableBlocks: true,
+ MaxPeerCount: 8883,
}, ethClock, reader, nil, db, log.New(), &mock_services.ForkChoiceStorageMock{})
require.NoError(t, err)
defer sentinel.Stop()
diff --git a/cl/sentinel/service/service.go b/cl/sentinel/service/service.go
index 1b629cd9be8..122c1e6a740 100644
--- a/cl/sentinel/service/service.go
+++ b/cl/sentinel/service/service.go
@@ -378,7 +378,7 @@ func (s *SentinelServer) handleGossipPacket(pkt *sentinel.GossipMessage) error {
topic := pkt.TopicName
// If we use snappy codec then decompress it accordingly.
if strings.Contains(topic, sentinel.SSZSnappyCodec) {
- data, err = utils.DecompressSnappy(data)
+ data, err = utils.DecompressSnappy(data, true)
if err != nil {
return err
}
diff --git a/cl/spectest/consensus_tests/fork_choice.go b/cl/spectest/consensus_tests/fork_choice.go
index e0cbe959e5c..f421c883bf2 100644
--- a/cl/spectest/consensus_tests/fork_choice.go
+++ b/cl/spectest/consensus_tests/fork_choice.go
@@ -35,7 +35,6 @@ import (
"github.com/erigontech/erigon/cl/clparams"
"github.com/erigontech/erigon/cl/clparams/initial_state"
"github.com/erigontech/erigon/cl/cltypes/solid"
- "github.com/erigontech/erigon/cl/monitor"
"github.com/erigontech/erigon/cl/persistence/blob_storage"
"github.com/erigontech/erigon/cl/phase1/forkchoice"
"github.com/erigontech/erigon/cl/phase1/forkchoice/fork_graph"
@@ -206,11 +205,10 @@ func (b *ForkChoice) Run(t *testing.T, root fs.FS, c spectest.TestCase) (err err
ethClock := eth_clock.NewEthereumClock(genesisState.GenesisTime(), genesisState.GenesisValidatorsRoot(), beaconConfig)
blobStorage := blob_storage.NewBlobStore(memdb.New("/tmp", kv.ChainDB), afero.NewMemMapFs(), math.MaxUint64, &clparams.MainnetBeaconConfig, ethClock)
- validatorMonitor := monitor.NewValidatorMonitor(false, nil, nil, nil)
forkStore, err := forkchoice.NewForkChoiceStore(
ethClock, anchorState, nil, pool.NewOperationsPool(&clparams.MainnetBeaconConfig),
fork_graph.NewForkGraphDisk(anchorState, nil, afero.NewMemMapFs(), beacon_router_configuration.RouterConfiguration{}, emitters),
- emitters, synced_data.NewSyncedDataManager(&clparams.MainnetBeaconConfig, true), blobStorage, validatorMonitor, public_keys_registry.NewInMemoryPublicKeysRegistry(), false)
+ emitters, synced_data.NewSyncedDataManager(&clparams.MainnetBeaconConfig, true), blobStorage, public_keys_registry.NewInMemoryPublicKeysRegistry(), false)
require.NoError(t, err)
forkStore.SetSynced(true)
diff --git a/cl/spectest/consensus_tests/ssz_static.go b/cl/spectest/consensus_tests/ssz_static.go
index 9141f743435..440540bf743 100644
--- a/cl/spectest/consensus_tests/ssz_static.go
+++ b/cl/spectest/consensus_tests/ssz_static.go
@@ -69,7 +69,7 @@ func getSSZStaticConsensusTest[T unmarshalerMarshalerHashable](ref T) spectest.H
snappyEncoded, err := fs.ReadFile(fsroot, serializedFile)
require.NoError(t, err)
- encoded, err := utils.DecompressSnappy(snappyEncoded)
+ encoded, err := utils.DecompressSnappy(snappyEncoded, false)
require.NoError(t, err)
if err := object.DecodeSSZ(encoded, int(c.Version())); err != nil && !isBeaconState {
@@ -167,7 +167,7 @@ func sszStaticTestNewObjectByFunc[T unmarshalerMarshalerHashable](
// read ssz bytes and decode
snappyEncoded, err := fs.ReadFile(fsroot, serializedFile)
require.NoError(t, err)
- encoded, err := utils.DecompressSnappy(snappyEncoded)
+ encoded, err := utils.DecompressSnappy(snappyEncoded, false)
require.NoError(t, err)
if err := object.DecodeSSZ(encoded, int(c.Version())); err != nil {
return err
diff --git a/cl/ssz/ssz_test.go b/cl/ssz/ssz_test.go
index 69eacababd8..4015f940b6a 100644
--- a/cl/ssz/ssz_test.go
+++ b/cl/ssz/ssz_test.go
@@ -38,6 +38,6 @@ func TestEncodeDecode(t *testing.T) {
require.Equal(t, common.Hash(root), common.HexToHash("0x36eb1bb5b4616f9d5046b2a719a8c4217f3dc40c1b7dff7abcc55c47f142a78b"))
d, err := bs.EncodeSSZ(nil)
require.NoError(t, err)
- dec, _ := utils.DecompressSnappy(beaconState)
+ dec, _ := utils.DecompressSnappy(beaconState, true)
require.Equal(t, dec, d)
}
diff --git a/cl/utils/bytes.go b/cl/utils/bytes.go
index a452e667b9e..20cc5808729 100644
--- a/cl/utils/bytes.go
+++ b/cl/utils/bytes.go
@@ -22,6 +22,7 @@ import (
"math/bits"
"unsafe"
+ "github.com/c2h5oh/datasize"
"github.com/erigontech/erigon-lib/types/ssz"
"github.com/golang/snappy"
@@ -29,6 +30,8 @@ import (
var IsSysLittleEndian bool
+const maxDecodeLenAllowed = 15 * datasize.MB
+
func init() {
buf := [2]byte{}
*(*uint16)(unsafe.Pointer(&buf[0])) = uint16(0xABCD)
@@ -63,12 +66,15 @@ func Uint64ToLE(i uint64) []byte {
return buf
}
-func DecompressSnappy(data []byte) ([]byte, error) {
+func DecompressSnappy(data []byte, lengthCheck bool) ([]byte, error) {
// Decode the snappy
lenDecoded, err := snappy.DecodedLen(data)
if err != nil {
return nil, err
}
+ if lengthCheck && lenDecoded > int(maxDecodeLenAllowed) {
+ return nil, errors.New("snappy: decoded length is too large")
+ }
decodedData := make([]byte, lenDecoded)
return snappy.Decode(decodedData, data)
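
Note: the new lengthCheck flag guards untrusted inputs (gossip messages, msg-id computation) against snappy decompression bombs, while trusted local data (spectest fixtures, on-disk states) passes false. The pattern in isolation, as a runnable sketch with an explicit limit:

    package main

    import (
        "fmt"

        "github.com/golang/snappy"
    )

    // safeDecompress mirrors the guard above: snappy.DecodedLen only reads the
    // stored length prefix, so an oversized (potentially hostile) payload is
    // rejected before any buffer is allocated. 15 MB matches maxDecodeLenAllowed.
    func safeDecompress(data []byte, limit int) ([]byte, error) {
        n, err := snappy.DecodedLen(data)
        if err != nil {
            return nil, err
        }
        if n > limit {
            return nil, fmt.Errorf("snappy: decoded length %d exceeds limit %d", n, limit)
        }
        return snappy.Decode(make([]byte, n), data)
    }

    func main() {
        enc := snappy.Encode(nil, []byte("hello"))
        dec, err := safeDecompress(enc, 15*1024*1024)
        fmt.Println(string(dec), err) // hello <nil>
    }
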
diff --git a/cl/utils/bytes_test.go b/cl/utils/bytes_test.go
index e3d11c28ef3..4711381475f 100644
--- a/cl/utils/bytes_test.go
+++ b/cl/utils/bytes_test.go
@@ -42,7 +42,7 @@ func TestSSZSnappy(t *testing.T) {
func TestPlainSnappy(t *testing.T) {
msg := common.Hex2Bytes("10103849358111387348383738784374783811111754097864786873478675489485765483936576486387645456876772090909090ff")
sussyEncoded := utils.CompressSnappy(msg)
- sussyDecoded, err := utils.DecompressSnappy(sussyEncoded)
+ sussyDecoded, err := utils.DecompressSnappy(sussyEncoded, false)
require.NoError(t, err)
require.Equal(t, msg, sussyDecoded)
}
diff --git a/cmd/caplin/caplin1/run.go b/cmd/caplin/caplin1/run.go
index 20d128ec4ed..11ed2b8d6dc 100644
--- a/cmd/caplin/caplin1/run.go
+++ b/cmd/caplin/caplin1/run.go
@@ -40,7 +40,6 @@ import (
"github.com/erigontech/erigon/cl/beacon/synced_data"
"github.com/erigontech/erigon/cl/clparams/initial_state"
"github.com/erigontech/erigon/cl/cltypes"
- "github.com/erigontech/erigon/cl/monitor"
"github.com/erigontech/erigon/cl/rpc"
"github.com/erigontech/erigon/cl/sentinel"
"github.com/erigontech/erigon/cl/sentinel/service"
@@ -263,7 +262,6 @@ func RunCaplinService(ctx context.Context, engine execution_client.ExecutionEngi
syncContributionPool := sync_contribution_pool.NewSyncContributionPool(beaconConfig)
emitters := beaconevents.NewEventEmitter()
aggregationPool := aggregation.NewAggregationPool(ctx, beaconConfig, networkConfig, ethClock)
- validatorMonitor := monitor.NewValidatorMonitor(config.EnableValidatorMonitor, ethClock, beaconConfig, syncedDataManager)
doLMDSampling := len(state.GetActiveValidatorsIndices(state.Slot()/beaconConfig.SlotsPerEpoch)) >= 20_000
// create the public keys registry
@@ -271,7 +269,7 @@ func RunCaplinService(ctx context.Context, engine execution_client.ExecutionEngi
forkChoice, err := forkchoice.NewForkChoiceStore(
ethClock, state, engine, pool, fork_graph.NewForkGraphDisk(state, syncedDataManager, fcuFs, config.BeaconAPIRouter, emitters),
- emitters, syncedDataManager, blobStorage, validatorMonitor, pksRegistry, doLMDSampling)
+ emitters, syncedDataManager, blobStorage, pksRegistry, doLMDSampling)
if err != nil {
logger.Error("Could not create forkchoice", "err", err)
return err
@@ -437,7 +435,6 @@ func RunCaplinService(ctx context.Context, engine execution_client.ExecutionEngi
blsToExecutionChangeService,
proposerSlashingService,
option.builderClient,
- validatorMonitor,
stateSnapshots,
true,
)
@@ -466,7 +463,6 @@ func RunCaplinService(ctx context.Context, engine execution_client.ExecutionEngi
emitters,
blobStorage,
attestationProducer,
- validatorMonitor,
)
sync := stages.ConsensusClStages(ctx, stageCfg)
diff --git a/cmd/devnet/services/polygon/heimdall.go b/cmd/devnet/services/polygon/heimdall.go
index 077db74c2c5..8eaebe924c8 100644
--- a/cmd/devnet/services/polygon/heimdall.go
+++ b/cmd/devnet/services/polygon/heimdall.go
@@ -221,6 +221,10 @@ func (h *Heimdall) getSpanOverrideHeight() uint64 {
//MainChain: 8664000
}
+func (h *Heimdall) FetchStatus(ctx context.Context) (*heimdall.Status, error) {
+ return nil, errors.New("TODO")
+}
+
func (h *Heimdall) FetchCheckpoint(ctx context.Context, number int64) (*heimdall.Checkpoint, error) {
return nil, errors.New("TODO")
}
diff --git a/cmd/devnet/services/polygon/heimdallsim/heimdall_simulator.go b/cmd/devnet/services/polygon/heimdallsim/heimdall_simulator.go
index ce33ff35b0d..6be53c4c284 100644
--- a/cmd/devnet/services/polygon/heimdallsim/heimdall_simulator.go
+++ b/cmd/devnet/services/polygon/heimdallsim/heimdall_simulator.go
@@ -228,6 +228,10 @@ func (h *HeimdallSimulator) FetchStateSyncEvent(ctx context.Context, id uint64)
return nil, errors.New("method FetchStateSyncEvent not implemented")
}
+func (h *HeimdallSimulator) FetchStatus(ctx context.Context) (*heimdall.Status, error) {
+ return nil, errors.New("method FetchStatus not implemented")
+}
+
func (h *HeimdallSimulator) FetchCheckpoint(ctx context.Context, number int64) (*heimdall.Checkpoint, error) {
return nil, errors.New("method FetchCheckpoint not implemented")
}
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index f9cf999d42b..ebaebb1720d 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -844,12 +844,12 @@ var (
CaplinMaxInboundTrafficPerPeerFlag = cli.StringFlag{
Name: "caplin.max-inbound-traffic-per-peer",
Usage: "Max inbound traffic per second per peer",
- Value: "256KB",
+ Value: "1MB",
}
CaplinMaxOutboundTrafficPerPeerFlag = cli.StringFlag{
Name: "caplin.max-outbound-traffic-per-peer",
Usage: "Max outbound traffic per second per peer",
- Value: "256KB",
+ Value: "1MB",
}
CaplinAdaptableTrafficRequirementsFlag = cli.BoolFlag{
Name: "caplin.adaptable-maximum-traffic-requirements",
@@ -879,7 +879,7 @@ var (
CaplinMaxPeerCount = cli.Uint64Flag{
Name: "caplin.max-peer-count",
Usage: "Max number of peers to connect",
- Value: 80,
+ Value: 128,
}
SentinelAddrFlag = cli.StringFlag{
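
Note: the per-peer traffic defaults are strings presumably parsed with the c2h5oh/datasize module imported elsewhere in this PR, under which "1MB" denotes one mebibyte. A minimal sketch of that assumption:

    package main

    import (
        "fmt"

        "github.com/c2h5oh/datasize"
    )

    func main() {
        v, err := datasize.ParseString("1MB") // the new per-peer traffic default
        if err != nil {
            panic(err)
        }
        fmt.Println(v.Bytes()) // 1048576
    }
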
diff --git a/core/blockchain.go b/core/blockchain.go
index 7c2a174031c..2f6c995a01b 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -516,7 +516,7 @@ func InitializeBlockExecution(engine consensus.Engine, chain consensus.ChainHead
return nil
}
-func BlockPostValidation(gasUsed, blobGasUsed uint64, checkReceipts bool, receipts types.Receipts, h *types.Header, isMining bool) error {
+func BlockPostValidation(gasUsed, blobGasUsed uint64, checkReceipts bool, receipts types.Receipts, h *types.Header, isMining bool, txns types.Transactions, chainConfig *chain.Config, logger log.Logger) error {
if gasUsed != h.GasUsed {
return fmt.Errorf("gas used by execution: %d, in header: %d, headerNum=%d, %x",
gasUsed, h.GasUsed, h.Number.Uint64(), h.Hash())
@@ -536,6 +536,9 @@ func BlockPostValidation(gasUsed, blobGasUsed uint64, checkReceipts bool, receip
h.ReceiptHash = receiptHash
return nil
}
+ if dbg.LogHashMismatchReason() {
+ logReceipts(receipts, txns, chainConfig, h, logger)
+ }
return fmt.Errorf("receiptHash mismatch: %x != %x, headerNum=%d, %x",
receiptHash, h.ReceiptHash, h.Number.Uint64(), h.Hash())
}
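
Note: logReceipts is added by this PR but not shown in this hunk; below is a hypothetical sketch of such a diagnostic, dumping per-receipt fields so a receiptHash mismatch can be traced to a specific transaction. The real implementation may differ.

    // Hypothetical sketch only: the actual logReceipts in this PR is not shown
    // in this hunk. It assumes the erigon types/chain/log packages already
    // imported by core/blockchain.go.
    func logReceipts(receipts types.Receipts, txns types.Transactions, _ *chain.Config, h *types.Header, logger log.Logger) {
        for i, r := range receipts {
            var txHash libcommon.Hash
            if i < len(txns) {
                txHash = txns[i].Hash()
            }
            logger.Warn("receipt dump", "block", h.Number.Uint64(), "idx", i,
                "tx", txHash, "status", r.Status, "cumulativeGasUsed", r.CumulativeGasUsed)
        }
    }
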
diff --git a/core/forkid/forkid_test.go b/core/forkid/forkid_test.go
index 83a6d30bed4..941681ea6a7 100644
--- a/core/forkid/forkid_test.go
+++ b/core/forkid/forkid_test.go
@@ -148,11 +148,13 @@ func TestCreation(t *testing.T) {
params.ChiadoGenesisHash,
[]testcase{
{0, 0, ID{Hash: checksumToBytes(0x50d39d7b), Next: 1684934220}},
- {4100418, 1684934215, ID{Hash: checksumToBytes(0x50d39d7b), Next: 1684934220}}, // Last pre-Shanghai block
- {4100419, 1684934220, ID{Hash: checksumToBytes(0xa15a4252), Next: 1706724940}}, // First Shanghai block
- {8021277, 1706724930, ID{Hash: checksumToBytes(0xa15a4252), Next: 1706724940}}, // Last Shanghai block
- {8021278, 1706724940, ID{Hash: checksumToBytes(0x5fbc16bc), Next: 0}}, // First Cancun block
- {10000000, 1800000000, ID{Hash: checksumToBytes(0x5fbc16bc), Next: 0}}, // Future Cancun block (mock)
+ {4100418, 1684934215, ID{Hash: checksumToBytes(0x50d39d7b), Next: 1684934220}}, // Last pre-Shanghai block
+ {4100419, 1684934220, ID{Hash: checksumToBytes(0xa15a4252), Next: 1706724940}}, // First Shanghai block
+ {8021277, 1706724930, ID{Hash: checksumToBytes(0xa15a4252), Next: 1706724940}}, // Last Shanghai block
+ {8021278, 1706724940, ID{Hash: checksumToBytes(0x5fbc16bc), Next: 1741254220}}, // First Cancun block
+ {14655798, 1741254215, ID{Hash: checksumToBytes(0x5fbc16bc), Next: 1741254220}}, // Last Cancun block (approx)
+ {14655799, 1741254220, ID{Hash: checksumToBytes(0x8ba51786), Next: 0}}, // First Prague block (approx)
+ {20000000, 1800000000, ID{Hash: checksumToBytes(0x8ba51786), Next: 0}}, // Future Prague block (mock)
},
},
// Amoy test cases
diff --git a/core/state_transition.go b/core/state_transition.go
index 7f90b44aa69..ae037cf36ca 100644
--- a/core/state_transition.go
+++ b/core/state_transition.go
@@ -585,6 +585,8 @@ func (st *StateTransition) TransitionDb(refunds bool, gasBailout bool) (*evmtype
SenderInitBalance: senderInitBalance,
CoinbaseInitBalance: coinbaseInitBalance,
FeeTipped: amount,
+ EvmRefund: st.state.GetRefund(),
+ EvmGasUsed: st.gasUsed(),
}
if st.evm.Context.PostApplyMessage != nil {
diff --git a/core/types/dynamic_fee_tx.go b/core/types/dynamic_fee_tx.go
index a165b0109c5..bcd1726dc7e 100644
--- a/core/types/dynamic_fee_tx.go
+++ b/core/types/dynamic_fee_tx.go
@@ -49,7 +49,6 @@ func (tx *DynamicFeeTransaction) GetEffectiveGasTip(baseFee *uint256.Int) *uint2
}
gasFeeCap := tx.GetFeeCap()
// return 0 because effectiveFee can't be < 0
- // transaction max fee is below base fee
if gasFeeCap.Lt(baseFee) {
return uint256.NewInt(0)
}
diff --git a/core/vm/evmtypes/evmtypes.go b/core/vm/evmtypes/evmtypes.go
index c4bd4e68867..b345bb99fe6 100644
--- a/core/vm/evmtypes/evmtypes.go
+++ b/core/vm/evmtypes/evmtypes.go
@@ -72,6 +72,8 @@ type ExecutionResult struct {
SenderInitBalance *uint256.Int
CoinbaseInitBalance *uint256.Int
FeeTipped *uint256.Int
+ EvmRefund uint64 // Gas refunded by EVM without considering refundQuotient
+ EvmGasUsed uint64 // Gas used by the execution of all instructions only
}
// Unwrap returns the internal evm error which allows us for further
diff --git a/erigon-lib/common/dbg/experiments.go b/erigon-lib/common/dbg/experiments.go
index 64daf3f6ea6..167a4db34dd 100644
--- a/erigon-lib/common/dbg/experiments.go
+++ b/erigon-lib/common/dbg/experiments.go
@@ -67,7 +67,6 @@ var (
CommitEachStage = EnvBool("COMMIT_EACH_STAGE", false)
CaplinSyncedDataMangerDeadlockDetection = EnvBool("CAPLIN_SYNCED_DATA_MANAGER_DEADLOCK_DETECTION", false)
- CaplinEfficientReorg = EnvBool("CAPLIN_EFFICIENT_REORG", true)
)
func ReadMemStats(m *runtime.MemStats) {
diff --git a/eth/gasprice/feehistory.go b/eth/gasprice/feehistory.go
index c3892fbcb99..16f10cf1ee2 100644
--- a/eth/gasprice/feehistory.go
+++ b/eth/gasprice/feehistory.go
@@ -169,66 +169,42 @@ func (oracle *Oracle) processBlock(bf *blockFees, percentiles []float64) {
// also returned if requested and available.
// Note: an error is only returned if retrieving the head header has failed. If there are no
// retrievable blocks in the specified range then zero block count is returned with no error.
-func (oracle *Oracle) resolveBlockRange(ctx context.Context, reqEnd rpc.BlockNumber, blocks, maxHistory int) (*types.Block, []*types.Receipt, uint64, int, error) {
+func (oracle *Oracle) resolveBlockRange(ctx context.Context, lastBlock rpc.BlockNumber, blocks, maxHistory int) (*types.Block, []*types.Receipt, uint64, int, error) {
var (
- headBlock *types.Header
+ headBlock rpc.BlockNumber
pendingBlock *types.Block
pendingReceipts types.Receipts
- err error
)
-
- // Get the chain's current head.
- if headBlock, err = oracle.backend.HeaderByNumber(ctx, rpc.LatestBlockNumber); err != nil {
- return nil, nil, 0, 0, err
- }
- head := rpc.BlockNumber(headBlock.Number.Uint64())
- // Fail if request block is beyond the chain's current head.
- if head < reqEnd {
- return nil, nil, 0, 0, fmt.Errorf("%w: requested %d, head %d", ErrRequestBeyondHead, reqEnd, head)
- }
-
- // Resolve block tag.
- if reqEnd < 0 {
- var (
- resolved *types.Header
- err error
- )
- switch reqEnd {
- case rpc.PendingBlockNumber:
- if pendingBlock, pendingReceipts = oracle.backend.PendingBlockAndReceipts(); pendingBlock != nil {
- resolved = pendingBlock.Header()
- } else {
- // Pending block not supported by backend, process only until latest block.
- resolved = headBlock
-
- // Update total blocks to return to account for this.
- blocks--
+ // query either pending block or head header and set headBlock
+ if lastBlock == rpc.PendingBlockNumber {
+ if pendingBlock, pendingReceipts = oracle.backend.PendingBlockAndReceipts(); pendingBlock != nil {
+ lastBlock = rpc.BlockNumber(pendingBlock.NumberU64())
+ headBlock = lastBlock - 1
+ } else {
+ // pending block not supported by backend, process until latest block
+ lastBlock = rpc.LatestBlockNumber
+ blocks--
+ if blocks == 0 {
+ return nil, nil, 0, 0, nil
}
- case rpc.LatestBlockNumber:
- // Retrieved above.
- resolved = headBlock
- case rpc.SafeBlockNumber:
- resolved, err = oracle.backend.HeaderByNumber(ctx, rpc.SafeBlockNumber)
- case rpc.FinalizedBlockNumber:
- resolved, err = oracle.backend.HeaderByNumber(ctx, rpc.FinalizedBlockNumber)
- case rpc.EarliestBlockNumber:
- resolved, err = oracle.backend.HeaderByNumber(ctx, rpc.EarliestBlockNumber)
}
- if resolved == nil || err != nil {
+ }
+ if pendingBlock == nil {
+ // if the pending block was not fetched, retrieve the head header to get the head block number
+ if latestHeader, err := oracle.backend.HeaderByNumber(ctx, rpc.LatestBlockNumber); err == nil {
+ headBlock = rpc.BlockNumber(latestHeader.Number.Uint64())
+ } else {
return nil, nil, 0, 0, err
}
- // Absolute number resolved.
- reqEnd = rpc.BlockNumber(resolved.Number.Uint64())
}
-
- // If there are no blocks to return, short circuit.
- if blocks == 0 {
- return nil, nil, 0, 0, nil
+ if lastBlock == rpc.LatestBlockNumber {
+ lastBlock = headBlock
+ } else if pendingBlock == nil && lastBlock > headBlock {
+ return nil, nil, 0, 0, fmt.Errorf("%w: requested %d, head %d", ErrRequestBeyondHead, lastBlock, headBlock)
}
-
if maxHistory != 0 {
// limit retrieval to the given number of latest blocks
- if tooOldCount := int64(headBlock.Number.Uint64()) - int64(maxHistory) - int64(reqEnd) + int64(blocks); tooOldCount > 0 {
+ if tooOldCount := int64(headBlock) - int64(maxHistory) - int64(lastBlock) + int64(blocks); tooOldCount > 0 {
// tooOldCount is the number of requested blocks that are too old to be served
if int64(blocks) > tooOldCount {
blocks -= int(tooOldCount)
@@ -238,10 +214,10 @@ func (oracle *Oracle) resolveBlockRange(ctx context.Context, reqEnd rpc.BlockNum
}
}
// ensure not trying to retrieve before genesis
- if rpc.BlockNumber(blocks) > reqEnd+1 {
- blocks = int(reqEnd + 1)
+ if rpc.BlockNumber(blocks) > lastBlock+1 {
+ blocks = int(lastBlock + 1)
}
- return pendingBlock, pendingReceipts, uint64(reqEnd), blocks, nil
+ return pendingBlock, pendingReceipts, uint64(lastBlock), blocks, nil
}
// FeeHistory returns data relevant for fee estimation based on the specified range of blocks.
diff --git a/eth/stagedsync/exec3.go b/eth/stagedsync/exec3.go
index 4bd3c819f27..150e61c8b18 100644
--- a/eth/stagedsync/exec3.go
+++ b/eth/stagedsync/exec3.go
@@ -572,7 +572,7 @@ Loop:
txTask.Config = cfg.genesis.Config
}
- if txTask.TxNum <= txNumInDB && txTask.TxNum > 0 {
+ if txTask.TxNum <= txNumInDB && txTask.TxNum > 0 && !cfg.blockProduction {
inputTxNum++
skipPostEvaluation = true
continue
diff --git a/eth/stagedsync/exec3_serial.go b/eth/stagedsync/exec3_serial.go
index 9fc62f3d96c..074810c062b 100644
--- a/eth/stagedsync/exec3_serial.go
+++ b/eth/stagedsync/exec3_serial.go
@@ -69,7 +69,7 @@ func (se *serialExecutor) execute(ctx context.Context, tasks []*state.TxTask) (c
}
checkReceipts := !se.cfg.vmConfig.StatelessExec && se.cfg.chainConfig.IsByzantium(txTask.BlockNum) && !se.cfg.vmConfig.NoReceipts && !se.isMining
if txTask.BlockNum > 0 && !se.skipPostEvaluation { //Disable check for genesis. Maybe need somehow improve it in future - to satisfy TestExecutionSpec
- if err := core.BlockPostValidation(se.usedGas, se.blobGasUsed, checkReceipts, txTask.BlockReceipts, txTask.Header, se.isMining); err != nil {
+ if err := core.BlockPostValidation(se.usedGas, se.blobGasUsed, checkReceipts, txTask.BlockReceipts, txTask.Header, se.isMining, txTask.Txs, se.cfg.chainConfig, se.logger); err != nil {
return fmt.Errorf("%w, txnIdx=%d, %v", consensus.ErrInvalidBlock, txTask.TxIndex, err) //same as in stage_exec.go
}
}
diff --git a/go.mod b/go.mod
index 8411cefddae..b3a6222e989 100644
--- a/go.mod
+++ b/go.mod
@@ -23,7 +23,7 @@ require (
require (
gfx.cafe/util/go/generic v0.0.0-20230721185457-c559e86c829c
github.com/99designs/gqlgen v0.17.63
- github.com/Giulio2002/bls v0.0.0-20241116091023-2ddcc8954ec0
+ github.com/Giulio2002/bls v0.0.0-20250218151206-daa74641714d
github.com/Masterminds/sprig/v3 v3.2.3
github.com/RoaringBitmap/roaring/v2 v2.4.3
github.com/alecthomas/kong v0.8.1
diff --git a/go.sum b/go.sum
index ca58c19173b..3eaa30aa91c 100644
--- a/go.sum
+++ b/go.sum
@@ -55,8 +55,8 @@ github.com/AskAlexSharov/bloomfilter/v2 v2.0.9 h1:BuZqNjRlYmcXJIsI7nrIkejYMz9mgF
github.com/AskAlexSharov/bloomfilter/v2 v2.0.9/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
-github.com/Giulio2002/bls v0.0.0-20241116091023-2ddcc8954ec0 h1:6DVEDL29nd7f2GoHZIA9rjpW90gYeNE3x5aUadOgTB4=
-github.com/Giulio2002/bls v0.0.0-20241116091023-2ddcc8954ec0/go.mod h1:k6OaCwpn4WGfzPgoXuEiWaV1BKXW+GjSkIz1mCA4jFU=
+github.com/Giulio2002/bls v0.0.0-20250218151206-daa74641714d h1:OZwEfxKMk510XJnpOJXiP50mc9aPEPhHuwjpGG95JQs=
+github.com/Giulio2002/bls v0.0.0-20250218151206-daa74641714d/go.mod h1:k6OaCwpn4WGfzPgoXuEiWaV1BKXW+GjSkIz1mCA4jFU=
github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI=
github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
github.com/Masterminds/semver/v3 v3.2.0 h1:3MEsd0SM6jqZojhjLWWeBY+Kcjy9i6MQAeY7YgDP83g=
diff --git a/params/chainspecs/chiado.json b/params/chainspecs/chiado.json
index c109ac65466..ad51fc2a52f 100644
--- a/params/chainspecs/chiado.json
+++ b/params/chainspecs/chiado.json
@@ -15,6 +15,7 @@
"terminalTotalDifficultyPassed": true,
"shanghaiTime": 1684934220,
"cancunTime": 1706724940,
+ "pragueTime": 1741254220,
"minBlobGasPrice": 1000000000,
"blobSchedule": {
"cancun": {
diff --git a/params/version.go b/params/version.go
index 538f561411b..e6cc5da72ee 100644
--- a/params/version.go
+++ b/params/version.go
@@ -34,10 +34,10 @@ var (
// see https://calver.org
const (
- VersionMajor = 1 // Major version component of the current release
- VersionMinor = 3 // Minor version component of the current release
- VersionMicro = 3 // Patch version component of the current release
- VersionModifier = "beta2" // Modifier component of the current release
+ VersionMajor = 1 // Major version component of the current release
+ VersionMinor = 3 // Minor version component of the current release
+ VersionMicro = 3 // Patch version component of the current release
+ VersionModifier = "rc1" // Modifier component of the current release
VersionKeyCreated = "ErigonVersionCreated"
VersionKeyFinished = "ErigonVersionFinished"
ClientName = "erigon"
diff --git a/polygon/bor/bor_test.go b/polygon/bor/bor_test.go
index ac8221ac6fc..68787bf189d 100644
--- a/polygon/bor/bor_test.go
+++ b/polygon/bor/bor_test.go
@@ -79,6 +79,10 @@ func (h *test_heimdall) FetchStateSyncEvent(ctx context.Context, id uint64) (*he
return nil, nil
}
+func (h *test_heimdall) FetchStatus(ctx context.Context) (*heimdall.Status, error) {
+ return nil, nil
+}
+
func (h *test_heimdall) FetchSpan(ctx context.Context, spanID uint64) (*heimdall.Span, error) {
if span, ok := h.spans[heimdall.SpanId(spanID)]; ok {
diff --git a/polygon/heimdall/client.go b/polygon/heimdall/client.go
index 1c68e412e7d..bc71f8b6b8c 100644
--- a/polygon/heimdall/client.go
+++ b/polygon/heimdall/client.go
@@ -30,6 +30,8 @@ type Client interface {
FetchSpan(ctx context.Context, spanID uint64) (*Span, error)
FetchSpans(ctx context.Context, page uint64, limit uint64) ([]*Span, error)
+ FetchStatus(ctx context.Context) (*Status, error)
+
FetchCheckpoint(ctx context.Context, number int64) (*Checkpoint, error)
FetchCheckpointCount(ctx context.Context) (int64, error)
FetchCheckpoints(ctx context.Context, page uint64, limit uint64) ([]*Checkpoint, error)
diff --git a/polygon/heimdall/client_http.go b/polygon/heimdall/client_http.go
index 852c83519f4..877a80a7187 100644
--- a/polygon/heimdall/client_http.go
+++ b/polygon/heimdall/client_http.go
@@ -125,6 +125,8 @@ const (
fetchStateSyncEventsPath = "clerk/event-record/list"
fetchStateSyncEvent = "clerk/event-record/%s"
+ fetchStatus = "/status"
+
fetchCheckpoint = "/checkpoints/%s"
fetchCheckpointCount = "/checkpoints/count"
fetchCheckpointList = "/checkpoints/list"
@@ -349,6 +351,22 @@ func (c *HttpClient) FetchMilestone(ctx context.Context, number int64) (*Milesto
return &response.Result, nil
}
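+// FetchStatus fetches the node status from heimdall's /status endpoint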
+func (c *HttpClient) FetchStatus(ctx context.Context) (*Status, error) {
+ url, err := statusURL(c.urlString)
+ if err != nil {
+ return nil, err
+ }
+
+ ctx = withRequestType(ctx, statusRequest)
+
+ response, err := FetchWithRetry[StatusResponse](ctx, c, url, c.logger)
+ if err != nil {
+ return nil, err
+ }
+
+ return &response.Result, nil
+}
+
// FetchCheckpointCount fetches the checkpoint count from heimdall
func (c *HttpClient) FetchCheckpointCount(ctx context.Context) (int64, error) {
url, err := checkpointCountURL(c.urlString)
@@ -587,6 +605,10 @@ func checkpointCountURL(urlString string) (*url.URL, error) {
return makeURL(urlString, fetchCheckpointCount, "")
}
+func statusURL(urlString string) (*url.URL, error) {
+ return makeURL(urlString, fetchStatus, "")
+}
+
func checkpointListURL(urlString string, page uint64, limit uint64) (*url.URL, error) {
return makeURL(urlString, fetchCheckpointList, fmt.Sprintf(fetchCheckpointListQueryFormat, page, limit))
}
diff --git a/polygon/heimdall/client_mock.go b/polygon/heimdall/client_mock.go
index 7710eff7509..a8f3baf9089 100644
--- a/polygon/heimdall/client_mock.go
+++ b/polygon/heimdall/client_mock.go
@@ -620,3 +620,42 @@ func (c *MockClientFetchStateSyncEventsCall) DoAndReturn(f func(context.Context,
c.Call = c.Call.DoAndReturn(f)
return c
}
+
+// FetchStatus mocks base method.
+func (m *MockClient) FetchStatus(ctx context.Context) (*Status, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "FetchStatus", ctx)
+ ret0, _ := ret[0].(*Status)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// FetchStatus indicates an expected call of FetchStatus.
+func (mr *MockClientMockRecorder) FetchStatus(ctx any) *MockClientFetchStatusCall {
+ mr.mock.ctrl.T.Helper()
+ call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchStatus", reflect.TypeOf((*MockClient)(nil).FetchStatus), ctx)
+ return &MockClientFetchStatusCall{Call: call}
+}
+
+// MockClientFetchStatusCall wrap *gomock.Call
+type MockClientFetchStatusCall struct {
+ *gomock.Call
+}
+
+// Return rewrite *gomock.Call.Return
+func (c *MockClientFetchStatusCall) Return(arg0 *Status, arg1 error) *MockClientFetchStatusCall {
+ c.Call = c.Call.Return(arg0, arg1)
+ return c
+}
+
+// Do rewrite *gomock.Call.Do
+func (c *MockClientFetchStatusCall) Do(f func(context.Context) (*Status, error)) *MockClientFetchStatusCall {
+ c.Call = c.Call.Do(f)
+ return c
+}
+
+// DoAndReturn rewrite *gomock.Call.DoAndReturn
+func (c *MockClientFetchStatusCall) DoAndReturn(f func(context.Context) (*Status, error)) *MockClientFetchStatusCall {
+ c.Call = c.Call.DoAndReturn(f)
+ return c
+}
diff --git a/polygon/heimdall/metrics.go b/polygon/heimdall/metrics.go
index 2fb58a3fcc6..7bfc2f9e4b0 100644
--- a/polygon/heimdall/metrics.go
+++ b/polygon/heimdall/metrics.go
@@ -34,6 +34,7 @@ type (
)
const (
+ statusRequest requestType = "status"
stateSyncRequest requestType = "state-sync"
spanRequest requestType = "span"
checkpointRequest requestType = "checkpoint"
diff --git a/polygon/heimdall/service.go b/polygon/heimdall/service.go
index dd60dc69bbf..4690e69c041 100644
--- a/polygon/heimdall/service.go
+++ b/polygon/heimdall/service.go
@@ -32,6 +32,10 @@ import (
"github.com/erigontech/erigon/polygon/bor/valset"
)
+const (
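+ // isCatchingDelaySec is the maximum tolerated age, in seconds, of heimdall's
+ // latest block before the node is considered to be catching up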
+ isCatchingDelaySec = 600
+)
+
type ServiceConfig struct {
Store Store
BorConfig *borcfg.BorConfig
@@ -47,6 +51,7 @@ type Service struct {
milestoneScraper *Scraper[*Milestone]
spanScraper *Scraper[*Span]
spanBlockProducersTracker *spanBlockProducersTracker
+ client Client
ready ready
}
@@ -100,6 +105,7 @@ func NewService(config ServiceConfig) *Service {
milestoneScraper: milestoneScraper,
spanScraper: spanScraper,
spanBlockProducersTracker: newSpanBlockProducersTracker(logger, borConfig, store.SpanBlockProducerSelections()),
+ client: client,
}
}
@@ -397,3 +403,25 @@ func (s *Service) replayUntrackedSpans(ctx context.Context) error {
return nil
}
+
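+// IsCatchingUp reports whether the connected heimdall node is still syncing:
+// either it reports catching_up, or its latest block is older than isCatchingDelaySec.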
+func (s *Service) IsCatchingUp(ctx context.Context) (bool, error) {
+ status, err := s.client.FetchStatus(ctx)
+ if err != nil {
+ return false, err
+ }
+
+ if status.CatchingUp {
+ return true, nil
+ }
+
+ parsed, err := time.Parse(time.RFC3339, status.LatestBlockTime)
+ if err != nil {
+ return false, err
+ }
+
+ if parsed.Unix() < time.Now().Unix()-isCatchingDelaySec {
+ return true, nil
+ }
+
+ return false, nil
+}
diff --git a/polygon/heimdall/service_test.go b/polygon/heimdall/service_test.go
index 9ac28693325..8ed65d6fd1d 100644
--- a/polygon/heimdall/service_test.go
+++ b/polygon/heimdall/service_test.go
@@ -489,3 +489,47 @@ type difficultiesKV struct {
Signer common.Address
Difficulty uint64
}
+
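+// TestIsCatchingUp checks that IsCatchingUp is true when heimdall reports catching_up.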
+func TestIsCatchingUp(t *testing.T) {
+ ctrl := gomock.NewController(t)
+ mockClient := NewMockClient(ctrl)
+
+ s := Service{
+ client: mockClient,
+ }
+
+ mockClient.EXPECT().
+ FetchStatus(gomock.Any()).
+ DoAndReturn(func(ctx context.Context) (*Status, error) {
+ return &Status{
+ LatestBlockTime: "",
+ CatchingUp: true,
+ }, nil
+ })
+
+ isCatchingUp, err := s.IsCatchingUp(context.TODO())
+ require.NoError(t, err)
+ require.True(t, isCatchingUp)
+}
+
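+// TestIsCatchingUpLateBlock checks that a latest block older than isCatchingDelaySec
+// also counts as catching up, even when heimdall itself reports catching_up=false.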
+func TestIsCatchingUpLateBlock(t *testing.T) {
+ ctrl := gomock.NewController(t)
+ mockClient := NewMockClient(ctrl)
+
+ s := Service{
+ client: mockClient,
+ }
+
+ mockClient.EXPECT().
+ FetchStatus(gomock.Any()).
+ DoAndReturn(func(ctx context.Context) (*Status, error) {
+ return &Status{
+ LatestBlockTime: "2025-02-14T11:45:00.764588Z",
+ CatchingUp: false,
+ }, nil
+ })
+
+ isCatchingUp, err := s.IsCatchingUp(context.TODO())
+ require.NoError(t, err)
+ require.True(t, isCatchingUp)
+}
diff --git a/polygon/heimdall/snapshot_store.go b/polygon/heimdall/snapshot_store.go
index 5c752857fef..6fdb3d4f365 100644
--- a/polygon/heimdall/snapshot_store.go
+++ b/polygon/heimdall/snapshot_store.go
@@ -189,6 +189,10 @@ func (s *SpanSnapshotStore) LastEntityId(ctx context.Context) (uint64, bool, err
return lastId, ok, err
}
+func (s *SpanSnapshotStore) LastEntity(ctx context.Context) (*Span, bool, error) {
+ return snapshotStoreLastEntity(ctx, s)
+}
+
func (s *SpanSnapshotStore) ValidateSnapshots(logger log.Logger, failFast bool) error {
return validateSnapshots(logger, failFast, s.snapshots, s.SnapType(), generics.New[Span])
}
@@ -312,6 +316,10 @@ func (s *MilestoneSnapshotStore) Entity(ctx context.Context, id uint64) (*Milest
return nil, false, fmt.Errorf("%w: %w", ErrMilestoneNotFound, err)
}
+func (s *MilestoneSnapshotStore) LastEntity(ctx context.Context) (*Milestone, bool, error) {
+ return snapshotStoreLastEntity(ctx, s)
+}
+
func (s *MilestoneSnapshotStore) ValidateSnapshots(logger log.Logger, failFast bool) error {
return validateSnapshots(logger, failFast, s.snapshots, s.SnapType(), generics.New[Milestone])
}
@@ -345,7 +353,7 @@ func (s *CheckpointSnapshotStore) WithTx(tx kv.Tx) EntityStore[*Checkpoint] {
return &CheckpointSnapshotStore{txEntityStore[*Checkpoint]{s.EntityStore.(*mdbxEntityStore[*Checkpoint]), tx}, s.snapshots}
}
-func (s *CheckpointSnapshotStore) LastCheckpointId(ctx context.Context, tx kv.Tx) (uint64, bool, error) {
+func (s *CheckpointSnapshotStore) LastEntityId(ctx context.Context) (uint64, bool, error) {
lastId, ok, err := s.EntityStore.LastEntityId(ctx)
snapshotLastCheckpointId := s.LastFrozenEntityId()
@@ -425,6 +433,10 @@ func (s *CheckpointSnapshotStore) LastFrozenEntityId() uint64 {
return index.BaseDataID() + index.KeyCount() - 1
}
+func (s *CheckpointSnapshotStore) LastEntity(ctx context.Context) (*Checkpoint, bool, error) {
+ return snapshotStoreLastEntity(ctx, s)
+}
+
func (s *CheckpointSnapshotStore) ValidateSnapshots(logger log.Logger, failFast bool) error {
return validateSnapshots(logger, failFast, s.snapshots, s.SnapType(), generics.New[Checkpoint])
}
@@ -482,3 +494,12 @@ func validateSnapshots[T Entity](logger log.Logger, failFast bool, snaps *RoSnap
return accumulatedErr
}
+
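+// snapshotStoreLastEntity loads the entity with the store's last entity id,
+// returning ok=false if the store is empty.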
+func snapshotStoreLastEntity[T Entity](ctx context.Context, store EntityStore[T]) (T, bool, error) {
+ entityId, ok, err := store.LastEntityId(ctx)
+ if err != nil || !ok {
+ return generics.Zero[T](), false, err
+ }
+
+ return store.Entity(ctx, entityId)
+}
diff --git a/polygon/heimdall/status.go b/polygon/heimdall/status.go
new file mode 100644
index 00000000000..6e63fb5abca
--- /dev/null
+++ b/polygon/heimdall/status.go
@@ -0,0 +1,30 @@
+// Copyright 2024 The Erigon Authors
+// This file is part of Erigon.
+//
+// Erigon is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// Erigon is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with Erigon. If not, see <http://www.gnu.org/licenses/>.
+
+package heimdall
+
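+// Status is the node status payload returned by heimdall's /status endpoint.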
+type Status struct {
+ LatestBlockHash string `json:"latest_block_hash"`
+ LatestAppHash string `json:"latest_app_hash"`
+ LatestBlockHeight string `json:"latest_block_height"`
+ LatestBlockTime string `json:"latest_block_time"`
+ CatchingUp bool `json:"catching_up"`
+}
+
+type StatusResponse struct {
+ Height string `json:"height"`
+ Result Status `json:"result"`
+}
diff --git a/polygon/sync/sync.go b/polygon/sync/sync.go
index 5ac8ba471b2..93b67b948e9 100644
--- a/polygon/sync/sync.go
+++ b/polygon/sync/sync.go
@@ -25,6 +25,7 @@ import (
"github.com/hashicorp/golang-lru/v2/simplelru"
"github.com/erigontech/erigon-lib/common"
+ libcommon "github.com/erigontech/erigon-lib/common"
"github.com/erigontech/erigon-lib/log/v3"
"github.com/erigontech/erigon/core/types"
"github.com/erigontech/erigon/eth/ethconfig"
@@ -34,6 +35,7 @@ import (
)
type heimdallSynchronizer interface {
+ IsCatchingUp(ctx context.Context) (bool, error)
SynchronizeCheckpoints(ctx context.Context) (latest *heimdall.Checkpoint, err error)
SynchronizeMilestones(ctx context.Context) (latest *heimdall.Milestone, err error)
SynchronizeSpans(ctx context.Context, blockNum uint64) error
@@ -668,6 +670,26 @@ func (s *Sync) Run(ctx context.Context) error {
s.logger.Info(syncLogPrefix("running sync component"))
+ for {
+ // we have to check if the heimdall we are connected to is synchronised with the chain
+ // to prevent getting an empty list of checkpoints/milestones during the sync
+
+ catchingUp, err := s.heimdallSync.IsCatchingUp(ctx)
+ if err != nil {
+ return err
+ }
+
+ if !catchingUp {
+ break
+ }
+
+ s.logger.Warn(syncLogPrefix("your heimdalld process is behind, please check its logs and the :1317/status API"))
+
+ if err := libcommon.Sleep(ctx, 30*time.Second); err != nil {
+ return err
+ }
+ }
+
result, err := s.syncToTip(ctx)
if err != nil {
return err
diff --git a/spectest/util.go b/spectest/util.go
index b09e7235163..6ca95323450 100644
--- a/spectest/util.go
+++ b/spectest/util.go
@@ -129,7 +129,7 @@ func ReadBlockSlot(root fs.FS, index int) (uint64, error) {
return 0, err
}
- blockBytes, err = utils.DecompressSnappy(blockBytes)
+ blockBytes, err = utils.DecompressSnappy(blockBytes, false)
if err != nil {
return 0, err
}
diff --git a/turbo/app/snapshots_cmd.go b/turbo/app/snapshots_cmd.go
index db0b51e32a1..d1a4cccb90e 100644
--- a/turbo/app/snapshots_cmd.go
+++ b/turbo/app/snapshots_cmd.go
@@ -1418,10 +1418,12 @@ func doRetireCommand(cliCtx *cli.Context, dirs datadir.Dirs) error {
if ok {
from, to, every = from2, to2, to2-from2
}
+ } else {
+ forwardProgress = to
}
logger.Info("Params", "from", from, "to", to, "every", every)
- if err := br.RetireBlocks(ctx, 0, forwardProgress, log.LvlInfo, nil, nil, nil); err != nil {
+ if err := br.RetireBlocks(ctx, from, forwardProgress, log.LvlInfo, nil, nil, nil); err != nil {
return err
}
diff --git a/turbo/jsonrpc/eth_accounts.go b/turbo/jsonrpc/eth_accounts.go
index 7d1fc20f2bf..5fbeaf86251 100644
--- a/turbo/jsonrpc/eth_accounts.go
+++ b/turbo/jsonrpc/eth_accounts.go
@@ -122,9 +122,6 @@ func (api *APIImpl) GetCode(ctx context.Context, address libcommon.Address, bloc
func (api *APIImpl) GetStorageAt(ctx context.Context, address libcommon.Address, index string, blockNrOrHash rpc.BlockNumberOrHash) (string, error) {
var empty []byte
indexBytes := hexutility.FromHex(index)
- if len(indexBytes) < 32 {
- return "", errors.New("unable to decode storage key: hex string invalid")
- }
if len(indexBytes) > 32 {
return "", errors.New("unable to decode storage key: hex string too long, want at most 32 bytes")
}
diff --git a/turbo/jsonrpc/eth_call.go b/turbo/jsonrpc/eth_call.go
index 11215a9d56e..2e45417a4b1 100644
--- a/turbo/jsonrpc/eth_call.go
+++ b/turbo/jsonrpc/eth_call.go
@@ -49,7 +49,6 @@ import (
"github.com/erigontech/erigon/core/state"
"github.com/erigontech/erigon/core/types"
"github.com/erigontech/erigon/core/vm"
- "github.com/erigontech/erigon/core/vm/evmtypes"
"github.com/erigontech/erigon/eth/stagedsync"
"github.com/erigontech/erigon/eth/tracers/logger"
"github.com/erigontech/erigon/params"
@@ -151,76 +150,63 @@ func (api *APIImpl) EstimateGas(ctx context.Context, argsOrNil *ethapi2.CallArgs
}
defer dbtx.Rollback()
+ // Use latest block by default
+ if blockNrOrHash == nil {
+ blockNrOrHash = &latestNumOrHash
+ }
+
chainConfig, err := api.chainConfig(ctx, dbtx)
if err != nil {
return 0, err
}
engine := api.engine()
- latestCanBlockNumber, latestCanHash, isLatest, err := rpchelper.GetCanonicalBlockNumber(ctx, latestNumOrHash, dbtx, api._blockReader, api.filters) // DoCall cannot be executed on non-canonical blocks
+ blockNum, blockHash, isLatest, err := rpchelper.GetCanonicalBlockNumber(ctx, *blockNrOrHash, dbtx, api._blockReader, api.filters) // DoCall cannot be executed on non-canonical blocks
if err != nil {
return 0, err
}
// try and get the block from the lru cache first then try DB before failing
- block := api.tryBlockFromLru(latestCanHash)
+ block := api.tryBlockFromLru(blockHash)
if block == nil {
- block, err = api.blockWithSenders(ctx, dbtx, latestCanHash, latestCanBlockNumber)
+ block, err = api.blockWithSenders(ctx, dbtx, blockHash, blockNum)
if err != nil {
return 0, err
}
}
+
if block == nil {
- return 0, errors.New("could not find latest block in cache or db")
+ return 0, fmt.Errorf("could not find the block %s in cache or db", blockNrOrHash.String())
}
+ header := block.HeaderNoCopy()
txNumsReader := rawdbv3.TxNums.WithCustomReadTxNumFunc(freezeblocks.ReadTxNumFuncFromBlockReader(ctx, api._blockReader))
- stateReader, err := rpchelper.CreateStateReaderFromBlockNumber(ctx, dbtx, txNumsReader, latestCanBlockNumber, isLatest, 0, api.stateCache, chainConfig.ChainName)
+ stateReader, err := rpchelper.CreateStateReaderFromBlockNumber(ctx, dbtx, txNumsReader, blockNum, isLatest, 0, api.stateCache, chainConfig.ChainName)
if err != nil {
return 0, err
}
// Binary search the gas requirement, as it may be higher than the amount used
var (
- lo = params.TxGas - 1
- hi uint64
- gasCap uint64
+ lo uint64
+ hi uint64
)
// Use zero address if sender unspecified.
if args.From == nil {
args.From = new(libcommon.Address)
}
- bNrOrHash := rpc.BlockNumberOrHashWithNumber(rpc.PendingBlockNumber)
- if blockNrOrHash != nil {
- bNrOrHash = *blockNrOrHash
- }
-
// Determine the highest gas limit can be used during the estimation.
if args.Gas != nil && uint64(*args.Gas) >= params.TxGas {
hi = uint64(*args.Gas)
} else {
// Retrieve the block to act as the gas ceiling
- h, err := headerByNumberOrHash(ctx, dbtx, bNrOrHash, api)
- if err != nil {
- return 0, err
- }
- if h == nil {
- // if a block number was supplied and there is no header return 0
- if blockNrOrHash != nil {
- return 0, nil
- }
-
- // block number not supplied, so we haven't found a pending block, read the latest block instead
- h, err = headerByNumberOrHash(ctx, dbtx, latestNumOrHash, api)
- if err != nil {
- return 0, err
- }
- if h == nil {
- return 0, nil
- }
- }
- hi = h.GasLimit
+ hi = header.GasLimit
+ }
+ // Recap the highest gas allowance with specified gascap.
+ if hi > api.GasCap {
+ log.Warn("Caller gas above allowance, capping", "requested", hi, "cap", api.GasCap)
+ hi = api.GasCap
}
var feeCap *big.Int
@@ -265,68 +251,49 @@ func (api *APIImpl) EstimateGas(ctx context.Context, argsOrNil *ethapi2.CallArgs
}
}
- // Recap the highest gas allowance with specified gascap.
- if hi > api.GasCap {
- log.Warn("Caller gas above allowance, capping", "requested", hi, "cap", api.GasCap)
- hi = api.GasCap
- }
- gasCap = hi
-
- header := block.HeaderNoCopy()
-
- caller, err := transactions.NewReusableCaller(engine, stateReader, overrides, header, args, api.GasCap, latestNumOrHash, dbtx, api._blockReader, chainConfig, api.evmCallTimeout)
+ caller, err := transactions.NewReusableCaller(engine, stateReader, overrides, header, args, api.GasCap, *blockNrOrHash, dbtx, api._blockReader, chainConfig, api.evmCallTimeout)
if err != nil {
return 0, err
}
- // Create a helper to check if a gas allowance results in an executable transaction
- executable := func(gas uint64) (bool, *evmtypes.ExecutionResult, error) {
- result, err := caller.DoCallWithNewGas(ctx, gas)
- if err != nil {
- if errors.Is(err, core.ErrIntrinsicGas) {
- // Special case, raise gas limit
- return true, nil, nil
+ // First try with highest gas possible
+ result, err := caller.DoCallWithNewGas(ctx, hi, engine, overrides)
+ if err != nil || result == nil {
+ return 0, err
+ }
+ if result.Failed() {
+ if !errors.Is(result.Err, vm.ErrOutOfGas) {
+ if len(result.Revert()) > 0 {
+ return 0, ethapi2.NewRevertError(result)
}
-
- // Bail out
- return true, nil, err
+ return 0, result.Err
}
- return result.Failed(), result, nil
+ // Otherwise, the specified gas cap is too low
+ return 0, fmt.Errorf("gas required exceeds allowance (%d)", hi)
}
+ // Assuming a contract can freely run all the instructions, trueGas is
+ // the true amount of gas the EVM consumed to execute the call fully.
+ // The final estimate must not fall below it.
+ trueGas := result.EvmGasUsed
+ lo = min(trueGas+result.EvmRefund-1, params.TxGas-1)
+ i := 0
// Execute the binary search and hone in on an executable gas limit
for lo+1 < hi {
mid := (hi + lo) / 2
- failed, _, err := executable(mid)
+ result, err := caller.DoCallWithNewGas(ctx, mid, engine, overrides)
// If the error is not nil (consensus error), it means the provided message
// call or transaction will never be accepted no matter how much gas it is
// assigned. Return the error directly, don't struggle any more.
if err != nil {
return 0, err
}
- if failed {
+ if result.Failed() || result.EvmGasUsed < trueGas {
lo = mid
} else {
hi = mid
}
- }
-
- // Reject the transaction as invalid if it still fails at the highest allowance
- if hi == gasCap {
- failed, result, err := executable(hi)
- if err != nil {
- return 0, err
- }
- if failed {
- if result != nil && !errors.Is(result.Err, vm.ErrOutOfGas) {
- if len(result.Revert()) > 0 {
- return 0, ethapi2.NewRevertError(result)
- }
- return 0, result.Err
- }
- // Otherwise, the specified gas cap is too low
- return 0, fmt.Errorf("gas required exceeds allowance (%d)", gasCap)
- }
+ i++
}
return hexutil.Uint64(hi), nil
}
diff --git a/turbo/jsonrpc/eth_receipts.go b/turbo/jsonrpc/eth_receipts.go
index 6c74e575ed3..3922d03fc17 100644
--- a/turbo/jsonrpc/eth_receipts.go
+++ b/turbo/jsonrpc/eth_receipts.go
@@ -22,6 +22,7 @@ import (
"fmt"
"github.com/RoaringBitmap/roaring/v2"
+
"github.com/erigontech/erigon-lib/chain"
"github.com/erigontech/erigon-lib/common"
"github.com/erigontech/erigon-lib/kv"
@@ -295,6 +296,35 @@ func (api *BaseAPI) getLogsV3(ctx context.Context, tx kv.TemporalTx, begin, end
return nil, err
}
if isFinalTxn {
+ if chainConfig.Bor != nil {
+ // check for state sync event logs
+ events, err := api.stateSyncEvents(ctx, tx, header.Hash(), blockNum, chainConfig)
+ if err != nil {
+ return logs, err
+ }
+
+ borLogs, err := api.borReceiptGenerator.GenerateBorLogs(ctx, events, txNumsReader, tx, header, chainConfig, txIndex, len(logs))
+ if err != nil {
+ return logs, err
+ }
+
+ borLogs = borLogs.Filter(addrMap, crit.Topics, 0)
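+ // re-wrap the filtered bor logs as ErigonLog entries, attaching the block timestamp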
+ for _, filteredLog := range borLogs {
+ logs = append(logs, &types.ErigonLog{
+ Address: filteredLog.Address,
+ Topics: filteredLog.Topics,
+ Data: filteredLog.Data,
+ BlockNumber: filteredLog.BlockNumber,
+ TxHash: filteredLog.TxHash,
+ TxIndex: filteredLog.TxIndex,
+ BlockHash: filteredLog.BlockHash,
+ Index: filteredLog.Index,
+ Removed: filteredLog.Removed,
+ Timestamp: header.Time,
+ })
+ }
+ }
+
continue
}
diff --git a/turbo/jsonrpc/receipts/bor_receipts_generator.go b/turbo/jsonrpc/receipts/bor_receipts_generator.go
index 0a0ba66a458..b6389546a94 100644
--- a/turbo/jsonrpc/receipts/bor_receipts_generator.go
+++ b/turbo/jsonrpc/receipts/bor_receipts_generator.go
@@ -68,7 +68,7 @@ func (g *BorGenerator) GenerateBorReceipt(ctx context.Context, tx kv.TemporalTx,
gp := new(core.GasPool).AddGas(msgs[0].Gas() * uint64(len(msgs))).AddBlobGas(msgs[0].BlobGas() * uint64(len(msgs)))
evm := vm.NewEVM(blockContext, evmtypes.TxContext{}, ibs, chainConfig, vm.Config{})
- receipt, err := applyBorTransaction(msgs, evm, gp, ibs, block, cumGasUsedInLastBlock)
+ receipt, err := applyBorTransaction(msgs, evm, gp, ibs, block, cumGasUsedInLastBlock, 0)
if err != nil {
return nil, err
}
@@ -77,7 +77,19 @@ func (g *BorGenerator) GenerateBorReceipt(ctx context.Context, tx kv.TemporalTx,
return receipt, nil
}
-func applyBorTransaction(msgs []*types.Message, evm *vm.EVM, gp *core.GasPool, ibs *state.IntraBlockState, block *types.Block, cumulativeGasUsed uint64) (*types.Receipt, error) {
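+// GenerateBorLogs computes the logs of the synthetic bor state-sync transaction for the given header.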
+func (g *BorGenerator) GenerateBorLogs(ctx context.Context, msgs []*types.Message, txNumsReader rawdbv3.TxNumsReader, tx kv.TemporalTx, header *types.Header, chainConfig *chain.Config, txIndex, logIndex int) (types.Logs, error) {
+ ibs, blockContext, _, _, _, err := transactions.ComputeBlockContext(ctx, g.engine, header, chainConfig, g.blockReader, txNumsReader, tx, txIndex)
+ if err != nil {
+ return nil, err
+ }
+
+ gp := new(core.GasPool).AddGas(msgs[0].Gas() * uint64(len(msgs))).AddBlobGas(msgs[0].BlobGas() * uint64(len(msgs)))
+ evm := vm.NewEVM(blockContext, evmtypes.TxContext{}, ibs, chainConfig, vm.Config{})
+
+ return getBorLogs(msgs, evm, gp, ibs, header.Number.Uint64(), header.Hash(), uint(txIndex), uint(logIndex))
+}
+
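+// getBorLogs replays the state-sync messages through the EVM and collects the
+// resulting logs, stamping them with the given tx and log indices.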
+func getBorLogs(msgs []*types.Message, evm *vm.EVM, gp *core.GasPool, ibs *state.IntraBlockState, blockNum uint64, blockHash libcommon.Hash, txIndex, logIndex uint) (types.Logs, error) {
for _, msg := range msgs {
txContext := core.NewEVMTxContext(msg)
evm.Reset(txContext, ibs)
@@ -88,9 +100,23 @@ func applyBorTransaction(msgs []*types.Message, evm *vm.EVM, gp *core.GasPool, i
}
}
- numReceipts := len(block.Transactions())
+ receiptLogs := ibs.GetLogs(0, bortypes.ComputeBorTxHash(blockNum, blockHash), blockNum, blockHash)
- receiptLogs := ibs.GetLogs(0, bortypes.ComputeBorTxHash(block.NumberU64(), block.Hash()), block.NumberU64(), block.Hash())
+ // set the positional fields of the bor logs relative to the enclosing block
+ for i, log := range receiptLogs {
+ log.TxIndex = txIndex
+ log.Index = logIndex + uint(i)
+ }
+ return receiptLogs, nil
+}
+
+func applyBorTransaction(msgs []*types.Message, evm *vm.EVM, gp *core.GasPool, ibs *state.IntraBlockState, block *types.Block, cumulativeGasUsed uint64, logIndex uint) (*types.Receipt, error) {
+ receiptLogs, err := getBorLogs(msgs, evm, gp, ibs, block.Number().Uint64(), block.Hash(), uint(len(block.Transactions())), logIndex)
+ if err != nil {
+ return nil, err
+ }
+
+ numReceipts := len(block.Transactions())
receipt := types.Receipt{
Type: 0,
CumulativeGasUsed: cumulativeGasUsed,
diff --git a/turbo/jsonrpc/send_transaction.go b/turbo/jsonrpc/send_transaction.go
index 9ce098809d6..813b4b6b081 100644
--- a/turbo/jsonrpc/send_transaction.go
+++ b/turbo/jsonrpc/send_transaction.go
@@ -7,10 +7,8 @@ import (
"math/big"
"github.com/erigontech/erigon-lib/common"
- "github.com/erigontech/erigon-lib/common/hexutil"
"github.com/erigontech/erigon-lib/common/hexutility"
txPoolProto "github.com/erigontech/erigon-lib/gointerfaces/txpoolproto"
- "github.com/holiman/uint256"
"github.com/erigontech/erigon/core/types"
"github.com/erigontech/erigon/params"
@@ -23,25 +21,11 @@ func (api *APIImpl) SendRawTransaction(ctx context.Context, encodedTx hexutility
return common.Hash{}, err
}
- if txn.Type() == types.BlobTxType || txn.Type() == types.DynamicFeeTxType || txn.Type() == types.SetCodeTxType {
- baseFeeBig, err := api.BaseFee(ctx)
- if err != nil {
- return common.Hash{}, err
- }
-
- // If the transaction fee cap is already specified, ensure the
- // effective gas fee is less than fee cap.
- if err := checkDynamicTxFee(txn.GetFeeCap(), baseFeeBig); err != nil {
- return common.Hash{}, err
- }
- } else {
- // If the transaction fee cap is already specified, ensure the
- // fee of the given transaction is _reasonable_.
- if err := checkTxFee(txn.GetPrice().ToBig(), txn.GetGas(), api.FeeCap); err != nil {
- return common.Hash{}, err
- }
+ // If the transaction fee cap is already specified, ensure the
+ // fee of the given transaction is _reasonable_.
+ if err := checkTxFee(txn.GetPrice().ToBig(), txn.GetGas(), api.FeeCap); err != nil {
+ return common.Hash{}, err
}
-
if !txn.Protected() && !api.AllowUnprotectedTxs {
return common.Hash{}, errors.New("only replay-protected (EIP-155) transactions allowed over RPC")
}
@@ -93,28 +77,10 @@ func checkTxFee(gasPrice *big.Int, gas uint64, gasCap float64) error {
if gasCap == 0 {
return nil
}
-
feeEth := new(big.Float).Quo(new(big.Float).SetInt(new(big.Int).Mul(gasPrice, new(big.Int).SetUint64(gas))), new(big.Float).SetInt(big.NewInt(params.Ether)))
feeFloat, _ := feeEth.Float64()
if feeFloat > gasCap {
return fmt.Errorf("tx fee (%.2f ether) exceeds the configured cap (%.2f ether)", feeFloat, gasCap)
}
-
- return nil
-}
-
-// checkTxFee is an internal function used to check whether the fee of
-// the given transaction is _reasonable_(under the cap).
-func checkDynamicTxFee(gasCap *uint256.Int, baseFeeBig *hexutil.Big) error {
- baseFee := uint256.NewInt(0)
- overflow := baseFee.SetFromBig(baseFeeBig.ToInt())
- if overflow {
- return errors.New("opts.Value higher than 2^256-1")
- }
-
- if gasCap.Lt(baseFee) {
- return errors.New("fee cap is lower than the base fee")
- }
-
return nil
}
diff --git a/turbo/jsonrpc/send_transaction_test.go b/turbo/jsonrpc/send_transaction_test.go
index 92c5b473532..16da6a44f58 100644
--- a/turbo/jsonrpc/send_transaction_test.go
+++ b/turbo/jsonrpc/send_transaction_test.go
@@ -190,105 +190,3 @@ func pricedTransaction(nonce uint64, gaslimit uint64, gasprice *uint256.Int, key
tx, _ := types.SignTx(types.NewTransaction(nonce, common.Address{}, uint256.NewInt(100), gaslimit, gasprice, nil), *types.LatestSignerForChainID(big.NewInt(1337)), key)
return tx
}
-
-func TestSendRawTransactionDynamicFee(t *testing.T) {
- // Initialize a mock Ethereum node (Sentry) with protocol changes enabled
- mockSentry := mock.MockWithAllProtocolChanges(t)
- require := require.New(t)
- logger := log.New()
-
- // Set up a single block step for the mock chain
- oneBlockStep(mockSentry, require, t)
-
- // Create a test gRPC connection and initialize TxPool & API
- ctx, conn := rpcdaemontest.CreateTestGrpcConn(t, mockSentry)
- txPool := txpool.NewTxpoolClient(conn)
- api := jsonrpc.NewEthAPI(
- newBaseApiForTest(mockSentry),
- mockSentry.DB,
- nil,
- txPool,
- nil,
- 5_000_000, // Gas limit
- 1*params.GWei,
- 100_000,
- false,
- 100_000,
- 128,
- logger,
- )
-
- // Get the current base fee
- baseFee, err := api.BaseFee(ctx)
- require.NoError(err)
- baseFeeValue := baseFee.Uint64()
-
- // Define gas tip (priority fee)
- gasTip := uint256.NewInt(5 * params.Wei)
-
- // --- Test Case 1: Transaction with valid gas fee cap ---
- {
- // Gas fee cap: 2x BaseFee + Tip
- gasFeeCap := uint256.NewInt((2 * baseFeeValue) + gasTip.Uint64())
-
- // Create and sign a transaction
- txn, err := types.SignTx(
- types.NewEIP1559Transaction(
- uint256.Int{1337}, // Nonce
- 0, // Gas price (not used in EIP-1559)
- common.Address{1}, // Recipient
- uint256.NewInt(1234),
- params.TxGas,
- uint256.NewInt(2_000_000),
- gasTip,
- gasFeeCap,
- nil,
- ),
- *types.LatestSignerForChainID(mockSentry.ChainConfig.ChainID),
- mockSentry.Key,
- )
- require.NoError(err)
-
- // Serialize the transaction
- buf := bytes.NewBuffer(nil)
- err = txn.MarshalBinary(buf)
- require.NoError(err)
-
- // Send the transaction
- _, err = api.SendRawTransaction(ctx, buf.Bytes())
- require.NoError(err, "Transaction with sufficient gas fee cap should be accepted")
- }
-
- // --- Test Case 2: Transaction with gas fee cap lower than base fee ---
- {
- // Gas fee cap: BaseFee - Tip (too low to be accepted)
- gasFeeCap := uint256.NewInt(baseFeeValue - gasTip.Uint64())
-
- // Create and sign a transaction
- txn, err := types.SignTx(
- types.NewEIP1559Transaction(
- uint256.Int{1337}, // Nonce
- 1, // Gas price (not used in EIP-1559)
- common.Address{1}, // Recipient
- uint256.NewInt(1234),
- params.TxGas,
- uint256.NewInt(2_000_000),
- gasTip,
- gasFeeCap,
- nil,
- ),
- *types.LatestSignerForChainID(mockSentry.ChainConfig.ChainID),
- mockSentry.Key,
- )
- require.NoError(err)
-
- // Serialize the transaction
- buf := bytes.NewBuffer(nil)
- err = txn.MarshalBinary(buf)
- require.NoError(err)
-
- // Send the transaction (should fail)
- _, err = api.SendRawTransaction(ctx, buf.Bytes())
- require.Error(err, "Transaction with gas fee cap lower than base fee should be rejected")
- }
-}
diff --git a/turbo/stages/mock/mock_sentry.go b/turbo/stages/mock/mock_sentry.go
index 46938632995..ca6a0c04ef2 100644
--- a/turbo/stages/mock/mock_sentry.go
+++ b/turbo/stages/mock/mock_sentry.go
@@ -644,22 +644,6 @@ func MockWithTxPool(t *testing.T) *MockSentry {
return MockWithEverything(t, gspec, key, prune.DefaultMode, ethash.NewFaker(), blockBufferSize, true, false, checkStateRoot)
}
-func MockWithAllProtocolChanges(t *testing.T) *MockSentry {
- funds := big.NewInt(1 * params.Ether)
- key, _ := crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
- address := crypto.PubkeyToAddress(key.PublicKey)
- chainConfig := params.AllProtocolChanges
- gspec := &types.Genesis{
- Config: chainConfig,
- Alloc: types.GenesisAlloc{
- address: {Balance: funds},
- },
- }
-
- checkStateRoot := true
- return MockWithEverything(t, gspec, key, prune.DefaultMode, ethash.NewFaker(), blockBufferSize, true, false, checkStateRoot)
-}
-
func MockWithZeroTTD(t *testing.T, withPosDownloader bool) *MockSentry {
funds := big.NewInt(1 * params.Ether)
key, _ := crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
diff --git a/turbo/transactions/call.go b/turbo/transactions/call.go
index ccfebaa001d..0fd7d5c20bc 100644
--- a/turbo/transactions/call.go
+++ b/turbo/transactions/call.go
@@ -156,6 +156,8 @@ type ReusableCaller struct {
func (r *ReusableCaller) DoCallWithNewGas(
ctx context.Context,
newGas uint64,
+ engine consensus.EngineReader,
+ overrides *ethapi2.StateOverrides,
) (*evmtypes.ExecutionResult, error) {
var cancel context.CancelFunc
if r.callTimeout > 0 {
@@ -172,7 +174,10 @@ func (r *ReusableCaller) DoCallWithNewGas(
// reset the EVM so that we can continue to use it with the new context
txCtx := core.NewEVMTxContext(r.message)
- r.intraBlockState = state.New(r.stateReader)
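+ // only rebuild the state from the reader when no overrides are applied;
+ // otherwise keep the existing (possibly overridden) state between calls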
+ if overrides == nil {
+ r.intraBlockState = state.New(r.stateReader)
+ }
+
r.evm.Reset(txCtx, r.intraBlockState)
timedOut := false