diff --git a/cmd/geth/main.go b/cmd/geth/main.go
index 66d0c94f7fd3..b1b1e92642d2 100644
--- a/cmd/geth/main.go
+++ b/cmd/geth/main.go
@@ -183,6 +183,7 @@ var (
         utils.DARecoveryInitialBatchFlag,
         utils.DARecoverySignBlocksFlag,
         utils.DARecoveryL2EndBlockFlag,
+        utils.DARecoveryProduceBlocksFlag,
     }
 
     rpcFlags = []cli.Flag{
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index fb2fe6e3714f..7bc8fe5d70c0 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -926,6 +926,10 @@ var (
         Name:  "da.recovery.l2endblock",
         Usage: "End L2 block to recover to",
     }
+    DARecoveryProduceBlocksFlag = cli.BoolFlag{
+        Name:  "da.recovery.produceblocks",
+        Usage: "Produce unsigned blocks after L1 recovery for permissionless batch submission",
+    }
 )
 
 // MakeDataDir retrieves the currently requested data directory, terminating
@@ -1693,6 +1697,9 @@ func setDA(ctx *cli.Context, cfg *ethconfig.Config) {
     if ctx.IsSet(DARecoveryL2EndBlockFlag.Name) {
         cfg.DA.L2EndBlock = ctx.Uint64(DARecoveryL2EndBlockFlag.Name)
     }
+    if ctx.IsSet(DARecoveryProduceBlocksFlag.Name) {
+        cfg.DA.ProduceBlocks = ctx.Bool(DARecoveryProduceBlocksFlag.Name)
+    }
 }
 
 func setMaxBlockRange(ctx *cli.Context, cfg *ethconfig.Config) {
diff --git a/eth/backend.go b/eth/backend.go
index a8d87c2cb371..9dd822be21ec 100644
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -229,11 +229,15 @@ func New(stack *node.Node, config *ethconfig.Config, l1Client l1.Client) (*Ether
     // simply let them run simultaneously. If messages are missing in DA syncing, it will be handled by the syncing pipeline
     // by waiting and retrying.
     if config.EnableDASyncing {
-        eth.syncingPipeline, err = da_syncer.NewSyncingPipeline(context.Background(), eth.blockchain, chainConfig, eth.chainDb, l1Client, stack.Config().L1DeploymentBlock, config.DA)
-        if err != nil {
-            return nil, fmt.Errorf("cannot initialize da syncer: %w", err)
+        // Do not start syncing pipeline if we are producing blocks for permissionless batches.
+        if !config.DA.ProduceBlocks {
+            eth.syncingPipeline, err = da_syncer.NewSyncingPipeline(context.Background(), eth.blockchain, chainConfig, eth.chainDb, l1Client, stack.Config().L1DeploymentBlock, config.DA)
+            if err != nil {
+                return nil, fmt.Errorf("cannot initialize da syncer: %w", err)
+            }
+
+            eth.syncingPipeline.Start()
         }
-        eth.syncingPipeline.Start()
     }
 
     // initialize and start L1 message sync service
@@ -273,7 +277,8 @@ func New(stack *node.Node, config *ethconfig.Config, l1Client l1.Client) (*Ether
         return nil, err
     }
 
-    eth.miner = miner.New(eth, &config.Miner, chainConfig, eth.EventMux(), eth.engine, eth.isLocalBlock, config.EnableDASyncing)
+    config.Miner.SigningDisabled = config.DA.ProduceBlocks
+    eth.miner = miner.New(eth, &config.Miner, eth.blockchain.Config(), eth.EventMux(), eth.engine, eth.isLocalBlock, config.EnableDASyncing && !config.DA.ProduceBlocks)
     eth.miner.SetExtra(makeExtraData(config.Miner.ExtraData))
 
     eth.APIBackend = &EthAPIBackend{stack.Config().ExtRPCEnabled(), stack.Config().AllowUnprotectedTxs, eth, nil}
@@ -632,7 +637,7 @@ func (s *Ethereum) Stop() error {
     if s.config.EnableRollupVerify {
         s.rollupSyncService.Stop()
     }
-    if s.config.EnableDASyncing {
+    if s.config.EnableDASyncing && s.syncingPipeline != nil {
         s.syncingPipeline.Stop()
     }
     s.miner.Close()
diff --git a/miner/miner.go b/miner/miner.go
index e6b1b2ae5d38..0d483643df9e 100644
--- a/miner/miner.go
+++ b/miner/miner.go
@@ -60,6 +60,8 @@ type Config struct {
     StoreSkippedTxTraces bool // Whether store the wrapped traces when storing a skipped tx
     MaxAccountsNum       int  // Maximum number of accounts that miner will fetch the pending transactions of when building a new block
     CCCMaxWorkers        int  // Maximum number of workers to use for async CCC tasks
+
+    SigningDisabled bool // Whether to disable signing blocks with consensus engine
 }
 
 // Miner creates blocks and searches for proof-of-work values.
diff --git a/miner/scroll_worker.go b/miner/scroll_worker.go
index bad27de84c6d..b58893031996 100644
--- a/miner/scroll_worker.go
+++ b/miner/scroll_worker.go
@@ -502,11 +502,19 @@ func (w *worker) newWork(now time.Time, parentHash common.Hash, reorging bool, r
         header.Coinbase = w.coinbase
     }
 
-    prepareStart := time.Now()
-    if err := w.engine.Prepare(w.chain, header); err != nil {
-        return fmt.Errorf("failed to prepare header for mining: %w", err)
+    if w.config.SigningDisabled {
+        // Need to make sure to set difficulty so that a new canonical chain is detected in Blockchain
+        header.Difficulty = new(big.Int).SetUint64(1)
+        header.MixDigest = common.Hash{}
+        header.Coinbase = common.Address{}
+        header.Nonce = types.BlockNonce{}
+    } else {
+        prepareStart := time.Now()
+        if err := w.engine.Prepare(w.chain, header); err != nil {
+            return fmt.Errorf("failed to prepare header for mining: %w", err)
+        }
+        prepareTimer.UpdateSince(prepareStart)
     }
-    prepareTimer.UpdateSince(prepareStart)
 
     var nextL1MsgIndex uint64
     if dbVal := rawdb.ReadFirstQueueIndexNotInL2Block(w.eth.ChainDb(), header.ParentHash); dbVal != nil {
@@ -853,28 +861,33 @@ func (w *worker) commit() (common.Hash, error) {
         return common.Hash{}, err
     }
 
-    sealHash := w.engine.SealHash(block.Header())
-    log.Info("Committing new mining work", "number", block.Number(), "sealhash", sealHash,
-        "txs", w.current.txs.Len(),
-        "gas", block.GasUsed(), "fees", totalFees(block, w.current.receipts))
-
-    resultCh, stopCh := make(chan *types.Block), make(chan struct{})
-    if err := w.engine.Seal(w.chain, block, resultCh, stopCh); err != nil {
-        return common.Hash{}, err
-    }
-    // Clique.Seal() will only wait for a second before giving up on us. So make sure there is nothing computational heavy
-    // or a call that blocks between the call to Seal and the line below. Seal might introduce some delay, so we keep track of
-    // that artificially added delay and subtract it from overall runtime of commit().
-    sealStart := time.Now()
-    block = <-resultCh
-    sealDelay = time.Since(sealStart)
-    if block == nil {
-        return common.Hash{}, errors.New("missed seal response from consensus engine")
-    }
+    var sealHash common.Hash
+    if w.config.SigningDisabled {
+        sealHash = block.Hash()
+    } else {
+        sealHash = w.engine.SealHash(block.Header())
+        log.Info("Committing new mining work", "number", block.Number(), "sealhash", sealHash,
+            "txs", w.current.txs.Len(),
+            "gas", block.GasUsed(), "fees", totalFees(block, w.current.receipts))
+
+        resultCh, stopCh := make(chan *types.Block), make(chan struct{})
+        if err := w.engine.Seal(w.chain, block, resultCh, stopCh); err != nil {
+            return common.Hash{}, err
+        }
+        // Clique.Seal() will only wait for a second before giving up on us. So make sure there is nothing computational heavy
+        // or a call that blocks between the call to Seal and the line below. Seal might introduce some delay, so we keep track of
+        // that artificially added delay and subtract it from overall runtime of commit().
+        sealStart := time.Now()
+        block = <-resultCh
+        sealDelay = time.Since(sealStart)
+        if block == nil {
+            return common.Hash{}, errors.New("missed seal response from consensus engine")
+        }
 
-    // verify the generated block with local consensus engine to make sure everything is as expected
-    if err = w.engine.VerifyHeader(w.chain, block.Header(), true); err != nil {
-        return common.Hash{}, retryableCommitError{inner: err}
+        // verify the generated block with local consensus engine to make sure everything is as expected
+        if err = w.engine.VerifyHeader(w.chain, block.Header(), true); err != nil {
+            return common.Hash{}, retryableCommitError{inner: err}
+        }
     }
 
     blockHash := block.Hash()
diff --git a/params/version.go b/params/version.go
index 77ac78f10bca..a2a606e49082 100644
--- a/params/version.go
+++ b/params/version.go
@@ -24,7 +24,7 @@ import (
 const (
     VersionMajor = 5         // Major version component of the current release
     VersionMinor = 8         // Minor version component of the current release
-    VersionPatch = 11        // Patch version component of the current release
+    VersionPatch = 12        // Patch version component of the current release
     VersionMeta  = "mainnet" // Version metadata to append to the version string
 )
 
diff --git a/rollup/da_syncer/syncing_pipeline.go b/rollup/da_syncer/syncing_pipeline.go
index 7526548ea17b..844f23e72cd4 100644
--- a/rollup/da_syncer/syncing_pipeline.go
+++ b/rollup/da_syncer/syncing_pipeline.go
@@ -29,6 +29,8 @@ type Config struct {
     InitialBatch uint64 // Batch number from which to start syncing and overriding blocks
     SignBlocks   bool   // Whether to sign the blocks after reading them from the pipeline (requires correct Clique signer key) and history of blocks with Clique signatures
     L2EndBlock   uint64 // L2 block number to sync until
+
+    ProduceBlocks bool // Whether to produce blocks in DA recovery mode. The pipeline will be disabled when starting the node with this flag.
 }
 
 // SyncingPipeline is a derivation pipeline for syncing data from L1 and DA and transform it into
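Note: below is a minimal, self-contained sketch (not part of the diff) of what the SigningDisabled branches above boil down to, assuming the fork's module path github.com/scroll-tech/go-ethereum; the helper name prepareUnsignedHeader is purely illustrative. With --da.recovery.produceblocks set, the syncing pipeline is not started, the worker skips engine.Prepare/Seal/VerifyHeader, and the blocks it produces stay unsigned for permissionless batch submission.

package main

import (
    "fmt"
    "math/big"

    "github.com/scroll-tech/go-ethereum/common"
    "github.com/scroll-tech/go-ethereum/core/types"
)

// prepareUnsignedHeader mirrors the SigningDisabled branch of newWork above:
// no consensus engine is consulted, but Difficulty must be non-zero so that
// the blockchain still detects the produced blocks as a new canonical chain.
func prepareUnsignedHeader(header *types.Header) {
    header.Difficulty = new(big.Int).SetUint64(1)
    header.MixDigest = common.Hash{}
    header.Coinbase = common.Address{}
    header.Nonce = types.BlockNonce{}
}

func main() {
    h := &types.Header{Number: big.NewInt(1), ParentHash: common.HexToHash("0x01")}
    prepareUnsignedHeader(h)
    // In commit(), the SigningDisabled path skips engine.Seal/VerifyHeader and
    // simply uses the block's own hash where a seal hash would otherwise appear.
    fmt.Println("difficulty:", h.Difficulty, "coinbase:", h.Coinbase.Hex())
}

The explicit difficulty of 1 matters because, with engine.Prepare skipped, nothing else would fill it in, and the blockchain would not recognize the produced blocks as a new canonical chain.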