Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

refactor: abstract codec versions into common interfaces #25

Open
wants to merge 72 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
72 commits
Select commit Hold shift + click to select a range
ad42cd9
feat: support conditional encode
colinlyguo Aug 14, 2024
62758c8
move append conditionalEncode flag after validity check
colinlyguo Aug 14, 2024
6901956
update da-codec
colinlyguo Aug 18, 2024
e4bf12e
align naming
colinlyguo Aug 18, 2024
030349d
add ConvertBlobToBlobBytes utility functions
colinlyguo Aug 19, 2024
ed4de9e
kept blob bytes
colinlyguo Aug 19, 2024
c6af3bb
rename enableEncode to enableCompress
colinlyguo Aug 19, 2024
a5691d4
refactor: move some common functions to encoding (#24)
colinlyguo Aug 20, 2024
9532963
move symbol replace script to zstd folder
colinlyguo Aug 20, 2024
990bdb3
refactor: move some util functions to public package
colinlyguo Aug 20, 2024
6b86866
fix CI
colinlyguo Aug 20, 2024
3ad692a
add interfaces of codec
colinlyguo Aug 20, 2024
a5c6430
add SetCompression
colinlyguo Aug 20, 2024
43f56e6
move interface to encoding
colinlyguo Aug 20, 2024
cd280de
refactor
colinlyguo Aug 20, 2024
879bb98
add dablock.go
colinlyguo Aug 21, 2024
77aafd4
add dachunk.go
colinlyguo Aug 21, 2024
6ee5c19
add dabatch.go
colinlyguo Aug 21, 2024
79422a2
move computeBatchDataHash to codecv
colinlyguo Aug 21, 2024
296880e
fix
colinlyguo Aug 21, 2024
c038850
add DABatchBase
colinlyguo Aug 21, 2024
4499e2c
add GetCodecVersion
colinlyguo Aug 21, 2024
8e763dd
add BlobVersionedHashes
colinlyguo Aug 21, 2024
98d5635
rename encoding.go to interfaces.go
colinlyguo Aug 22, 2024
bdb98f8
add NewDABatchWithExpectedBlobVersionedHashes
colinlyguo Aug 22, 2024
f3f0fbd
Merge branch 'main' into refactor-move-some-util-functions-to-public-…
colinlyguo Aug 22, 2024
08d60a3
tweak
colinlyguo Aug 22, 2024
2d425d8
fix a bug
colinlyguo Aug 24, 2024
c1e4a0d
add more logs
colinlyguo Aug 24, 2024
e5df846
add DecodeDAChunks
colinlyguo Sep 5, 2024
ecaca71
add BlockRange interface
colinlyguo Sep 5, 2024
484fa59
fix
colinlyguo Sep 5, 2024
f1fe4c8
add version check
colinlyguo Sep 5, 2024
97711e2
add Version
colinlyguo Sep 5, 2024
2a63797
remove DABatchBase
colinlyguo Sep 5, 2024
87c4537
add DABlock
colinlyguo Sep 5, 2024
2ac7825
fixes
colinlyguo Sep 5, 2024
8a6c35f
fix
colinlyguo Sep 5, 2024
83f6b62
add CodecFromVersion and CodecFromConfig
colinlyguo Sep 5, 2024
c4a2495
remove GetCodecVersion
colinlyguo Sep 5, 2024
10af8e7
fix typos
colinlyguo Sep 18, 2024
1594e0f
make Block fields internal
colinlyguo Sep 22, 2024
1f9facd
make chunk fields internal
colinlyguo Sep 22, 2024
451eb68
make batch fields internal and add some tweaks
colinlyguo Sep 22, 2024
955f375
add JSONFromBytes
colinlyguo Sep 22, 2024
f73c63e
fix a typo
colinlyguo Sep 30, 2024
cf9f084
use register mode
colinlyguo Oct 1, 2024
5ef6c5f
Merge branch 'main' into refactor-move-some-util-functions-to-public-…
colinlyguo Oct 3, 2024
7eb1dc0
fix CI
colinlyguo Oct 3, 2024
91ac897
remove register mode
colinlyguo Oct 4, 2024
e6c8965
add common functions
colinlyguo Oct 5, 2024
ba853e3
add EstimateBlockL1CommitCalldataSize
colinlyguo Oct 5, 2024
aca0bef
add dabatch interfaces
colinlyguo Oct 5, 2024
95d2bc7
update interface implementations
colinlyguo Oct 6, 2024
75812c4
add data hash
colinlyguo Oct 6, 2024
78588e4
fix codecv3 & codecv4 estimate gas
colinlyguo Oct 7, 2024
45548e1
fix
colinlyguo Oct 7, 2024
4469219
fix bugs
colinlyguo Oct 7, 2024
6934f3d
tweak
colinlyguo Oct 7, 2024
0079225
add CheckChunkCompressedDataCompatibility & CheckBatchCompressedDataC…
colinlyguo Oct 7, 2024
3f774d2
fix
colinlyguo Oct 7, 2024
669b454
add BlobDataProofForPointEvaluation
colinlyguo Oct 7, 2024
09127a5
add nil check in NewDAChunk
colinlyguo Oct 10, 2024
1c519d6
make some util functions internal
colinlyguo Oct 11, 2024
d2350ff
remove GetMaxChunksPerBatch
colinlyguo Oct 11, 2024
b885af8
fix CI
colinlyguo Oct 11, 2024
986850d
change receiver mark from o to d
colinlyguo Oct 11, 2024
e222406
remove SetCompression
colinlyguo Oct 11, 2024
ef5ea6e
fix
colinlyguo Oct 11, 2024
9d9fd89
add GetChunkEnableCompression & GetBatchEnableCompression
colinlyguo Oct 11, 2024
df76a9d
fix
colinlyguo Oct 11, 2024
6e28644
update l2geth dependency
colinlyguo Oct 11, 2024
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
398 changes: 398 additions & 0 deletions encoding/codecv0.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,398 @@
package encoding

import (
"encoding/binary"
"errors"
"fmt"
"math"

"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
)

// DACodecV0 implements the version-0 DA codec. At this version all commit
// data is carried in L1 calldata; the blob-related methods below are no-ops
// (DecodeTxsFromBlob does nothing and blob-size estimates return zero).
type DACodecV0 struct{}

// codecv0MaxNumChunks is the maximum number of chunks that a batch can contain.
const codecv0MaxNumChunks = 15

// Version returns the codec version implemented by this codec (CodecV0).
func (d *DACodecV0) Version() CodecVersion {
	return CodecV0
}

// MaxNumChunksPerBatch returns the maximum number of chunks per batch
// (codecv0MaxNumChunks = 15 for this codec version).
func (d *DACodecV0) MaxNumChunksPerBatch() uint64 {
	return codecv0MaxNumChunks
}

// NewDABlock builds a DABlock for the given Block, given the total number of
// L1 messages popped before this block. It fails if the block number, the L1
// message count, or the total transaction count does not fit its encoded width.
func (d *DACodecV0) NewDABlock(block *Block, totalL1MessagePoppedBefore uint64) (DABlock, error) {
	if !block.Header.Number.IsUint64() {
		return nil, errors.New("block number is not uint64")
	}

	// note: l1MsgCount includes skipped messages
	l1MsgCount := block.NumL1Messages(totalL1MessagePoppedBefore)
	if l1MsgCount > math.MaxUint16 {
		return nil, errors.New("number of L1 messages exceeds max uint16")
	}

	// note: txCount includes skipped messages
	txCount := l1MsgCount + block.NumL2Transactions()
	if txCount > math.MaxUint16 {
		return nil, errors.New("number of transactions exceeds max uint16")
	}

	return NewDABlockV0(
		block.Header.Number.Uint64(), // number
		block.Header.Time,            // timestamp
		block.Header.BaseFee,         // baseFee
		block.Header.GasLimit,        // gasLimit
		uint16(txCount),              // numTransactions
		uint16(l1MsgCount),           // numL1Messages
	), nil
}

// NewDAChunk creates a new DAChunk from the given Chunk and the total number of L1 messages popped before.
func (d *DACodecV0) NewDAChunk(chunk *Chunk, totalL1MessagePoppedBefore uint64) (DAChunk, error) {
var blocks []DABlock
var txs [][]*types.TransactionData

if chunk == nil {
colinlyguo marked this conversation as resolved.
Show resolved Hide resolved
return nil, errors.New("chunk is nil")
}

if len(chunk.Blocks) == 0 {
return nil, errors.New("number of blocks is 0")
}

if len(chunk.Blocks) > 255 {
return nil, errors.New("number of blocks exceeds 1 byte")
}

for _, block := range chunk.Blocks {
b, err := d.NewDABlock(block, totalL1MessagePoppedBefore)
if err != nil {
return nil, err
}
blocks = append(blocks, b)
totalL1MessagePoppedBefore += block.NumL1Messages(totalL1MessagePoppedBefore)
txs = append(txs, block.Transactions)
}

daChunk := NewDAChunkV0(
blocks, // blocks
txs, // transactions
)

return daChunk, nil
}

// DecodeDAChunksRawTx takes a byte slice and decodes it into a []*DAChunkRawTx.
//
// Expected chunk layout: 1 byte numBlocks, then numBlocks block contexts of
// BlockContextByteSize bytes each, then the L2 transactions of each block in
// order, each prefixed with a TxLenByteSize (4-byte) big-endian length.
// L1 message transactions are not present in the encoding and are skipped.
func (d *DACodecV0) DecodeDAChunksRawTx(chunkBytes [][]byte) ([]*DAChunkRawTx, error) {
	chunks := make([]*DAChunkRawTx, 0, len(chunkBytes))
	for _, chunk := range chunkBytes {
		if len(chunk) < 1 {
			return nil, fmt.Errorf("invalid chunk, length is less than 1")
		}

		numBlocks := int(chunk[0])
		if len(chunk) < 1+numBlocks*BlockContextByteSize {
			return nil, fmt.Errorf("chunk size doesn't match with numBlocks, byte length of chunk: %v, expected length: %v", len(chunk), 1+numBlocks*BlockContextByteSize)
		}

		// decode the fixed-size block contexts
		blocks := make([]DABlock, numBlocks)
		for i := 0; i < numBlocks; i++ {
			startIdx := 1 + i*BlockContextByteSize // add 1 to skip numBlocks byte
			endIdx := startIdx + BlockContextByteSize
			blocks[i] = &DABlockV0{}
			if err := blocks[i].Decode(chunk[startIdx:endIdx]); err != nil {
				return nil, err
			}
		}

		// decode the length-prefixed L2 transactions that follow the block contexts
		var transactions []types.Transactions
		currentIndex := 1 + numBlocks*BlockContextByteSize
		for _, block := range blocks {
			var blockTransactions types.Transactions
			// ignore L1 msg transactions from the block, consider only L2 transactions
			txNum := int(block.NumTransactions() - block.NumL1Messages())
			for i := 0; i < txNum; i++ {
				if len(chunk) < currentIndex+TxLenByteSize {
					return nil, fmt.Errorf("chunk size doesn't match, next tx size is less than 4, byte length of chunk: %v, expected minimum length: %v, txNum without l1 msgs: %d", len(chunk), currentIndex+TxLenByteSize, i)
				}
				txLen := int(binary.BigEndian.Uint32(chunk[currentIndex : currentIndex+TxLenByteSize]))
				if len(chunk) < currentIndex+TxLenByteSize+txLen {
					return nil, fmt.Errorf("chunk size doesn't match with next tx length, byte length of chunk: %v, expected minimum length: %v, txNum without l1 msgs: %d", len(chunk), currentIndex+TxLenByteSize+txLen, i)
				}
				txData := chunk[currentIndex+TxLenByteSize : currentIndex+TxLenByteSize+txLen]
				tx := &types.Transaction{}
				if err := tx.UnmarshalBinary(txData); err != nil {
					return nil, fmt.Errorf("failed to unmarshal tx, pos of tx in chunk bytes: %d. tx num without l1 msgs: %d, err: %w", currentIndex, i, err)
				}
				blockTransactions = append(blockTransactions, tx)
				currentIndex += TxLenByteSize + txLen
			}
			transactions = append(transactions, blockTransactions)
		}

		chunks = append(chunks, &DAChunkRawTx{
			Blocks:       blocks,
			Transactions: transactions,
		})
	}
	return chunks, nil
}

// DecodeTxsFromBlob decodes txs from blob bytes and writes to chunks.
// For CodecV0 this is a no-op: this codec version carries transaction data in
// calldata rather than blobs (its blob-size estimates below return zero), so
// there is nothing to decode and the chunks are left untouched.
func (d *DACodecV0) DecodeTxsFromBlob(blob *kzg4844.Blob, chunks []*DAChunkRawTx) error {
	return nil
}

// NewDABatch creates a DABatch from the provided Batch: it hashes each chunk,
// keccak-hashes the concatenated chunk hashes into the batch data hash,
// constructs the skipped-L1-message bitmap, and assembles the v0 batch header.
func (d *DACodecV0) NewDABatch(batch *Batch) (DABatch, error) {
	// concatenate the per-chunk hashes to form the batch data hash preimage
	var chunkHashBytes []byte
	l1MessagePoppedBefore := batch.TotalL1MessagePoppedBefore

	for _, chunk := range batch.Chunks {
		daChunk, err := d.NewDAChunk(chunk, l1MessagePoppedBefore)
		if err != nil {
			return nil, err
		}
		l1MessagePoppedBefore += chunk.NumL1Messages(l1MessagePoppedBefore)

		chunkHash, err := daChunk.Hash()
		if err != nil {
			return nil, err
		}
		chunkHashBytes = append(chunkHashBytes, chunkHash.Bytes()...)
	}

	dataHash := crypto.Keccak256Hash(chunkHashBytes)

	// skipped L1 messages bitmap
	bitmapBytes, totalL1MessagePoppedAfter, err := ConstructSkippedBitmap(batch.Index, batch.Chunks, batch.TotalL1MessagePoppedBefore)
	if err != nil {
		return nil, err
	}

	return NewDABatchV0(
		uint8(CodecV0), // version
		batch.Index,    // batchIndex
		totalL1MessagePoppedAfter-batch.TotalL1MessagePoppedBefore, // l1MessagePopped
		totalL1MessagePoppedAfter,                                  // totalL1MessagePopped
		dataHash,                                                   // dataHash
		batch.ParentBatchHash,                                      // parentBatchHash
		bitmapBytes,                                                // skippedL1MessageBitmap
	), nil
}

// NewDABatchFromBytes decodes the given byte slice into a DABatch.
//
// Encoded layout (integers big-endian):
//
//	[0]     version
//	[1:9]   batchIndex
//	[9:17]  l1MessagePopped
//	[17:25] totalL1MessagePopped
//	[25:57] dataHash
//	[57:89] parentBatchHash
//	[89:]   skippedL1MessageBitmap (variable length, may be empty)
func (d *DACodecV0) NewDABatchFromBytes(data []byte) (DABatch, error) {
	// minimum size of the fixed header; the bitmap tail may be empty
	const minDABatchV0Size = 89
	if len(data) < minDABatchV0Size {
		return nil, fmt.Errorf("insufficient data for DABatch, expected at least 89 bytes but got %d", len(data))
	}

	if CodecVersion(data[0]) != CodecV0 {
		return nil, fmt.Errorf("invalid codec version: %d, expected: %d", data[0], CodecV0)
	}

	b := NewDABatchV0(
		data[0],                              // version
		binary.BigEndian.Uint64(data[1:9]),   // batchIndex
		binary.BigEndian.Uint64(data[9:17]),  // l1MessagePopped
		binary.BigEndian.Uint64(data[17:25]), // totalL1MessagePopped
		common.BytesToHash(data[25:57]),      // dataHash
		common.BytesToHash(data[57:89]),      // parentBatchHash
		data[89:],                            // skippedL1MessageBitmap
	)

	return b, nil
}

// EstimateBlockL1CommitCalldataSize calculates the calldata size in l1 commit for this block approximately:
// one block context plus, for each L2 transaction, a 4-byte length prefix and
// the transaction payload. L1 message transactions contribute nothing.
func (d *DACodecV0) EstimateBlockL1CommitCalldataSize(b *Block) (uint64, error) {
	size := uint64(BlockContextByteSize)
	for _, txData := range b.Transactions {
		// L1 messages are not part of the commit calldata
		if txData.Type == types.L1MessageTxType {
			continue
		}
		payloadLen, err := getTxPayloadLength(txData)
		if err != nil {
			return 0, err
		}
		size += 4 + payloadLen // 4-byte payload length prefix + payload
	}
	return size, nil
}

// EstimateBlockL1CommitGas calculates the total L1 commit gas for this block approximately:
// calldata gas for the block context and every L2 transaction (each byte
// treated as non-zero, an over-estimate), keccak gas for each L2 tx hash, and
// a fixed per-L1-message overhead for the L1MessageQueue proxy calls.
func (d *DACodecV0) EstimateBlockL1CommitGas(b *Block) (uint64, error) {
	var gas uint64
	var l1MsgCount uint64
	for _, txData := range b.Transactions {
		if txData.Type == types.L1MessageTxType {
			l1MsgCount++
			continue
		}

		payloadLen, err := getTxPayloadLength(txData)
		if err != nil {
			return 0, err
		}
		gas += CalldataNonZeroByteGas * payloadLen // an over-estimate: treat each byte as non-zero
		gas += CalldataNonZeroByteGas * 4          // 4 bytes payload length
		gas += getKeccak256Gas(payloadLen)         // l2 tx hash
	}

	gas += CalldataNonZeroByteGas * BlockContextByteSize

	// fixed cost charged once per L1 message:
	perL1Msg := uint64(2100)               // cold sload in L1MessageQueue
	perL1Msg += 100                        // call to L1MessageQueue
	perL1Msg += 100                        // warm address access to L1MessageQueue
	perL1Msg += getMemoryExpansionCost(36) // staticcall to proxy
	perL1Msg += 100                        // read admin in proxy
	perL1Msg += 100                        // read impl in proxy
	perL1Msg += 100                        // access impl
	perL1Msg += getMemoryExpansionCost(36) // delegatecall to impl
	gas += perL1Msg * l1MsgCount

	return gas, nil
}

// EstimateChunkL1CommitCalldataSize calculates the calldata size needed for committing a chunk to L1 approximately,
// as the sum of the per-block estimates.
func (d *DACodecV0) EstimateChunkL1CommitCalldataSize(c *Chunk) (uint64, error) {
	var total uint64
	for _, blk := range c.Blocks {
		size, err := d.EstimateBlockL1CommitCalldataSize(blk)
		if err != nil {
			return 0, err
		}
		total += size
	}
	return total, nil
}

// EstimateChunkL1CommitGas calculates the total L1 commit gas for this chunk approximately:
// the per-block commit gas plus per-block sload/calldata overheads and the
// keccak gas for hashing the chunk.
func (d *DACodecV0) EstimateChunkL1CommitGas(c *Chunk) (uint64, error) {
	var txCount uint64
	var gas uint64
	for _, blk := range c.Blocks {
		txCount += uint64(len(blk.Transactions))
		blockGas, err := d.EstimateBlockL1CommitGas(blk)
		if err != nil {
			return 0, err
		}
		gas += blockGas
	}

	blockCount := uint64(len(c.Blocks))
	gas += 100 * blockCount                                           // warm sload per block
	gas += CalldataNonZeroByteGas                                     // numBlocks field of chunk encoding in calldata
	gas += CalldataNonZeroByteGas * blockCount * BlockContextByteSize // BlockContext bytes of the chunk in calldata

	gas += getKeccak256Gas(58*blockCount + 32*txCount) // chunk hash
	return gas, nil
}

// EstimateBatchL1CommitGas calculates the total L1 commit gas for this batch approximately.
// The estimate sums per-chunk commit gas with fixed contract-call overheads
// (proxy resolution, cold sloads, one sstore, the base tx fee) and the keccak
// and calldata costs of the batch header and per-chunk skipped-L1-message
// bitmaps. The specific constants appear to be tuned against the commitBatch
// contract — TODO(review): confirm against the rollup contract before changing.
func (d *DACodecV0) EstimateBatchL1CommitGas(b *Batch) (uint64, error) {
	var totalL1CommitGas uint64

	// Add extra gas costs
	totalL1CommitGas += 100000                 // constant to account for ops like _getAdmin, _implementation, _requireNotPaused, etc
	totalL1CommitGas += 4 * 2100               // 4 one-time cold sload for commitBatch
	totalL1CommitGas += 20000                  // 1 time sstore
	totalL1CommitGas += 21000                  // base fee for tx
	totalL1CommitGas += CalldataNonZeroByteGas // version in calldata

	// adjusting gas:
	// add 1 time cold sload (2100 gas) for L1MessageQueue
	// add 1 time cold address access (2600 gas) for L1MessageQueue
	// minus 1 time warm sload (100 gas) & 1 time warm address access (100 gas)
	totalL1CommitGas += (2100 + 2600 - 100 - 100)
	totalL1CommitGas += getKeccak256Gas(89 + 32)           // parent batch header hash, length is estimated as 89 (constant part)+ 32 (1 skippedL1MessageBitmap)
	totalL1CommitGas += CalldataNonZeroByteGas * (89 + 32) // parent batch header in calldata

	// adjust batch data hash gas cost: keccak over one 32-byte chunk hash per chunk
	totalL1CommitGas += getKeccak256Gas(uint64(32 * len(b.Chunks)))

	totalL1MessagePoppedBefore := b.TotalL1MessagePoppedBefore

	for _, chunk := range b.Chunks {
		chunkL1CommitGas, err := d.EstimateChunkL1CommitGas(chunk)
		if err != nil {
			return 0, err
		}
		totalL1CommitGas += chunkL1CommitGas

		totalL1MessagePoppedInChunk := chunk.NumL1Messages(totalL1MessagePoppedBefore)
		totalL1MessagePoppedBefore += totalL1MessagePoppedInChunk

		// bitmap costs: one 32-byte word per 256 popped L1 messages, rounded up
		totalL1CommitGas += CalldataNonZeroByteGas * (32 * (totalL1MessagePoppedInChunk + 255) / 256)
		totalL1CommitGas += getKeccak256Gas(89 + 32*(totalL1MessagePoppedInChunk+255)/256)

		chunkL1CommitCalldataSize, err := d.EstimateChunkL1CommitCalldataSize(chunk)
		if err != nil {
			return 0, err
		}
		// memory expansion cost incurred when copying the chunk calldata
		totalL1CommitGas += getMemoryExpansionCost(chunkL1CommitCalldataSize)
	}

	return totalL1CommitGas, nil
}

// EstimateBatchL1CommitCalldataSize calculates the calldata size in l1 commit for this batch approximately,
// as the sum of the per-chunk estimates.
func (d *DACodecV0) EstimateBatchL1CommitCalldataSize(b *Batch) (uint64, error) {
	var total uint64
	for _, chunk := range b.Chunks {
		size, err := d.EstimateChunkL1CommitCalldataSize(chunk)
		if err != nil {
			return 0, err
		}
		total += size
	}
	return total, nil
}

// CheckChunkCompressedDataCompatibility checks the compressed data compatibility for a batch built from a single chunk.
// For CodecV0 this unconditionally reports compatible — this codec version
// appears to apply no compression, so there is nothing to check.
func (d *DACodecV0) CheckChunkCompressedDataCompatibility(c *Chunk) (bool, error) {
	return true, nil
}

// CheckBatchCompressedDataCompatibility checks the compressed data compatibility for a batch.
// For CodecV0 this unconditionally reports compatible — this codec version
// appears to apply no compression, so there is nothing to check.
func (d *DACodecV0) CheckBatchCompressedDataCompatibility(b *Batch) (bool, error) {
	return true, nil
}

// EstimateChunkL1CommitBatchSizeAndBlobSize estimates the L1 commit uncompressed batch size and compressed blob size for a single chunk.
// CodecV0 does not commit data via blobs, so both sizes are always zero.
func (d *DACodecV0) EstimateChunkL1CommitBatchSizeAndBlobSize(c *Chunk) (uint64, uint64, error) {
	return 0, 0, nil
}

// EstimateBatchL1CommitBatchSizeAndBlobSize estimates the L1 commit uncompressed batch size and compressed blob size for a batch.
// CodecV0 does not commit data via blobs, so both sizes are always zero.
func (d *DACodecV0) EstimateBatchL1CommitBatchSizeAndBlobSize(b *Batch) (uint64, uint64, error) {
	return 0, 0, nil
}

// JSONFromBytes for CodecV0 returns empty values.
// This codec version has no JSON representation for its payload, so the
// result is always (nil, nil).
func (d *DACodecV0) JSONFromBytes(data []byte) ([]byte, error) {
	// DACodecV0 doesn't need this, so just return empty values
	return nil, nil
}
Loading
Loading