Skip to content

Commit

Permalink
feat: add decoding methods (#10)
Browse files Browse the repository at this point in the history
* add decoding methods

* add tests for codecv0 and codecv1

* decompressing

* add decompressing for codecv2

* fix

* change zstd library from c binding to full go port

* handle error

* sync with main

* add v3 decoding

* refactor: make DAChunkRawTx an alias

* address comments

* comment

* comment

* address comments

* fix test

* support v4

* address renaming nit-picks

---------

Co-authored-by: jonastheis <[email protected]>
  • Loading branch information
NazariiDenha and jonastheis authored Sep 2, 2024
1 parent 9e32313 commit 41c6486
Show file tree
Hide file tree
Showing 15 changed files with 529 additions and 9 deletions.
27 changes: 27 additions & 0 deletions encoding/bitmap.go
Original file line number Diff line number Diff line change
Expand Up @@ -63,3 +63,30 @@ func ConstructSkippedBitmap(batchIndex uint64, chunks []*Chunk, totalL1MessagePo

return bitmapBytes, nextIndex, nil
}

// DecodeBitmap decodes skipped L1 message bitmap of the batch from bytes to big.Int's
func DecodeBitmap(skippedL1MessageBitmap []byte, totalL1MessagePopped int) ([]*big.Int, error) {
length := len(skippedL1MessageBitmap)
if length%32 != 0 {
return nil, fmt.Errorf("skippedL1MessageBitmap length doesn't match, skippedL1MessageBitmap length should be equal 0 modulo 32, length of skippedL1MessageBitmap: %v", length)
}
if length*8 < totalL1MessagePopped {
return nil, fmt.Errorf("skippedL1MessageBitmap length is too small, skippedL1MessageBitmap length should be at least %v, length of skippedL1MessageBitmap: %v", (totalL1MessagePopped+7)/8, length)
}
var skippedBitmap []*big.Int
for index := 0; index < length/32; index++ {
bitmap := big.NewInt(0).SetBytes(skippedL1MessageBitmap[index*32 : index*32+32])
skippedBitmap = append(skippedBitmap, bitmap)
}
return skippedBitmap, nil
}

// IsL1MessageSkipped checks if index is skipped in bitmap
func IsL1MessageSkipped(skippedBitmap []*big.Int, index uint64) bool {
if index > uint64(len(skippedBitmap))*256 {
return false
}
quo := index / 256
rem := index % 256
return skippedBitmap[quo].Bit(int(rem)) != 0
}
67 changes: 67 additions & 0 deletions encoding/codecv0/codecv0.go
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,9 @@ import (
"github.com/scroll-tech/da-codec/encoding"
)

// Byte sizes of fixed-width fields in the chunk encoding.
const (
	// BlockContextByteSize is the encoded size of a single block context.
	BlockContextByteSize = 60
	// TxLenByteSize is the size of the big-endian length prefix preceding each transaction.
	TxLenByteSize = 4
)

// DABlock represents a Data Availability Block.
type DABlock struct {
BlockNumber uint64
Expand All @@ -32,6 +35,12 @@ type DAChunk struct {
Transactions [][]*types.TransactionData
}

// DAChunkRawTx groups consecutive DABlocks with their L2 transactions, L1 msgs are loaded in another place.
type DAChunkRawTx struct {
	Blocks       []*DABlock           // decoded block contexts of the chunk, in order
	Transactions []types.Transactions // L2 transactions only, one list per block (L1 messages are excluded)
}

// DABatch contains metadata about a batch of DAChunks.
type DABatch struct {
Version uint8
Expand Down Expand Up @@ -179,6 +188,64 @@ func (c *DAChunk) Encode() ([]byte, error) {
return chunkBytes, nil
}

// DecodeDAChunksRawTx takes a byte slice and decodes it into a []*DAChunkRawTx.
//
// Each chunk is laid out as:
//
//	[numBlocks: 1 byte][numBlocks block contexts, BlockContextByteSize each]
//	[per L2 tx: TxLenByteSize big-endian length prefix + tx payload]
//
// L1 message transactions are not part of the chunk payload; only L2
// transactions are decoded here.
func DecodeDAChunksRawTx(bytes [][]byte) ([]*DAChunkRawTx, error) {
	chunks := make([]*DAChunkRawTx, 0, len(bytes))
	for _, chunk := range bytes {
		if len(chunk) < 1 {
			return nil, fmt.Errorf("invalid chunk, length is less than 1")
		}

		numBlocks := int(chunk[0])
		if len(chunk) < 1+numBlocks*BlockContextByteSize {
			return nil, fmt.Errorf("chunk size doesn't match with numBlocks, byte length of chunk: %v, expected length: %v", len(chunk), 1+numBlocks*BlockContextByteSize)
		}

		blocks := make([]*DABlock, numBlocks)
		for i := 0; i < numBlocks; i++ {
			startIdx := 1 + i*BlockContextByteSize // add 1 to skip numBlocks byte
			endIdx := startIdx + BlockContextByteSize
			blocks[i] = &DABlock{}
			if err := blocks[i].Decode(chunk[startIdx:endIdx]); err != nil {
				return nil, err
			}
		}

		transactions := make([]types.Transactions, 0, numBlocks)
		currentIndex := 1 + numBlocks*BlockContextByteSize
		for _, block := range blocks {
			var blockTransactions types.Transactions
			// ignore L1 msg transactions from the block, consider only L2 transactions.
			// Validate explicitly: the fields are unsigned, so a malformed block with
			// NumL1Messages > NumTransactions would otherwise wrap around to a huge txNum.
			if block.NumL1Messages > block.NumTransactions {
				return nil, fmt.Errorf("invalid block, num L1 messages (%d) exceeds num transactions (%d)", block.NumL1Messages, block.NumTransactions)
			}
			txNum := int(block.NumTransactions - block.NumL1Messages)
			for i := 0; i < txNum; i++ {
				if len(chunk) < currentIndex+TxLenByteSize {
					return nil, fmt.Errorf("chunk size doesn't match, next tx size is less then 4, byte length of chunk: %v, expected minimum length: %v, txNum without l1 msgs: %d", len(chunk), currentIndex+TxLenByteSize, i)
				}
				txLen := int(binary.BigEndian.Uint32(chunk[currentIndex : currentIndex+TxLenByteSize]))
				if len(chunk) < currentIndex+TxLenByteSize+txLen {
					return nil, fmt.Errorf("chunk size doesn't match with next tx length, byte length of chunk: %v, expected minimum length: %v, txNum without l1 msgs: %d", len(chunk), currentIndex+TxLenByteSize+txLen, i)
				}
				txData := chunk[currentIndex+TxLenByteSize : currentIndex+TxLenByteSize+txLen]
				tx := &types.Transaction{}
				if err := tx.UnmarshalBinary(txData); err != nil {
					return nil, fmt.Errorf("failed to unmarshal tx, pos of tx in chunk bytes: %d. tx num without l1 msgs: %d, err: %w", currentIndex, i, err)
				}
				blockTransactions = append(blockTransactions, tx)
				currentIndex += TxLenByteSize + txLen
			}
			transactions = append(transactions, blockTransactions)
		}

		chunks = append(chunks, &DAChunkRawTx{
			Blocks:       blocks,
			Transactions: transactions,
		})
	}
	return chunks, nil
}

// Hash computes the hash of the DAChunk data.
func (c *DAChunk) Hash() (common.Hash, error) {
chunkBytes, err := c.Encode()
Expand Down
48 changes: 47 additions & 1 deletion encoding/codecv0/codecv0_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -7,9 +7,10 @@ import (
"os"
"testing"

"github.com/stretchr/testify/assert"

"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/log"
"github.com/stretchr/testify/assert"

"github.com/scroll-tech/da-codec/encoding"
)
Expand Down Expand Up @@ -264,6 +265,38 @@ func TestCodecV0(t *testing.T) {
assert.NoError(t, err)
assert.Equal(t, 61, len(chunkBytes2))

daChunksRawTx, err := DecodeDAChunksRawTx([][]byte{chunkBytes1, chunkBytes2})
assert.NoError(t, err)
// assert number of chunks
assert.Equal(t, 2, len(daChunksRawTx))

// assert block in first chunk
assert.Equal(t, 3, len(daChunksRawTx[0].Blocks))
assert.Equal(t, daChunk1.Blocks[0], daChunksRawTx[0].Blocks[0])
assert.Equal(t, daChunk1.Blocks[1], daChunksRawTx[0].Blocks[1])
daChunksRawTx[0].Blocks[2].BaseFee = nil
assert.Equal(t, daChunk1.Blocks[2], daChunksRawTx[0].Blocks[2])

// assert block in second chunk
assert.Equal(t, 1, len(daChunksRawTx[1].Blocks))
daChunksRawTx[1].Blocks[0].BaseFee = nil
assert.Equal(t, daChunk2.Blocks[0], daChunksRawTx[1].Blocks[0])

// assert transactions in first chunk
assert.Equal(t, 3, len(daChunksRawTx[0].Transactions))
// here number of transactions in encoded and decoded chunks may be different, because decodec chunks doesn't contain l1msgs
assert.Equal(t, 2, len(daChunksRawTx[0].Transactions[0]))
assert.Equal(t, 1, len(daChunksRawTx[0].Transactions[1]))
assert.Equal(t, 1, len(daChunksRawTx[0].Transactions[2]))

assert.EqualValues(t, daChunk1.Transactions[0][0].TxHash, daChunksRawTx[0].Transactions[0][0].Hash().String())
assert.EqualValues(t, daChunk1.Transactions[0][1].TxHash, daChunksRawTx[0].Transactions[0][1].Hash().String())

// assert transactions in second chunk
assert.Equal(t, 1, len(daChunksRawTx[1].Transactions))
// here number of transactions in encoded and decoded chunks may be different, because decodec chunks doesn't contain l1msgs
assert.Equal(t, 0, len(daChunksRawTx[1].Transactions[0]))

batch = &encoding.Batch{
Index: 1,
TotalL1MessagePoppedBefore: 0,
Expand Down Expand Up @@ -297,6 +330,19 @@ func TestCodecV0(t *testing.T) {
decodedBatchHexString = hex.EncodeToString(decodedBatchBytes)
assert.Equal(t, batchHexString, decodedBatchHexString)

decodedBitmap, err := encoding.DecodeBitmap(decodedDABatch.SkippedL1MessageBitmap, int(decodedDABatch.L1MessagePopped))
assert.NoError(t, err)
assert.True(t, encoding.IsL1MessageSkipped(decodedBitmap, 0))
assert.True(t, encoding.IsL1MessageSkipped(decodedBitmap, 9))
assert.False(t, encoding.IsL1MessageSkipped(decodedBitmap, 10))
assert.True(t, encoding.IsL1MessageSkipped(decodedBitmap, 11))
assert.True(t, encoding.IsL1MessageSkipped(decodedBitmap, 36))
assert.False(t, encoding.IsL1MessageSkipped(decodedBitmap, 37))
assert.False(t, encoding.IsL1MessageSkipped(decodedBitmap, 38))
assert.False(t, encoding.IsL1MessageSkipped(decodedBitmap, 39))
assert.False(t, encoding.IsL1MessageSkipped(decodedBitmap, 40))
assert.False(t, encoding.IsL1MessageSkipped(decodedBitmap, 41))

// Test case: many consecutive L1 Msgs in 1 bitmap, no leading skipped msgs.
chunk = &encoding.Chunk{
Blocks: []*encoding.Block{block4},
Expand Down
133 changes: 133 additions & 0 deletions encoding/codecv1/codecv1.go
Original file line number Diff line number Diff line change
Expand Up @@ -21,12 +21,17 @@ import (
// MaxNumChunks is the maximum number of chunks that a batch can contain.
const MaxNumChunks = 15

// BlockContextByteSize is the encoded size of a single block context, reused from codecv0.
const BlockContextByteSize = codecv0.BlockContextByteSize

// DABlock represents a Data Availability Block.
type DABlock = codecv0.DABlock

// DAChunk groups consecutive DABlocks with their transactions.
// NOTE: this is a defined type (not an alias), so codecv0's DAChunk methods
// do not carry over; codecv1 provides its own Encode/Hash below.
type DAChunk codecv0.DAChunk

// DAChunkRawTx groups consecutive DABlocks with their L2 transactions, L1 msgs are loaded in another place.
type DAChunkRawTx = codecv0.DAChunkRawTx

// DABatch contains metadata about a batch of DAChunks.
type DABatch struct {
// header
Expand Down Expand Up @@ -93,6 +98,41 @@ func (c *DAChunk) Encode() []byte {
return chunkBytes
}

// DecodeDAChunksRawTx takes a byte slice and decodes it into a []*DAChunkRawTx.
// Beginning from codecv1 tx data is posted to blobs, not to chunk bytes in
// calldata, so only the block contexts are decoded here; the Transactions
// field of each chunk is left empty and is filled in by DecodeTxsFromBlob.
func DecodeDAChunksRawTx(bytes [][]byte) ([]*DAChunkRawTx, error) {
	chunks := make([]*DAChunkRawTx, 0, len(bytes))
	for _, chunk := range bytes {
		if len(chunk) < 1 {
			return nil, fmt.Errorf("invalid chunk, length is less than 1")
		}

		numBlocks := int(chunk[0])
		if len(chunk) < 1+numBlocks*BlockContextByteSize {
			return nil, fmt.Errorf("chunk size doesn't match with numBlocks, byte length of chunk: %v, expected length: %v", len(chunk), 1+numBlocks*BlockContextByteSize)
		}

		blocks := make([]*DABlock, numBlocks)
		for i := 0; i < numBlocks; i++ {
			startIdx := 1 + i*BlockContextByteSize // add 1 to skip numBlocks byte
			endIdx := startIdx + BlockContextByteSize
			blocks[i] = &DABlock{}
			if err := blocks[i].Decode(chunk[startIdx:endIdx]); err != nil {
				return nil, err
			}
		}

		chunks = append(chunks, &DAChunkRawTx{
			Blocks: blocks,
			// Transactions stays nil at this phase: txs moved to blobs and are
			// filled in by the DecodeTxsFromBlob method.
			Transactions: nil,
		})
	}
	return chunks, nil
}

// Hash computes the hash of the DAChunk data.
func (c *DAChunk) Hash() (common.Hash, error) {
var dataBytes []byte
Expand Down Expand Up @@ -286,6 +326,99 @@ func constructBlobPayload(chunks []*encoding.Chunk, useMockTxData bool) (*kzg484
return blob, blobVersionedHash, &z, nil
}

// DecodeTxsFromBytes decodes txs from blob bytes and writes to chunks.
//
// Expected layout of blobBytes:
//
//	[numChunks: 2 bytes big-endian][maxNumChunks chunk sizes, 4 bytes each][chunk payloads...]
//
// Every read is bounds-checked so that malformed or truncated input returns
// an error instead of panicking with an out-of-range slice index.
func DecodeTxsFromBytes(blobBytes []byte, chunks []*DAChunkRawTx, maxNumChunks int) error {
	metadataLen := 2 + maxNumChunks*4
	if len(blobBytes) < metadataLen {
		return fmt.Errorf("blob bytes too short for metadata, length: %d, expected at least: %d", len(blobBytes), metadataLen)
	}
	numChunks := int(binary.BigEndian.Uint16(blobBytes[0:2]))
	if numChunks != len(chunks) {
		return fmt.Errorf("blob chunk number is not same as calldata, blob num chunks: %d, calldata num chunks: %d", numChunks, len(chunks))
	}
	// keeps the per-chunk size reads below inside the metadata section
	if numChunks > maxNumChunks {
		return fmt.Errorf("blob num chunks exceeds maximum, blob num chunks: %d, max num chunks: %d", numChunks, maxNumChunks)
	}
	index := metadataLen
	for chunkID, chunk := range chunks {
		var transactions []types.Transactions
		chunkSize := int(binary.BigEndian.Uint32(blobBytes[2+4*chunkID : 2+4*chunkID+4]))
		// subtract instead of adding to avoid any possibility of int overflow
		if chunkSize < 0 || len(blobBytes)-index < chunkSize {
			return fmt.Errorf("blob bytes too short for chunk %d, length: %d, expected at least: %d", chunkID, len(blobBytes), index+chunkSize)
		}

		chunkBytes := blobBytes[index : index+chunkSize]
		curIndex := 0
		for _, block := range chunk.Blocks {
			var blockTransactions types.Transactions
			// only L2 transactions are stored in the blob; L1 messages are loaded elsewhere
			txNum := int(block.NumTransactions - block.NumL1Messages)
			for i := 0; i < txNum; i++ {
				tx, nextIndex, err := GetNextTx(chunkBytes, curIndex)
				if err != nil {
					return fmt.Errorf("couldn't decode next tx from blob bytes: %w, index: %d", err, index+curIndex+4)
				}
				curIndex = nextIndex
				blockTransactions = append(blockTransactions, tx)
			}
			transactions = append(transactions, blockTransactions)
		}
		chunk.Transactions = transactions
		index += chunkSize
	}
	return nil
}

// DecodeTxsFromBlob decodes txs from blob bytes and writes to chunks.
// It extracts the canonical byte payload from the blob and delegates the
// actual parsing to DecodeTxsFromBytes with this codec's MaxNumChunks.
func DecodeTxsFromBlob(blob *kzg4844.Blob, chunks []*DAChunkRawTx) error {
	canonicalBytes := encoding.BytesFromBlobCanonical(blob)
	return DecodeTxsFromBytes(canonicalBytes[:], chunks, MaxNumChunks)
}

// errSmallLength is returned by GetNextTx when the input ends before the next
// transaction's RLP header or payload is complete. (The explicit `error` type
// annotation was redundant and has been dropped.)
var errSmallLength = fmt.Errorf("length of blob bytes is too small")

// GetNextTx parses blob bytes to find length of payload of next Tx and decode it.
//
// The bytes at index hold either a typed-transaction envelope (first byte
// <= 0x7f is the type, followed by an RLP list) or a legacy transaction's RLP
// list directly. It returns the decoded tx and the index just past its
// encoding; errSmallLength is returned whenever the input is truncated.
func GetNextTx(bytes []byte, index int) (*types.Transaction, int, error) {
	var nextIndex int
	length := len(bytes)
	if length < index+1 {
		return nil, 0, errSmallLength
	}
	var txBytes []byte
	if bytes[index] <= 0x7f {
		// the first byte is transaction type, rlp encoding begins from next byte
		txBytes = append(txBytes, bytes[index])
		index++
	}
	if length < index+1 {
		return nil, 0, errSmallLength
	}
	switch {
	case bytes[index] >= 0xc0 && bytes[index] <= 0xf7:
		// short list: length of payload is simply bytes[index] - 0xc0
		payloadLen := int(bytes[index] - 0xc0)
		if length < index+1+payloadLen {
			return nil, 0, errSmallLength
		}
		txBytes = append(txBytes, bytes[index:index+1+payloadLen]...)
		nextIndex = index + 1 + payloadLen
	case bytes[index] > 0xf7:
		// long list: the length of payload is encoded in the next
		// bytes[index] - 0xf7 bytes (at most 8, since 0xff - 0xf7 == 8)
		lenPayloadLen := int(bytes[index] - 0xf7)
		if length < index+1+lenPayloadLen {
			return nil, 0, errSmallLength
		}
		// left-pad to 8 bytes so the big-endian read below is well-defined
		lenBytes := bytes[index+1 : index+1+lenPayloadLen]
		for len(lenBytes) < 8 {
			lenBytes = append([]byte{0x0}, lenBytes...)
		}
		payloadLen := binary.BigEndian.Uint64(lenBytes)

		// reject lengths larger than the input itself: they cannot be valid, and
		// converting such a value to int below could overflow to a negative
		// number, defeating the bounds check and panicking on the slice.
		if payloadLen > uint64(length) {
			return nil, 0, errSmallLength
		}
		if length < index+1+lenPayloadLen+int(payloadLen) {
			return nil, 0, errSmallLength
		}
		txBytes = append(txBytes, bytes[index:index+1+lenPayloadLen+int(payloadLen)]...)
		nextIndex = index + 1 + lenPayloadLen + int(payloadLen)
	default:
		// 0x80..0xbf would be an RLP string, which is not a valid tx encoding here
		return nil, 0, fmt.Errorf("incorrect format of rlp encoding")
	}
	tx := &types.Transaction{}
	if err := tx.UnmarshalBinary(txBytes); err != nil {
		return nil, 0, fmt.Errorf("failed to unmarshal tx, err: %w", err)
	}
	return tx, nextIndex, nil
}

// NewDABatchFromBytes decodes the given byte slice into a DABatch.
// Note: This function only populates the batch header, it leaves the blob-related fields empty.
func NewDABatchFromBytes(data []byte) (*DABatch, error) {
Expand Down
Loading

0 comments on commit 41c6486

Please sign in to comment.