Commit b885af8: fix CI

colinlyguo committed Oct 11, 2024
1 parent d2350ff
Showing 6 changed files with 60 additions and 60 deletions.
6 changes: 3 additions & 3 deletions encoding/codecv0.go
@@ -15,8 +15,8 @@ import (

type DACodecV0 struct{}

-// Codecv0MaxNumChunks is the maximum number of chunks that a batch can contain.
-const Codecv0MaxNumChunks = 15
+// codecv0MaxNumChunks is the maximum number of chunks that a batch can contain.
+const codecv0MaxNumChunks = 15

// Version returns the codec version.
func (o *DACodecV0) Version() CodecVersion {
@@ -25,7 +25,7 @@ func (o *DACodecV0) Version() CodecVersion {

// MaxNumChunksPerBatch returns the maximum number of chunks per batch.
func (o *DACodecV0) MaxNumChunksPerBatch() uint64 {
-return Codecv0MaxNumChunks
+return codecv0MaxNumChunks
}

// NewDABlock creates a new DABlock from the given Block and the total number of L1 messages popped before.
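
Note: with this commit the per-version constants become unexported, so code outside the encoding package must obtain the limit through MaxNumChunksPerBatch(). A minimal sketch of that access pattern, assuming a Codec interface with the method shown above; the stub types here are illustrative, not the package's real definitions.

package main

import "fmt"

// Codec is a stand-in for the package's interface; only the method visible
// in the diff is assumed here.
type Codec interface {
	MaxNumChunksPerBatch() uint64
}

type DACodecV0 struct{}

// MaxNumChunksPerBatch mirrors the method above: it returns the value of the
// now-unexported codecv0MaxNumChunks constant (15).
func (o *DACodecV0) MaxNumChunksPerBatch() uint64 { return 15 }

func main() {
	var c Codec = &DACodecV0{}
	fmt.Println(c.MaxNumChunksPerBatch()) // prints 15
}
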
24 changes: 12 additions & 12 deletions encoding/codecv1.go
@@ -17,8 +17,8 @@ import (

type DACodecV1 struct{}

-// Codecv1MaxNumChunks is the maximum number of chunks that a batch can contain.
-const Codecv1MaxNumChunks = 15
+// codecv1MaxNumChunks is the maximum number of chunks that a batch can contain.
+const codecv1MaxNumChunks = 15

// Version returns the codec version.
func (o *DACodecV1) Version() CodecVersion {
@@ -27,7 +27,7 @@ func (o *DACodecV1) Version() CodecVersion {

// MaxNumChunksPerBatch returns the maximum number of chunks per batch.
func (o *DACodecV1) MaxNumChunksPerBatch() uint64 {
-return Codecv1MaxNumChunks
+return codecv1MaxNumChunks
}

// NewDABlock creates a new DABlock from the given Block and the total number of L1 messages popped before.
@@ -134,13 +134,13 @@ func (o *DACodecV1) DecodeDAChunksRawTx(bytes [][]byte) ([]*DAChunkRawTx, error)
// DecodeTxsFromBlob decodes txs from blob bytes and writes to chunks
func (o *DACodecV1) DecodeTxsFromBlob(blob *kzg4844.Blob, chunks []*DAChunkRawTx) error {
batchBytes := BytesFromBlobCanonical(blob)
-return DecodeTxsFromBytes(batchBytes[:], chunks, Codecv1MaxNumChunks)
+return DecodeTxsFromBytes(batchBytes[:], chunks, codecv1MaxNumChunks)
}

// NewDABatch creates a DABatch from the provided Batch.
func (o *DACodecV1) NewDABatch(batch *Batch) (DABatch, error) {
// this encoding can only support a fixed number of chunks per batch
-if len(batch.Chunks) > Codecv1MaxNumChunks {
+if len(batch.Chunks) > codecv1MaxNumChunks {
return nil, errors.New("too many chunks in batch")
}

@@ -200,14 +200,14 @@ func (o *DACodecV1) NewDABatchWithExpectedBlobVersionedHashes(batch *Batch, hash
// constructBlobPayload constructs the 4844 blob payload.
func (o *DACodecV1) constructBlobPayload(chunks []*Chunk, useMockTxData bool) (*kzg4844.Blob, common.Hash, *kzg4844.Point, error) {
// metadata consists of num_chunks (2 bytes) and chunki_size (4 bytes per chunk)
-metadataLength := 2 + Codecv1MaxNumChunks*4
+metadataLength := 2 + codecv1MaxNumChunks*4

// the raw (un-padded) blob payload
blobBytes := make([]byte, metadataLength)

// challenge digest preimage
// 1 hash for metadata, 1 hash for each chunk, 1 hash for blob versioned hash
-challengePreimage := make([]byte, (1+Codecv1MaxNumChunks+1)*32)
+challengePreimage := make([]byte, (1+codecv1MaxNumChunks+1)*32)

// the chunk data hash used for calculating the challenge preimage
var chunkDataHash common.Hash
@@ -245,10 +245,10 @@ func (o *DACodecV1) constructBlobPayload(chunks []*Chunk, useMockTxData bool) (*
copy(challengePreimage[32+chunkID*32:], chunkDataHash[:])
}

-// if we have fewer than Codecv1MaxNumChunks chunks, the rest
+// if we have fewer than codecv1MaxNumChunks chunks, the rest
// of the blob metadata is correctly initialized to 0,
// but we need to add padding to the challenge preimage
-for chunkID := len(chunks); chunkID < Codecv1MaxNumChunks; chunkID++ {
+for chunkID := len(chunks); chunkID < codecv1MaxNumChunks; chunkID++ {
// use the last chunk's data hash as padding
copy(challengePreimage[32+chunkID*32:], chunkDataHash[:])
}
@@ -271,7 +271,7 @@ func (o *DACodecV1) constructBlobPayload(chunks []*Chunk, useMockTxData bool) (*
blobVersionedHash := kzg4844.CalcBlobHashV1(sha256.New(), &c)

// challenge: append blob versioned hash
-copy(challengePreimage[(1+Codecv1MaxNumChunks)*32:], blobVersionedHash[:])
+copy(challengePreimage[(1+codecv1MaxNumChunks)*32:], blobVersionedHash[:])

// compute z = challenge_digest % BLS_MODULUS
challengeDigest := crypto.Keccak256Hash(challengePreimage)
@@ -463,7 +463,7 @@ func (o *DACodecV1) CheckBatchCompressedDataCompatibility(b *Batch) (bool, error

// EstimateChunkL1CommitBatchSizeAndBlobSize estimates the L1 commit uncompressed batch size and compressed blob size for a single chunk.
func (o *DACodecV1) EstimateChunkL1CommitBatchSizeAndBlobSize(c *Chunk) (uint64, uint64, error) {
-metadataSize := uint64(2 + 4*Codecv1MaxNumChunks)
+metadataSize := uint64(2 + 4*codecv1MaxNumChunks)
batchDataSize, err := o.chunkL1CommitBlobDataSize(c)
if err != nil {
return 0, 0, err
@@ -474,7 +474,7 @@ func (o *DACodecV1) EstimateChunkL1CommitBatchSizeAndBlobSize(c *Chunk) (uint64,

// EstimateBatchL1CommitBatchSizeAndBlobSize estimates the L1 commit uncompressed batch size and compressed blob size for a batch.
func (o *DACodecV1) EstimateBatchL1CommitBatchSizeAndBlobSize(b *Batch) (uint64, uint64, error) {
-metadataSize := uint64(2 + 4*Codecv1MaxNumChunks)
+metadataSize := uint64(2 + 4*codecv1MaxNumChunks)
var batchDataSize uint64
for _, c := range b.Chunks {
chunkDataSize, err := o.chunkL1CommitBlobDataSize(c)
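
The renamed constant also drives the blob layout arithmetic in constructBlobPayload above. A small self-contained sketch of that arithmetic for codecv1MaxNumChunks = 15, reproducing the expressions from the diff; the standalone program is illustrative only.

package main

import "fmt"

func main() {
	const maxNumChunks = 15 // codecv1MaxNumChunks

	// metadata consists of num_chunks (2 bytes) and chunki_size (4 bytes per chunk)
	metadataLength := 2 + maxNumChunks*4
	fmt.Println(metadataLength) // 62

	// challenge digest preimage:
	// 1 hash for metadata, 1 hash for each chunk, 1 hash for blob versioned hash
	challengePreimageLen := (1 + maxNumChunks + 1) * 32
	fmt.Println(challengePreimageLen) // 544

	// chunk i's data hash is copied to offset 32 + i*32; the blob versioned
	// hash is copied to offset (1+maxNumChunks)*32
	fmt.Println(32+0*32, (1+maxNumChunks)*32) // 32 512
}
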
28 changes: 14 additions & 14 deletions encoding/codecv2.go
@@ -21,8 +21,8 @@ import (

type DACodecV2 struct{}

-// Codecv2MaxNumChunks is the maximum number of chunks that a batch can contain.
-const Codecv2MaxNumChunks = 45
+// codecv2MaxNumChunks is the maximum number of chunks that a batch can contain.
+const codecv2MaxNumChunks = 45

// Version returns the codec version.
func (o *DACodecV2) Version() CodecVersion {
@@ -31,7 +31,7 @@ func (o *DACodecV2) Version() CodecVersion {

// MaxNumChunksPerBatch returns the maximum number of chunks per batch.
func (o *DACodecV2) MaxNumChunksPerBatch() uint64 {
-return Codecv2MaxNumChunks
+return codecv2MaxNumChunks
}

// NewDABlock creates a new DABlock from the given Block and the total number of L1 messages popped before.
@@ -144,13 +144,13 @@ func (o *DACodecV2) DecodeTxsFromBlob(blob *kzg4844.Blob, chunks []*DAChunkRawTx
if err != nil {
return err
}
-return DecodeTxsFromBytes(batchBytes, chunks, Codecv2MaxNumChunks)
+return DecodeTxsFromBytes(batchBytes, chunks, codecv2MaxNumChunks)
}

// NewDABatch creates a DABatch from the provided Batch.
func (o *DACodecV2) NewDABatch(batch *Batch) (DABatch, error) {
// this encoding can only support a fixed number of chunks per batch
-if len(batch.Chunks) > Codecv2MaxNumChunks {
+if len(batch.Chunks) > codecv2MaxNumChunks {
return nil, errors.New("too many chunks in batch")
}

@@ -210,14 +210,14 @@ func (o *DACodecV2) NewDABatchWithExpectedBlobVersionedHashes(batch *Batch, hash
// constructBlobPayload constructs the 4844 blob payload.
func (o *DACodecV2) constructBlobPayload(chunks []*Chunk, useMockTxData bool) (*kzg4844.Blob, common.Hash, *kzg4844.Point, []byte, error) {
// metadata consists of num_chunks (2 bytes) and chunki_size (4 bytes per chunk)
-metadataLength := 2 + Codecv2MaxNumChunks*4
+metadataLength := 2 + codecv2MaxNumChunks*4

// batchBytes represents the raw (un-compressed and un-padded) blob payload
batchBytes := make([]byte, metadataLength)

// challenge digest preimage
// 1 hash for metadata, 1 hash for each chunk, 1 hash for blob versioned hash
-challengePreimage := make([]byte, (1+Codecv2MaxNumChunks+1)*32)
+challengePreimage := make([]byte, (1+codecv2MaxNumChunks+1)*32)

// the chunk data hash used for calculating the challenge preimage
var chunkDataHash common.Hash
@@ -255,10 +255,10 @@ func (o *DACodecV2) constructBlobPayload(chunks []*Chunk, useMockTxData bool) (*
copy(challengePreimage[32+chunkID*32:], chunkDataHash[:])
}

-// if we have fewer than Codecv2MaxNumChunks chunks, the rest
+// if we have fewer than codecv2MaxNumChunks chunks, the rest
// of the blob metadata is correctly initialized to 0,
// but we need to add padding to the challenge preimage
-for chunkID := len(chunks); chunkID < Codecv2MaxNumChunks; chunkID++ {
+for chunkID := len(chunks); chunkID < codecv2MaxNumChunks; chunkID++ {
// use the last chunk's data hash as padding
copy(challengePreimage[32+chunkID*32:], chunkDataHash[:])
}
@@ -301,7 +301,7 @@ func (o *DACodecV2) constructBlobPayload(chunks []*Chunk, useMockTxData bool) (*
blobVersionedHash := kzg4844.CalcBlobHashV1(sha256.New(), &c)

// challenge: append blob versioned hash
-copy(challengePreimage[(1+Codecv2MaxNumChunks)*32:], blobVersionedHash[:])
+copy(challengePreimage[(1+codecv2MaxNumChunks)*32:], blobVersionedHash[:])

// compute z = challenge_digest % BLS_MODULUS
challengeDigest := crypto.Keccak256Hash(challengePreimage)
@@ -345,7 +345,7 @@ func (o *DACodecV2) NewDABatchFromBytes(data []byte) (DABatch, error) {

// EstimateChunkL1CommitBatchSizeAndBlobSize estimates the L1 commit uncompressed batch size and compressed blob size for a single chunk.
func (o *DACodecV2) EstimateChunkL1CommitBatchSizeAndBlobSize(c *Chunk) (uint64, uint64, error) {
-batchBytes, err := constructBatchPayloadInBlob([]*Chunk{c}, Codecv2MaxNumChunks)
+batchBytes, err := constructBatchPayloadInBlob([]*Chunk{c}, o)
if err != nil {
return 0, 0, err
}
@@ -358,7 +358,7 @@ func (o *DACodecV2) EstimateChunkL1CommitBatchSizeAndBlobSize(c *Chunk) (uint64,

// EstimateBatchL1CommitBatchSizeAndBlobSize estimates the L1 commit uncompressed batch size and compressed blob size for a batch.
func (o *DACodecV2) EstimateBatchL1CommitBatchSizeAndBlobSize(b *Batch) (uint64, uint64, error) {
-batchBytes, err := constructBatchPayloadInBlob(b.Chunks, Codecv2MaxNumChunks)
+batchBytes, err := constructBatchPayloadInBlob(b.Chunks, o)
if err != nil {
return 0, 0, err
}
@@ -372,7 +372,7 @@ func (o *DACodecV2) EstimateBatchL1CommitBatchSizeAndBlobSize(b *Batch) (uint64,
// CheckChunkCompressedDataCompatibility checks the compressed data compatibility for a batch built from a single chunk.
// It constructs a batch payload, compresses the data, and checks the compressed data compatibility if the uncompressed data exceeds 128 KiB.
func (o *DACodecV2) CheckChunkCompressedDataCompatibility(c *Chunk) (bool, error) {
-batchBytes, err := constructBatchPayloadInBlob([]*Chunk{c}, Codecv2MaxNumChunks)
+batchBytes, err := constructBatchPayloadInBlob([]*Chunk{c}, o)
if err != nil {
return false, err
}
@@ -394,7 +394,7 @@ func (o *DACodecV2) CheckBatchCompressedDataCompatibility(b *Batch) (bool, error
// CheckBatchCompressedDataCompatibility checks the compressed data compatibility for a batch.
// It constructs a batch payload, compresses the data, and checks the compressed data compatibility if the uncompressed data exceeds 128 KiB.
func (o *DACodecV2) CheckBatchCompressedDataCompatibility(b *Batch) (bool, error) {
-batchBytes, err := constructBatchPayloadInBlob(b.Chunks, Codecv2MaxNumChunks)
+batchBytes, err := constructBatchPayloadInBlob(b.Chunks, o)
if err != nil {
return false, err
}
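
Note the second argument at the call sites above: they now pass the codec receiver o instead of the Codecv2MaxNumChunks constant, which implies constructBatchPayloadInBlob now derives the chunk limit from the codec itself. A hedged sketch of what such a signature could look like, with stand-in types; the real helper lives elsewhere in the package and may differ.

package encoding

// Chunk and Codec are stand-ins for the package's real types, shown only to
// illustrate the assumed shape of the helper after this change.
type Chunk struct{}

type Codec interface {
	MaxNumChunksPerBatch() uint64
}

// constructBatchPayloadInBlob (sketch): the chunk limit comes from the codec
// argument rather than a package-level constant.
func constructBatchPayloadInBlob(chunks []*Chunk, codec Codec) ([]byte, error) {
	maxNumChunks := codec.MaxNumChunksPerBatch()

	// metadata: num_chunks (2 bytes) plus a 4-byte size slot per chunk
	batchBytes := make([]byte, 2+maxNumChunks*4)

	// per-chunk transaction payloads would be appended after the metadata
	_ = chunks
	return batchBytes, nil
}
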
28 changes: 14 additions & 14 deletions encoding/codecv3.go
@@ -22,8 +22,8 @@ import (

type DACodecV3 struct{}

-// Codecv3MaxNumChunks is the maximum number of chunks that a batch can contain.
-const Codecv3MaxNumChunks = 45
+// codecv3MaxNumChunks is the maximum number of chunks that a batch can contain.
+const codecv3MaxNumChunks = 45

// Version returns the codec version.
func (o *DACodecV3) Version() CodecVersion {
@@ -32,7 +32,7 @@ func (o *DACodecV3) Version() CodecVersion {

// MaxNumChunksPerBatch returns the maximum number of chunks per batch.
func (o *DACodecV3) MaxNumChunksPerBatch() uint64 {
-return Codecv3MaxNumChunks
+return codecv3MaxNumChunks
}

// NewDABlock creates a new DABlock from the given Block and the total number of L1 messages popped before.
@@ -145,13 +145,13 @@ func (o *DACodecV3) DecodeTxsFromBlob(blob *kzg4844.Blob, chunks []*DAChunkRawTx
if err != nil {
return err
}
-return DecodeTxsFromBytes(batchBytes, chunks, Codecv3MaxNumChunks)
+return DecodeTxsFromBytes(batchBytes, chunks, codecv3MaxNumChunks)
}

// NewDABatch creates a DABatch from the provided Batch.
func (o *DACodecV3) NewDABatch(batch *Batch) (DABatch, error) {
// this encoding can only support a fixed number of chunks per batch
-if len(batch.Chunks) > Codecv3MaxNumChunks {
+if len(batch.Chunks) > codecv3MaxNumChunks {
return nil, errors.New("too many chunks in batch")
}

@@ -218,14 +218,14 @@ func (o *DACodecV3) NewDABatchWithExpectedBlobVersionedHashes(batch *Batch, hash
// constructBlobPayload constructs the 4844 blob payload.
func (o *DACodecV3) constructBlobPayload(chunks []*Chunk, useMockTxData bool) (*kzg4844.Blob, common.Hash, *kzg4844.Point, []byte, error) {
// metadata consists of num_chunks (2 bytes) and chunki_size (4 bytes per chunk)
-metadataLength := 2 + Codecv3MaxNumChunks*4
+metadataLength := 2 + codecv3MaxNumChunks*4

// batchBytes represents the raw (un-compressed and un-padded) blob payload
batchBytes := make([]byte, metadataLength)

// challenge digest preimage
// 1 hash for metadata, 1 hash for each chunk, 1 hash for blob versioned hash
-challengePreimage := make([]byte, (1+Codecv3MaxNumChunks+1)*32)
+challengePreimage := make([]byte, (1+codecv3MaxNumChunks+1)*32)

// the chunk data hash used for calculating the challenge preimage
var chunkDataHash common.Hash
@@ -263,10 +263,10 @@ func (o *DACodecV3) constructBlobPayload(chunks []*Chunk, useMockTxData bool) (*
copy(challengePreimage[32+chunkID*32:], chunkDataHash[:])
}

-// if we have fewer than Codecv2MaxNumChunks chunks, the rest
+// if we have fewer than codecv3MaxNumChunks chunks, the rest
// of the blob metadata is correctly initialized to 0,
// but we need to add padding to the challenge preimage
-for chunkID := len(chunks); chunkID < Codecv3MaxNumChunks; chunkID++ {
+for chunkID := len(chunks); chunkID < codecv3MaxNumChunks; chunkID++ {
// use the last chunk's data hash as padding
copy(challengePreimage[32+chunkID*32:], chunkDataHash[:])
}
Expand Down Expand Up @@ -309,7 +309,7 @@ func (o *DACodecV3) constructBlobPayload(chunks []*Chunk, useMockTxData bool) (*
blobVersionedHash := kzg4844.CalcBlobHashV1(sha256.New(), &c)

// challenge: append blob versioned hash
-copy(challengePreimage[(1+Codecv3MaxNumChunks)*32:], blobVersionedHash[:])
+copy(challengePreimage[(1+codecv3MaxNumChunks)*32:], blobVersionedHash[:])

// compute z = challenge_digest % BLS_MODULUS
challengeDigest := crypto.Keccak256Hash(challengePreimage)
@@ -359,7 +359,7 @@ func (o *DACodecV3) NewDABatchFromBytes(data []byte) (DABatch, error) {

// EstimateChunkL1CommitBatchSizeAndBlobSize estimates the L1 commit uncompressed batch size and compressed blob size for a single chunk.
func (o *DACodecV3) EstimateChunkL1CommitBatchSizeAndBlobSize(c *Chunk) (uint64, uint64, error) {
-batchBytes, err := constructBatchPayloadInBlob([]*Chunk{c}, Codecv3MaxNumChunks)
+batchBytes, err := constructBatchPayloadInBlob([]*Chunk{c}, o)
if err != nil {
return 0, 0, err
}
Expand All @@ -372,7 +372,7 @@ func (o *DACodecV3) EstimateChunkL1CommitBatchSizeAndBlobSize(c *Chunk) (uint64,

// EstimateBatchL1CommitBatchSizeAndBlobSize estimates the L1 commit uncompressed batch size and compressed blob size for a batch.
func (o *DACodecV3) EstimateBatchL1CommitBatchSizeAndBlobSize(b *Batch) (uint64, uint64, error) {
-batchBytes, err := constructBatchPayloadInBlob(b.Chunks, Codecv3MaxNumChunks)
+batchBytes, err := constructBatchPayloadInBlob(b.Chunks, o)
if err != nil {
return 0, 0, err
}
@@ -385,7 +385,7 @@ func (o *DACodecV3) EstimateBatchL1CommitBatchSizeAndBlobSize(b *Batch) (uint64,

// CheckChunkCompressedDataCompatibility checks the compressed data compatibility for a batch built from a single chunk.
func (o *DACodecV3) CheckChunkCompressedDataCompatibility(c *Chunk) (bool, error) {
-batchBytes, err := constructBatchPayloadInBlob([]*Chunk{c}, Codecv3MaxNumChunks)
+batchBytes, err := constructBatchPayloadInBlob([]*Chunk{c}, o)
if err != nil {
return false, err
}
@@ -406,7 +406,7 @@ func (o *DACodecV3) CheckChunkCompressedDataCompatibility(c *Chunk) (bool, error

// CheckBatchCompressedDataCompatibility checks the compressed data compatibility for a batch.
func (o *DACodecV3) CheckBatchCompressedDataCompatibility(b *Batch) (bool, error) {
-batchBytes, err := constructBatchPayloadInBlob(b.Chunks, Codecv3MaxNumChunks)
+batchBytes, err := constructBatchPayloadInBlob(b.Chunks, o)
if err != nil {
return false, err
}
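
Each NewDABatch in this diff guards the chunk count against its version's limit before encoding. A minimal sketch of that guard written against the interface method rather than the now-unexported constants, using codecv3MaxNumChunks = 45; the stub types are illustrative.

package main

import (
	"errors"
	"fmt"
)

type Chunk struct{}

type Batch struct{ Chunks []*Chunk }

type Codec interface {
	MaxNumChunksPerBatch() uint64
}

type DACodecV3 struct{}

func (o *DACodecV3) MaxNumChunksPerBatch() uint64 { return 45 } // codecv3MaxNumChunks

// checkChunkCount mirrors the guard at the top of NewDABatch:
// this encoding can only support a fixed number of chunks per batch.
func checkChunkCount(c Codec, b *Batch) error {
	if uint64(len(b.Chunks)) > c.MaxNumChunksPerBatch() {
		return errors.New("too many chunks in batch")
	}
	return nil
}

func main() {
	b := &Batch{Chunks: make([]*Chunk, 46)}
	fmt.Println(checkChunkCount(&DACodecV3{}, b)) // too many chunks in batch
}
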
(The remaining two changed files are not shown here.)
