diff --git a/encoding/codecv0.go b/encoding/codecv0.go
index 40d8f34..edd06c0 100644
--- a/encoding/codecv0.go
+++ b/encoding/codecv0.go
@@ -15,8 +15,8 @@ import (
 
 type DACodecV0 struct{}
 
-// Codecv0MaxNumChunks is the maximum number of chunks that a batch can contain.
-const Codecv0MaxNumChunks = 15
+// codecv0MaxNumChunks is the maximum number of chunks that a batch can contain.
+const codecv0MaxNumChunks = 15
 
 // Version returns the codec version.
 func (o *DACodecV0) Version() CodecVersion {
@@ -25,7 +25,7 @@ func (o *DACodecV0) Version() CodecVersion {
 
 // MaxNumChunksPerBatch returns the maximum number of chunks per batch.
 func (o *DACodecV0) MaxNumChunksPerBatch() uint64 {
-	return Codecv0MaxNumChunks
+	return codecv0MaxNumChunks
 }
 
 // NewDABlock creates a new DABlock from the given Block and the total number of L1 messages popped before.
diff --git a/encoding/codecv1.go b/encoding/codecv1.go
index 7efa304..ae0ed1c 100644
--- a/encoding/codecv1.go
+++ b/encoding/codecv1.go
@@ -17,8 +17,8 @@ import (
 
 type DACodecV1 struct{}
 
-// Codecv1MaxNumChunks is the maximum number of chunks that a batch can contain.
-const Codecv1MaxNumChunks = 15
+// codecv1MaxNumChunks is the maximum number of chunks that a batch can contain.
+const codecv1MaxNumChunks = 15
 
 // Version returns the codec version.
 func (o *DACodecV1) Version() CodecVersion {
@@ -27,7 +27,7 @@ func (o *DACodecV1) Version() CodecVersion {
 
 // MaxNumChunksPerBatch returns the maximum number of chunks per batch.
 func (o *DACodecV1) MaxNumChunksPerBatch() uint64 {
-	return Codecv1MaxNumChunks
+	return codecv1MaxNumChunks
 }
 
 // NewDABlock creates a new DABlock from the given Block and the total number of L1 messages popped before.
@@ -134,13 +134,13 @@ func (o *DACodecV1) DecodeDAChunksRawTx(bytes [][]byte) ([]*DAChunkRawTx, error)
 // DecodeTxsFromBlob decodes txs from blob bytes and writes to chunks
 func (o *DACodecV1) DecodeTxsFromBlob(blob *kzg4844.Blob, chunks []*DAChunkRawTx) error {
 	batchBytes := BytesFromBlobCanonical(blob)
-	return DecodeTxsFromBytes(batchBytes[:], chunks, Codecv1MaxNumChunks)
+	return DecodeTxsFromBytes(batchBytes[:], chunks, codecv1MaxNumChunks)
 }
 
 // NewDABatch creates a DABatch from the provided Batch.
 func (o *DACodecV1) NewDABatch(batch *Batch) (DABatch, error) {
 	// this encoding can only support a fixed number of chunks per batch
-	if len(batch.Chunks) > Codecv1MaxNumChunks {
+	if len(batch.Chunks) > codecv1MaxNumChunks {
 		return nil, errors.New("too many chunks in batch")
 	}
 
@@ -200,14 +200,14 @@ func (o *DACodecV1) NewDABatchWithExpectedBlobVersionedHashes(batch *Batch, hash
 
 // constructBlobPayload constructs the 4844 blob payload.
 func (o *DACodecV1) constructBlobPayload(chunks []*Chunk, useMockTxData bool) (*kzg4844.Blob, common.Hash, *kzg4844.Point, error) {
 	// metadata consists of num_chunks (2 bytes) and chunki_size (4 bytes per chunk)
-	metadataLength := 2 + Codecv1MaxNumChunks*4
+	metadataLength := 2 + codecv1MaxNumChunks*4
 
 	// the raw (un-padded) blob payload
 	blobBytes := make([]byte, metadataLength)
 
 	// challenge digest preimage
 	// 1 hash for metadata, 1 hash for each chunk, 1 hash for blob versioned hash
-	challengePreimage := make([]byte, (1+Codecv1MaxNumChunks+1)*32)
+	challengePreimage := make([]byte, (1+codecv1MaxNumChunks+1)*32)
 
 	// the chunk data hash used for calculating the challenge preimage
 	var chunkDataHash common.Hash
@@ -245,10 +245,10 @@ func (o *DACodecV1) constructBlobPayload(chunks []*Chunk, useMockTxData bool) (*
 		copy(challengePreimage[32+chunkID*32:], chunkDataHash[:])
 	}
 
-	// if we have fewer than Codecv1MaxNumChunks chunks, the rest
+	// if we have fewer than codecv1MaxNumChunks chunks, the rest
 	// of the blob metadata is correctly initialized to 0,
 	// but we need to add padding to the challenge preimage
-	for chunkID := len(chunks); chunkID < Codecv1MaxNumChunks; chunkID++ {
+	for chunkID := len(chunks); chunkID < codecv1MaxNumChunks; chunkID++ {
 		// use the last chunk's data hash as padding
 		copy(challengePreimage[32+chunkID*32:], chunkDataHash[:])
 	}
@@ -271,7 +271,7 @@ func (o *DACodecV1) constructBlobPayload(chunks []*Chunk, useMockTxData bool) (*
 	blobVersionedHash := kzg4844.CalcBlobHashV1(sha256.New(), &c)
 
 	// challenge: append blob versioned hash
-	copy(challengePreimage[(1+Codecv1MaxNumChunks)*32:], blobVersionedHash[:])
+	copy(challengePreimage[(1+codecv1MaxNumChunks)*32:], blobVersionedHash[:])
 
 	// compute z = challenge_digest % BLS_MODULUS
 	challengeDigest := crypto.Keccak256Hash(challengePreimage)
@@ -463,7 +463,7 @@ func (o *DACodecV1) CheckBatchCompressedDataCompatibility(b *Batch) (bool, error
 
 // EstimateChunkL1CommitBatchSizeAndBlobSize estimates the L1 commit uncompressed batch size and compressed blob size for a single chunk.
 func (o *DACodecV1) EstimateChunkL1CommitBatchSizeAndBlobSize(c *Chunk) (uint64, uint64, error) {
-	metadataSize := uint64(2 + 4*Codecv1MaxNumChunks)
+	metadataSize := uint64(2 + 4*codecv1MaxNumChunks)
 	batchDataSize, err := o.chunkL1CommitBlobDataSize(c)
 	if err != nil {
 		return 0, 0, err
@@ -474,7 +474,7 @@ func (o *DACodecV1) EstimateChunkL1CommitBatchSizeAndBlobSize(c *Chunk) (uint64,
 
 // EstimateBatchL1CommitBatchSizeAndBlobSize estimates the L1 commit uncompressed batch size and compressed blob size for a batch.
 func (o *DACodecV1) EstimateBatchL1CommitBatchSizeAndBlobSize(b *Batch) (uint64, uint64, error) {
-	metadataSize := uint64(2 + 4*Codecv1MaxNumChunks)
+	metadataSize := uint64(2 + 4*codecv1MaxNumChunks)
 	var batchDataSize uint64
 	for _, c := range b.Chunks {
 		chunkDataSize, err := o.chunkL1CommitBlobDataSize(c)
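Note on the codecv0/codecv1 hunks above: lowering the exported Codecv0MaxNumChunks/Codecv1MaxNumChunks constants to unexported names removes them from the package API, so downstream code can only learn a version's chunk limit through the MaxNumChunksPerBatch accessor. A minimal sketch of what a caller might look like after this change (the trimmed Codec interface and the validateChunkCount helper are illustrative assumptions, not part of this diff):

    package chunkcheck

    import "fmt"

    // Codec is a trimmed stand-in for the package's Codec interface, keeping
    // only the accessor that this diff routes all callers through.
    type Codec interface {
    	MaxNumChunksPerBatch() uint64
    }

    // validateChunkCount is a hypothetical downstream check: with the exported
    // constants gone, the limit is reachable only via the interface method.
    func validateChunkCount(codec Codec, numChunks int) error {
    	if uint64(numChunks) > codec.MaxNumChunksPerBatch() {
    		return fmt.Errorf("too many chunks in batch: %d > %d", numChunks, codec.MaxNumChunksPerBatch())
    	}
    	return nil
    }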
diff --git a/encoding/codecv2.go b/encoding/codecv2.go
index 50a2186..f493504 100644
--- a/encoding/codecv2.go
+++ b/encoding/codecv2.go
@@ -21,8 +21,8 @@ import (
 
 type DACodecV2 struct{}
 
-// Codecv2MaxNumChunks is the maximum number of chunks that a batch can contain.
-const Codecv2MaxNumChunks = 45
+// codecv2MaxNumChunks is the maximum number of chunks that a batch can contain.
+const codecv2MaxNumChunks = 45
 
 // Version returns the codec version.
 func (o *DACodecV2) Version() CodecVersion {
@@ -31,7 +31,7 @@ func (o *DACodecV2) Version() CodecVersion {
 
 // MaxNumChunksPerBatch returns the maximum number of chunks per batch.
 func (o *DACodecV2) MaxNumChunksPerBatch() uint64 {
-	return Codecv2MaxNumChunks
+	return codecv2MaxNumChunks
 }
 
 // NewDABlock creates a new DABlock from the given Block and the total number of L1 messages popped before.
@@ -144,13 +144,13 @@ func (o *DACodecV2) DecodeTxsFromBlob(blob *kzg4844.Blob, chunks []*DAChunkRawTx
 	if err != nil {
 		return err
 	}
-	return DecodeTxsFromBytes(batchBytes, chunks, Codecv2MaxNumChunks)
+	return DecodeTxsFromBytes(batchBytes, chunks, codecv2MaxNumChunks)
 }
 
 // NewDABatch creates a DABatch from the provided Batch.
 func (o *DACodecV2) NewDABatch(batch *Batch) (DABatch, error) {
 	// this encoding can only support a fixed number of chunks per batch
-	if len(batch.Chunks) > Codecv2MaxNumChunks {
+	if len(batch.Chunks) > codecv2MaxNumChunks {
 		return nil, errors.New("too many chunks in batch")
 	}
 
@@ -210,14 +210,14 @@ func (o *DACodecV2) NewDABatchWithExpectedBlobVersionedHashes(batch *Batch, hash
 
 // constructBlobPayload constructs the 4844 blob payload.
 func (o *DACodecV2) constructBlobPayload(chunks []*Chunk, useMockTxData bool) (*kzg4844.Blob, common.Hash, *kzg4844.Point, []byte, error) {
 	// metadata consists of num_chunks (2 bytes) and chunki_size (4 bytes per chunk)
-	metadataLength := 2 + Codecv2MaxNumChunks*4
+	metadataLength := 2 + codecv2MaxNumChunks*4
 
 	// batchBytes represents the raw (un-compressed and un-padded) blob payload
 	batchBytes := make([]byte, metadataLength)
 
 	// challenge digest preimage
 	// 1 hash for metadata, 1 hash for each chunk, 1 hash for blob versioned hash
-	challengePreimage := make([]byte, (1+Codecv2MaxNumChunks+1)*32)
+	challengePreimage := make([]byte, (1+codecv2MaxNumChunks+1)*32)
 
 	// the chunk data hash used for calculating the challenge preimage
 	var chunkDataHash common.Hash
@@ -255,10 +255,10 @@ func (o *DACodecV2) constructBlobPayload(chunks []*Chunk, useMockTxData bool) (*
 		copy(challengePreimage[32+chunkID*32:], chunkDataHash[:])
 	}
 
-	// if we have fewer than Codecv2MaxNumChunks chunks, the rest
+	// if we have fewer than codecv2MaxNumChunks chunks, the rest
 	// of the blob metadata is correctly initialized to 0,
 	// but we need to add padding to the challenge preimage
-	for chunkID := len(chunks); chunkID < Codecv2MaxNumChunks; chunkID++ {
+	for chunkID := len(chunks); chunkID < codecv2MaxNumChunks; chunkID++ {
 		// use the last chunk's data hash as padding
 		copy(challengePreimage[32+chunkID*32:], chunkDataHash[:])
 	}
@@ -301,7 +301,7 @@ func (o *DACodecV2) constructBlobPayload(chunks []*Chunk, useMockTxData bool) (*
 	blobVersionedHash := kzg4844.CalcBlobHashV1(sha256.New(), &c)
 
 	// challenge: append blob versioned hash
-	copy(challengePreimage[(1+Codecv2MaxNumChunks)*32:], blobVersionedHash[:])
+	copy(challengePreimage[(1+codecv2MaxNumChunks)*32:], blobVersionedHash[:])
 
 	// compute z = challenge_digest % BLS_MODULUS
 	challengeDigest := crypto.Keccak256Hash(challengePreimage)
@@ -345,7 +345,7 @@ func (o *DACodecV2) NewDABatchFromBytes(data []byte) (DABatch, error) {
 
 // EstimateChunkL1CommitBatchSizeAndBlobSize estimates the L1 commit uncompressed batch size and compressed blob size for a single chunk.
 func (o *DACodecV2) EstimateChunkL1CommitBatchSizeAndBlobSize(c *Chunk) (uint64, uint64, error) {
-	batchBytes, err := constructBatchPayloadInBlob([]*Chunk{c}, Codecv2MaxNumChunks)
+	batchBytes, err := constructBatchPayloadInBlob([]*Chunk{c}, o)
 	if err != nil {
 		return 0, 0, err
 	}
@@ -358,7 +358,7 @@ func (o *DACodecV2) EstimateChunkL1CommitBatchSizeAndBlobSize(c *Chunk) (uint64,
 
 // EstimateBatchL1CommitBatchSizeAndBlobSize estimates the L1 commit uncompressed batch size and compressed blob size for a batch.
 func (o *DACodecV2) EstimateBatchL1CommitBatchSizeAndBlobSize(b *Batch) (uint64, uint64, error) {
-	batchBytes, err := constructBatchPayloadInBlob(b.Chunks, Codecv2MaxNumChunks)
+	batchBytes, err := constructBatchPayloadInBlob(b.Chunks, o)
 	if err != nil {
 		return 0, 0, err
 	}
@@ -372,7 +372,7 @@ func (o *DACodecV2) EstimateBatchL1CommitBatchSizeAndBlobSize(b *Batch) (uint64,
 
 // CheckChunkCompressedDataCompatibility checks the compressed data compatibility for a batch built from a single chunk.
 // It constructs a batch payload, compresses the data, and checks the compressed data compatibility if the uncompressed data exceeds 128 KiB.
 func (o *DACodecV2) CheckChunkCompressedDataCompatibility(c *Chunk) (bool, error) {
-	batchBytes, err := constructBatchPayloadInBlob([]*Chunk{c}, Codecv2MaxNumChunks)
+	batchBytes, err := constructBatchPayloadInBlob([]*Chunk{c}, o)
 	if err != nil {
 		return false, err
 	}
@@ -394,7 +394,7 @@ func (o *DACodecV2) CheckChunkCompressedDataCompatibility(c *Chunk) (bool, error
 
 // CheckBatchCompressedDataCompatibility checks the compressed data compatibility for a batch.
 // It constructs a batch payload, compresses the data, and checks the compressed data compatibility if the uncompressed data exceeds 128 KiB.
 func (o *DACodecV2) CheckBatchCompressedDataCompatibility(b *Batch) (bool, error) {
-	batchBytes, err := constructBatchPayloadInBlob(b.Chunks, Codecv2MaxNumChunks)
+	batchBytes, err := constructBatchPayloadInBlob(b.Chunks, o)
 	if err != nil {
 		return false, err
 	}
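The codecv2 hunks above switch every estimation and compatibility call site from the Codecv2MaxNumChunks constant to the codec receiver o itself, so the shared helper can derive the limit on its own. The caller's view is unchanged; a hedged usage sketch of the estimation path (the batch value and error handling are illustrative, only the codec type and method come from this diff):

    // b is a *Batch assembled elsewhere.
    codec := &DACodecV2{}
    batchSize, blobSize, err := codec.EstimateBatchL1CommitBatchSizeAndBlobSize(b)
    if err != nil {
    	return err
    }
    // batchSize is the uncompressed payload size, blobSize the compressed
    // blob estimate, matching the method's documented return order.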
diff --git a/encoding/codecv3.go b/encoding/codecv3.go
index 2f5081e..0430f9d 100644
--- a/encoding/codecv3.go
+++ b/encoding/codecv3.go
@@ -22,8 +22,8 @@ import (
 
 type DACodecV3 struct{}
 
-// Codecv3MaxNumChunks is the maximum number of chunks that a batch can contain.
-const Codecv3MaxNumChunks = 45
+// codecv3MaxNumChunks is the maximum number of chunks that a batch can contain.
+const codecv3MaxNumChunks = 45
 
 // Version returns the codec version.
 func (o *DACodecV3) Version() CodecVersion {
@@ -32,7 +32,7 @@ func (o *DACodecV3) Version() CodecVersion {
 
 // MaxNumChunksPerBatch returns the maximum number of chunks per batch.
 func (o *DACodecV3) MaxNumChunksPerBatch() uint64 {
-	return Codecv3MaxNumChunks
+	return codecv3MaxNumChunks
 }
 
 // NewDABlock creates a new DABlock from the given Block and the total number of L1 messages popped before.
@@ -145,13 +145,13 @@ func (o *DACodecV3) DecodeTxsFromBlob(blob *kzg4844.Blob, chunks []*DAChunkRawTx
 	if err != nil {
 		return err
 	}
-	return DecodeTxsFromBytes(batchBytes, chunks, Codecv3MaxNumChunks)
+	return DecodeTxsFromBytes(batchBytes, chunks, codecv3MaxNumChunks)
 }
 
 // NewDABatch creates a DABatch from the provided Batch.
 func (o *DACodecV3) NewDABatch(batch *Batch) (DABatch, error) {
 	// this encoding can only support a fixed number of chunks per batch
-	if len(batch.Chunks) > Codecv3MaxNumChunks {
+	if len(batch.Chunks) > codecv3MaxNumChunks {
 		return nil, errors.New("too many chunks in batch")
 	}
 
@@ -218,14 +218,14 @@ func (o *DACodecV3) NewDABatchWithExpectedBlobVersionedHashes(batch *Batch, hash
 
 // constructBlobPayload constructs the 4844 blob payload.
 func (o *DACodecV3) constructBlobPayload(chunks []*Chunk, useMockTxData bool) (*kzg4844.Blob, common.Hash, *kzg4844.Point, []byte, error) {
 	// metadata consists of num_chunks (2 bytes) and chunki_size (4 bytes per chunk)
-	metadataLength := 2 + Codecv3MaxNumChunks*4
+	metadataLength := 2 + codecv3MaxNumChunks*4
 
 	// batchBytes represents the raw (un-compressed and un-padded) blob payload
 	batchBytes := make([]byte, metadataLength)
 
 	// challenge digest preimage
 	// 1 hash for metadata, 1 hash for each chunk, 1 hash for blob versioned hash
-	challengePreimage := make([]byte, (1+Codecv3MaxNumChunks+1)*32)
+	challengePreimage := make([]byte, (1+codecv3MaxNumChunks+1)*32)
 
 	// the chunk data hash used for calculating the challenge preimage
 	var chunkDataHash common.Hash
@@ -263,10 +263,10 @@ func (o *DACodecV3) constructBlobPayload(chunks []*Chunk, useMockTxData bool) (*
 		copy(challengePreimage[32+chunkID*32:], chunkDataHash[:])
 	}
 
-	// if we have fewer than Codecv2MaxNumChunks chunks, the rest
+	// if we have fewer than codecv3MaxNumChunks chunks, the rest
 	// of the blob metadata is correctly initialized to 0,
 	// but we need to add padding to the challenge preimage
-	for chunkID := len(chunks); chunkID < Codecv3MaxNumChunks; chunkID++ {
+	for chunkID := len(chunks); chunkID < codecv3MaxNumChunks; chunkID++ {
 		// use the last chunk's data hash as padding
 		copy(challengePreimage[32+chunkID*32:], chunkDataHash[:])
 	}
@@ -309,7 +309,7 @@ func (o *DACodecV3) constructBlobPayload(chunks []*Chunk, useMockTxData bool) (*
 	blobVersionedHash := kzg4844.CalcBlobHashV1(sha256.New(), &c)
 
 	// challenge: append blob versioned hash
-	copy(challengePreimage[(1+Codecv3MaxNumChunks)*32:], blobVersionedHash[:])
+	copy(challengePreimage[(1+codecv3MaxNumChunks)*32:], blobVersionedHash[:])
 
 	// compute z = challenge_digest % BLS_MODULUS
 	challengeDigest := crypto.Keccak256Hash(challengePreimage)
@@ -359,7 +359,7 @@ func (o *DACodecV3) NewDABatchFromBytes(data []byte) (DABatch, error) {
 
 // EstimateChunkL1CommitBatchSizeAndBlobSize estimates the L1 commit uncompressed batch size and compressed blob size for a single chunk.
 func (o *DACodecV3) EstimateChunkL1CommitBatchSizeAndBlobSize(c *Chunk) (uint64, uint64, error) {
-	batchBytes, err := constructBatchPayloadInBlob([]*Chunk{c}, Codecv3MaxNumChunks)
+	batchBytes, err := constructBatchPayloadInBlob([]*Chunk{c}, o)
 	if err != nil {
 		return 0, 0, err
 	}
@@ -372,7 +372,7 @@ func (o *DACodecV3) EstimateChunkL1CommitBatchSizeAndBlobSize(c *Chunk) (uint64,
 
 // EstimateBatchL1CommitBatchSizeAndBlobSize estimates the L1 commit uncompressed batch size and compressed blob size for a batch.
 func (o *DACodecV3) EstimateBatchL1CommitBatchSizeAndBlobSize(b *Batch) (uint64, uint64, error) {
-	batchBytes, err := constructBatchPayloadInBlob(b.Chunks, Codecv3MaxNumChunks)
+	batchBytes, err := constructBatchPayloadInBlob(b.Chunks, o)
 	if err != nil {
 		return 0, 0, err
 	}
@@ -385,7 +385,7 @@ func (o *DACodecV3) EstimateBatchL1CommitBatchSizeAndBlobSize(b *Batch) (uint64,
 
 // CheckChunkCompressedDataCompatibility checks the compressed data compatibility for a batch built from a single chunk.
 func (o *DACodecV3) CheckChunkCompressedDataCompatibility(c *Chunk) (bool, error) {
-	batchBytes, err := constructBatchPayloadInBlob([]*Chunk{c}, Codecv3MaxNumChunks)
+	batchBytes, err := constructBatchPayloadInBlob([]*Chunk{c}, o)
 	if err != nil {
 		return false, err
 	}
@@ -406,7 +406,7 @@ func (o *DACodecV3) CheckChunkCompressedDataCompatibility(c *Chunk) (bool, error
 
 // CheckBatchCompressedDataCompatibility checks the compressed data compatibility for a batch.
 func (o *DACodecV3) CheckBatchCompressedDataCompatibility(b *Batch) (bool, error) {
-	batchBytes, err := constructBatchPayloadInBlob(b.Chunks, Codecv3MaxNumChunks)
+	batchBytes, err := constructBatchPayloadInBlob(b.Chunks, o)
 	if err != nil {
 		return false, err
 	}
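Note that the codecv3 diff also fixes a stale comment: the padding note above previously referred to Codecv2MaxNumChunks inside codecv3.go. For concreteness, the metadata and challenge-preimage sizes these hunks compute work out as follows, assuming the 45-chunk limit shared by codecv2 through codecv4 (a worked sketch, not code from this diff):

    package sizes

    // Sizes implied by the metadata layout in constructBlobPayload.
    const (
    	maxChunks      = 45
    	metadataLength = 2 + maxChunks*4          // num_chunks (2 bytes) + 4 bytes per chunk slot = 182 bytes
    	preimageLength = (1 + maxChunks + 1) * 32 // metadata hash + 45 chunk hashes + versioned hash = 1504 bytes
    )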
diff --git a/encoding/codecv4.go b/encoding/codecv4.go
index ff9bdfc..2b5f2b5 100644
--- a/encoding/codecv4.go
+++ b/encoding/codecv4.go
@@ -25,8 +25,8 @@ type DACodecV4 struct {
 	enableCompress uint32
 }
 
-// Codecv4MaxNumChunks is the maximum number of chunks that a batch can contain.
-const Codecv4MaxNumChunks = 45
+// codecv4MaxNumChunks is the maximum number of chunks that a batch can contain.
+const codecv4MaxNumChunks = 45
 
 // Version returns the codec version.
 func (o *DACodecV4) Version() CodecVersion {
@@ -35,7 +35,7 @@ func (o *DACodecV4) Version() CodecVersion {
 
 // MaxNumChunksPerBatch returns the maximum number of chunks per batch.
 func (o *DACodecV4) MaxNumChunksPerBatch() uint64 {
-	return Codecv4MaxNumChunks
+	return codecv4MaxNumChunks
 }
 
 // NewDABlock creates a new DABlock from the given Block and the total number of L1 messages popped before.
@@ -150,16 +150,16 @@ func (o *DACodecV4) DecodeTxsFromBlob(blob *kzg4844.Blob, chunks []*DAChunkRawTx
 		if err != nil {
 			return err
 		}
-		return DecodeTxsFromBytes(batchBytes, chunks, Codecv4MaxNumChunks)
+		return DecodeTxsFromBytes(batchBytes, chunks, codecv4MaxNumChunks)
 	} else {
-		return DecodeTxsFromBytes(rawBytes[1:], chunks, Codecv4MaxNumChunks)
+		return DecodeTxsFromBytes(rawBytes[1:], chunks, codecv4MaxNumChunks)
 	}
 }
 
 // NewDABatch creates a DABatch from the provided Batch.
 func (o *DACodecV4) NewDABatch(batch *Batch) (DABatch, error) {
 	// this encoding can only support a fixed number of chunks per batch
-	if len(batch.Chunks) > Codecv4MaxNumChunks {
+	if len(batch.Chunks) > codecv4MaxNumChunks {
 		return nil, errors.New("too many chunks in batch")
 	}
 
@@ -231,14 +231,14 @@ func (o *DACodecV4) NewDABatchWithExpectedBlobVersionedHashes(batch *Batch, hash
 
 // constructBlobPayload constructs the 4844 blob payload.
 func (o *DACodecV4) constructBlobPayload(chunks []*Chunk, useMockTxData bool) (*kzg4844.Blob, common.Hash, *kzg4844.Point, []byte, error) {
 	// metadata consists of num_chunks (2 bytes) and chunki_size (4 bytes per chunk)
-	metadataLength := 2 + Codecv4MaxNumChunks*4
+	metadataLength := 2 + codecv4MaxNumChunks*4
 
 	// batchBytes represents the raw (un-compressed and un-padded) blob payload
 	batchBytes := make([]byte, metadataLength)
 
 	// challenge digest preimage
 	// 1 hash for metadata, 1 hash for each chunk, 1 hash for blob versioned hash
-	challengePreimage := make([]byte, (1+Codecv4MaxNumChunks+1)*32)
+	challengePreimage := make([]byte, (1+codecv4MaxNumChunks+1)*32)
 
 	// the chunk data hash used for calculating the challenge preimage
 	var chunkDataHash common.Hash
@@ -276,10 +276,10 @@ func (o *DACodecV4) constructBlobPayload(chunks []*Chunk, useMockTxData bool) (*
 		copy(challengePreimage[32+chunkID*32:], chunkDataHash[:])
 	}
 
-	// if we have fewer than Codecv4MaxNumChunks chunks, the rest
+	// if we have fewer than codecv4MaxNumChunks chunks, the rest
 	// of the blob metadata is correctly initialized to 0,
 	// but we need to add padding to the challenge preimage
-	for chunkID := len(chunks); chunkID < Codecv4MaxNumChunks; chunkID++ {
+	for chunkID := len(chunks); chunkID < codecv4MaxNumChunks; chunkID++ {
 		// use the last chunk's data hash as padding
 		copy(challengePreimage[32+chunkID*32:], chunkDataHash[:])
 	}
@@ -327,7 +327,7 @@ func (o *DACodecV4) constructBlobPayload(chunks []*Chunk, useMockTxData bool) (*
 	blobVersionedHash := kzg4844.CalcBlobHashV1(sha256.New(), &c)
 
 	// challenge: append blob versioned hash
-	copy(challengePreimage[(1+Codecv4MaxNumChunks)*32:], blobVersionedHash[:])
+	copy(challengePreimage[(1+codecv4MaxNumChunks)*32:], blobVersionedHash[:])
 
 	// compute z = challenge_digest % BLS_MODULUS
 	challengeDigest := crypto.Keccak256Hash(challengePreimage)
@@ -377,7 +377,7 @@ func (o *DACodecV4) NewDABatchFromBytes(data []byte) (DABatch, error) {
 
 // EstimateChunkL1CommitBatchSizeAndBlobSize estimates the L1 commit uncompressed batch size and compressed blob size for a single chunk.
 func (o *DACodecV4) EstimateChunkL1CommitBatchSizeAndBlobSize(c *Chunk) (uint64, uint64, error) {
-	batchBytes, err := constructBatchPayloadInBlob([]*Chunk{c}, Codecv4MaxNumChunks)
+	batchBytes, err := constructBatchPayloadInBlob([]*Chunk{c}, o)
 	if err != nil {
 		return 0, 0, err
 	}
@@ -396,7 +396,7 @@ func (o *DACodecV4) EstimateChunkL1CommitBatchSizeAndBlobSize(c *Chunk) (uint64,
 
 // EstimateBatchL1CommitBatchSizeAndBlobSize estimates the L1 commit uncompressed batch size and compressed blob size for a batch.
 func (o *DACodecV4) EstimateBatchL1CommitBatchSizeAndBlobSize(b *Batch) (uint64, uint64, error) {
-	batchBytes, err := constructBatchPayloadInBlob(b.Chunks, Codecv4MaxNumChunks)
+	batchBytes, err := constructBatchPayloadInBlob(b.Chunks, o)
 	if err != nil {
 		return 0, 0, err
 	}
@@ -415,7 +415,7 @@ func (o *DACodecV4) EstimateBatchL1CommitBatchSizeAndBlobSize(b *Batch) (uint64,
 
 // CheckChunkCompressedDataCompatibility checks the compressed data compatibility for a batch built from a single chunk.
 func (o *DACodecV4) CheckChunkCompressedDataCompatibility(c *Chunk) (bool, error) {
-	batchBytes, err := constructBatchPayloadInBlob([]*Chunk{c}, Codecv4MaxNumChunks)
+	batchBytes, err := constructBatchPayloadInBlob([]*Chunk{c}, o)
 	if err != nil {
 		return false, err
 	}
@@ -432,7 +432,7 @@ func (o *DACodecV4) CheckChunkCompressedDataCompatibility(c *Chunk) (bool, error
 
 // CheckBatchCompressedDataCompatibility checks the compressed data compatibility for a batch.
 func (o *DACodecV4) CheckBatchCompressedDataCompatibility(b *Batch) (bool, error) {
-	batchBytes, err := constructBatchPayloadInBlob(b.Chunks, Codecv4MaxNumChunks)
+	batchBytes, err := constructBatchPayloadInBlob(b.Chunks, o)
 	if err != nil {
 		return false, err
 	}
diff --git a/encoding/da.go b/encoding/da.go
index 0a239b1..538de67 100644
--- a/encoding/da.go
+++ b/encoding/da.go
@@ -399,9 +399,9 @@ func calculatePaddedBlobSize(dataSize uint64) uint64 {
 
 // constructBatchPayloadInBlob constructs the batch payload.
 // This function is only used in compressed batch payload length estimation.
-func constructBatchPayloadInBlob(chunks []*Chunk, MaxNumChunks uint64) ([]byte, error) {
+func constructBatchPayloadInBlob(chunks []*Chunk, codec Codec) ([]byte, error) {
 	// metadata consists of num_chunks (2 bytes) and chunki_size (4 bytes per chunk)
-	metadataLength := 2 + MaxNumChunks*4
+	metadataLength := 2 + codec.MaxNumChunksPerBatch()*4
 
 	// batchBytes represents the raw (un-compressed and un-padded) blob payload
 	batchBytes := make([]byte, metadataLength)
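The da.go hunk is the pivot of this refactor: constructBatchPayloadInBlob now accepts any Codec and asks it for its own limit, which also replaces the non-idiomatic capitalized MaxNumChunks parameter. Adding a codec version no longer means threading another constant through the estimation helpers. A minimal standalone sketch of the new shape (only the signature and the metadata computation come from this diff; the stub types and elided body are assumptions made so the sketch compiles on its own):

    package sketch

    // Chunk stands in for encoding.Chunk; Codec is trimmed to the one method
    // the helper needs after this diff.
    type Chunk struct{}

    type Codec interface {
    	MaxNumChunksPerBatch() uint64
    }

    // constructBatchPayloadInBlob sketches the new calling convention: the
    // metadata length is derived from the codec rather than a caller-supplied
    // constant. The per-chunk encoding in the real helper is elided.
    func constructBatchPayloadInBlob(chunks []*Chunk, codec Codec) ([]byte, error) {
    	// num_chunks (2 bytes) + 4 bytes of chunk size per chunk slot
    	metadataLength := 2 + codec.MaxNumChunksPerBatch()*4
    	batchBytes := make([]byte, metadataLength)
    	// ... per-chunk sizes and L2 tx payloads are appended here in the real code ...
    	return batchBytes, nil
    }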