32 changes: 22 additions & 10 deletions encoding/codecv7.go
@@ -102,7 +102,7 @@ func (d *DACodecV7) constructBlob(batch *Batch) (*kzg4844.Blob, common.Hash, []b
return nil, common.Hash{}, nil, common.Hash{}, fmt.Errorf("failed to construct blob payload: %w", err)
}

compressedPayloadBytes, enableCompression, err := d.checkCompressedDataCompatibility(payloadBytes)
compressedPayloadBytes, enableCompression, err := d.checkCompressedDataCompatibility(payloadBytes, true /* checkLength */)
if err != nil {
return nil, common.Hash{}, nil, common.Hash{}, fmt.Errorf("failed to check batch compressed data compatibility: %w", err)
}
@@ -225,19 +225,22 @@ func (d *DACodecV7) DecodeTxsFromBlob(blob *kzg4844.Blob, chunks []*DAChunkRawTx

// checkCompressedDataCompatibility checks the compressed data compatibility for a batch.
// It constructs a blob payload, compresses the data, and checks the compressed data compatibility.
func (d *DACodecV7) checkCompressedDataCompatibility(payloadBytes []byte) ([]byte, bool, error) {
// flag checkLength indicates whether to check the length of the compressed data against the original data.
// If checkLength is true, this function returns whether compression should be enabled based on the compressed data's length, which is used when doing batch bytes encoding.
// If checkLength is false, this function returns the result of the compatibility check, which is used when determining the chunk and batch contents.
func (d *DACodecV7) checkCompressedDataCompatibility(payloadBytes []byte, checkLength bool) ([]byte, bool, error) {
compressedPayloadBytes, err := zstd.CompressScrollBatchBytes(payloadBytes)
if err != nil {
return nil, false, fmt.Errorf("failed to compress blob payload: %w", err)
}

if err = checkCompressedDataCompatibility(compressedPayloadBytes); err != nil {
if err = checkCompressedDataCompatibilityV7(compressedPayloadBytes); err != nil {
log.Warn("Compressed data compatibility check failed", "err", err, "payloadBytes", hex.EncodeToString(payloadBytes), "compressedPayloadBytes", hex.EncodeToString(compressedPayloadBytes))
return nil, false, nil
}

// check if compressed data is bigger than or equal to the original data -> no need to compress (only enforced when checkLength is set)
if len(compressedPayloadBytes) >= len(payloadBytes) {
if checkLength && len(compressedPayloadBytes) >= len(payloadBytes) {
log.Warn("Compressed data is bigger or equal to the original data", "payloadBytes", hex.EncodeToString(payloadBytes), "compressedPayloadBytes", hex.EncodeToString(compressedPayloadBytes))
return nil, false, nil
}
@@ -246,10 +249,16 @@ func (d *DACodecV7) checkCompressedDataCompatibility(payloadBytes []byte) ([]byt
}
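A minimal sketch (not part of this diff) of how the checkLength=true mode is typically consumed when assembling the blob bytes; selectBlobPayload is a hypothetical helper name, and the snippet assumes it lives inside the encoding package next to constructBlob.

// Hypothetical helper: pick the payload to embed in the blob. With
// checkLength=true, enableCompression is only true when the compressed
// payload is both circuit-compatible and strictly smaller than the original.
func (d *DACodecV7) selectBlobPayload(payloadBytes []byte) ([]byte, bool, error) {
    compressed, enableCompression, err := d.checkCompressedDataCompatibility(payloadBytes, true /* checkLength */)
    if err != nil {
        return nil, false, fmt.Errorf("failed to check compressed data compatibility: %w", err)
    }
    if enableCompression {
        return compressed, true, nil
    }
    // Compression is either incompatible or not worthwhile: fall back to the raw payload.
    return payloadBytes, false, nil
}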

// CheckChunkCompressedDataCompatibility checks the compressed data compatibility for a batch built from a single chunk.
// Note: For DACodecV7, this function is not implemented since there is no notion of DAChunk in this version. Blobs
// contain the entire batch data, and it is up to a prover to decide the chunk sizes.
func (d *DACodecV7) CheckChunkCompressedDataCompatibility(_ *Chunk) (bool, error) {
return true, nil
func (d *DACodecV7) CheckChunkCompressedDataCompatibility(c *Chunk) (bool, error) {
// fill only the batch fields needed for the check
b := &Batch{
Chunks: []*Chunk{c},
PrevL1MessageQueueHash: c.PrevL1MessageQueueHash,
PostL1MessageQueueHash: c.PostL1MessageQueueHash,
Blocks: c.Blocks,
}

return d.CheckBatchCompressedDataCompatibility(b)
}

// CheckBatchCompressedDataCompatibility checks the compressed data compatibility for a batch.
@@ -267,7 +276,10 @@ func (d *DACodecV7) CheckBatchCompressedDataCompatibility(b *Batch) (bool, error
return false, fmt.Errorf("failed to construct blob payload: %w", err)
}

_, compatible, err := d.checkCompressedDataCompatibility(payloadBytes)
// This is only a sanity check: if it fails, compression did not work as expected.
// rollup-relayer will pop the last chunk of the batch (or the last block of the chunk when proposing chunks) and retry.
// Since the length check only matters for DA and proving efficiency, it is not needed here.
_, compatible, err := d.checkCompressedDataCompatibility(payloadBytes, false /* checkLength */)
if err != nil {
return false, fmt.Errorf("failed to check batch compressed data compatibility: %w", err)
}
@@ -287,7 +299,7 @@ func (d *DACodecV7) estimateL1CommitBatchSizeAndBlobSize(batch *Batch) (uint64,
return 0, 0, fmt.Errorf("failed to construct blob payload: %w", err)
}

compressedPayloadBytes, enableCompression, err := d.checkCompressedDataCompatibility(payloadBytes)
compressedPayloadBytes, enableCompression, err := d.checkCompressedDataCompatibility(payloadBytes, true /* checkLength */)
if err != nil {
return 0, 0, fmt.Errorf("failed to check batch compressed data compatibility: %w", err)
}
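A hedged sketch of the retry behaviour described in the sanity-check comment above: rollup-relayer drops the trailing block and re-checks until the candidate chunk is compatible. proposeCompatibleChunk is a hypothetical name; only CheckChunkCompressedDataCompatibility and the Chunk/Block types come from the codec, and the L1 message queue hash fields are omitted for brevity.

// Hypothetical proposer loop: shrink the candidate chunk from the end until
// the compressed-data compatibility check passes.
func proposeCompatibleChunk(codec *DACodecV7, blocks []*Block) (*Chunk, error) {
    for len(blocks) > 0 {
        candidate := &Chunk{Blocks: blocks}
        compatible, err := codec.CheckChunkCompressedDataCompatibility(candidate)
        if err != nil {
            return nil, fmt.Errorf("failed to check chunk compressed data compatibility: %w", err)
        }
        if compatible {
            return candidate, nil
        }
        // Mirror rollup-relayer: pop the last block and try again.
        blocks = blocks[:len(blocks)-1]
    }
    return nil, fmt.Errorf("no compatible chunk could be formed")
}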
26 changes: 1 addition & 25 deletions encoding/codecv7_test.go
@@ -384,18 +384,6 @@ func TestCodecV7BatchStandardTestCasesEnableCompression(t *testing.T) {
})
}

repeat := func(element byte, count int) string {
result := make([]byte, 0, count)
for i := 0; i < count; i++ {
result = append(result, element)
}
return "0x" + common.Bytes2Hex(result)
}

// Taking into consideration compression, we allow up to 5x of max blob bytes minus 5 byte for the blob envelope header.
// We subtract 74 bytes for the blobPayloadV7 metadata.
//compressableAvailableBytes := maxEffectiveBlobBytes*5 - 5 - blobPayloadV7MinEncodedLength
maxAvailableBytesCompressable := 5*maxEffectiveBlobBytes - 5 - blobPayloadV7MinEncodedLength
maxAvailableBytesIncompressable := maxEffectiveBlobBytes - 5 - blobPayloadV7MinEncodedLength
// 52 bytes for each block as per daBlockV7 encoding.
bytesPerBlock := 52
@@ -455,18 +443,6 @@ func TestCodecV7BatchStandardTestCasesEnableCompression(t *testing.T) {
txData: []string{generateRandomData(maxAvailableBytesIncompressable/2 - bytesPerBlock*2)},
expectedBlobVersionedHash: "0x017d7f0d569464b5c74175679e5f2bc880fcf5966c3e1928c9675c942b5274f0",
},
{
name: "single block, single tx, full blob repeat data",
numBlocks: 1,
txData: []string{repeat(0x12, maxAvailableBytesCompressable-bytesPerBlock)},
expectedBlobVersionedHash: "0x01f5d7bbfe7deb429bcbdd7347606359bca75cb93b9198e8f089b82e45f92b43",
},
{
name: "2 blocks, single 2, full blob random data",
numBlocks: 2,
txData: []string{repeat(0x12, maxAvailableBytesCompressable/2-bytesPerBlock*2), repeat(0x13, maxAvailableBytesCompressable/2-bytesPerBlock*2)},
expectedBlobVersionedHash: "0x01dccca3859640c50e0058fd42eaf14f942070e6497a4e2ba507b4546280a772",
},
{
name: "single block, single tx, full blob random data -> error because 1 byte too big",
numBlocks: 1,
Expand Down Expand Up @@ -652,7 +628,7 @@ func TestCodecV7BatchCompressedDataCompatibilityCheck(t *testing.T) {
require.NoError(t, err)

// bypass batch validation checks by calling checkCompressedDataCompatibility directly
_, compatible, err := codecV7.(*DACodecV7).checkCompressedDataCompatibility([]byte{0})
_, compatible, err := codecV7.(*DACodecV7).checkCompressedDataCompatibility([]byte{0}, true /* checkLength */)
require.NoError(t, err)
require.Equal(t, false, compatible)

46 changes: 45 additions & 1 deletion encoding/da.go
@@ -457,7 +457,51 @@ func checkCompressedDataCompatibility(data []byte) error {
data = data[3+blkSize:]
}

// Should we return invalid if isLast is still false?
if !isLast {
return fmt.Errorf("unexpected end before last block")
}

return nil
}
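For orientation, a worked instance of the 3-byte block-header arithmetic used by the scanning loops in both checks; blockHeaderExample is a hypothetical function and the byte values are illustrative only.

// Illustrative only: decode one zstd block header the way the loops do.
// For header bytes {0x25, 0x00, 0x00} the little-endian value is 37:
// the low bit (1) marks the last block, and 37 >> 3 = 4 is the block size.
func blockHeaderExample() (bool, uint) {
    header := []byte{0x25, 0x00, 0x00}
    isLast := (header[0] & 1) == 1
    blkSize := (uint(header[2])*65536 + uint(header[1])*256 + uint(header[0])) >> 3
    return isLast, blkSize // true, 4
}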

// Fast check that the compressed data (v7) is compatible with our circuit
// (requires the specified frame header and that each block is compressed)
func checkCompressedDataCompatibilityV7(data []byte) error {
if len(data) < 16 {
return fmt.Errorf("too small size (0x%x), what is it?", data)
}

fheader := data[0]
// it is not the encoding type we expected in our zstd header
if fheader&63 != 32 {
return fmt.Errorf("unexpected header type (%x)", fheader)
}

// skip content size
switch fheader >> 6 {
case 0:
data = data[2:]
case 1:
data = data[3:]
case 2:
data = data[5:]
case 3:
data = data[9:]
default:
panic("impossible")
}

isLast := false
// scan each block until done
for len(data) > 3 && !isLast {
isLast = (data[0] & 1) == 1
blkSize := (uint(data[2])*65536 + uint(data[1])*256 + uint(data[0])) >> 3
if len(data) < 3+int(blkSize) {
return fmt.Errorf("wrong data len {%d}, expect min {%d}", len(data), 3+blkSize)
}
data = data[3+blkSize:]
}

if !isLast {
return fmt.Errorf("unexpected end before last block")
}
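An illustrative test sketch, assumed to sit alongside the existing encoding tests (the test name is hypothetical, and the bytes/testing/require/zstd imports are assumed from the surrounding test file): round-trip a payload through zstd.CompressScrollBatchBytes and run the new V7 check on the output.

func TestCheckCompressedDataCompatibilityV7Sketch(t *testing.T) {
    payload := bytes.Repeat([]byte{0x12}, 4096)

    compressed, err := zstd.CompressScrollBatchBytes(payload)
    require.NoError(t, err)

    // CompressScrollBatchBytes is expected to emit the frame header and
    // per-block layout the circuit requires, so the check should pass.
    require.NoError(t, checkCompressedDataCompatibilityV7(compressed))

    // Anything shorter than 16 bytes is rejected by the size guard.
    require.Error(t, checkCompressedDataCompatibilityV7([]byte{0x00}))
}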