Skip to content

Commit 41c6486

Browse files
feat: add decoding methods (#10)
* add decoding methods * add tests for codecv0 and codecv1 * decompressing * add decompressing for codecv2 * fix * change zstd library from c binding to full go port * handle error * sync with main * add v3 decoding * refactor: make DAChunkRawTx an alias * address comments * comment * comment * address comments * fix test * support v4 * address renaming nit-picks --------- Co-authored-by: jonastheis <[email protected]>
1 parent 9e32313 commit 41c6486

File tree

15 files changed

+529
-9
lines changed

15 files changed

+529
-9
lines changed

encoding/bitmap.go

Lines changed: 27 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -63,3 +63,30 @@ func ConstructSkippedBitmap(batchIndex uint64, chunks []*Chunk, totalL1MessagePo
6363

6464
return bitmapBytes, nextIndex, nil
6565
}
66+
67+
// DecodeBitmap decodes skipped L1 message bitmap of the batch from bytes to big.Int's
func DecodeBitmap(skippedL1MessageBitmap []byte, totalL1MessagePopped int) ([]*big.Int, error) {
	bitmapLen := len(skippedL1MessageBitmap)
	// The bitmap is stored as a sequence of 256-bit (32-byte) words.
	if bitmapLen%32 != 0 {
		return nil, fmt.Errorf("skippedL1MessageBitmap length doesn't match, skippedL1MessageBitmap length should be equal 0 modulo 32, length of skippedL1MessageBitmap: %v", bitmapLen)
	}
	// The bitmap must hold at least one bit per popped L1 message.
	if bitmapLen*8 < totalL1MessagePopped {
		return nil, fmt.Errorf("skippedL1MessageBitmap length is too small, skippedL1MessageBitmap length should be at least %v, length of skippedL1MessageBitmap: %v", (totalL1MessagePopped+7)/8, bitmapLen)
	}
	var skippedBitmap []*big.Int
	for word := 0; word < bitmapLen/32; word++ {
		chunk := skippedL1MessageBitmap[word*32 : (word+1)*32]
		skippedBitmap = append(skippedBitmap, new(big.Int).SetBytes(chunk))
	}
	return skippedBitmap, nil
}
83+
84+
// IsL1MessageSkipped checks if index is skipped in bitmap
85+
func IsL1MessageSkipped(skippedBitmap []*big.Int, index uint64) bool {
86+
if index > uint64(len(skippedBitmap))*256 {
87+
return false
88+
}
89+
quo := index / 256
90+
rem := index % 256
91+
return skippedBitmap[quo].Bit(int(rem)) != 0
92+
}

encoding/codecv0/codecv0.go

Lines changed: 67 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -16,6 +16,9 @@ import (
1616
"github.com/scroll-tech/da-codec/encoding"
1717
)
1818

19+
const BlockContextByteSize = 60
20+
const TxLenByteSize = 4
21+
1922
// DABlock represents a Data Availability Block.
2023
type DABlock struct {
2124
BlockNumber uint64
@@ -32,6 +35,12 @@ type DAChunk struct {
3235
Transactions [][]*types.TransactionData
3336
}
3437

38+
// DAChunkRawTx groups consecutive DABlocks with their L2 transactions, L1 msgs are loaded in another place.
39+
type DAChunkRawTx struct {
40+
Blocks []*DABlock
41+
Transactions []types.Transactions
42+
}
43+
3544
// DABatch contains metadata about a batch of DAChunks.
3645
type DABatch struct {
3746
Version uint8
@@ -179,6 +188,64 @@ func (c *DAChunk) Encode() ([]byte, error) {
179188
return chunkBytes, nil
180189
}
181190

191+
// DecodeDAChunksRawTx takes a byte slice and decodes it into a []*DAChunkRawTx.
192+
func DecodeDAChunksRawTx(bytes [][]byte) ([]*DAChunkRawTx, error) {
193+
var chunks []*DAChunkRawTx
194+
for _, chunk := range bytes {
195+
if len(chunk) < 1 {
196+
return nil, fmt.Errorf("invalid chunk, length is less than 1")
197+
}
198+
199+
numBlocks := int(chunk[0])
200+
if len(chunk) < 1+numBlocks*BlockContextByteSize {
201+
return nil, fmt.Errorf("chunk size doesn't match with numBlocks, byte length of chunk: %v, expected length: %v", len(chunk), 1+numBlocks*BlockContextByteSize)
202+
}
203+
204+
blocks := make([]*DABlock, numBlocks)
205+
for i := 0; i < numBlocks; i++ {
206+
startIdx := 1 + i*BlockContextByteSize // add 1 to skip numBlocks byte
207+
endIdx := startIdx + BlockContextByteSize
208+
blocks[i] = &DABlock{}
209+
err := blocks[i].Decode(chunk[startIdx:endIdx])
210+
if err != nil {
211+
return nil, err
212+
}
213+
}
214+
215+
var transactions []types.Transactions
216+
currentIndex := 1 + numBlocks*BlockContextByteSize
217+
for _, block := range blocks {
218+
var blockTransactions types.Transactions
219+
// ignore L1 msg transactions from the block, consider only L2 transactions
220+
txNum := int(block.NumTransactions - block.NumL1Messages)
221+
for i := 0; i < txNum; i++ {
222+
if len(chunk) < currentIndex+TxLenByteSize {
223+
return nil, fmt.Errorf("chunk size doesn't match, next tx size is less then 4, byte length of chunk: %v, expected minimum length: %v, txNum without l1 msgs: %d", len(chunk), currentIndex+TxLenByteSize, i)
224+
}
225+
txLen := int(binary.BigEndian.Uint32(chunk[currentIndex : currentIndex+TxLenByteSize]))
226+
if len(chunk) < currentIndex+TxLenByteSize+txLen {
227+
return nil, fmt.Errorf("chunk size doesn't match with next tx length, byte length of chunk: %v, expected minimum length: %v, txNum without l1 msgs: %d", len(chunk), currentIndex+TxLenByteSize+txLen, i)
228+
}
229+
txData := chunk[currentIndex+TxLenByteSize : currentIndex+TxLenByteSize+txLen]
230+
tx := &types.Transaction{}
231+
err := tx.UnmarshalBinary(txData)
232+
if err != nil {
233+
return nil, fmt.Errorf("failed to unmarshal tx, pos of tx in chunk bytes: %d. tx num without l1 msgs: %d, err: %w", currentIndex, i, err)
234+
}
235+
blockTransactions = append(blockTransactions, tx)
236+
currentIndex += TxLenByteSize + txLen
237+
}
238+
transactions = append(transactions, blockTransactions)
239+
}
240+
241+
chunks = append(chunks, &DAChunkRawTx{
242+
Blocks: blocks,
243+
Transactions: transactions,
244+
})
245+
}
246+
return chunks, nil
247+
}
248+
182249
// Hash computes the hash of the DAChunk data.
183250
func (c *DAChunk) Hash() (common.Hash, error) {
184251
chunkBytes, err := c.Encode()

encoding/codecv0/codecv0_test.go

Lines changed: 47 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7,9 +7,10 @@ import (
77
"os"
88
"testing"
99

10+
"github.com/stretchr/testify/assert"
11+
1012
"github.com/scroll-tech/go-ethereum/common"
1113
"github.com/scroll-tech/go-ethereum/log"
12-
"github.com/stretchr/testify/assert"
1314

1415
"github.com/scroll-tech/da-codec/encoding"
1516
)
@@ -264,6 +265,38 @@ func TestCodecV0(t *testing.T) {
264265
assert.NoError(t, err)
265266
assert.Equal(t, 61, len(chunkBytes2))
266267

268+
daChunksRawTx, err := DecodeDAChunksRawTx([][]byte{chunkBytes1, chunkBytes2})
269+
assert.NoError(t, err)
270+
// assert number of chunks
271+
assert.Equal(t, 2, len(daChunksRawTx))
272+
273+
// assert block in first chunk
274+
assert.Equal(t, 3, len(daChunksRawTx[0].Blocks))
275+
assert.Equal(t, daChunk1.Blocks[0], daChunksRawTx[0].Blocks[0])
276+
assert.Equal(t, daChunk1.Blocks[1], daChunksRawTx[0].Blocks[1])
277+
daChunksRawTx[0].Blocks[2].BaseFee = nil
278+
assert.Equal(t, daChunk1.Blocks[2], daChunksRawTx[0].Blocks[2])
279+
280+
// assert block in second chunk
281+
assert.Equal(t, 1, len(daChunksRawTx[1].Blocks))
282+
daChunksRawTx[1].Blocks[0].BaseFee = nil
283+
assert.Equal(t, daChunk2.Blocks[0], daChunksRawTx[1].Blocks[0])
284+
285+
// assert transactions in first chunk
286+
assert.Equal(t, 3, len(daChunksRawTx[0].Transactions))
287+
// here number of transactions in encoded and decoded chunks may be different, because decoded chunks don't contain l1msgs
288+
assert.Equal(t, 2, len(daChunksRawTx[0].Transactions[0]))
289+
assert.Equal(t, 1, len(daChunksRawTx[0].Transactions[1]))
290+
assert.Equal(t, 1, len(daChunksRawTx[0].Transactions[2]))
291+
292+
assert.EqualValues(t, daChunk1.Transactions[0][0].TxHash, daChunksRawTx[0].Transactions[0][0].Hash().String())
293+
assert.EqualValues(t, daChunk1.Transactions[0][1].TxHash, daChunksRawTx[0].Transactions[0][1].Hash().String())
294+
295+
// assert transactions in second chunk
296+
assert.Equal(t, 1, len(daChunksRawTx[1].Transactions))
297+
// here number of transactions in encoded and decoded chunks may be different, because decoded chunks don't contain l1msgs
298+
assert.Equal(t, 0, len(daChunksRawTx[1].Transactions[0]))
299+
267300
batch = &encoding.Batch{
268301
Index: 1,
269302
TotalL1MessagePoppedBefore: 0,
@@ -297,6 +330,19 @@ func TestCodecV0(t *testing.T) {
297330
decodedBatchHexString = hex.EncodeToString(decodedBatchBytes)
298331
assert.Equal(t, batchHexString, decodedBatchHexString)
299332

333+
decodedBitmap, err := encoding.DecodeBitmap(decodedDABatch.SkippedL1MessageBitmap, int(decodedDABatch.L1MessagePopped))
334+
assert.NoError(t, err)
335+
assert.True(t, encoding.IsL1MessageSkipped(decodedBitmap, 0))
336+
assert.True(t, encoding.IsL1MessageSkipped(decodedBitmap, 9))
337+
assert.False(t, encoding.IsL1MessageSkipped(decodedBitmap, 10))
338+
assert.True(t, encoding.IsL1MessageSkipped(decodedBitmap, 11))
339+
assert.True(t, encoding.IsL1MessageSkipped(decodedBitmap, 36))
340+
assert.False(t, encoding.IsL1MessageSkipped(decodedBitmap, 37))
341+
assert.False(t, encoding.IsL1MessageSkipped(decodedBitmap, 38))
342+
assert.False(t, encoding.IsL1MessageSkipped(decodedBitmap, 39))
343+
assert.False(t, encoding.IsL1MessageSkipped(decodedBitmap, 40))
344+
assert.False(t, encoding.IsL1MessageSkipped(decodedBitmap, 41))
345+
300346
// Test case: many consecutive L1 Msgs in 1 bitmap, no leading skipped msgs.
301347
chunk = &encoding.Chunk{
302348
Blocks: []*encoding.Block{block4},

encoding/codecv1/codecv1.go

Lines changed: 133 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -21,12 +21,17 @@ import (
2121
// MaxNumChunks is the maximum number of chunks that a batch can contain.
2222
const MaxNumChunks = 15
2323

24+
const BlockContextByteSize = codecv0.BlockContextByteSize
25+
2426
// DABlock represents a Data Availability Block.
2527
type DABlock = codecv0.DABlock
2628

2729
// DAChunk groups consecutive DABlocks with their transactions.
2830
type DAChunk codecv0.DAChunk
2931

32+
// DAChunkRawTx groups consecutive DABlocks with their L2 transactions, L1 msgs are loaded in another place.
33+
type DAChunkRawTx = codecv0.DAChunkRawTx
34+
3035
// DABatch contains metadata about a batch of DAChunks.
3136
type DABatch struct {
3237
// header
@@ -93,6 +98,41 @@ func (c *DAChunk) Encode() []byte {
9398
return chunkBytes
9499
}
95100

101+
// DecodeDAChunksRawTx takes a byte slice and decodes it into a []*DAChunkRawTx.
102+
// Beginning from codecv1 tx data posted to blobs, not to chunk bytes in calldata
103+
func DecodeDAChunksRawTx(bytes [][]byte) ([]*DAChunkRawTx, error) {
104+
var chunks []*DAChunkRawTx
105+
for _, chunk := range bytes {
106+
if len(chunk) < 1 {
107+
return nil, fmt.Errorf("invalid chunk, length is less than 1")
108+
}
109+
110+
numBlocks := int(chunk[0])
111+
if len(chunk) < 1+numBlocks*BlockContextByteSize {
112+
return nil, fmt.Errorf("chunk size doesn't match with numBlocks, byte length of chunk: %v, expected length: %v", len(chunk), 1+numBlocks*BlockContextByteSize)
113+
}
114+
115+
blocks := make([]*DABlock, numBlocks)
116+
for i := 0; i < numBlocks; i++ {
117+
startIdx := 1 + i*BlockContextByteSize // add 1 to skip numBlocks byte
118+
endIdx := startIdx + BlockContextByteSize
119+
blocks[i] = &DABlock{}
120+
err := blocks[i].Decode(chunk[startIdx:endIdx])
121+
if err != nil {
122+
return nil, err
123+
}
124+
}
125+
126+
var transactions []types.Transactions
127+
128+
chunks = append(chunks, &DAChunkRawTx{
129+
Blocks: blocks,
130+
Transactions: transactions, // Transactions field is still empty in the phase of DecodeDAChunksRawTx, because txs moved to bobs and filled in DecodeTxsFromBlob method.
131+
})
132+
}
133+
return chunks, nil
134+
}
135+
96136
// Hash computes the hash of the DAChunk data.
97137
func (c *DAChunk) Hash() (common.Hash, error) {
98138
var dataBytes []byte
@@ -286,6 +326,99 @@ func constructBlobPayload(chunks []*encoding.Chunk, useMockTxData bool) (*kzg484
286326
return blob, blobVersionedHash, &z, nil
287327
}
288328

329+
// DecodeTxsFromBytes decodes txs from blob bytes and writes to chunks
330+
func DecodeTxsFromBytes(blobBytes []byte, chunks []*DAChunkRawTx, maxNumChunks int) error {
331+
numChunks := int(binary.BigEndian.Uint16(blobBytes[0:2]))
332+
if numChunks != len(chunks) {
333+
return fmt.Errorf("blob chunk number is not same as calldata, blob num chunks: %d, calldata num chunks: %d", numChunks, len(chunks))
334+
}
335+
index := 2 + maxNumChunks*4
336+
for chunkID, chunk := range chunks {
337+
var transactions []types.Transactions
338+
chunkSize := int(binary.BigEndian.Uint32(blobBytes[2+4*chunkID : 2+4*chunkID+4]))
339+
340+
chunkBytes := blobBytes[index : index+chunkSize]
341+
curIndex := 0
342+
for _, block := range chunk.Blocks {
343+
var blockTransactions types.Transactions
344+
txNum := int(block.NumTransactions - block.NumL1Messages)
345+
for i := 0; i < txNum; i++ {
346+
tx, nextIndex, err := GetNextTx(chunkBytes, curIndex)
347+
if err != nil {
348+
return fmt.Errorf("couldn't decode next tx from blob bytes: %w, index: %d", err, index+curIndex+4)
349+
}
350+
curIndex = nextIndex
351+
blockTransactions = append(blockTransactions, tx)
352+
}
353+
transactions = append(transactions, blockTransactions)
354+
}
355+
chunk.Transactions = transactions
356+
index += chunkSize
357+
}
358+
return nil
359+
}
360+
361+
// DecodeTxsFromBlob decodes txs from blob bytes and writes to chunks
362+
func DecodeTxsFromBlob(blob *kzg4844.Blob, chunks []*DAChunkRawTx) error {
363+
batchBytes := encoding.BytesFromBlobCanonical(blob)
364+
return DecodeTxsFromBytes(batchBytes[:], chunks, MaxNumChunks)
365+
}
366+
367+
var errSmallLength error = fmt.Errorf("length of blob bytes is too small")
368+
369+
// GetNextTx parses blob bytes to find length of payload of next Tx and decode it
370+
func GetNextTx(bytes []byte, index int) (*types.Transaction, int, error) {
371+
var nextIndex int
372+
length := len(bytes)
373+
if length < index+1 {
374+
return nil, 0, errSmallLength
375+
}
376+
var txBytes []byte
377+
if bytes[index] <= 0x7f {
378+
// the first byte is transaction type, rlp encoding begins from next byte
379+
txBytes = append(txBytes, bytes[index])
380+
index++
381+
}
382+
if length < index+1 {
383+
return nil, 0, errSmallLength
384+
}
385+
if bytes[index] >= 0xc0 && bytes[index] <= 0xf7 {
386+
// length of payload is simply bytes[index] - 0xc0
387+
payloadLen := int(bytes[index] - 0xc0)
388+
if length < index+1+payloadLen {
389+
return nil, 0, errSmallLength
390+
}
391+
txBytes = append(txBytes, bytes[index:index+1+payloadLen]...)
392+
nextIndex = index + 1 + payloadLen
393+
} else if bytes[index] > 0xf7 {
394+
// the length of payload is encoded in next bytes[index] - 0xf7 bytes
395+
// length of bytes representation of length of payload
396+
lenPayloadLen := int(bytes[index] - 0xf7)
397+
if length < index+1+lenPayloadLen {
398+
return nil, 0, errSmallLength
399+
}
400+
lenBytes := bytes[index+1 : index+1+lenPayloadLen]
401+
for len(lenBytes) < 8 {
402+
lenBytes = append([]byte{0x0}, lenBytes...)
403+
}
404+
payloadLen := binary.BigEndian.Uint64(lenBytes)
405+
406+
if length < index+1+lenPayloadLen+int(payloadLen) {
407+
return nil, 0, errSmallLength
408+
}
409+
txBytes = append(txBytes, bytes[index:index+1+lenPayloadLen+int(payloadLen)]...)
410+
nextIndex = index + 1 + lenPayloadLen + int(payloadLen)
411+
} else {
412+
return nil, 0, fmt.Errorf("incorrect format of rlp encoding")
413+
}
414+
tx := &types.Transaction{}
415+
err := tx.UnmarshalBinary(txBytes)
416+
if err != nil {
417+
return nil, 0, fmt.Errorf("failed to unmarshal tx, err: %w", err)
418+
}
419+
return tx, nextIndex, nil
420+
}
421+
289422
// NewDABatchFromBytes decodes the given byte slice into a DABatch.
290423
// Note: This function only populates the batch header, it leaves the blob-related fields empty.
291424
func NewDABatchFromBytes(data []byte) (*DABatch, error) {

0 commit comments

Comments
 (0)