Commit 01bd9b5

refactoring to minimize duplicate code and increase maintainability
1 parent: f9608ed

3 files changed: +180 -155 lines


encoding/codecv7.go

Lines changed: 17 additions & 98 deletions
@@ -30,34 +30,7 @@ func (d *DACodecV7) MaxNumChunksPerBatch() int {
 
 // NewDABlock creates a new DABlock from the given Block and the total number of L1 messages popped before.
 func (d *DACodecV7) NewDABlock(block *Block, totalL1MessagePoppedBefore uint64) (DABlock, error) {
-	if !block.Header.Number.IsUint64() {
-		return nil, errors.New("block number is not uint64")
-	}
-
-	numL1Messages, _, highestQueueIndex, err := block.NumL1MessagesNoSkipping()
-	if err != nil {
-		return nil, fmt.Errorf("failed to calculate number of L1 messages: %w", err)
-	}
-	if numL1Messages > 0 && totalL1MessagePoppedBefore+uint64(numL1Messages) != highestQueueIndex+1 {
-		return nil, fmt.Errorf("failed to sanity check L1 messages count: totalL1MessagePoppedBefore + numL1Messages != highestQueueIndex+1: %d + %d != %d", totalL1MessagePoppedBefore, numL1Messages, highestQueueIndex+1)
-	}
-
-	numL2Transactions := block.NumL2Transactions()
-	numTransactions := uint64(numL1Messages) + numL2Transactions
-	if numTransactions > math.MaxUint16 {
-		return nil, errors.New("number of transactions exceeds max uint16")
-	}
-
-	daBlock := newDABlockV7(
-		block.Header.Number.Uint64(), // number
-		block.Header.Time,            // timestamp
-		block.Header.BaseFee,         // baseFee
-		block.Header.GasLimit,        // gasLimit
-		uint16(numTransactions),      // numTransactions
-		numL1Messages,                // numL1Messages
-	)
-
-	return daBlock, nil
+	return newDABlockV7FromBlockWithValidation(block, &totalL1MessagePoppedBefore)
 }
 
 // NewDAChunk creates a new DAChunk from the given Chunk and the total number of L1 messages popped before.
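
The extracted helper newDABlockV7FromBlockWithValidation is presumably defined in the third changed file, which is not included in this view. As a rough sketch only — assuming it simply absorbs the validation deleted above, that newDABlockV7 returns a *daBlockV7, and that the extra lowestQueueIndex argument matches the updated test call further down — it could look like this:

// Sketch, not the committed code: the validation removed from NewDABlock above,
// folded into a reusable constructor.
func newDABlockV7FromBlockWithValidation(block *Block, totalL1MessagePoppedBefore *uint64) (*daBlockV7, error) {
	if !block.Header.Number.IsUint64() {
		return nil, errors.New("block number is not uint64")
	}

	numL1Messages, lowestQueueIndex, highestQueueIndex, err := block.NumL1MessagesNoSkipping()
	if err != nil {
		return nil, fmt.Errorf("failed to calculate number of L1 messages: %w", err)
	}
	if numL1Messages > 0 && *totalL1MessagePoppedBefore+uint64(numL1Messages) != highestQueueIndex+1 {
		return nil, fmt.Errorf("failed to sanity check L1 messages count: %d + %d != %d",
			*totalL1MessagePoppedBefore, numL1Messages, highestQueueIndex+1)
	}

	numTransactions := uint64(numL1Messages) + block.NumL2Transactions()
	if numTransactions > math.MaxUint16 {
		return nil, errors.New("number of transactions exceeds max uint16")
	}

	// lowestQueueIndex mirrors the extra argument visible in the updated test call below.
	return newDABlockV7(
		block.Header.Number.Uint64(),
		block.Header.Time,
		block.Header.BaseFee,
		block.Header.GasLimit,
		uint16(numTransactions),
		numL1Messages,
		lowestQueueIndex,
	), nil
}
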
@@ -77,43 +50,21 @@ func (d *DACodecV7) NewDAChunk(chunk *Chunk, totalL1MessagePoppedBefore uint64)
 		return nil, fmt.Errorf("number of blocks (%d) exceeds maximum allowed (%d)", len(chunk.Blocks), math.MaxUint16)
 	}
 
-	// sanity check: prevL1MessageQueueHash+apply(L1Messages) = postL1MessageQueueHash
-	computedPostL1MessageQueueHash, err := MessageQueueV2ApplyL1MessagesFromBlocks(chunk.PrevL1MessageQueueHash, chunk.Blocks)
-	if err != nil {
-		return nil, fmt.Errorf("failed to apply L1 messages to prevL1MessageQueueHash: %w", err)
-	}
-	if computedPostL1MessageQueueHash != chunk.PostL1MessageQueueHash {
-		return nil, fmt.Errorf("failed to sanity check postL1MessageQueueHash after applying all L1 messages: expected %s, got %s", computedPostL1MessageQueueHash, chunk.PostL1MessageQueueHash)
-	}
-
-	if !chunk.Blocks[0].Header.Number.IsUint64() {
-		return nil, errors.New("block number of initial block is not uint64")
-	}
-	initialL2BlockNumber := chunk.Blocks[0].Header.Number.Uint64()
-	l1MessageIndex := totalL1MessagePoppedBefore
-
 	blocks := make([]DABlock, 0, len(chunk.Blocks))
 	txs := make([][]*types.TransactionData, 0, len(chunk.Blocks))
 
-	for i, block := range chunk.Blocks {
-		daBlock, err := d.NewDABlock(block, l1MessageIndex)
-		if err != nil {
-			return nil, fmt.Errorf("failed to create DABlock from block %d: %w", block.Header.Number.Uint64(), err)
-		}
-		l1MessageIndex += uint64(daBlock.NumL1Messages())
-
-		// sanity check: block numbers are contiguous
-		if block.Header.Number.Uint64() != initialL2BlockNumber+uint64(i) {
-			return nil, fmt.Errorf("invalid block number: expected %d but got %d", initialL2BlockNumber+uint64(i), block.Header.Number.Uint64())
-		}
-
+	if err := iterateAndVerifyBlocksAndL1Messages(chunk.PrevL1MessageQueueHash, chunk.PostL1MessageQueueHash, chunk.Blocks, &totalL1MessagePoppedBefore, func(initialBlockNumber uint64) {}, func(block *Block, daBlock *daBlockV7) error {
 		blocks = append(blocks, daBlock)
 		txs = append(txs, block.Transactions)
+
+		return nil
+	}); err != nil {
+		return nil, fmt.Errorf("failed to iterate and verify blocks and L1 messages: %w", err)
 	}
 
 	daChunk := newDAChunkV7(
-		blocks, // blocks
-		txs,    // transactions
+		blocks,
+		txs,
 	)
 
 	return daChunk, nil
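
The shared iterator is likewise defined outside this view. Its signature below is read off the call site above; the body is only a sketch reconstructing the queue-hash and block-contiguity checks that were removed (common.Hash for the two queue-hash parameters is an assumption):

// Sketch, inferred from the call site in NewDAChunk: validates the L1 message queue
// hashes and block numbering once, then hands each block to the caller's callback.
func iterateAndVerifyBlocksAndL1Messages(
	prevL1MessageQueueHash, postL1MessageQueueHash common.Hash,
	blocks []*Block,
	totalL1MessagePoppedBefore *uint64,
	onFirstBlock func(initialBlockNumber uint64),
	perBlock func(block *Block, daBlock *daBlockV7) error,
) error {
	if len(blocks) == 0 {
		return errors.New("no blocks to iterate")
	}

	// sanity check: prevL1MessageQueueHash + apply(L1Messages) = postL1MessageQueueHash
	computedPostL1MessageQueueHash, err := MessageQueueV2ApplyL1MessagesFromBlocks(prevL1MessageQueueHash, blocks)
	if err != nil {
		return fmt.Errorf("failed to apply L1 messages to prevL1MessageQueueHash: %w", err)
	}
	if computedPostL1MessageQueueHash != postL1MessageQueueHash {
		return fmt.Errorf("failed to sanity check postL1MessageQueueHash: expected %s, got %s",
			computedPostL1MessageQueueHash, postL1MessageQueueHash)
	}

	if !blocks[0].Header.Number.IsUint64() {
		return errors.New("block number of initial block is not uint64")
	}
	initialL2BlockNumber := blocks[0].Header.Number.Uint64()
	onFirstBlock(initialL2BlockNumber)

	l1MessageIndex := *totalL1MessagePoppedBefore
	for i, block := range blocks {
		daBlock, err := newDABlockV7FromBlockWithValidation(block, &l1MessageIndex)
		if err != nil {
			return fmt.Errorf("failed to create DABlock from block %d: %w", block.Header.Number.Uint64(), err)
		}
		l1MessageIndex += uint64(daBlock.NumL1Messages())

		// sanity check: block numbers are contiguous
		if block.Header.Number.Uint64() != initialL2BlockNumber+uint64(i) {
			return fmt.Errorf("invalid block number: expected %d but got %d",
				initialL2BlockNumber+uint64(i), block.Header.Number.Uint64())
		}

		if err := perBlock(block, daBlock); err != nil {
			return err
		}
	}
	return nil
}
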
@@ -125,27 +76,8 @@ func (d *DACodecV7) NewDABatch(batch *Batch) (DABatch, error) {
 		return nil, errors.New("batch must contain at least one block")
 	}
 
-	// If the batch contains chunks, we need to ensure that the blocks in the chunks match the blocks in the batch.
-	// Chunks are not directly used in DACodecV7, but we still need to check the consistency of the blocks.
-	// This is done to ensure compatibility with older versions and the relayer implementation.
-	if len(batch.Chunks) != 0 {
-		totalBlocks := len(batch.Blocks)
-		chunkBlocksCount := 0
-		for _, chunk := range batch.Chunks {
-			for _, block := range chunk.Blocks {
-				if chunkBlocksCount > totalBlocks {
-					return nil, errors.New("chunks contain more blocks than the batch")
-				}
-
-				if batch.Blocks[chunkBlocksCount].Header.Hash() != block.Header.Hash() {
-					return nil, errors.New("blocks in chunks do not match the blocks in the batch")
-				}
-				chunkBlocksCount++
-			}
-		}
-		if chunkBlocksCount != totalBlocks {
-			return nil, fmt.Errorf("chunks contain less blocks than the batch: %d < %d", chunkBlocksCount, totalBlocks)
-		}
+	if err := checkBlocksBatchVSChunksConsistency(batch); err != nil {
+		return nil, fmt.Errorf("failed to check blocks batch vs chunks consistency: %w", err)
 	}
 
 	blob, blobVersionedHash, blobBytes, err := d.constructBlob(batch)
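
checkBlocksBatchVSChunksConsistency also lives in the file not shown here. A sketch under the assumption that it is a direct extraction of the duplicated block-matching loop deleted above (and again in CheckBatchCompressedDataCompatibility below); the index bound is written as >= in this sketch so the slice access is guarded before use:

// Sketch: the chunk-vs-batch block consistency check that previously appeared inline
// in both NewDABatch and CheckBatchCompressedDataCompatibility.
func checkBlocksBatchVSChunksConsistency(batch *Batch) error {
	if len(batch.Chunks) == 0 {
		return nil
	}

	totalBlocks := len(batch.Blocks)
	chunkBlocksCount := 0
	for _, chunk := range batch.Chunks {
		for _, block := range chunk.Blocks {
			if chunkBlocksCount >= totalBlocks {
				return errors.New("chunks contain more blocks than the batch")
			}
			if batch.Blocks[chunkBlocksCount].Header.Hash() != block.Header.Hash() {
				return errors.New("blocks in chunks do not match the blocks in the batch")
			}
			chunkBlocksCount++
		}
	}
	if chunkBlocksCount != totalBlocks {
		return fmt.Errorf("chunks contain less blocks than the batch: %d < %d", chunkBlocksCount, totalBlocks)
	}
	return nil
}
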
@@ -260,6 +192,9 @@ func (d *DACodecV7) DecodeBlob(blob *kzg4844.Blob) (DABlobPayload, error) {
 
 	// read the compressed flag and decompress if needed
 	compressed := rawBytes[blobEnvelopeV7OffsetCompressedFlag]
+	if compressed != 0x0 && compressed != 0x1 {
+		return nil, fmt.Errorf("invalid compressed flag: %d", compressed)
+	}
 	if compressed == 0x1 {
 		var err error
 		if payloadBytes, err = decompressV7Bytes(payloadBytes); err != nil {
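
Previously any flag byte other than 0x1 silently fell through as uncompressed; the added guard rejects everything except 0x0 and 0x1 up front. In isolation the check is just a two-value whitelist on the envelope byte — a minimal standalone sketch (validateCompressedFlag is a name invented for illustration):

// Standalone restatement of the guard added above: only 0x0 (uncompressed) and
// 0x1 (compressed) are legal values for the envelope's compressed flag.
func validateCompressedFlag(b byte) error {
	if b != 0x0 && b != 0x1 {
		return fmt.Errorf("invalid compressed flag: %d", b)
	}
	return nil
}
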
@@ -311,30 +246,14 @@ func (d *DACodecV7) CheckChunkCompressedDataCompatibility(_ *Chunk) (bool, error
 
 // CheckBatchCompressedDataCompatibility checks the compressed data compatibility for a batch.
 func (d *DACodecV7) CheckBatchCompressedDataCompatibility(b *Batch) (bool, error) {
-	// If the batch contains chunks, we need to ensure that the blocks in the chunks match the blocks in the batch.
-	// Chunks are not directly used in DACodecV7, but we still need to check the consistency of the blocks.
-	// This is done to ensure compatibility with older versions and the relayer implementation.
-	if len(b.Chunks) != 0 {
-		totalBlocks := len(b.Blocks)
-		chunkBlocksCount := 0
-		for _, chunk := range b.Chunks {
-			for _, block := range chunk.Blocks {
-				if chunkBlocksCount > totalBlocks {
-					return false, errors.New("chunks contain more blocks than the batch")
-				}
-
-				if b.Blocks[chunkBlocksCount].Header.Hash() != block.Header.Hash() {
-					return false, errors.New("blocks in chunks do not match the blocks in the batch")
-				}
-				chunkBlocksCount++
-			}
-		}
-	}
-
 	if len(b.Blocks) == 0 {
 		return false, errors.New("batch must contain at least one block")
 	}
 
+	if err := checkBlocksBatchVSChunksConsistency(b); err != nil {
+		return false, fmt.Errorf("failed to check blocks batch vs chunks consistency: %w", err)
+	}
+
 	payloadBytes, err := d.constructBlobPayload(b)
 	if err != nil {
 		return false, fmt.Errorf("failed to construct blob payload: %w", err)
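
Both NewDABatch and CheckBatchCompressedDataCompatibility now funnel through the same consistency check, and the %w wrapping keeps the underlying error inspectable by callers. A hypothetical call site (shouldCompress is invented for illustration):

// Hypothetical caller of the codec API shown above: the error wrapping keeps the
// underlying consistency failure available to errors.Is / errors.As.
func shouldCompress(codec *DACodecV7, batch *Batch) (bool, error) {
	compatible, err := codec.CheckBatchCompressedDataCompatibility(batch)
	if err != nil {
		return false, fmt.Errorf("batch rejected: %w", err)
	}
	return compatible, nil
}
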

encoding/codecv7_test.go

Lines changed: 2 additions & 2 deletions
@@ -339,10 +339,10 @@ func TestCodecV7BlobEncodingAndHashing(t *testing.T) {
 		require.Equal(t, len(tc.batch.Blocks), len(blobPayload.Blocks()))
 		decodedBlocks := blobPayload.Blocks()
 		for i, block := range tc.batch.Blocks {
-			numL1Messages, _, _, err := block.NumL1MessagesNoSkipping()
+			numL1Messages, lowestQueueIndex, _, err := block.NumL1MessagesNoSkipping()
 			require.NoError(t, err)
 
-			daBlock := newDABlockV7(block.Header.Number.Uint64(), block.Header.Time, block.Header.BaseFee, block.Header.GasLimit, uint16(block.NumL2Transactions())+numL1Messages, numL1Messages)
+			daBlock := newDABlockV7(block.Header.Number.Uint64(), block.Header.Time, block.Header.BaseFee, block.Header.GasLimit, uint16(block.NumL2Transactions())+numL1Messages, numL1Messages, lowestQueueIndex)
 			assertEqualDABlocks(t, daBlock, decodedBlocks[i])
 
 			txDataDecoded := TxsToTxsData(blobPayload.Transactions()[i])
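
The test now threads lowestQueueIndex into newDABlockV7, reflecting the constructor's extra parameter. A hypothetical test helper that merely refactors the call shown above (it assumes the test package's existing imports and that assertEqualDABlocks accepts a DABlock):

// Hypothetical helper factoring out the expected-block construction used in the test above.
func expectedDABlockV7(t *testing.T, block *Block) DABlock {
	numL1Messages, lowestQueueIndex, _, err := block.NumL1MessagesNoSkipping()
	require.NoError(t, err)

	return newDABlockV7(
		block.Header.Number.Uint64(),
		block.Header.Time,
		block.Header.BaseFee,
		block.Header.GasLimit,
		uint16(block.NumL2Transactions())+numL1Messages,
		numL1Messages,
		lowestQueueIndex, // the parameter added in this commit
	)
}
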

0 commit comments
