Commit 1dde89a

address review comments
1 parent 64133ef commit 1dde89a

File tree

3 files changed: +130, -61 lines changed


encoding/codecv7.go

Lines changed: 20 additions & 16 deletions
@@ -24,22 +24,26 @@ func (d *DACodecV7) Version() CodecVersion {
 
 // MaxNumChunksPerBatch returns the maximum number of chunks per batch.
 func (d *DACodecV7) MaxNumChunksPerBatch() int {
-    return 1
+    return math.MaxInt
 }
 
 // NewDABlock creates a new DABlock from the given Block and the total number of L1 messages popped before.
-func (d *DACodecV7) NewDABlock(block *Block, _ uint64) (DABlock, error) {
+func (d *DACodecV7) NewDABlock(block *Block, totalL1MessagePoppedBefore uint64) (DABlock, error) {
     if !block.Header.Number.IsUint64() {
         return nil, errors.New("block number is not uint64")
     }
 
-    // note: numL1Messages includes skipped messages
-    numL1Messages := block.NumL1MessagesNoSkipping()
+    numL1Messages, highestQueueIndex, err := block.NumL1MessagesNoSkipping()
+    if err != nil {
+        return nil, fmt.Errorf("failed to calculate number of L1 messages: %w", err)
+    }
     if numL1Messages > math.MaxUint16 {
         return nil, errors.New("number of L1 messages exceeds max uint16")
     }
+    if totalL1MessagePoppedBefore+uint64(numL1Messages) != highestQueueIndex {
+        return nil, fmt.Errorf("failed to sanity check L1 messages count: totalL1MessagePoppedBefore + numL1Messages != highestQueueIndex: %d + %d != %d", totalL1MessagePoppedBefore, numL1Messages, highestQueueIndex)
+    }
 
-    // note: numTransactions includes skipped messages
     numL2Transactions := block.NumL2Transactions()
     numTransactions := uint64(numL1Messages) + numL2Transactions
     if numTransactions > math.MaxUint16 {
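
For reference, the two guards added to NewDABlock can be read in isolation: the per-block L1 message count must fit the uint16 block-context field, and it must be consistent with the batch-level counter and the highest queue index returned by the scan. A standalone Go sketch restating the checks as committed (plain values and stand-in names, not the repo's API):

package main

import (
    "fmt"
    "math"
)

// checkBlockL1Messages restates the two guards added to NewDABlock in this commit,
// operating on plain values instead of the repo's Block type (stand-in signature).
func checkBlockL1Messages(totalL1MessagePoppedBefore, numL1Messages, highestQueueIndex uint64) error {
    // Guard 1: the per-block message count must fit the uint16 field of the block context.
    if numL1Messages > math.MaxUint16 {
        return fmt.Errorf("number of L1 messages exceeds max uint16")
    }
    // Guard 2: the count must agree with the batch-level counter and the highest
    // queue index observed while scanning the block (the relation as committed).
    if totalL1MessagePoppedBefore+numL1Messages != highestQueueIndex {
        return fmt.Errorf("failed to sanity check L1 messages count: %d + %d != %d",
            totalL1MessagePoppedBefore, numL1Messages, highestQueueIndex)
    }
    return nil
}

func main() {
    // Example: 3 messages counted in the block, batch counter at 5, highest queue index 8.
    if err := checkBlockL1Messages(5, 3, 8); err != nil {
        fmt.Println("rejected:", err)
        return
    }
    fmt.Println("accepted")
}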
@@ -60,7 +64,7 @@ func (d *DACodecV7) NewDABlock(block *Block, _ uint64) (DABlock, error) {
 
 // NewDAChunk creates a new DAChunk from the given Chunk and the total number of L1 messages popped before.
 // Note: For DACodecV7, this function is not implemented since there is no notion of DAChunk in this version. Blobs
-// contain the entire batch data, and it is up to a prover to decide the chunk sizes.
+// contain the entire batch data without any information of Chunks within.
 func (d *DACodecV7) NewDAChunk(_ *Chunk, _ uint64) (DAChunk, error) {
     return nil, nil
 }
@@ -94,8 +98,8 @@ func (d *DACodecV7) constructBlob(batch *Batch) (*kzg4844.Blob, common.Hash, []b
         return nil, common.Hash{}, nil, fmt.Errorf("failed to check batch compressed data compatibility: %w", err)
     }
 
-    blobBytes := make([]byte, blobEnvelopeV7PayloadOffset)
-    blobBytes[blobEnvelopeV7VersionOffset] = uint8(CodecV7)
+    blobBytes := make([]byte, blobEnvelopeV7OffsetPayload)
+    blobBytes[blobEnvelopeV7OffsetVersion] = uint8(CodecV7)
 
     payloadBytes, err := d.constructBlobPayload(batch)
     if err != nil {
@@ -113,14 +117,14 @@ func (d *DACodecV7) constructBlob(batch *Batch) (*kzg4844.Blob, common.Hash, []b
             log.Error("ConstructBlob: compressed data compatibility check failed", "err", err, "payloadBytes", hex.EncodeToString(payloadBytes), "compressedPayloadBytes", hex.EncodeToString(compressedPayloadBytes))
             return nil, common.Hash{}, nil, err
         }
-        blobBytes[blobEnvelopeV7CompressedFlagOffset] = 0x1
+        blobBytes[blobEnvelopeV7OffsetCompressedFlag] = 0x1
         payloadBytes = compressedPayloadBytes
     } else {
-        blobBytes[blobEnvelopeV7CompressedFlagOffset] = 0x0
+        blobBytes[blobEnvelopeV7OffsetCompressedFlag] = 0x0
     }
 
     sizeSlice := encodeSize3Bytes(uint32(len(payloadBytes)))
-    copy(blobBytes[blobEnvelopeV7ByteSizeOffset:blobEnvelopeV7CompressedFlagOffset], sizeSlice)
+    copy(blobBytes[blobEnvelopeV7OffsetByteSize:blobEnvelopeV7OffsetCompressedFlag], sizeSlice)
     blobBytes = append(blobBytes, payloadBytes...)
 
     if len(blobBytes) > maxEffectiveBlobBytes {
@@ -182,21 +186,21 @@ func (d *DACodecV7) DecodeBlob(blob *kzg4844.Blob) (DABlobPayload, error) {
     rawBytes := bytesFromBlobCanonical(blob)
 
     // read the blob envelope header
-    version := rawBytes[blobEnvelopeV7VersionOffset]
+    version := rawBytes[blobEnvelopeV7OffsetVersion]
     if CodecVersion(version) != CodecV7 {
         return nil, fmt.Errorf("codec version mismatch: expected %d but found %d", CodecV7, version)
     }
 
     // read the data size
-    blobEnvelopeSize := decodeSize3Bytes(rawBytes[blobEnvelopeV7ByteSizeOffset:blobEnvelopeV7CompressedFlagOffset])
-    if blobEnvelopeSize+blobEnvelopeV7PayloadOffset > uint32(len(rawBytes)) {
+    blobEnvelopeSize := decodeSize3Bytes(rawBytes[blobEnvelopeV7OffsetByteSize:blobEnvelopeV7OffsetCompressedFlag])
+    if blobEnvelopeSize+blobEnvelopeV7OffsetPayload > uint32(len(rawBytes)) {
         return nil, fmt.Errorf("blob envelope size exceeds the raw data size: %d > %d", blobEnvelopeSize, len(rawBytes))
     }
 
-    payloadBytes := rawBytes[blobEnvelopeV7PayloadOffset : blobEnvelopeV7PayloadOffset+blobEnvelopeSize]
+    payloadBytes := rawBytes[blobEnvelopeV7OffsetPayload : blobEnvelopeV7OffsetPayload+blobEnvelopeSize]
 
     // read the compressed flag and decompress if needed
-    compressed := rawBytes[blobEnvelopeV7CompressedFlagOffset]
+    compressed := rawBytes[blobEnvelopeV7OffsetCompressedFlag]
     if compressed == 0x1 {
         var err error
         if payloadBytes, err = decompressV7Bytes(payloadBytes); err != nil {
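
constructBlob and DecodeBlob above agree on a 5-byte envelope header: the codec version at offset 0, a 3-byte payload size, a compressed flag at offset 4, and the (possibly zstd-compressed) payload from offset 5. A self-contained sketch of that header handling, assuming the little-endian byte order described by the n_bytes table added in codecv7_types.go (stand-in names and simplified error handling, not the repo's functions):

package main

import (
    "bytes"
    "fmt"
)

// Offsets as renamed in this commit.
const (
    offsetVersion        = 0
    offsetByteSize       = 1
    offsetCompressedFlag = 4
    offsetPayload        = 5
)

// encodeEnvelope builds [version | size (3 bytes) | compressed flag | payload...].
// The payload size is assumed to fit in 3 bytes.
func encodeEnvelope(version byte, compressed bool, payload []byte) []byte {
    out := make([]byte, offsetPayload)
    out[offsetVersion] = version
    n := uint32(len(payload))
    out[offsetByteSize+0] = byte(n)
    out[offsetByteSize+1] = byte(n >> 8)
    out[offsetByteSize+2] = byte(n >> 16)
    if compressed {
        out[offsetCompressedFlag] = 0x1
    }
    return append(out, payload...)
}

// decodeEnvelope reverses encodeEnvelope and returns the header fields and payload.
func decodeEnvelope(raw []byte) (version byte, compressed bool, payload []byte, err error) {
    if len(raw) < offsetPayload {
        return 0, false, nil, fmt.Errorf("envelope too short: %d bytes", len(raw))
    }
    version = raw[offsetVersion]
    n := uint32(raw[offsetByteSize]) | uint32(raw[offsetByteSize+1])<<8 | uint32(raw[offsetByteSize+2])<<16
    if n+offsetPayload > uint32(len(raw)) {
        return 0, false, nil, fmt.Errorf("payload size %d exceeds raw data size %d", n, len(raw))
    }
    compressed = raw[offsetCompressedFlag] == 0x1
    payload = raw[offsetPayload : offsetPayload+n]
    return version, compressed, payload, nil
}

func main() {
    env := encodeEnvelope(7, false, []byte("hello"))
    v, c, p, err := decodeEnvelope(env)
    fmt.Println(v, c, string(p), err, bytes.Equal(p, []byte("hello")))
}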

encoding/codecv7_types.go

Lines changed: 86 additions & 41 deletions
@@ -14,39 +14,77 @@ import (
     "github.com/scroll-tech/go-ethereum/crypto/kzg4844"
 )
 
+// Below is the encoding for `BatchHeader` V7, total 73 bytes.
+//   * Field               Bytes   Type      Index   Comments
+//   * version             1       uint8     0       The batch version
+//   * batchIndex           8       uint64    1       The index of the batch
+//   * blobVersionedHash    32      bytes32   9       The versioned hash of the blob with this batch’s data
+//   * parentBatchHash      32      bytes32   41      The parent batch hash
+
 const (
     daBatchV7EncodedLength           = 73
     daBatchV7OffsetBlobVersionedHash = 9
     daBatchV7OffsetParentBatchHash   = 41
 )
 
+// Below is the encoding format for BlobEnvelopeV7.
+//   * Field        Bytes               Type    Index   Comments
+//   * version      1                   uint8   0       The version of the DA codec (batch/blob)
+//   * n_bytes[1]   1                   uint8   1       Value denoting the number of bytes, n_bytes[1]
+//   * n_bytes[2]   1                   uint8   2       Value denoting the number of bytes, n_bytes[2]*256
+//   * n_bytes[3]   1                   uint8   3       Value denoting the number of bytes, n_bytes[3]*256^2
+//   * flag         1                   bool    4       1-byte flag to denote zstd-encoded/raw bytes
+//   * payload      N                   bytes   5       Possibly zstd-encoded payload bytes
+//   * padding      (4096*31 - (N+5))   bytes   N+5     Padding to align to 4096*31 bytes
+
 const (
-    blobEnvelopeV7VersionOffset        = 0
-    blobEnvelopeV7ByteSizeOffset       = 1
-    blobEnvelopeV7CompressedFlagOffset = 4
-    blobEnvelopeV7PayloadOffset        = 5
+    blobEnvelopeV7OffsetVersion        = 0
+    blobEnvelopeV7OffsetByteSize       = 1
+    blobEnvelopeV7OffsetCompressedFlag = 4
+    blobEnvelopeV7OffsetPayload        = 5
 )
 
+// Below is the encoding format for the batch metadata and blocks.
+//   * Field                       Bytes     Type             Index         Comments
+//   * initialL1MessageIndex       8         uint64           0             Queue index of the first L1 message contained in this batch
+//   * initialL1MessageQueueHash   32        bytes32          8             Hash of the L1 message queue at the last message in the previous batch
+//   * lastL1MessageQueueHash      32        bytes32          40            Hash of the L1 message queue at the last message in this batch
+//   * initialL2BlockNumber        8         uint64           72            The initial L2 block number in this batch
+//   * numBlocks                   2         uint16           80            The number of blocks in this batch
+//   * block[0]                    52        BlockContextV2   82            The first block in this batch
+//   * block[i]                    52        BlockContextV2   82+52*i       The (i+1)th block in this batch
+//   * block[n-1]                  52        BlockContextV2   82+52*(n-1)   The last block in this batch
+//   * l2Transactions              dynamic   bytes            82+52*n       L2 transactions for this batch
+
 const (
-    blobPayloadV7EncodedLength               = 8 + 2*common.HashLength + 8 + 2
+    blobPayloadV7MinEncodedLength            = 8 + 2*common.HashLength + 8 + 2
     blobPayloadV7OffsetInitialL1MessageIndex = 0
-    blobPayloadV7OffsetInitialL1MessageQueue = blobPayloadV7OffsetInitialL1MessageIndex + 8
-    blobPayloadV7OffsetLastL1MessageQueue    = blobPayloadV7OffsetInitialL1MessageQueue + common.HashLength
-    blobPayloadV7OffsetInitialL2BlockNumber  = blobPayloadV7OffsetLastL1MessageQueue + common.HashLength
-    blobPayloadV7OffsetNumBlocks             = blobPayloadV7OffsetInitialL2BlockNumber + 8
-    blobPayloadV7OffsetBlocks                = blobPayloadV7OffsetNumBlocks + 2
+    blobPayloadV7OffsetInitialL1MessageQueue = 8
+    blobPayloadV7OffsetLastL1MessageQueue    = 40
+    blobPayloadV7OffsetInitialL2BlockNumber  = 72
+    blobPayloadV7OffsetNumBlocks             = 80
+    blobPayloadV7OffsetBlocks                = 82
 )
 
+// Below is the encoding for DABlockV7, total 52 bytes.
+//   * Field             Bytes   Type      Index   Comments
+//   * blockNumber       8       uint64    0       The height of this block.
+//   * timestamp         8       uint64    0       The timestamp of this block.
+//   * baseFee           32      uint256   8       The base fee of this block.
+//   * gasLimit          8       uint64    40      The gas limit of this block.
+//   * numTransactions   2       uint16    48      The number of transactions in this block, both L1 & L2 txs.
+//   * numL1Messages     2       uint16    50      The number of l1 messages in this block.
+
 const (
-    daBlockV7BlockContextByteSize  = 52
-    daBlockV7OffsetTimestamp       = 0
-    daBlockV7OffsetBaseFee         = daBlockV7OffsetTimestamp + 8
-    daBlockV7OffsetGasLimit        = daBlockV7OffsetBaseFee + 32
-    daBlockV7numTransactionsOffset = daBlockV7OffsetGasLimit + 8
-    daBlockV7numL1MessagesOffset   = daBlockV7numTransactionsOffset + 2
+    daBlockV7BlockContextEncodedLength = 52
+    daBlockV7OffsetTimestamp           = 0
+    daBlockV7OffsetBaseFee             = 8
+    daBlockV7OffsetGasLimit            = 40
+    daBlockV7OffsetNumTransactions     = 48
+    daBlockV7OffsetNumL1Messages       = 50
 )
 
-// daBatchV3 contains metadata about a batch of DAChunks.
+// daBatchV7 contains V7 batch metadata and payload.
 type daBatchV7 struct {
     version CodecVersion
     batchIndex uint64
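
The review change replaces the derived offset expressions with literal values; the literals can be cross-checked against the field widths listed in the new comment tables. A small throwaway check in Go (not part of the repo):

package main

import "fmt"

func main() {
    // Field widths from the blobPayloadV7 comment table: initialL1MessageIndex (8),
    // initialL1MessageQueueHash (32), lastL1MessageQueueHash (32), initialL2BlockNumber (8), numBlocks (2).
    widths := []int{8, 32, 32, 8, 2}
    names := []string{
        "blobPayloadV7OffsetInitialL1MessageQueue",
        "blobPayloadV7OffsetLastL1MessageQueue",
        "blobPayloadV7OffsetInitialL2BlockNumber",
        "blobPayloadV7OffsetNumBlocks",
        "blobPayloadV7OffsetBlocks",
    }
    literals := []int{8, 40, 72, 80, 82}

    offset := 0
    for i, w := range widths {
        offset += w
        fmt.Printf("%-45s derived=%-3d literal=%-3d match=%v\n", names[i], offset, literals[i], offset == literals[i])
    }

    // The 52-byte block context breaks down the same way:
    // 8 (timestamp) + 32 (baseFee) + 8 (gasLimit) + 2 (numTransactions) + 2 (numL1Messages) = 52.
    fmt.Println("block context bytes:", 8+32+8+2+2)
}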
@@ -57,7 +95,6 @@ type daBatchV7 struct {
     blobBytes []byte
 }
 
-// newDABatchV7 is a constructor for daBatchV7 that calls blobDataProofForPICircuit internally.
 func newDABatchV7(version CodecVersion, batchIndex uint64, blobVersionedHash, parentBatchHash common.Hash, blob *kzg4844.Blob, blobBytes []byte) (*daBatchV7, error) {
     daBatch := &daBatchV7{
         version: version,
@@ -84,7 +121,7 @@ func decodeDABatchV7(data []byte) (*daBatchV7, error) {
     return newDABatchV7(version, batchIndex, blobVersionedHash, parentBatchHash, nil, nil)
 }
 
-// Encode serializes the DABatchV3 into bytes.
+// Encode serializes the dABatchV7 into bytes.
 func (b *daBatchV7) Encode() []byte {
     batchBytes := make([]byte, daBatchV7EncodedLength)
     batchBytes[daBatchOffsetVersion] = byte(b.version)
@@ -115,7 +152,7 @@ func (b *daBatchV7) BlobBytes() []byte {
     return b.blobBytes
 }
 
-// MarshalJSON implements the custom JSON serialization for daBatchV3.
+// MarshalJSON implements the custom JSON serialization for daBatchV7.
 // This method is designed to provide prover with batch info in snake_case format.
 func (b *daBatchV7) MarshalJSON() ([]byte, error) {
     type daBatchV7JSON struct {
@@ -159,24 +196,24 @@ type blobPayloadV7 struct {
     blocks []*Block
 
     // used for decoding
-    daBlocks     []DABlock
-    transactions []types.Transactions
+    daBlocks       []DABlock
+    l2Transactions []types.Transactions
 }
 
 func (b *blobPayloadV7) Blocks() []DABlock {
     return b.daBlocks
 }
 
 func (b *blobPayloadV7) Transactions() []types.Transactions {
-    return b.transactions
+    return b.l2Transactions
 }
 
 func (b *blobPayloadV7) InitialL1MessageIndex() uint64 {
     return b.initialL1MessageIndex
 }
 
 func (b *blobPayloadV7) Encode() ([]byte, error) {
-    payloadBytes := make([]byte, blobPayloadV7EncodedLength)
+    payloadBytes := make([]byte, blobPayloadV7MinEncodedLength)
 
     binary.BigEndian.PutUint64(payloadBytes[blobPayloadV7OffsetInitialL1MessageIndex:blobPayloadV7OffsetInitialL1MessageQueue], b.initialL1MessageIndex)
     copy(payloadBytes[blobPayloadV7OffsetInitialL1MessageQueue:blobPayloadV7OffsetLastL1MessageQueue], b.initialL1MessageQueueHash[:])
@@ -188,7 +225,11 @@ func (b *blobPayloadV7) Encode() ([]byte, error) {
 
     var transactionBytes []byte
     for _, block := range b.blocks {
-        daBlock := newDABlockV7(block.Header.Number.Uint64(), block.Header.Time, block.Header.BaseFee, block.Header.GasLimit, uint16(len(block.Transactions)), block.NumL1MessagesNoSkipping())
+        numL1Messages, _, err := block.NumL1MessagesNoSkipping()
+        if err != nil {
+            return nil, fmt.Errorf("failed to get numL1Messages: %w", err)
+        }
+        daBlock := newDABlockV7(block.Header.Number.Uint64(), block.Header.Time, block.Header.BaseFee, block.Header.GasLimit, uint16(len(block.Transactions)), numL1Messages)
         payloadBytes = append(payloadBytes, daBlock.Encode()...)
 
         // encode L2 txs as RLP and append to transactionBytes
210251
}
211252

212253
func decodeBlobPayloadV7(data []byte) (*blobPayloadV7, error) {
213-
if len(data) < blobPayloadV7EncodedLength {
214-
return nil, fmt.Errorf("invalid data length for blobPayloadV7, expected at least %d bytes but got %d", blobPayloadV7EncodedLength, len(data))
254+
if len(data) < blobPayloadV7MinEncodedLength {
255+
return nil, fmt.Errorf("invalid data length for blobPayloadV7, expected at least %d bytes but got %d", blobPayloadV7MinEncodedLength, len(data))
215256
}
216257

217258
initialL1MessageIndex := binary.BigEndian.Uint64(data[blobPayloadV7OffsetInitialL1MessageIndex:blobPayloadV7OffsetInitialL1MessageQueue])
@@ -221,22 +262,26 @@ func decodeBlobPayloadV7(data []byte) (*blobPayloadV7, error) {
     initialL2BlockNumber := binary.BigEndian.Uint64(data[blobPayloadV7OffsetInitialL2BlockNumber:blobPayloadV7OffsetNumBlocks])
     numBlocks := int(binary.BigEndian.Uint16(data[blobPayloadV7OffsetNumBlocks:blobPayloadV7OffsetBlocks]))
 
+    if len(data) < blobPayloadV7OffsetBlocks+daBlockV7BlockContextEncodedLength*numBlocks {
+        return nil, fmt.Errorf("invalid data length for blobPayloadV7, expected at least %d bytes but got %d", blobPayloadV7OffsetBlocks+daBlockV7BlockContextEncodedLength*numBlocks, len(data))
+    }
+
     // decode DA Blocks from the blob
     daBlocks := make([]DABlock, numBlocks)
     for i := uint64(0); i < uint64(numBlocks); i++ {
         daBlock := newDABlockV7WithNumber(initialL2BlockNumber + i)
 
-        startBytes := blobPayloadV7OffsetBlocks + i*daBlockV7BlockContextByteSize
-        endBytes := startBytes + daBlockV7BlockContextByteSize
+        startBytes := blobPayloadV7OffsetBlocks + i*daBlockV7BlockContextEncodedLength
+        endBytes := startBytes + daBlockV7BlockContextEncodedLength
         if err := daBlock.Decode(data[startBytes:endBytes]); err != nil {
             return nil, fmt.Errorf("failed to decode DA block: %w", err)
         }
 
         daBlocks = append(daBlocks, daBlock)
     }
 
-    // decode transactions for each block from the blob
-    txBytes := data[blobPayloadV7OffsetBlocks+daBlockV7BlockContextByteSize*numBlocks:]
+    // decode l2Transactions for each block from the blob
+    txBytes := data[blobPayloadV7OffsetBlocks+daBlockV7BlockContextEncodedLength*numBlocks:]
     curIndex := 0
     var transactions []types.Transactions
 
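The new bounds check rejects payloads shorter than the fixed 82-byte header plus 52 bytes per declared block context. A small Go sketch of the same arithmetic (hypothetical helper, not in the repo):

package main

import "fmt"

// minPayloadLen computes the minimum blobPayloadV7 length for n block contexts,
// matching the bounds check added to decodeBlobPayloadV7 (82-byte header + 52 bytes per block).
func minPayloadLen(numBlocks int) int {
    const offsetBlocks = 82
    const blockContextLen = 52
    return offsetBlocks + blockContextLen*numBlocks
}

func main() {
    for _, n := range []int{0, 1, 3} {
        fmt.Printf("numBlocks=%d -> at least %d bytes\n", n, minPayloadLen(n))
    }
}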
@@ -264,7 +309,7 @@ func decodeBlobPayloadV7(data []byte) (*blobPayloadV7, error) {
         initialL1MessageQueueHash: initialL1MessageQueueHash,
         lastL1MessageQueueHash:    lastL1MessageQueueHash,
         daBlocks:                  daBlocks,
-        transactions:              transactions,
+        l2Transactions:            transactions,
     }, nil
 }
 
@@ -296,28 +341,28 @@ func newDABlockV7WithNumber(number uint64) *daBlockV7 {
 
 // Encode serializes the DABlock into a slice of bytes.
 func (b *daBlockV7) Encode() []byte {
-    daBlockBytes := make([]byte, daBlockV7BlockContextByteSize)
+    daBlockBytes := make([]byte, daBlockV7BlockContextEncodedLength)
     binary.BigEndian.PutUint64(daBlockBytes[daBlockV7OffsetTimestamp:daBlockV7OffsetBaseFee], b.timestamp)
     if b.baseFee != nil {
         b.baseFee.FillBytes(daBlockBytes[daBlockV7OffsetBaseFee:daBlockV7OffsetGasLimit])
     }
-    binary.BigEndian.PutUint64(daBlockBytes[daBlockV7OffsetGasLimit:daBlockV7numTransactionsOffset], b.gasLimit)
-    binary.BigEndian.PutUint16(daBlockBytes[daBlockV7numTransactionsOffset:daBlockV7numL1MessagesOffset], b.numTransactions)
-    binary.BigEndian.PutUint16(daBlockBytes[daBlockV7numL1MessagesOffset:], b.numL1Messages)
+    binary.BigEndian.PutUint64(daBlockBytes[daBlockV7OffsetGasLimit:daBlockV7OffsetNumTransactions], b.gasLimit)
+    binary.BigEndian.PutUint16(daBlockBytes[daBlockV7OffsetNumTransactions:daBlockV7OffsetNumL1Messages], b.numTransactions)
+    binary.BigEndian.PutUint16(daBlockBytes[daBlockV7OffsetNumL1Messages:], b.numL1Messages)
     return daBlockBytes
 }
 
 // Decode populates the fields of a DABlock from a byte slice.
 func (b *daBlockV7) Decode(data []byte) error {
-    if len(data) != daBlockV7BlockContextByteSize {
-        return fmt.Errorf("block encoding is not blockContextByteSize bytes long expected %d, got %d", daBlockV7BlockContextByteSize, len(data))
+    if len(data) != daBlockV7BlockContextEncodedLength {
+        return fmt.Errorf("block encoding is not blockContextByteSize bytes long expected %d, got %d", daBlockV7BlockContextEncodedLength, len(data))
     }
 
     b.timestamp = binary.BigEndian.Uint64(data[daBlockV7OffsetTimestamp:daBlockV7OffsetBaseFee])
     b.baseFee = new(big.Int).SetBytes(data[daBlockV7OffsetBaseFee:daBlockV7OffsetGasLimit])
-    b.gasLimit = binary.BigEndian.Uint64(data[daBlockV7OffsetGasLimit:daBlockV7numTransactionsOffset])
-    b.numTransactions = binary.BigEndian.Uint16(data[daBlockV7numTransactionsOffset:daBlockV7numL1MessagesOffset])
-    b.numL1Messages = binary.BigEndian.Uint16(data[daBlockV7numL1MessagesOffset:])
+    b.gasLimit = binary.BigEndian.Uint64(data[daBlockV7OffsetGasLimit:daBlockV7OffsetNumTransactions])
+    b.numTransactions = binary.BigEndian.Uint16(data[daBlockV7OffsetNumTransactions:daBlockV7OffsetNumL1Messages])
+    b.numL1Messages = binary.BigEndian.Uint16(data[daBlockV7OffsetNumL1Messages:])
 
     return nil
 }
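
The renamed offsets keep the 52-byte block context layout: timestamp (0), baseFee (8), gasLimit (40), numTransactions (48), numL1Messages (50). A standalone encode/decode round trip in Go, mirroring the committed logic with stand-in types and names:

package main

import (
    "encoding/binary"
    "fmt"
    "math/big"
)

const (
    ctxLen                = 52
    offsetTimestamp       = 0
    offsetBaseFee         = 8
    offsetGasLimit        = 40
    offsetNumTransactions = 48
    offsetNumL1Messages   = 50
)

// blockContext is a stand-in for daBlockV7's encoded fields.
type blockContext struct {
    timestamp       uint64
    baseFee         *big.Int
    gasLimit        uint64
    numTransactions uint16
    numL1Messages   uint16
}

// encode mirrors daBlockV7.Encode using the renamed offset constants.
func (b *blockContext) encode() []byte {
    out := make([]byte, ctxLen)
    binary.BigEndian.PutUint64(out[offsetTimestamp:offsetBaseFee], b.timestamp)
    if b.baseFee != nil {
        b.baseFee.FillBytes(out[offsetBaseFee:offsetGasLimit])
    }
    binary.BigEndian.PutUint64(out[offsetGasLimit:offsetNumTransactions], b.gasLimit)
    binary.BigEndian.PutUint16(out[offsetNumTransactions:offsetNumL1Messages], b.numTransactions)
    binary.BigEndian.PutUint16(out[offsetNumL1Messages:], b.numL1Messages)
    return out
}

// decode mirrors daBlockV7.Decode.
func (b *blockContext) decode(data []byte) error {
    if len(data) != ctxLen {
        return fmt.Errorf("expected %d bytes, got %d", ctxLen, len(data))
    }
    b.timestamp = binary.BigEndian.Uint64(data[offsetTimestamp:offsetBaseFee])
    b.baseFee = new(big.Int).SetBytes(data[offsetBaseFee:offsetGasLimit])
    b.gasLimit = binary.BigEndian.Uint64(data[offsetGasLimit:offsetNumTransactions])
    b.numTransactions = binary.BigEndian.Uint16(data[offsetNumTransactions:offsetNumL1Messages])
    b.numL1Messages = binary.BigEndian.Uint16(data[offsetNumL1Messages:])
    return nil
}

func main() {
    in := blockContext{timestamp: 1700000000, baseFee: big.NewInt(1000000000), gasLimit: 10000000, numTransactions: 12, numL1Messages: 3}
    var out blockContext
    if err := out.decode(in.encode()); err != nil {
        fmt.Println("decode failed:", err)
        return
    }
    fmt.Println("round trip ok:", out.timestamp, out.baseFee, out.gasLimit, out.numTransactions, out.numL1Messages)
}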

encoding/da.go

Lines changed: 24 additions & 4 deletions
@@ -140,15 +140,35 @@ func (b *Block) NumL1Messages(totalL1MessagePoppedBefore uint64) uint64 {
 
 // NumL1MessagesNoSkipping returns the number of L1 messages in this block.
 // This method assumes that L1 messages can't be skipped.
-func (b *Block) NumL1MessagesNoSkipping() uint16 {
+func (b *Block) NumL1MessagesNoSkipping() (uint16, uint64, error) {
     var count uint16
+    var prevQueueIndex *uint64
+
     for _, txData := range b.Transactions {
-        if txData.Type == types.L1MessageTxType {
+        if txData.Type != types.L1MessageTxType {
+            continue
+        }
+
+        // If prevQueueIndex is nil, it means this is the first L1 message in the block.
+        if prevQueueIndex == nil {
+            prevQueueIndex = &txData.Nonce
             count++
+            continue
         }
+
+        // Check if the queue index is consecutive.
+        if txData.Nonce != *prevQueueIndex+1 {
+            return 0, 0, fmt.Errorf("unexpected queue index: expected %d, got %d", *prevQueueIndex+1, txData.Nonce)
+        }
+
+        count++
+        prevQueueIndex = &txData.Nonce
     }
 
-    return count
+    if prevQueueIndex == nil {
+        return 0, 0, nil
+    }
+    return count, *prevQueueIndex, nil
 }
 
 // NumL2Transactions returns the number of L2 transactions in this block.
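
The reworked NumL1MessagesNoSkipping now reports both the message count and the highest queue index, and fails on a gap in the queue indices. A standalone Go sketch of the same scan over plain queue indices (hypothetical helper, not in the repo), showing the three outcomes: consecutive indices, a gap, and a block with no L1 messages:

package main

import "fmt"

// countConsecutive mirrors the new NumL1MessagesNoSkipping logic on a plain
// slice of queue indices (stand-in for the block's L1 message nonces).
func countConsecutive(queueIndices []uint64) (uint16, uint64, error) {
    var count uint16
    var prev *uint64
    for i := range queueIndices {
        idx := queueIndices[i]
        if prev == nil {
            prev = &idx
            count++
            continue
        }
        if idx != *prev+1 {
            return 0, 0, fmt.Errorf("unexpected queue index: expected %d, got %d", *prev+1, idx)
        }
        count++
        prev = &idx
    }
    if prev == nil {
        return 0, 0, nil
    }
    return count, *prev, nil
}

func main() {
    fmt.Println(countConsecutive([]uint64{5, 6, 7})) // 3 7 <nil>
    fmt.Println(countConsecutive([]uint64{5, 7}))    // 0 0 unexpected queue index: expected 6, got 7
    fmt.Println(countConsecutive(nil))               // 0 0 <nil> (no L1 messages)
}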
@@ -683,7 +703,7 @@ func GetCodecVersion(config *params.ChainConfig, blockHeight, blockTimestamp uin
     } else if !config.IsEuclid(blockTimestamp) {
         return CodecV4
     } else {
-        // V5 is skipped, because it is only used for the special Euclid transition batch that we handle explicitly
+        // V5 is skipped, because it is only used for the special Euclid transition batch that we handle explicitly
         return CodecV6
     }
 }
