Commit ba3a9d0

Turn rs-zstd into official version (#51)

noel2004 and colinlyguo authored

* update zstd dep
* turn zstd dependency to official
* support 2 zstd libs
* address comments
* tweak comments
* typo fix
* add CompressScrollBatchBytes interface

Co-authored-by: colin <[email protected]>
Co-authored-by: colinlyguo <[email protected]>

1 parent 17bf3e4, commit ba3a9d0

35 files changed: +1028 -176 lines
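The last bullet, "add CompressScrollBatchBytes interface", ties the per-file diffs below together: compression becomes a method on each codec version instead of a direct call into the zstd package. A minimal sketch of the shape this gives the codecs (the interface name here is illustrative, not from this commit; the diffs below only show the concrete implementations):

package encoding

// scrollBatchCompressor is an illustrative name, not from this commit.
// The diffs below show each DACodecVx providing this method: V0 returns
// its input unchanged, V2 and V7 delegate to
// zstd.CompressScrollBatchBytesLegacy, and V8 to
// zstd.CompressScrollBatchBytesStandard.
type scrollBatchCompressor interface {
	CompressScrollBatchBytes(batchBytes []byte) ([]byte, error)
}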

encoding/codecv0.go (4 additions, 0 deletions)

@@ -430,3 +430,7 @@ func (d *DACodecV0) computeBatchDataHash(chunks []*Chunk, totalL1MessagePoppedBe
 	dataHash := crypto.Keccak256Hash(dataBytes)
 	return dataHash, nil
 }
+
+func (d *DACodecV0) CompressScrollBatchBytes(batchBytes []byte) ([]byte, error) {
+	return batchBytes, nil
+}

encoding/codecv2.go (9 additions, 4 deletions)

@@ -154,7 +154,7 @@ func (d *DACodecV2) constructBlobPayload(chunks []*Chunk, maxNumChunksPerBatch i
 	copy(challengePreimage[0:], hash[:])

 	// blobBytes represents the compressed blob payload (batchBytes)
-	blobBytes, err := zstd.CompressScrollBatchBytes(batchBytes)
+	blobBytes, err := d.CompressScrollBatchBytes(batchBytes)
 	if err != nil {
 		return nil, common.Hash{}, nil, nil, common.Hash{}, err
 	}
@@ -236,7 +236,7 @@ func (d *DACodecV2) EstimateChunkL1CommitBatchSizeAndBlobSize(c *Chunk) (uint64,
 	if err != nil {
 		return 0, 0, fmt.Errorf("failed to construct batch payload in blob: %w", err)
 	}
-	blobBytes, err := zstd.CompressScrollBatchBytes(batchBytes)
+	blobBytes, err := d.CompressScrollBatchBytes(batchBytes)
 	if err != nil {
 		return 0, 0, fmt.Errorf("failed to compress scroll batch bytes: %w", err)
 	}
@@ -249,7 +249,7 @@ func (d *DACodecV2) EstimateBatchL1CommitBatchSizeAndBlobSize(b *Batch) (uint64,
 	if err != nil {
 		return 0, 0, err
 	}
-	blobBytes, err := zstd.CompressScrollBatchBytes(batchBytes)
+	blobBytes, err := d.CompressScrollBatchBytes(batchBytes)
 	if err != nil {
 		return 0, 0, err
 	}
@@ -263,7 +263,7 @@ func (d *DACodecV2) checkCompressedDataCompatibility(chunks []*Chunk) (bool, err
 	if err != nil {
 		return false, fmt.Errorf("failed to construct batch payload in blob: %w", err)
 	}
-	blobBytes, err := zstd.CompressScrollBatchBytes(batchBytes)
+	blobBytes, err := d.CompressScrollBatchBytes(batchBytes)
 	if err != nil {
 		return false, fmt.Errorf("failed to compress scroll batch bytes: %w", err)
 	}
@@ -289,3 +289,8 @@ func (d *DACodecV2) CheckChunkCompressedDataCompatibility(c *Chunk) (bool, error
 func (d *DACodecV2) CheckBatchCompressedDataCompatibility(b *Batch) (bool, error) {
 	return d.checkCompressedDataCompatibility(b.Chunks)
 }
+
+// CompressScrollBatchBytes compresses the batch bytes using zstd compression.
+func (d *DACodecV2) CompressScrollBatchBytes(batchBytes []byte) ([]byte, error) {
+	return zstd.CompressScrollBatchBytesLegacy(batchBytes)
+}
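The new V2 method is where the "support 2 zstd libs" bullet surfaces in this section: encoding/zstd now exposes both CompressScrollBatchBytesLegacy and CompressScrollBatchBytesStandard (the zstd package changes themselves are among the other files in this commit, not shown here). The codecs in this diff keep V2 and V7 on the legacy encoder, presumably so their blob output stays byte-identical to what earlier batches were committed with, while V8 below opts into the standard one.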

encoding/codecv4.go (3 additions, 5 deletions)

@@ -14,8 +14,6 @@ import (
 	"github.com/scroll-tech/go-ethereum/crypto"
 	"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
 	"github.com/scroll-tech/go-ethereum/log"
-
-	"github.com/scroll-tech/da-codec/encoding/zstd"
 )

 type DACodecV4 struct {
@@ -205,7 +203,7 @@ func (d *DACodecV4) constructBlobPayload(chunks []*Chunk, maxNumChunksPerBatch i
 	if enableCompression {
 		// blobBytes represents the compressed blob payload (batchBytes)
 		var err error
-		blobBytes, err = zstd.CompressScrollBatchBytes(batchBytes)
+		blobBytes, err = d.CompressScrollBatchBytes(batchBytes)
 		if err != nil {
 			return nil, common.Hash{}, nil, nil, common.Hash{}, err
 		}
@@ -267,7 +265,7 @@ func (d *DACodecV4) estimateL1CommitBatchSizeAndBlobSize(chunks []*Chunk) (uint6
 		return 0, 0, fmt.Errorf("failed to compress scroll batch bytes: %w", err)
 	}
 	if enableCompression {
-		blobBytes, err := zstd.CompressScrollBatchBytes(batchBytes)
+		blobBytes, err := d.CompressScrollBatchBytes(batchBytes)
 		if err != nil {
 			return 0, 0, err
 		}
@@ -295,7 +293,7 @@ func (d *DACodecV4) checkCompressedDataCompatibility(chunks []*Chunk) (bool, err
 	if err != nil {
 		return false, fmt.Errorf("failed to construct batch payload in blob: %w", err)
 	}
-	blobBytes, err := zstd.CompressScrollBatchBytes(batchBytes)
+	blobBytes, err := d.CompressScrollBatchBytes(batchBytes)
 	if err != nil {
 		return false, fmt.Errorf("failed to compress scroll batch bytes: %w", err)
 	}
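Worth noting what codecv4.go does not add: the zstd import is dropped and no CompressScrollBatchBytes of its own is defined, so DACodecV4 uses the method promoted from its embedded predecessors (in this codebase V4 embeds V3, which embeds V2), i.e. the legacy implementation. Plain promotion suffices here because V4 keeps V2's compression algorithm; contrast codecv8.go below, where changing the algorithm forces the overrides listed in its doc comment.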

encoding/codecv7.go (6 additions, 1 deletion)

@@ -234,7 +234,7 @@ func (d *DACodecV7) DecodeTxsFromBlob(blob *kzg4844.Blob, chunks []*DAChunkRawTx
 // If checkLength is true, this function returns if compression is needed based on the compressed data's length, which is used when doing batch bytes encoding.
 // If checkLength is false, this function returns the result of the compatibility check, which is used when determining the chunk and batch contents.
 func (d *DACodecV7) checkCompressedDataCompatibility(payloadBytes []byte, checkLength bool) ([]byte, bool, error) {
-	compressedPayloadBytes, err := zstd.CompressScrollBatchBytes(payloadBytes)
+	compressedPayloadBytes, err := d.CompressScrollBatchBytes(payloadBytes)
 	if err != nil {
 		return nil, false, fmt.Errorf("failed to compress blob payload: %w", err)
 	}
@@ -388,3 +388,8 @@ func (d *DACodecV7) JSONFromBytes(data []byte) ([]byte, error) {

 	return jsonBytes, nil
 }
+
+// CompressScrollBatchBytes compresses the batch bytes using zstd compression.
+func (d *DACodecV7) CompressScrollBatchBytes(batchBytes []byte) ([]byte, error) {
+	return zstd.CompressScrollBatchBytesLegacy(batchBytes)
+}
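The checkLength flag documented above serves two call sites with different needs. A minimal sketch (a hypothetical helper inside the encoding package, not part of this commit) of the encoding-side usage, where the compressed form is kept only when it actually saves space:

// chooseBlobPayload is a hypothetical illustration mirroring how constructBlob
// consumes the checkLength=true mode: ok comes back false either when the
// compressed stream fails the compatibility check or when it is not smaller
// than the input, so the caller falls back to the raw payload and clears the
// compressed flag byte.
func chooseBlobPayload(d *DACodecV7, payload []byte) ([]byte, uint8, error) {
	compressed, ok, err := d.checkCompressedDataCompatibility(payload, true /* checkLength */)
	if err != nil {
		return nil, 0, err
	}
	if ok {
		return compressed, 0x1, nil
	}
	return payload, 0x0, nil
}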

encoding/codecv8.go (199 additions, 0 deletions)

@@ -1,5 +1,31 @@
 package encoding

+import (
+	"crypto/sha256"
+	"encoding/hex"
+	"errors"
+	"fmt"
+
+	"github.com/scroll-tech/go-ethereum/common"
+	"github.com/scroll-tech/go-ethereum/crypto"
+	"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
+	"github.com/scroll-tech/go-ethereum/log"
+
+	"github.com/scroll-tech/da-codec/encoding/zstd"
+)
+
+// DACodecV8 uses CompressScrollBatchBytesStandard for compression instead of CompressScrollBatchBytesLegacy.
+//
+// Note: Due to Go's method receiver behavior, we need to override all methods that call checkCompressedDataCompatibility.
+// When a method in DACodecV7 calls d.checkCompressedDataCompatibility(), it will always use DACodecV7's version,
+// even if the instance is actually a DACodecV8. Therefore, we must override:
+// - checkCompressedDataCompatibility (core method using the new compression)
+// - constructBlob (calls checkCompressedDataCompatibility)
+// - NewDABatch (calls constructBlob)
+// - CheckBatchCompressedDataCompatibility (calls checkCompressedDataCompatibility)
+// - estimateL1CommitBatchSizeAndBlobSize (calls checkCompressedDataCompatibility)
+// - EstimateChunkL1CommitBatchSizeAndBlobSize (calls estimateL1CommitBatchSizeAndBlobSize)
+// - EstimateBatchL1CommitBatchSizeAndBlobSize (calls estimateL1CommitBatchSizeAndBlobSize)
 type DACodecV8 struct {
 	DACodecV7
 }
@@ -12,3 +38,176 @@ func NewDACodecV8() *DACodecV8 {
 		},
 	}
 }
+
+// checkCompressedDataCompatibility checks the compressed data compatibility for a batch.
+// It constructs a blob payload, compresses the data, and checks the compressed data compatibility.
+// flag checkLength indicates whether to check the length of the compressed data against the original data.
+// If checkLength is true, this function returns if compression is needed based on the compressed data's length, which is used when doing batch bytes encoding.
+// If checkLength is false, this function returns the result of the compatibility check, which is used when determining the chunk and batch contents.
+func (d *DACodecV8) checkCompressedDataCompatibility(payloadBytes []byte, checkLength bool) ([]byte, bool, error) {
+	compressedPayloadBytes, err := d.CompressScrollBatchBytes(payloadBytes)
+	if err != nil {
+		return nil, false, fmt.Errorf("failed to compress blob payload: %w", err)
+	}
+
+	if err = checkCompressedDataCompatibilityV7(compressedPayloadBytes); err != nil {
+		log.Warn("Compressed data compatibility check failed", "err", err, "payloadBytes", hex.EncodeToString(payloadBytes), "compressedPayloadBytes", hex.EncodeToString(compressedPayloadBytes))
+		return nil, false, nil
+	}
+
+	// check if compressed data is bigger or equal to the original data -> no need to compress
+	if checkLength && len(compressedPayloadBytes) >= len(payloadBytes) {
+		log.Warn("Compressed data is bigger or equal to the original data", "payloadBytes", hex.EncodeToString(payloadBytes), "compressedPayloadBytes", hex.EncodeToString(compressedPayloadBytes))
+		return nil, false, nil
+	}
+
+	return compressedPayloadBytes, true, nil
+}
+
+// NewDABatch creates a DABatch including blob from the provided Batch.
+func (d *DACodecV8) NewDABatch(batch *Batch) (DABatch, error) {
+	if len(batch.Blocks) == 0 {
+		return nil, errors.New("batch must contain at least one block")
+	}
+
+	if err := checkBlocksBatchVSChunksConsistency(batch); err != nil {
+		return nil, fmt.Errorf("failed to check blocks batch vs chunks consistency: %w", err)
+	}
+
+	blob, blobVersionedHash, blobBytes, challengeDigest, err := d.constructBlob(batch)
+	if err != nil {
+		return nil, fmt.Errorf("failed to construct blob: %w", err)
+	}
+
+	daBatch, err := newDABatchV7(d.Version(), batch.Index, blobVersionedHash, batch.ParentBatchHash, blob, blobBytes, challengeDigest)
+	if err != nil {
+		return nil, fmt.Errorf("failed to construct DABatch: %w", err)
+	}
+
+	return daBatch, nil
+}
+
+func (d *DACodecV8) constructBlob(batch *Batch) (*kzg4844.Blob, common.Hash, []byte, common.Hash, error) {
+	blobBytes := make([]byte, blobEnvelopeV7OffsetPayload)
+
+	payloadBytes, err := d.constructBlobPayload(batch)
+	if err != nil {
+		return nil, common.Hash{}, nil, common.Hash{}, fmt.Errorf("failed to construct blob payload: %w", err)
+	}
+
+	compressedPayloadBytes, enableCompression, err := d.checkCompressedDataCompatibility(payloadBytes, true /* checkLength */)
+	if err != nil {
+		return nil, common.Hash{}, nil, common.Hash{}, fmt.Errorf("failed to check batch compressed data compatibility: %w", err)
+	}
+
+	isCompressedFlag := uint8(0x0)
+	if enableCompression {
+		isCompressedFlag = 0x1
+		payloadBytes = compressedPayloadBytes
+	}
+
+	sizeSlice := encodeSize3Bytes(uint32(len(payloadBytes)))
+
+	blobBytes[blobEnvelopeV7OffsetVersion] = uint8(d.Version())
+	copy(blobBytes[blobEnvelopeV7OffsetByteSize:blobEnvelopeV7OffsetCompressedFlag], sizeSlice)
+	blobBytes[blobEnvelopeV7OffsetCompressedFlag] = isCompressedFlag
+	blobBytes = append(blobBytes, payloadBytes...)
+
+	if len(blobBytes) > maxEffectiveBlobBytes {
+		log.Error("ConstructBlob: Blob payload exceeds maximum size", "size", len(blobBytes), "blobBytes", hex.EncodeToString(blobBytes))
+		return nil, common.Hash{}, nil, common.Hash{}, fmt.Errorf("blob exceeds maximum size: got %d, allowed %d", len(blobBytes), maxEffectiveBlobBytes)
+	}
+
+	// convert raw data to BLSFieldElements
+	blob, err := makeBlobCanonical(blobBytes)
+	if err != nil {
+		return nil, common.Hash{}, nil, common.Hash{}, fmt.Errorf("failed to convert blobBytes to canonical form: %w", err)
+	}
+
+	// compute blob versioned hash
+	c, err := kzg4844.BlobToCommitment(blob)
+	if err != nil {
+		return nil, common.Hash{}, nil, common.Hash{}, fmt.Errorf("failed to create blob commitment: %w", err)
+	}
+	blobVersionedHash := kzg4844.CalcBlobHashV1(sha256.New(), &c)
+
+	// compute challenge digest for codecv7, different from previous versions,
+	// the blob bytes are padded to the max effective blob size, which is 131072 / 32 * 31 due to the blob encoding
+	paddedBlobBytes := make([]byte, maxEffectiveBlobBytes)
+	copy(paddedBlobBytes, blobBytes)
+
+	challengeDigest := crypto.Keccak256Hash(crypto.Keccak256(paddedBlobBytes), blobVersionedHash[:])
+
+	return blob, blobVersionedHash, blobBytes, challengeDigest, nil
+}
+
+// CheckBatchCompressedDataCompatibility checks the compressed data compatibility for a batch.
+func (d *DACodecV8) CheckBatchCompressedDataCompatibility(b *Batch) (bool, error) {
+	if len(b.Blocks) == 0 {
+		return false, errors.New("batch must contain at least one block")
+	}
+
+	if err := checkBlocksBatchVSChunksConsistency(b); err != nil {
+		return false, fmt.Errorf("failed to check blocks batch vs chunks consistency: %w", err)
+	}
+
+	payloadBytes, err := d.constructBlobPayload(b)
+	if err != nil {
+		return false, fmt.Errorf("failed to construct blob payload: %w", err)
+	}
+
+	// This check is only used for sanity checks. If the check fails, it means that the compression did not work as expected.
+	// rollup-relayer will try popping the last chunk of the batch (or last block of the chunk when in proposing chunks) and try again to see if it works as expected.
+	// Since length check is used for DA and proving efficiency, it does not need to be checked here.
+	_, compatible, err := d.checkCompressedDataCompatibility(payloadBytes, false /* checkLength */)
+	if err != nil {
+		return false, fmt.Errorf("failed to check batch compressed data compatibility: %w", err)
+	}
+
+	return compatible, nil
+}
+
+func (d *DACodecV8) estimateL1CommitBatchSizeAndBlobSize(batch *Batch) (uint64, uint64, error) {
+	if len(batch.Blocks) == 0 {
+		return 0, 0, errors.New("batch must contain at least one block")
+	}
+
+	blobBytes := make([]byte, blobEnvelopeV7OffsetPayload)
+
+	payloadBytes, err := d.constructBlobPayload(batch)
+	if err != nil {
+		return 0, 0, fmt.Errorf("failed to construct blob payload: %w", err)
+	}
+
+	compressedPayloadBytes, enableCompression, err := d.checkCompressedDataCompatibility(payloadBytes, true /* checkLength */)
+	if err != nil {
+		return 0, 0, fmt.Errorf("failed to check batch compressed data compatibility: %w", err)
+	}
+
+	if enableCompression {
+		blobBytes = append(blobBytes, compressedPayloadBytes...)
+	} else {
+		blobBytes = append(blobBytes, payloadBytes...)
+	}
+
+	return blobEnvelopeV7OffsetPayload + uint64(len(payloadBytes)), calculatePaddedBlobSize(uint64(len(blobBytes))), nil
+}
+
+// EstimateChunkL1CommitBatchSizeAndBlobSize estimates the L1 commit batch size and blob size for a single chunk.
+func (d *DACodecV8) EstimateChunkL1CommitBatchSizeAndBlobSize(chunk *Chunk) (uint64, uint64, error) {
+	return d.estimateL1CommitBatchSizeAndBlobSize(&Batch{
+		Blocks:                 chunk.Blocks,
+		PrevL1MessageQueueHash: chunk.PrevL1MessageQueueHash,
+		PostL1MessageQueueHash: chunk.PostL1MessageQueueHash,
+	})
+}
+
+// EstimateBatchL1CommitBatchSizeAndBlobSize estimates the L1 commit batch size and blob size for a batch.
+func (d *DACodecV8) EstimateBatchL1CommitBatchSizeAndBlobSize(batch *Batch) (uint64, uint64, error) {
+	return d.estimateL1CommitBatchSizeAndBlobSize(batch)
+}
+
+// CompressScrollBatchBytes compresses the batch bytes using zstd compression.
+func (d *DACodecV8) CompressScrollBatchBytes(batchBytes []byte) ([]byte, error) {
+	return zstd.CompressScrollBatchBytesStandard(batchBytes)
+}
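The doc comment at the top of codecv8.go is the crux of this file, and it rests on a general Go fact worth seeing in isolation: struct embedding promotes methods but does not make them virtual, so a promoted V7 method keeps calling V7's own helpers even on a V8 value. A self-contained toy sketch (names are illustrative, not from the repo):

package main

import "fmt"

type codecV7 struct{}

func (c *codecV7) compress() string { return "legacy zstd" }

// encode is promoted to codecV8 by embedding, but its receiver stays *codecV7,
// so the compress call below is bound to (*codecV7).compress at compile time.
func (c *codecV7) encode() string { return c.compress() }

type codecV8 struct{ codecV7 }

func (c *codecV8) compress() string { return "standard zstd" }

func main() {
	v8 := &codecV8{}
	fmt.Println(v8.encode())   // prints "legacy zstd" -- the V7 helper wins
	fmt.Println(v8.compress()) // prints "standard zstd"
}

This is why DACodecV8 cannot simply define CompressScrollBatchBytes and inherit the rest: every method on the path from the public API down to the compression call has to be redeclared with a *DACodecV8 receiver, exactly the list enumerated in the doc comment.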
