Skip to content

Commit 58b8994

Browse files
colinlyguo and noel2004
authored
feat: support codecv8 (#54)
* feat: support codecv8 * Turn rs-zstd into official version (#51) * update zstd dep * turn zstd dependening to official * support 2 zstd libs * address comments * tweak comments * typo fix * add CompressScrollBatchBytes interface --------- Co-authored-by: colin <[email protected]> Co-authored-by: colinlyguo <[email protected]> * overwrite CheckChunkCompressedDataCompatibility * remove Makefile * fix readme * update l2geth commit * fix script verification logic and regenerate .a files * do not change rust function and regenerate .a files * fix a typo --------- Co-authored-by: Ho <[email protected]>
1 parent bfa7133 commit 58b8994

40 files changed

+1420
-206
lines changed

README.md

Lines changed: 43 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -19,4 +19,46 @@ A: `linux/amd64`, `linux/arm64`, `darwin/arm64`. Pull requests for other platfor
1919

2020
**Q: I don't trust `libscroll_zstd*.a` binary files from the repo or these files don't work on my OS/ARCH. How to rebuild them?**
2121

22-
A: Just run `cd libzstd && make libzstd` if your OS/ARCH is supported.
22+
A: To rebuild the libraries for your platform:
23+
24+
1. Build the legacy encoder:
25+
26+
```bash
27+
cd libzstd/encoder-legacy
28+
make install
29+
```
30+
31+
2. Build the standard encoder:
32+
33+
```bash
34+
cd libzstd/encoder-standard
35+
make install
36+
```
37+
38+
3. Add symbol prefixes to avoid conflicts:
39+
40+
```bash
41+
cd encoding/zstd
42+
./add_symbol_prefix.sh
43+
```
44+
45+
**Note**: The symbol prefix script currently only works on macOS. For Linux builds, perform steps 1-2 in Docker, then run step 3 on macOS.
46+
47+
For macOS builds, ensure you have Rust and necessary build tools installed:
48+
49+
```bash
50+
# Install Rust if not already installed
51+
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
52+
```
53+
54+
For Linux builds, use Docker with build dependencies:
55+
56+
```bash
57+
# Linux ARM64
58+
docker run -it --rm --platform linux/arm64 -v $(pwd):/workspace -w /workspace rust:1.75-slim bash
59+
apt update && apt install -y build-essential
60+
61+
# Linux AMD64
62+
docker run -it --rm --platform linux/amd64 -v $(pwd):/workspace -w /workspace rust:1.75-slim bash
63+
apt update && apt install -y build-essential
64+
```

encoding/codecv0.go

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -430,3 +430,7 @@ func (d *DACodecV0) computeBatchDataHash(chunks []*Chunk, totalL1MessagePoppedBe
430430
dataHash := crypto.Keccak256Hash(dataBytes)
431431
return dataHash, nil
432432
}
433+
434+
func (d *DACodecV0) CompressScrollBatchBytes(batchBytes []byte) ([]byte, error) {
435+
return batchBytes, nil
436+
}

encoding/codecv2.go

Lines changed: 9 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -154,7 +154,7 @@ func (d *DACodecV2) constructBlobPayload(chunks []*Chunk, maxNumChunksPerBatch i
154154
copy(challengePreimage[0:], hash[:])
155155

156156
// blobBytes represents the compressed blob payload (batchBytes)
157-
blobBytes, err := zstd.CompressScrollBatchBytes(batchBytes)
157+
blobBytes, err := d.CompressScrollBatchBytes(batchBytes)
158158
if err != nil {
159159
return nil, common.Hash{}, nil, nil, common.Hash{}, err
160160
}
@@ -236,7 +236,7 @@ func (d *DACodecV2) EstimateChunkL1CommitBatchSizeAndBlobSize(c *Chunk) (uint64,
236236
if err != nil {
237237
return 0, 0, fmt.Errorf("failed to construct batch payload in blob: %w", err)
238238
}
239-
blobBytes, err := zstd.CompressScrollBatchBytes(batchBytes)
239+
blobBytes, err := d.CompressScrollBatchBytes(batchBytes)
240240
if err != nil {
241241
return 0, 0, fmt.Errorf("failed to compress scroll batch bytes: %w", err)
242242
}
@@ -249,7 +249,7 @@ func (d *DACodecV2) EstimateBatchL1CommitBatchSizeAndBlobSize(b *Batch) (uint64,
249249
if err != nil {
250250
return 0, 0, err
251251
}
252-
blobBytes, err := zstd.CompressScrollBatchBytes(batchBytes)
252+
blobBytes, err := d.CompressScrollBatchBytes(batchBytes)
253253
if err != nil {
254254
return 0, 0, err
255255
}
@@ -263,7 +263,7 @@ func (d *DACodecV2) checkCompressedDataCompatibility(chunks []*Chunk) (bool, err
263263
if err != nil {
264264
return false, fmt.Errorf("failed to construct batch payload in blob: %w", err)
265265
}
266-
blobBytes, err := zstd.CompressScrollBatchBytes(batchBytes)
266+
blobBytes, err := d.CompressScrollBatchBytes(batchBytes)
267267
if err != nil {
268268
return false, fmt.Errorf("failed to compress scroll batch bytes: %w", err)
269269
}
@@ -289,3 +289,8 @@ func (d *DACodecV2) CheckChunkCompressedDataCompatibility(c *Chunk) (bool, error
289289
func (d *DACodecV2) CheckBatchCompressedDataCompatibility(b *Batch) (bool, error) {
290290
return d.checkCompressedDataCompatibility(b.Chunks)
291291
}
292+
293+
// CompressScrollBatchBytes compresses the batch bytes using zstd compression.
294+
func (d *DACodecV2) CompressScrollBatchBytes(batchBytes []byte) ([]byte, error) {
295+
return zstd.CompressScrollBatchBytesLegacy(batchBytes)
296+
}

encoding/codecv4.go

Lines changed: 3 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -14,8 +14,6 @@ import (
1414
"github.com/scroll-tech/go-ethereum/crypto"
1515
"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
1616
"github.com/scroll-tech/go-ethereum/log"
17-
18-
"github.com/scroll-tech/da-codec/encoding/zstd"
1917
)
2018

2119
type DACodecV4 struct {
@@ -205,7 +203,7 @@ func (d *DACodecV4) constructBlobPayload(chunks []*Chunk, maxNumChunksPerBatch i
205203
if enableCompression {
206204
// blobBytes represents the compressed blob payload (batchBytes)
207205
var err error
208-
blobBytes, err = zstd.CompressScrollBatchBytes(batchBytes)
206+
blobBytes, err = d.CompressScrollBatchBytes(batchBytes)
209207
if err != nil {
210208
return nil, common.Hash{}, nil, nil, common.Hash{}, err
211209
}
@@ -267,7 +265,7 @@ func (d *DACodecV4) estimateL1CommitBatchSizeAndBlobSize(chunks []*Chunk) (uint6
267265
return 0, 0, fmt.Errorf("failed to compress scroll batch bytes: %w", err)
268266
}
269267
if enableCompression {
270-
blobBytes, err := zstd.CompressScrollBatchBytes(batchBytes)
268+
blobBytes, err := d.CompressScrollBatchBytes(batchBytes)
271269
if err != nil {
272270
return 0, 0, err
273271
}
@@ -295,7 +293,7 @@ func (d *DACodecV4) checkCompressedDataCompatibility(chunks []*Chunk) (bool, err
295293
if err != nil {
296294
return false, fmt.Errorf("failed to construct batch payload in blob: %w", err)
297295
}
298-
blobBytes, err := zstd.CompressScrollBatchBytes(batchBytes)
296+
blobBytes, err := d.CompressScrollBatchBytes(batchBytes)
299297
if err != nil {
300298
return false, fmt.Errorf("failed to compress scroll batch bytes: %w", err)
301299
}

encoding/codecv7.go

Lines changed: 19 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -17,10 +17,15 @@ import (
1717
"github.com/scroll-tech/da-codec/encoding/zstd"
1818
)
1919

20-
type DACodecV7 struct{}
20+
type DACodecV7 struct {
21+
forcedVersion *CodecVersion
22+
}
2123

2224
// Version returns the codec version.
2325
func (d *DACodecV7) Version() CodecVersion {
26+
if d.forcedVersion != nil {
27+
return *d.forcedVersion
28+
}
2429
return CodecV7
2530
}
2631

@@ -86,7 +91,7 @@ func (d *DACodecV7) NewDABatch(batch *Batch) (DABatch, error) {
8691
return nil, fmt.Errorf("failed to construct blob: %w", err)
8792
}
8893

89-
daBatch, err := newDABatchV7(CodecV7, batch.Index, blobVersionedHash, batch.ParentBatchHash, blob, blobBytes, challengeDigest)
94+
daBatch, err := newDABatchV7(d.Version(), batch.Index, blobVersionedHash, batch.ParentBatchHash, blob, blobBytes, challengeDigest)
9095
if err != nil {
9196
return nil, fmt.Errorf("failed to construct DABatch: %w", err)
9297
}
@@ -115,7 +120,7 @@ func (d *DACodecV7) constructBlob(batch *Batch) (*kzg4844.Blob, common.Hash, []b
115120

116121
sizeSlice := encodeSize3Bytes(uint32(len(payloadBytes)))
117122

118-
blobBytes[blobEnvelopeV7OffsetVersion] = uint8(CodecV7)
123+
blobBytes[blobEnvelopeV7OffsetVersion] = uint8(d.Version())
119124
copy(blobBytes[blobEnvelopeV7OffsetByteSize:blobEnvelopeV7OffsetCompressedFlag], sizeSlice)
120125
blobBytes[blobEnvelopeV7OffsetCompressedFlag] = isCompressedFlag
121126
blobBytes = append(blobBytes, payloadBytes...)
@@ -166,15 +171,15 @@ func (d *DACodecV7) NewDABatchFromBytes(data []byte) (DABatch, error) {
166171
return nil, fmt.Errorf("failed to decode DA batch: %w", err)
167172
}
168173

169-
if daBatch.version != CodecV7 {
170-
return nil, fmt.Errorf("codec version mismatch: expected %d but found %d", CodecV7, daBatch.version)
174+
if daBatch.version != d.Version() {
175+
return nil, fmt.Errorf("codec version mismatch: expected %d but found %d", d.Version(), daBatch.version)
171176
}
172177

173178
return daBatch, nil
174179
}
175180

176181
func (d *DACodecV7) NewDABatchFromParams(batchIndex uint64, blobVersionedHash, parentBatchHash common.Hash) (DABatch, error) {
177-
return newDABatchV7(CodecV7, batchIndex, blobVersionedHash, parentBatchHash, nil, nil, common.Hash{})
182+
return newDABatchV7(d.Version(), batchIndex, blobVersionedHash, parentBatchHash, nil, nil, common.Hash{})
178183
}
179184

180185
func (d *DACodecV7) DecodeDAChunksRawTx(_ [][]byte) ([]*DAChunkRawTx, error) {
@@ -186,8 +191,8 @@ func (d *DACodecV7) DecodeBlob(blob *kzg4844.Blob) (DABlobPayload, error) {
186191

187192
// read the blob envelope header
188193
version := rawBytes[blobEnvelopeV7OffsetVersion]
189-
if CodecVersion(version) != CodecV7 {
190-
return nil, fmt.Errorf("codec version mismatch: expected %d but found %d", CodecV7, version)
194+
if CodecVersion(version) != d.Version() {
195+
return nil, fmt.Errorf("codec version mismatch: expected %d but found %d", d.Version(), version)
191196
}
192197

193198
// read the data size
@@ -229,7 +234,7 @@ func (d *DACodecV7) DecodeTxsFromBlob(blob *kzg4844.Blob, chunks []*DAChunkRawTx
229234
// If checkLength is true, this function returns if compression is needed based on the compressed data's length, which is used when doing batch bytes encoding.
230235
// If checkLength is false, this function returns the result of the compatibility check, which is used when determining the chunk and batch contents.
231236
func (d *DACodecV7) checkCompressedDataCompatibility(payloadBytes []byte, checkLength bool) ([]byte, bool, error) {
232-
compressedPayloadBytes, err := zstd.CompressScrollBatchBytes(payloadBytes)
237+
compressedPayloadBytes, err := d.CompressScrollBatchBytes(payloadBytes)
233238
if err != nil {
234239
return nil, false, fmt.Errorf("failed to compress blob payload: %w", err)
235240
}
@@ -383,3 +388,8 @@ func (d *DACodecV7) JSONFromBytes(data []byte) ([]byte, error) {
383388

384389
return jsonBytes, nil
385390
}
391+
392+
// CompressScrollBatchBytes compresses the batch bytes using zstd compression.
393+
func (d *DACodecV7) CompressScrollBatchBytes(batchBytes []byte) ([]byte, error) {
394+
return zstd.CompressScrollBatchBytesLegacy(batchBytes)
395+
}

0 commit comments

Comments
 (0)