Skip to content

Commit 0c725b7

Browse files
committed
refactor
1 parent 84df923 commit 0c725b7

File tree

9 files changed

+148
-127
lines changed

9 files changed

+148
-127
lines changed

contract/contract.go

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -220,16 +220,16 @@ func TransactWithGasAdjustment(
220220
}
221221
tx, err := contract.FlowTransactor.contract.Transact(opts, method, params...)
222222

223-
var receipt *types.Receipt
224223
if err == nil {
225-
// Wait for successful execution
224+
// Wait for successful execution in a separate goroutine.
225+
// Use local variables to avoid racing with the outer loop's err.
226226
go func() {
227-
receipt, err = contract.WaitForReceipt(ctx, tx.Hash(), true, blockchain.RetryOption{NRetries: retryOpts.MaxNonGasRetries})
228-
if err == nil {
229-
receiptCh <- receipt
227+
r, e := contract.WaitForReceipt(ctx, tx.Hash(), true, blockchain.RetryOption{NRetries: retryOpts.MaxNonGasRetries})
228+
if e == nil {
229+
receiptCh <- r
230230
return
231231
}
232-
errCh <- err
232+
errCh <- e
233233
}()
234234
// even if the receipt is received, this loop will continue until the context is canceled
235235
time.Sleep(30 * time.Second)

core/merkle/proof.go

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -58,12 +58,12 @@ func (proof *Proof) ValidateHash(root, contentHash common.Hash, position, numLea
5858
}
5959

6060
// content hash mismatch
61-
if contentHash.Hex() != proof.Lemma[0].Hex() {
61+
if contentHash != proof.Lemma[0] {
6262
return errProofContentMismatch
6363
}
6464

6565
// root mismatch
66-
if len(proof.Lemma) > 1 && root.Hex() != proof.Lemma[len(proof.Lemma)-1].Hex() {
66+
if len(proof.Lemma) > 1 && root != proof.Lemma[len(proof.Lemma)-1] {
6767
return errProofRootMismatch
6868
}
6969

@@ -109,5 +109,5 @@ func (proof *Proof) validateRoot() bool {
109109
}
110110
}
111111

112-
return hash.Hex() == proof.Lemma[len(proof.Lemma)-1].Hex()
112+
return hash == proof.Lemma[len(proof.Lemma)-1]
113113
}

indexer/client.go

Lines changed: 27 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -184,30 +184,39 @@ func (c *Client) SplitableUpload(ctx context.Context, w3Client *web3go.Client, d
184184

185185
// BatchUpload submits multiple data entries to the 0g storage contract as a batch in a single on-chain transaction, then transfers the data to the storage nodes selected from the indexer service.
186186
func (c *Client) BatchUpload(ctx context.Context, w3Client *web3go.Client, datas []core.IterableData, option ...transfer.BatchUploadOption) (eth_common.Hash, []eth_common.Hash, error) {
187-
expectedReplica := uint(1)
187+
var opts transfer.BatchUploadOption
188188
if len(option) > 0 {
189-
for _, opt := range option[0].DataOptions {
190-
expectedReplica = max(expectedReplica, opt.ExpectedReplica)
191-
}
189+
opts = option[0]
190+
}
191+
192+
expectedReplica := uint(1)
193+
for _, opt := range opts.DataOptions {
194+
expectedReplica = max(expectedReplica, opt.ExpectedReplica)
192195
}
193196
var maxSegNum uint64
194197
for _, data := range datas {
195198
maxSegNum = max(maxSegNum, data.NumSegments())
196199
}
197200
dropped := make([]string, 0)
201+
maxRetry := 3
202+
attempts := 0
198203
for {
199-
uploader, err := c.NewUploaderFromIndexerNodes(ctx, maxSegNum, w3Client, expectedReplica, dropped, option[0].Method, option[0].FullTrusted)
204+
uploader, err := c.NewUploaderFromIndexerNodes(ctx, maxSegNum, w3Client, expectedReplica, dropped, opts.Method, opts.FullTrusted)
200205
if err != nil {
201206
return eth_common.Hash{}, nil, err
202207
}
203-
hash, roots, err := uploader.BatchUpload(ctx, datas, option...)
208+
hash, roots, err := uploader.BatchUpload(ctx, datas, opts)
204209
var rpcError *node.RPCError
205210
if errors.As(err, &rpcError) {
206211
dropped = append(dropped, rpcError.URL)
207212
c.logger.Infof("dropped problematic node and retry: %v", rpcError.Error())
208213
} else {
209214
return hash, roots, err
210215
}
216+
attempts++
217+
if attempts >= maxRetry {
218+
return hash, roots, err
219+
}
211220
}
212221
}
213222

@@ -245,28 +254,36 @@ func (c *Client) UploadFileSegments(
245254
return errors.New("segment data is empty")
246255
}
247256

248-
expectedReplica := uint(1)
257+
var opt transfer.UploadOption
249258
if len(option) > 0 {
250-
expectedReplica = max(expectedReplica, option[0].ExpectedReplica)
259+
opt = option[0]
251260
}
252261

262+
expectedReplica := max(uint(1), opt.ExpectedReplica)
263+
253264
numSeg := core.NumSplits(int64(fileSeg.FileInfo.Tx.Size), core.DefaultSegmentSize)
254265
dropped := make([]string, 0)
266+
maxRetry := 3
267+
attempts := 0
255268
for {
256-
uploaders, err := c.NewFileSegmentUploaderFromIndexerNodes(ctx, numSeg, expectedReplica, dropped, option[0].Method, true)
269+
uploaders, err := c.NewFileSegmentUploaderFromIndexerNodes(ctx, numSeg, expectedReplica, dropped, opt.Method, true)
257270
if err != nil {
258271
return err
259272
}
260273

261274
var rpcError *node.RPCError
262275
for _, uploader := range uploaders {
263-
if err := uploader.Upload(ctx, fileSeg, option...); errors.As(err, &rpcError) {
276+
if err := uploader.Upload(ctx, fileSeg, opt); errors.As(err, &rpcError) {
264277
dropped = append(dropped, rpcError.URL)
265278
c.logger.Infof("dropped problematic node and retry: %v", rpcError.Error())
266279
} else {
267280
return err
268281
}
269282
}
283+
attempts++
284+
if attempts >= maxRetry {
285+
return rpcError
286+
}
270287
}
271288
}
272289

kv/builder.go

Lines changed: 12 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -39,6 +39,16 @@ func newStreamDataBuilder(version uint64) *streamDataBuilder {
3939
}
4040
}
4141

42+
// compareStreamKey orders entries by (StreamId, Key) lexicographically.
43+
func compareStreamKey(idA common.Hash, keyA []byte, idB common.Hash, keyB []byte) bool {
44+
hexA := idA.Hex()
45+
hexB := idB.Hex()
46+
if hexA == hexB {
47+
return hexutil.Encode(keyA) < hexutil.Encode(keyB)
48+
}
49+
return hexA < hexB
50+
}
51+
4252
// Build serialize all cached KV operations to StreamData.
4353
func (builder *streamDataBuilder) Build(sorted ...bool) (*StreamData, error) {
4454
var err error
@@ -97,22 +107,10 @@ func (builder *streamDataBuilder) Build(sorted ...bool) (*StreamData, error) {
97107
if len(sorted) > 0 {
98108
if sorted[0] {
99109
sort.SliceStable(data.Reads, func(i, j int) bool {
100-
streamIdI := data.Reads[i].StreamId.Hex()
101-
streamIdJ := data.Reads[j].StreamId.Hex()
102-
if streamIdI == streamIdJ {
103-
return hexutil.Encode(data.Reads[i].Key) < hexutil.Encode(data.Reads[j].Key)
104-
} else {
105-
return streamIdI < streamIdJ
106-
}
110+
return compareStreamKey(data.Reads[i].StreamId, data.Reads[i].Key, data.Reads[j].StreamId, data.Reads[j].Key)
107111
})
108112
sort.SliceStable(data.Writes, func(i, j int) bool {
109-
streamIdI := data.Writes[i].StreamId.Hex()
110-
streamIdJ := data.Writes[j].StreamId.Hex()
111-
if streamIdI == streamIdJ {
112-
return hexutil.Encode(data.Writes[i].Key) < hexutil.Encode(data.Writes[j].Key)
113-
} else {
114-
return streamIdI < streamIdJ
115-
}
113+
return compareStreamKey(data.Writes[i].StreamId, data.Writes[i].Key, data.Writes[j].StreamId, data.Writes[j].Key)
116114
})
117115
}
118116
}

tests/config/cosmos-genesis.json

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
{
22
"app_name": "0gchaind",
33
"app_version": "v0.2.0-alpha.4-892-g6b920eb40",
4-
"genesis_time": "2026-02-24T02:16:23.477051Z",
4+
"genesis_time": "2026-02-24T05:22:49.947997Z",
55
"chain_id": "0gchaind-local",
66
"initial_height": 1,
77
"app_hash": null,

transfer/download/metadata.go

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -42,7 +42,7 @@ func LoadMetadata(file *os.File) (*Metadata, error) {
4242
return nil, errors.Errorf("Read metadata length mismatch, expected = %v, actual = %v", MetadataSize, n)
4343
}
4444

45-
metadata, err := DeserializeMedata(buf)
45+
metadata, err := DeserializeMetadata(buf)
4646
if err != nil {
4747
return nil, errors.WithMessage(err, "Failed to deserialize metadata")
4848
}
@@ -60,7 +60,7 @@ func (md *Metadata) Serialize() []byte {
6060
return encoded
6161
}
6262

63-
func DeserializeMedata(encoded []byte) (*Metadata, error) {
63+
func DeserializeMetadata(encoded []byte) (*Metadata, error) {
6464
if len(encoded) != MetadataSize {
6565
return nil, errors.Errorf("Invalid data length, expected = %v, actual = %v", MetadataSize, len(encoded))
6666
}

transfer/download/metadata_test.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,7 @@ func TestMetadataSerde(t *testing.T) {
1919

2020
encoded := md.Serialize()
2121

22-
md2, err := DeserializeMedata(encoded)
22+
md2, err := DeserializeMetadata(encoded)
2323
assert.NoError(t, err)
2424
assert.Equal(t, md, *md2)
2525
}

transfer/upload_dir.go

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -28,7 +28,7 @@ func (uploader *Uploader) UploadDir(ctx context.Context, folder string, fragment
2828
return n.Type == dir.FileTypeFile && n.Size > 0
2929
})
3030

31-
logrus.Infof("Total %d files to be uploaded", len(nodes))
31+
uploader.logger.Infof("Total %d files to be uploaded", len(nodes))
3232

3333
// Upload each file via SplitableUpload (handles encryption + splitting).
3434
for i := range nodes {
@@ -51,7 +51,7 @@ func (uploader *Uploader) UploadDir(ctx context.Context, folder string, fragment
5151
}
5252
nodes[i].Roots = rootStrs
5353

54-
logrus.WithFields(logrus.Fields{
54+
uploader.logger.WithFields(logrus.Fields{
5555
"roots": rootStrs,
5656
"path": path,
5757
}).Info("File uploaded successfully")

0 commit comments

Comments
 (0)