Skip to content

Commit 7411cc9

Browse files
committed
test: redecoder
1 parent 5a7d642 commit 7411cc9

File tree

1 file changed

+188
-0
lines changed

1 file changed

+188
-0
lines changed

pkg/file/joiner/redecoder_test.go

Lines changed: 188 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,188 @@
1+
// Copyright 2025 The Swarm Authors. All rights reserved.
2+
// Use of this source code is governed by a BSD-style
3+
// license that can be found in the LICENSE file.
4+
5+
package joiner_test
6+
7+
import (
	"bytes"
	"context"
	"encoding/binary"
	"testing"

	"github.com/ethersphere/bee/v2/pkg/cac"
	"github.com/ethersphere/bee/v2/pkg/file/joiner"
	"github.com/ethersphere/bee/v2/pkg/file/redundancy/getter"
	"github.com/ethersphere/bee/v2/pkg/log"
	"github.com/ethersphere/bee/v2/pkg/storage"
	"github.com/ethersphere/bee/v2/pkg/storage/inmemchunkstore"
	"github.com/ethersphere/bee/v2/pkg/swarm"
	"github.com/klauspost/reedsolomon"
)
20+
21+
// TestReDecoderFlow tests the complete flow of:
22+
// 1. Loading data with redundancy getter
23+
// 2. Successful recovery which nulls the decoder
24+
// 3. Chunk eviction from cache
25+
// 4. Reloading the same data through ReDecoder fallback
26+
func TestReDecoderFlow(t *testing.T) {
27+
ctx := context.Background()
28+
dataShardCount := 4
29+
parityShardCount := 2
30+
totalShardCount := dataShardCount + parityShardCount
31+
32+
// Create real data chunks with proper content
33+
dataShards := make([][]byte, dataShardCount)
34+
for i := 0; i < dataShardCount; i++ {
35+
// Create chunks with span + data
36+
dataShards[i] = make([]byte, swarm.ChunkWithSpanSize)
37+
// First 8 bytes are span
38+
copy(dataShards[i][:8], []byte{0, 0, 0, 0, 0, 0, 0x10, 0}) // 4KB span
39+
// Rest is actual data
40+
for j := 8; j < swarm.ChunkWithSpanSize; j++ {
41+
dataShards[i][j] = byte((i*16 + j) % 256) // Distinct data for each chunk
42+
}
43+
}
44+
45+
// Create parity chunks using Reed-Solomon encoding
46+
parityShards := make([][]byte, parityShardCount)
47+
for i := 0; i < parityShardCount; i++ {
48+
parityShards[i] = make([]byte, swarm.ChunkWithSpanSize)
49+
}
50+
51+
// Create Reed-Solomon encoder
52+
enc, err := reedsolomon.New(dataShardCount, parityShardCount)
53+
if err != nil {
54+
t.Fatalf("Failed to create Reed-Solomon encoder: %v", err)
55+
}
56+
57+
// Combine data and parity shards
58+
allShards := make([][]byte, totalShardCount)
59+
copy(allShards, dataShards)
60+
copy(allShards[dataShardCount:], parityShards)
61+
62+
// Encode to generate parity chunks
63+
if err := enc.Encode(allShards); err != nil {
64+
t.Fatalf("Failed to encode data: %v", err)
65+
}
66+
67+
// Create content-addressed chunks for all shards
68+
addresses := make([]swarm.Address, totalShardCount)
69+
chunks := make([]swarm.Chunk, totalShardCount)
70+
71+
for i := 0; i < totalShardCount; i++ {
72+
// Create proper content-addressed chunks
73+
chunk, err := cac.NewWithDataSpan(allShards[i])
74+
if err != nil {
75+
t.Fatalf("Failed to create content-addressed chunk %d: %v", i, err)
76+
}
77+
chunks[i] = chunk
78+
addresses[i] = chunk.Address()
79+
}
80+
81+
// Select a data chunk to be missing (which will be recovered)
82+
missingChunkIndex := 2 // The third data chunk will be missing
83+
84+
// Store all chunks except the missing one
85+
mockStore := inmemchunkstore.New()
86+
for i := 0; i < totalShardCount; i++ {
87+
if i != missingChunkIndex {
88+
if err := mockStore.Put(ctx, chunks[i]); err != nil {
89+
t.Fatalf("Failed to store chunk %d: %v", i, err)
90+
}
91+
}
92+
}
93+
94+
netFetcher := newMockNetworkFetcher(addresses, addresses[missingChunkIndex])
95+
96+
// Create a decoder config
97+
config := getter.Config{
98+
Strategy: getter.RACE,
99+
Logger: log.Noop,
100+
}
101+
102+
j := joiner.NewDecoderCache(netFetcher, mockStore, config)
103+
104+
// Step 1: Initializing decoder and triggering recovery
105+
decoder := j.GetOrCreate(addresses, dataShardCount)
106+
if decoder == nil {
107+
t.Fatal("Failed to create decoder")
108+
}
109+
110+
// Verify we can now fetch the previously missing chunk through recovery
111+
recoveredChunk, err := decoder.Get(ctx, addresses[missingChunkIndex])
112+
if err != nil {
113+
t.Fatalf("Failed to recover missing chunk: %v", err)
114+
}
115+
// Verify the recovered chunk has the correct content
116+
if !recoveredChunk.Address().Equal(addresses[missingChunkIndex]) {
117+
t.Fatalf("Recovered chunk has incorrect address")
118+
}
119+
// Check if the recovered chunk is now in the store
120+
_, err = mockStore.Get(ctx, addresses[missingChunkIndex])
121+
if err != nil {
122+
t.Fatalf("Recovered chunk not saved to store: %v", err)
123+
}
124+
125+
// Step 2: The original decoder should be automatically nulled after successful recovery
126+
// This is an internal state check, we can't directly test it but we can verify that
127+
// we can still access the chunks
128+
129+
// Sanity check - verify we can still fetch chunks through the cache
130+
for i := 0; i < dataShardCount; i++ {
131+
_, err := decoder.Get(ctx, addresses[i])
132+
if err != nil {
133+
t.Fatalf("Failed to get chunk %d after recovery: %v", i, err)
134+
}
135+
}
136+
137+
// Step 3: Testing ReDecoder fallback
138+
newDecoder := j.GetOrCreate(addresses, dataShardCount)
139+
if newDecoder == nil {
140+
t.Fatal("Failed to create ReDecoder")
141+
}
142+
143+
// Verify all chunks can be fetched through the ReDecoder
144+
for i := 0; i < dataShardCount; i++ {
145+
_, err := newDecoder.Get(ctx, addresses[i])
146+
if err != nil {
147+
t.Fatalf("Failed to get chunk %d through ReDecoder: %v", i, err)
148+
}
149+
}
150+
151+
// Verify that we can also access the first missing chunk - now from the store
152+
// This would be using the local store and not network or recovery mechanisms
153+
retrievedChunk, err := newDecoder.Get(ctx, addresses[missingChunkIndex])
154+
if err != nil {
155+
t.Fatalf("Failed to retrieve previously recovered chunk: %v", err)
156+
}
157+
158+
if !retrievedChunk.Address().Equal(addresses[missingChunkIndex]) {
159+
t.Fatalf("Retrieved chunk has incorrect address")
160+
}
161+
}
162+
163+
// mockNetworkFetcher is a mock implementation of storage.Getter for testing.
// It simulates network retrieval: every address succeeds with synthetic data
// except one designated "missing" address, which returns storage.ErrNotFound.
type mockNetworkFetcher struct {
	// allAddresses holds every shard address handed to the fetcher.
	// NOTE(review): not read by the Get method visible in this file — confirm
	// it is needed, or that another method uses it.
	allAddresses []swarm.Address
	// missingAddr is the single address for which Get reports
	// storage.ErrNotFound, forcing callers onto the recovery path.
	missingAddr swarm.Address
}
168+
169+
// newMockNetworkFetcher creates a new mock fetcher that will return ErrNotFound for specific addresses
170+
func newMockNetworkFetcher(allAddrs []swarm.Address, missingAddr swarm.Address) *mockNetworkFetcher {
171+
return &mockNetworkFetcher{
172+
allAddresses: allAddrs,
173+
missingAddr: missingAddr,
174+
}
175+
}
176+
177+
// Get implements storage.Getter interface
178+
func (m *mockNetworkFetcher) Get(ctx context.Context, addr swarm.Address) (swarm.Chunk, error) {
179+
// Simulate network fetch - fail for the missing chunk
180+
if addr.Equal(m.missingAddr) {
181+
return nil, storage.ErrNotFound
182+
}
183+
184+
// Simulate successful fetch for other addresses
185+
data := make([]byte, swarm.ChunkSize)
186+
copy(data, []byte("test-data-"+addr.String()))
187+
return swarm.NewChunk(addr, data), nil
188+
}

0 commit comments

Comments
 (0)