
Commit 8a3055c

Merge pull request #6749 from onflow/leo/add-testcase-for-offchain-evm-backward-compatibilities
Add testcase for offchain evm backward compatibilities
2 parents a164070 + 27c0f3a commit 8a3055c

File tree

2 files changed: +276 −26 lines changed

fvm/evm/offchain/utils/collection_test.go

Lines changed: 254 additions & 25 deletions
@@ -2,45 +2,205 @@ package utils_test
 
 import (
 	"bufio"
+	"encoding/gob"
 	"encoding/hex"
 	"encoding/json"
+	"fmt"
 	"os"
+	"path/filepath"
 	"strings"
 	"testing"
 
-	"github.com/onflow/cadence"
-	"github.com/onflow/cadence/encoding/ccf"
 	"github.com/rs/zerolog"
+	"github.com/rs/zerolog/log"
 	"github.com/stretchr/testify/require"
 
+	"github.com/onflow/cadence"
+	"github.com/onflow/cadence/encoding/ccf"
+
 	"github.com/onflow/flow-go/fvm/environment"
 	"github.com/onflow/flow-go/fvm/evm"
 	"github.com/onflow/flow-go/fvm/evm/events"
 	"github.com/onflow/flow-go/fvm/evm/offchain/blocks"
+	"github.com/onflow/flow-go/fvm/evm/offchain/storage"
 	"github.com/onflow/flow-go/fvm/evm/offchain/sync"
 	"github.com/onflow/flow-go/fvm/evm/offchain/utils"
 	. "github.com/onflow/flow-go/fvm/evm/testutils"
-	"github.com/onflow/flow-go/fvm/evm/types"
 	"github.com/onflow/flow-go/model/flow"
 )
 
-func ReplyingCollectionFromScratch(
+func TestTestnetBackwardCompatibility(t *testing.T) {
+	t.Skip("TIME CONSUMING TESTS. Enable the tests with the events files saved in local")
+	// how to run this test
+	// Note: this is a time consuming test, so please run it locally
+	//
+	// 1) run the following CLI to get the events files across different sporks
+
+	// flow events get A.8c5303eaa26202d6.EVM.TransactionExecuted A.8c5303eaa26202d6.EVM.BlockExecuted
+	// --start 211176670 --end 211176770 --network testnet --host access-001.devnet51.nodes.onflow.org:9000
+	// > ~/Downloads/events_devnet51_1.jsonl
+	// ...
+	//
+	// 2) comment the above t.Skip, and update the events file paths and checkpoint dir
+	// to run the tests
+	BackwardCompatibleSinceEVMGenesisBlock(
+		t, flow.Testnet, []string{
+			"~/Downloads/events_devnet51_1.jsonl",
+			"~/Downloads/events_devnet51_2.jsonl",
+		},
+		"~/Downloads/",
+		0,
+	)
+}
+
+// BackwardCompatibleSinceEVMGenesisBlock ensures that the offchain package
+// can read EVM events from the provided file paths, replay blocks starting from
+// the EVM genesis block, and derive a consistent state matching the latest on-chain EVM state.
+//
+// The parameter `eventsFilePaths` is a list of file paths containing ordered EVM events in JSONL format.
+// These EVM event files can be generated using the Flow CLI query command, for example:
+//
+// flow events get A.8c5303eaa26202d6.EVM.TransactionExecuted A.8c5303eaa26202d6.EVM.BlockExecuted
+//
+// --start 211176670 --end 211176770 --network testnet --host access-001.devnet51.nodes.onflow.org:9000
+//
+// During the replay process, it will generate `values_<height>.gob` and
+// `allocators_<height>.gob` checkpoint files for each height. If these checkpoint files exist,
+// the corresponding event JSON files will be skipped to optimize replay.
+func BackwardCompatibleSinceEVMGenesisBlock(
 	t *testing.T,
 	chainID flow.ChainID,
-	storage types.BackendStorage,
-	filePath string,
+	eventsFilePaths []string, // ordered EVM events in JSONL format
+	checkpointDir string,
+	checkpointEndHeight uint64, // EVM height of an EVM state that a checkpoint was created for
 ) {
+	// ensure that the event files list is not empty
+	require.True(t, len(eventsFilePaths) > 0)
+
+	log.Info().Msgf("replaying EVM events from %v to %v, with checkpoints in %s, and checkpointEndHeight: %v",
+		eventsFilePaths[0], eventsFilePaths[len(eventsFilePaths)-1],
+		checkpointDir, checkpointEndHeight)
+
+	store, checkpointEndHeightOrZero := initStorageWithCheckpoints(t, chainID, checkpointDir, checkpointEndHeight)
 
+	// the next height to replay events from
+	nextHeight := checkpointEndHeightOrZero + 1
+
+	// replay each events file
+	for _, eventsFilePath := range eventsFilePaths {
+		log.Info().Msgf("replaying events from %v, nextHeight: %v", eventsFilePath, nextHeight)
+
+		checkpointEndHeight := replayEvents(t, chainID, store, eventsFilePath, checkpointDir, nextHeight)
+		nextHeight = checkpointEndHeight + 1
+	}
+
+	log.Info().
+		Msgf("successfully replayed all events and state changes are consistent with onchain state change. nextHeight: %v", nextHeight)
+}
+
+func initStorageWithCheckpoints(t *testing.T, chainID flow.ChainID, checkpointDir string, checkpointEndHeight uint64) (
+	*TestValueStore, uint64,
+) {
 	rootAddr := evm.StorageAccountAddress(chainID)
 
-	// setup the rootAddress account
-	as := environment.NewAccountStatus()
-	err := storage.SetValue(rootAddr[:], []byte(flow.AccountStatusKey), as.ToBytes())
+	// if there is no checkpoint, create an empty store and initialize the account status,
+	// return 0 as the genesis height
+	if checkpointEndHeight == 0 {
+		store := GetSimpleValueStore()
+		as := environment.NewAccountStatus()
+		require.NoError(t, store.SetValue(rootAddr[:], []byte(flow.AccountStatusKey), as.ToBytes()))
+
+		return store, 0
+	}
+
+	valueFileName, allocatorFileName := checkpointFileNamesByEndHeight(checkpointDir, checkpointEndHeight)
+	values, err := deserialize(valueFileName)
 	require.NoError(t, err)
+	allocators, err := deserializeAllocator(allocatorFileName)
+	require.NoError(t, err)
+	store := GetSimpleValueStorePopulated(values, allocators)
+	return store, checkpointEndHeight
+}
+
+func replayEvents(
+	t *testing.T,
+	chainID flow.ChainID,
+	store *TestValueStore, eventsFilePath string, checkpointDir string, initialNextHeight uint64) uint64 {
 
-	bp, err := blocks.NewBasicProvider(chainID, storage, rootAddr)
+	rootAddr := evm.StorageAccountAddress(chainID)
+
+	bpStorage := storage.NewEphemeralStorage(store)
+	bp, err := blocks.NewBasicProvider(chainID, bpStorage, rootAddr)
 	require.NoError(t, err)
 
+	nextHeight := initialNextHeight
+
+	scanEventFilesAndRun(t, eventsFilePath,
+		func(blockEventPayload *events.BlockEventPayload, txEvents []events.TransactionEventPayload) error {
+			if blockEventPayload.Height != nextHeight {
+				return fmt.Errorf(
+					"expected height for next block event to be %v, but got %v",
+					nextHeight, blockEventPayload.Height)
+			}
+
+			err = bp.OnBlockReceived(blockEventPayload)
+			require.NoError(t, err)
+
+			sp := NewTestStorageProvider(store, blockEventPayload.Height)
+			cr := sync.NewReplayer(chainID, rootAddr, sp, bp, zerolog.Logger{}, nil, true)
+			res, err := cr.ReplayBlock(txEvents, blockEventPayload)
+			require.NoError(t, err)
+
+			// commit all changes
+			for k, v := range res.StorageRegisterUpdates() {
+				err = store.SetValue([]byte(k.Owner), []byte(k.Key), v)
+				require.NoError(t, err)
+			}
+
+			err = bp.OnBlockExecuted(blockEventPayload.Height, res)
+			require.NoError(t, err)
+
+			// commit all block hash list changes
+			for k, v := range bpStorage.StorageRegisterUpdates() {
+				err = store.SetValue([]byte(k.Owner), []byte(k.Key), v)
+				require.NoError(t, err)
+			}
+
+			// verify the block height is sequential without gap
+			nextHeight++
+
+			return nil
+		})
+
+	checkpointEndHeight := nextHeight - 1
+
+	log.Info().Msgf("finished replaying events from %v to %v, creating checkpoint", initialNextHeight, checkpointEndHeight)
+	valuesFile, allocatorsFile := dumpCheckpoint(t, store, checkpointDir, checkpointEndHeight)
+	log.Info().Msgf("checkpoint created: %v, %v", valuesFile, allocatorsFile)
+
+	return checkpointEndHeight
+}
+
+func checkpointFileNamesByEndHeight(dir string, endHeight uint64) (string, string) {
+	return filepath.Join(dir, fmt.Sprintf("values_%d.gob", endHeight)),
+		filepath.Join(dir, fmt.Sprintf("allocators_%d.gob", endHeight))
+}
+
+func dumpCheckpoint(t *testing.T, store *TestValueStore, dir string, checkpointEndHeight uint64) (string, string) {
+	valuesFileName, allocatorsFileName := checkpointFileNamesByEndHeight(dir, checkpointEndHeight)
+	values, allocators := store.Dump()
+
+	require.NoError(t, serialize(valuesFileName, values))
+	require.NoError(t, serializeAllocator(allocatorsFileName, allocators))
+	return valuesFileName, allocatorsFileName
+}
+
+// scanEventFilesAndRun scans a JSONL events file and invokes the handler once per block,
+// passing the block event payload together with that block's transaction event payloads.
+func scanEventFilesAndRun(
+	t *testing.T,
+	filePath string,
+	handler func(*events.BlockEventPayload, []events.TransactionEventPayload) error,
+) {
 	file, err := os.Open(filePath)
 	require.NoError(t, err)
 	defer file.Close()
@@ -65,21 +225,8 @@ func ReplyingCollectionFromScratch(
 			blockEventPayload, err := events.DecodeBlockEventPayload(ev.(cadence.Event))
 			require.NoError(t, err)
 
-			err = bp.OnBlockReceived(blockEventPayload)
-			require.NoError(t, err)
-
-			sp := NewTestStorageProvider(storage, blockEventPayload.Height)
-			cr := sync.NewReplayer(chainID, rootAddr, sp, bp, zerolog.Logger{}, nil, true)
-			res, err := cr.ReplayBlock(txEvents, blockEventPayload)
-			require.NoError(t, err)
-			// commit all changes
-			for k, v := range res.StorageRegisterUpdates() {
-				err = storage.SetValue([]byte(k.Owner), []byte(k.Key), v)
-				require.NoError(t, err)
-			}
-
-			err = bp.OnBlockExecuted(blockEventPayload.Height, res)
-			require.NoError(t, err)
+			require.NoError(t, handler(blockEventPayload, txEvents), fmt.Sprintf("fail to handle block at height %d",
+				blockEventPayload.Height))
 
 			txEvents = make([]events.TransactionEventPayload, 0)
 			continue
@@ -97,3 +244,85 @@ func ReplyingCollectionFromScratch(
 		t.Fatal(err)
 	}
 }
+
+// Serialize function: saves map data to a file
+func serialize(filename string, data map[string][]byte) error {
+	// Create a file to save data
+	file, err := os.Create(filename)
+	if err != nil {
+		return err
+	}
+	defer file.Close()
+
+	// Use gob to encode data
+	encoder := gob.NewEncoder(file)
+	err = encoder.Encode(data)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// Deserialize function: reads map data from a file
+func deserialize(filename string) (map[string][]byte, error) {
+	// Open the file for reading
+	file, err := os.Open(filename)
+	if err != nil {
+		return nil, err
+	}
+	defer file.Close()
+
+	// Prepare the map to store decoded data
+	var data map[string][]byte
+
+	// Use gob to decode data
+	decoder := gob.NewDecoder(file)
+	err = decoder.Decode(&data)
+	if err != nil {
+		return nil, err
+	}
+
+	return data, nil
+}
+
+// Serialize function: saves map data to a file
+func serializeAllocator(filename string, data map[string]uint64) error {
+	// Create a file to save data
+	file, err := os.Create(filename)
+	if err != nil {
+		return err
+	}
+	defer file.Close()
+
+	// Use gob to encode data
+	encoder := gob.NewEncoder(file)
+	err = encoder.Encode(data)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// Deserialize function: reads map data from a file
+func deserializeAllocator(filename string) (map[string]uint64, error) {
+	// Open the file for reading
+	file, err := os.Open(filename)
+	if err != nil {
+		return nil, err
+	}
+	defer file.Close()
+
+	// Prepare the map to store decoded data
+	var data map[string]uint64
+
+	// Use gob to decode data
+	decoder := gob.NewDecoder(file)
+	err = decoder.Decode(&data)
+	if err != nil {
+		return nil, err
+	}
+
+	return data, nil
+}
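
Note (example, not part of this commit): the checkpoint files written by dumpCheckpoint are plain gob-encoded maps, so they can also be produced or inspected outside the test. Below is a minimal standalone sketch of that round trip, assuming a hypothetical /tmp/values_100.gob path and made-up register data; it mirrors what serialize() and deserialize() above do.

package main

import (
	"encoding/gob"
	"fmt"
	"os"
)

func main() {
	// hypothetical checkpoint path; the test would use values_<height>.gob inside checkpointDir
	path := "/tmp/values_100.gob"

	// write a small register map, mirroring serialize()
	values := map[string][]byte{"0102~storage": {0xCA, 0xFE}}
	f, err := os.Create(path)
	if err != nil {
		panic(err)
	}
	if err := gob.NewEncoder(f).Encode(values); err != nil {
		panic(err)
	}
	f.Close()

	// read it back, mirroring deserialize()
	f, err = os.Open(path)
	if err != nil {
		panic(err)
	}
	defer f.Close()
	var decoded map[string][]byte
	if err := gob.NewDecoder(f).Decode(&decoded); err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", decoded["0102~storage"]) // prints "cafe"
}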

fvm/evm/testutils/backend.go

Lines changed: 22 additions & 1 deletion
@@ -60,7 +60,7 @@ func ConvertToCadence(data []byte) []cadence.Value {
 }
 
 func fullKey(owner, key []byte) string {
-	return string(owner) + "~" + string(key)
+	return fmt.Sprintf("%x~%s", owner, key)
 }
 
 func GetSimpleValueStore() *TestValueStore {
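
Note (example, not part of this commit): fullKey now hex-encodes the register owner, presumably so the flat keys stay printable and unambiguous when the store is dumped into gob checkpoints. A minimal sketch of the resulting key format, with made-up owner and key bytes:

package main

import "fmt"

// same formatting as the new fullKey: hex-encoded owner, "~", raw key
func fullKey(owner, key []byte) string {
	return fmt.Sprintf("%x~%s", owner, key)
}

func main() {
	owner := []byte{0x01, 0x02}
	fmt.Println(fullKey(owner, []byte("storage"))) // prints "0102~storage"
}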
@@ -145,6 +145,19 @@ func GetSimpleValueStorePopulated(
 			// clone allocator
 			return GetSimpleValueStorePopulated(newData, newAllocator)
 		},
+
+		DumpFunc: func() (map[string][]byte, map[string]uint64) {
+			// clone data
+			newData := make(map[string][]byte)
+			for k, v := range data {
+				newData[k] = v
+			}
+			newAllocator := make(map[string]uint64)
+			for k, v := range allocator {
+				newAllocator[k] = v
+			}
+			return newData, newAllocator
+		},
 	}
 }
 
@@ -253,6 +266,7 @@ type TestValueStore struct {
 	TotalStorageItemsFunc func() int
 	ResetStatsFunc func()
 	CloneFunc func() *TestValueStore
+	DumpFunc func() (map[string][]byte, map[string]uint64)
 }
 
 var _ environment.ValueStore = &TestValueStore{}
@@ -327,6 +341,13 @@ func (vs *TestValueStore) Clone() *TestValueStore {
 	return vs.CloneFunc()
 }
 
+func (vs *TestValueStore) Dump() (map[string][]byte, map[string]uint64) {
+	if vs.DumpFunc == nil {
+		panic("method not set")
+	}
+	return vs.DumpFunc()
+}
+
 type testMeter struct {
 	meterComputation func(common.ComputationKind, uint) error
 	hasComputationCapacity func(common.ComputationKind, uint) bool
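
Note (example, not part of this commit): a minimal sketch of how the new Dump method can pair with GetSimpleValueStorePopulated to snapshot and restore a TestValueStore, which is the pattern dumpCheckpoint and initStorageWithCheckpoints rely on; the register contents here are made up.

package testutils_test

import (
	"testing"

	"github.com/stretchr/testify/require"

	. "github.com/onflow/flow-go/fvm/evm/testutils"
)

func TestDumpRoundTrip(t *testing.T) {
	store := GetSimpleValueStore()
	// write one made-up register
	require.NoError(t, store.SetValue([]byte{0x01}, []byte("key"), []byte("value")))

	// snapshot the current state (data and allocator maps)
	values, allocators := store.Dump()

	// rebuild a store from the snapshot and read the register back
	restored := GetSimpleValueStorePopulated(values, allocators)
	got, err := restored.GetValue([]byte{0x01}, []byte("key"))
	require.NoError(t, err)
	require.Equal(t, []byte("value"), got)
}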

0 commit comments