package runtime

import (
	"context"
	"fmt"
	"strconv"
	"time"

	"github.com/oasisprotocol/oasis-core/go/oasis-test-runner/env"
	"github.com/oasisprotocol/oasis-core/go/oasis-test-runner/log"
	"github.com/oasisprotocol/oasis-core/go/oasis-test-runner/oasis"
	"github.com/oasisprotocol/oasis-core/go/oasis-test-runner/scenario"
	"github.com/oasisprotocol/oasis-core/go/storage/mkvs/checkpoint"
)
// CheckpointSync tests the interoperability of the new checkpoint and diff
// sync p2p protocols with the legacy storage sync p2p protocol.
//
// The test checks that hosts which serve both protocols are compatible
// with clients that fall back to both.
//
// To simulate a legacy host, comment out the fallback to the new protocols
// inside the storage committee worker and disable registration of the new
// checkpoint and diff sync protocols. This is not tested automatically, as
// doing so would further pollute the existing code and require additional
// config flags.
var CheckpointSync scenario.Scenario = newCheckpointSyncImpl()

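// checkpointSync is the checkpoint sync scenario implementation.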
type checkpointSync struct {
	Scenario
}

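// newCheckpointSyncImpl creates the checkpoint sync scenario, configured with
// a test client that runs the simple key/value workload.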
func newCheckpointSyncImpl() scenario.Scenario {
	return &checkpointSync{
		Scenario: *NewScenario(
			"checkpoint-sync",
			NewTestClient().WithScenario(SimpleScenario),
		),
	}
}

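// Clone implements scenario.Scenario.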
func (sc *checkpointSync) Clone() scenario.Scenario {
	return &checkpointSync{
		Scenario: *sc.Scenario.Clone().(*Scenario),
	}
}

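// Fixture implements scenario.Scenario. It extends the base fixture with
// aggressive checkpointing and adds a late-starting compute worker that is
// expected to sync using checkpoints.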
func (sc *checkpointSync) Fixture() (*oasis.NetworkFixture, error) {
	f, err := sc.Scenario.Fixture()
	if err != nil {
		return nil, err
	}

	// Make the first compute worker check for checkpoints more often.
	f.ComputeWorkers[0].CheckpointCheckInterval = time.Second
	// Configure the runtime for storage checkpointing.
	f.Runtimes[1].Storage.CheckpointInterval = 10
	f.Runtimes[1].Storage.CheckpointNumKept = 10
	f.Runtimes[1].Storage.CheckpointChunkSize = 1024
	// Serve both the legacy and the new protocols.
	for i := range f.ComputeWorkers {
		f.ComputeWorkers[i].LegacySyncServerDisabled = false
	}
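	// Add a compute worker that is not started automatically. It is started
	// late in Run, after checkpoints exist, and its logs are asserted to
	// contain evidence of a successful checkpoint sync.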
	f.ComputeWorkers = append(f.ComputeWorkers, oasis.ComputeWorkerFixture{
		NodeFixture: oasis.NodeFixture{
			NoAutoStart: true,
		},
		Entity:                1,
		Runtimes:              []int{1},
		CheckpointSyncEnabled: true,
		LogWatcherHandlerFactories: []log.WatcherHandlerFactory{
			oasis.LogAssertCheckpointSync(),
		},
	})

	return f, nil
}

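// Run implements scenario.Scenario.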
func (sc *checkpointSync) Run(ctx context.Context, _ *env.Env) error {
	if err := sc.Net.Start(); err != nil {
		return err
	}

	if err := sc.WaitForClientSync(ctx); err != nil {
		return fmt.Errorf("failed to wait for client sync: %w", err)
	}

	// Generate some more rounds to trigger checkpointing.
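	// With CheckpointInterval set to 10 rounds, 15 successful transactions
	// should be enough to cross at least one checkpoint boundary.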
	for i := 0; i < 15; i++ {
		sc.Logger.Info("submitting transaction to runtime", "seq", i)
		if _, err := sc.submitKeyValueRuntimeInsertTx(ctx, KeyValueRuntimeID, uint64(i), "checkpoint", strconv.Itoa(i), 0, 0, plaintextTxKind); err != nil {
			return err
		}
	}

	// Make sure that the first compute node created checkpoints.
	ctrl, err := oasis.NewController(sc.Net.ComputeWorkers()[0].SocketPath())
	if err != nil {
		return fmt.Errorf("failed to connect with the first compute node: %w", err)
	}
	cps, err := ctrl.Storage.GetCheckpoints(ctx, &checkpoint.GetCheckpointsRequest{Version: 1, Namespace: KeyValueRuntimeID})
	if err != nil {
		return fmt.Errorf("failed to get checkpoints: %w", err)
	}
	if len(cps) == 0 {
		return fmt.Errorf("the first compute node has not created any checkpoints")
	}

	// Start the late compute worker and check whether it syncs from a checkpoint.
	sc.Logger.Info("running late compute worker")
	lateWorker := sc.Net.ComputeWorkers()[len(sc.Net.ComputeWorkers())-1]
	if err = lateWorker.Start(); err != nil {
		return fmt.Errorf("failed to start late compute worker: %w", err)
	}
	if err = lateWorker.WaitReady(ctx); err != nil {
		return fmt.Errorf("failed to wait for late compute worker to become ready: %w", err)
	}

	// Wait a bit to give the logger in the node time to sync to disk.
	<-time.After(1 * time.Second)

	return sc.Net.CheckLogWatchers()
}