|
| 1 | +package checkpointsync |
| 2 | + |
| 3 | +import ( |
| 4 | + "context" |
| 5 | + |
| 6 | + "github.com/libp2p/go-libp2p/core" |
| 7 | + |
| 8 | + "github.com/oasisprotocol/oasis-core/go/common" |
| 9 | + "github.com/oasisprotocol/oasis-core/go/common/crypto/hash" |
| 10 | + "github.com/oasisprotocol/oasis-core/go/p2p/protocol" |
| 11 | + "github.com/oasisprotocol/oasis-core/go/p2p/rpc" |
| 12 | + "github.com/oasisprotocol/oasis-core/go/storage/mkvs/checkpoint" |
| 13 | + "github.com/oasisprotocol/oasis-core/go/worker/storage/p2p/sync" |
| 14 | +) |
| 15 | + |
const (
	// minProtocolPeers is the minimum number of peers from the registry we want to have connected
	// for the checkpoint sync protocol.
	minProtocolPeers = 5

	// totalProtocolPeers is the total number of peers we want to have connected for the
	// checkpoint sync protocol.
	totalProtocolPeers = 10
)
| 24 | + |
// Client is a checkpoint sync protocol client.
type Client interface {
	// GetCheckpoints returns a list of checkpoint metadata for all known checkpoints,
	// aggregated and deduplicated across the queried peers.
	GetCheckpoints(ctx context.Context, request *GetCheckpointsRequest) ([]*Checkpoint, error)

	// GetCheckpointChunk requests a specific checkpoint chunk. If cp is non-nil, only
	// peers that advertised the given checkpoint are queried.
	GetCheckpointChunk(
		ctx context.Context,
		request *GetCheckpointChunkRequest,
		cp *Checkpoint,
	) (*GetCheckpointChunkResponse, rpc.PeerFeedback, error)
}
| 37 | + |
// Checkpoint contains checkpoint metadata together with peer information.
type Checkpoint struct {
	*checkpoint.Metadata

	// Peers are the feedback structures of all the peers that have advertised this checkpoint.
	Peers []rpc.PeerFeedback
}
| 45 | + |
// client implements the Client interface on top of a protocol RPC client and two
// peer managers: one for the checkpoint sync protocol and one for the legacy
// storage sync protocol fallback.
type client struct {
	rc rpc.Client
	// mgr tracks peers supporting the checkpoint sync protocol (preferred).
	mgr rpc.PeerManager
	// fallbackMgr tracks peers supporting the legacy storage sync protocol.
	fallbackMgr rpc.PeerManager
}
| 51 | + |
| 52 | +func (c *client) GetCheckpoints(ctx context.Context, request *GetCheckpointsRequest) ([]*Checkpoint, error) { |
| 53 | + var rsp GetCheckpointsResponse |
| 54 | + rsps, pfs, err := c.rc.CallMulti(ctx, c.getBestPeers(), MethodGetCheckpoints, request, rsp) |
| 55 | + if err != nil { |
| 56 | + return nil, err |
| 57 | + } |
| 58 | + |
| 59 | + // Combine deduplicated results into a single result. |
| 60 | + var checkpoints []*Checkpoint |
| 61 | + cps := make(map[hash.Hash]*Checkpoint) |
| 62 | + for i, peerRsp := range rsps { |
| 63 | + peerCps := peerRsp.(*GetCheckpointsResponse).Checkpoints |
| 64 | + |
| 65 | + for _, cpMeta := range peerCps { |
| 66 | + h := cpMeta.EncodedHash() |
| 67 | + cp := cps[h] |
| 68 | + if cp == nil { |
| 69 | + cp = &Checkpoint{ |
| 70 | + Metadata: cpMeta, |
| 71 | + } |
| 72 | + cps[h] = cp |
| 73 | + checkpoints = append(checkpoints, cp) |
| 74 | + } |
| 75 | + cp.Peers = append(cp.Peers, pfs[i]) |
| 76 | + } |
| 77 | + |
| 78 | + // Record success for a peer if it returned at least one checkpoint. |
| 79 | + if len(peerCps) > 0 { |
| 80 | + pfs[i].RecordSuccess() |
| 81 | + } |
| 82 | + } |
| 83 | + return checkpoints, nil |
| 84 | +} |
| 85 | + |
| 86 | +func (c *client) GetCheckpointChunk( |
| 87 | + ctx context.Context, |
| 88 | + request *GetCheckpointChunkRequest, |
| 89 | + cp *Checkpoint, |
| 90 | +) (*GetCheckpointChunkResponse, rpc.PeerFeedback, error) { |
| 91 | + var opts []rpc.BestPeersOption |
| 92 | + // When a checkpoint is passed, we limit requests to only those peers that actually advertised |
| 93 | + // having the checkpoint in question to avoid needless requests. |
| 94 | + if cp != nil { |
| 95 | + peers := make([]core.PeerID, 0, len(cp.Peers)) |
| 96 | + for _, pf := range cp.Peers { |
| 97 | + peers = append(peers, pf.PeerID()) |
| 98 | + } |
| 99 | + opts = append(opts, rpc.WithLimitPeers(peers)) |
| 100 | + } |
| 101 | + |
| 102 | + var rsp GetCheckpointChunkResponse |
| 103 | + peers := c.getBestPeers(opts...) |
| 104 | + pf, err := c.rc.CallOne(ctx, peers, MethodGetCheckpointChunk, request, &rsp, |
| 105 | + rpc.WithMaxPeerResponseTime(MaxGetCheckpointChunkResponseTime), |
| 106 | + ) |
| 107 | + if err != nil { |
| 108 | + return nil, nil, err |
| 109 | + } |
| 110 | + return &rsp, pf, nil |
| 111 | +} |
| 112 | + |
| 113 | +func (c *client) getBestPeers(opts ...rpc.BestPeersOption) []core.PeerID { |
| 114 | + return append(c.mgr.GetBestPeers(opts...), c.fallbackMgr.GetBestPeers(opts...)...) |
| 115 | +} |
| 116 | + |
| 117 | +// NewClient creates a new checkpoint sync protocol client. |
| 118 | +// |
| 119 | +// Previously, it was part of the storage sync protocol. To enable seamless rolling |
| 120 | +// upgrades of the network, this client has a fallback to the old legacy protocol. |
| 121 | +// The new protocol is prioritized. |
| 122 | +// |
| 123 | +// Warning: This client only registers the checkpoint sync protocol with the P2P |
| 124 | +// service. To enable advertisement of the legacy protocol, it must be registered |
| 125 | +// separately. |
| 126 | +func NewClient(p2p rpc.P2P, chainContext string, runtimeID common.Namespace) Client { |
| 127 | + pid := protocol.NewRuntimeProtocolID(chainContext, runtimeID, CheckpointSyncProtocolID, CheckpointSyncProtocolVersion) |
| 128 | + falbackPid := sync.GetStorageSyncProtocolID(chainContext, runtimeID) |
| 129 | + rc := rpc.NewClient(p2p.Host(), pid, falbackPid) |
| 130 | + mgr := rpc.NewPeerManager(p2p, pid) |
| 131 | + rc.RegisterListener(mgr) |
| 132 | + |
| 133 | + // Fallback protocol requires a separate manager to manage peers that also support legacy protocol. |
| 134 | + fallbackMgr := rpc.NewPeerManager(p2p, falbackPid) |
| 135 | + rc.RegisterListener(fallbackMgr) |
| 136 | + |
| 137 | + p2p.RegisterProtocol(pid, minProtocolPeers, totalProtocolPeers) |
| 138 | + |
| 139 | + return &client{ |
| 140 | + rc: rc, |
| 141 | + mgr: mgr, |
| 142 | + fallbackMgr: fallbackMgr, |
| 143 | + } |
| 144 | +} |
0 commit comments