package buffered

import (
	"errors"
	"sync"

	"github.com/ipfs/go-datastore"
	"github.com/ipfs/go-dsqueue"
	logging "github.com/ipfs/go-log/v2"
	"github.com/libp2p/go-libp2p-kad-dht/provider"
	"github.com/libp2p/go-libp2p-kad-dht/provider/internal"
	mh "github.com/multiformats/go-multihash"
)

var logger = logging.Logger(provider.LoggerName)

const (
	// provideOnceOp represents a one-time provide operation.
	provideOnceOp byte = iota
	// startProvidingOp represents starting continuous providing.
	startProvidingOp
	// forceStartProvidingOp represents forcefully starting providing (overrides existing).
	forceStartProvidingOp
	// stopProvidingOp represents stopping providing.
	stopProvidingOp
	// lastOp is used for array sizing.
	lastOp
)

var _ internal.Provider = (*SweepingProvider)(nil)

// SweepingProvider implements a buffered provider that queues operations and
// processes them asynchronously in batches.
type SweepingProvider struct {
	closeOnce sync.Once
	done      chan struct{}
	closed    chan struct{}
	provider  internal.Provider
	queue     *dsqueue.DSQueue
	batchSize int
}

// New creates a new SweepingProvider that wraps the given provider with
// buffering capabilities. Operations are queued and processed asynchronously
// in batches for improved performance.
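//
// A minimal usage sketch (assuming dssync is github.com/ipfs/go-datastore/sync,
// underlying is an existing internal.Provider implementation, and someMultihash
// is a multihash the caller already holds; all three names are illustrative):
//
//	ds := dssync.MutexWrap(datastore.NewMapDatastore())
//	p := New(underlying, ds)
//	defer p.Close()
//	_ = p.StartProviding(false, someMultihash)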
func New(prov internal.Provider, ds datastore.Batching, opts ...Option) *SweepingProvider {
	cfg := getOpts(opts)
	s := &SweepingProvider{
		done:   make(chan struct{}),
		closed: make(chan struct{}),

		provider: prov,
		queue: dsqueue.New(ds, cfg.dsName,
			dsqueue.WithDedupCacheSize(0), // disable deduplication
			dsqueue.WithIdleWriteTime(cfg.idleWriteTime),
		),
		batchSize: cfg.batchSize,
	}
	go s.worker()
	return s
}

// Close stops the provider and releases all resources.
//
// It waits for the worker goroutine to finish processing in-flight operations
// and closes the underlying provider. The queue's current state is persisted
// to the datastore.
func (s *SweepingProvider) Close() error {
	var err error
	s.closeOnce.Do(func() {
		close(s.closed)
		err = errors.Join(s.queue.Close(), s.provider.Close())
		<-s.done
	})
	return err
}

// toBytes serializes an operation and multihash into a byte slice for storage.
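// The layout is the operation byte followed by the raw multihash bytes, e.g.
// toBytes(startProvidingOp, h) yields []byte{0x01, h[0], h[1], ...}.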
func toBytes(op byte, key mh.Multihash) []byte {
	return append([]byte{op}, key...)
}

// fromBytes deserializes a byte slice back into an operation and multihash.
func fromBytes(data []byte) (byte, mh.Multihash, error) {
	if len(data) == 0 {
		// Guard against empty or corrupted queue entries.
		return 0, nil, errors.New("empty queue entry")
	}
	op := data[0]
	h, err := mh.Cast(data[1:])
	return op, h, err
}

// getOperations processes a batch of dequeued operations and groups them by
// type.
//
// It discards multihashes from the `StopProviding` operation if
// `StartProviding` was called after `StopProviding` for the same multihash.
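//
// For example, if a single batch contains StopProviding(h) followed by
// StartProviding(h), h is only returned under startProvidingOp and is dropped
// from stopProvidingOp.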
func getOperations(dequeued [][]byte) ([][]mh.Multihash, error) {
	ops := [lastOp][]mh.Multihash{}
	stopProv := make(map[string]struct{})

	for _, bs := range dequeued {
		op, h, err := fromBytes(bs)
		if err != nil {
			return nil, err
		}
		switch op {
		case provideOnceOp:
			ops[provideOnceOp] = append(ops[provideOnceOp], h)
		case startProvidingOp, forceStartProvidingOp:
			delete(stopProv, string(h))
			ops[op] = append(ops[op], h)
		case stopProvidingOp:
			stopProv[string(h)] = struct{}{}
		}
	}
	for hstr := range stopProv {
		ops[stopProvidingOp] = append(ops[stopProvidingOp], mh.Multihash(hstr))
	}
	return ops[:], nil
}

// worker processes operations from the queue in batches.
// It runs in a separate goroutine and continues until the provider is closed.
func (s *SweepingProvider) worker() {
	defer close(s.done)
	for {
		select {
		case <-s.closed:
			return
		default:
		}

		res, err := s.queue.GetN(s.batchSize)
		if err != nil {
			logger.Warnf("BufferedSweepingProvider unable to dequeue: %v", err)
			continue
		}
		ops, err := getOperations(res)
		if err != nil {
			logger.Warnf("BufferedSweepingProvider unable to parse dequeued item: %v", err)
			continue
		}

		// Process `StartProviding` (force=true) ops first, so that if
		// `StartProviding` (force=false) is called after, there is no need to
		// enqueue the multihash a second time to the provide queue.
		err = s.provider.StartProviding(true, ops[forceStartProvidingOp]...)
		if err != nil {
			logger.Warnf("BufferedSweepingProvider unable to start providing (force): %v", err)
		}
		err = s.provider.StartProviding(false, ops[startProvidingOp]...)
		if err != nil {
			logger.Warnf("BufferedSweepingProvider unable to start providing: %v", err)
		}
		err = s.provider.ProvideOnce(ops[provideOnceOp]...)
		if err != nil {
			logger.Warnf("BufferedSweepingProvider unable to provide once: %v", err)
		}
		// Process `StopProviding` last, so that multihashes that were both
		// provided and stopped in the same batch are still provided once.
		// Don't `StopProviding` multihashes for which `StartProviding` was
		// called after `StopProviding`.
		err = s.provider.StopProviding(ops[stopProvidingOp]...)
		if err != nil {
			logger.Warnf("BufferedSweepingProvider unable to stop providing: %v", err)
		}
	}
}

// enqueue adds operations to the queue for asynchronous processing.
func (s *SweepingProvider) enqueue(op byte, keys ...mh.Multihash) error {
	for _, h := range keys {
		if err := s.queue.Put(toBytes(op, h)); err != nil {
			return err
		}
	}
	return nil
}

// ProvideOnce enqueues multihashes for which the provider will send provider
// records out to the DHT swarm only once. It does NOT take responsibility for
// reproviding these keys.
//
// Returns immediately after enqueuing the keys; the actual provide operation
// happens asynchronously. Returns an error if the multihashes couldn't be
// enqueued.
func (s *SweepingProvider) ProvideOnce(keys ...mh.Multihash) error {
	return s.enqueue(provideOnceOp, keys...)
}

// StartProviding adds the supplied keys to the queue of keys that will be
// provided to the DHT swarm unless they were already provided in the past. The
// keys will be periodically reprovided until StopProviding is called for the
// same keys or the keys are removed from the Keystore.
//
// If force is true, the keys are provided to the DHT swarm regardless of
// whether they were already being reprovided in the past.
//
// Returns immediately after enqueuing the keys; the actual provide operation
// happens asynchronously. Returns an error if the multihashes couldn't be
// enqueued.
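//
// A minimal usage sketch (h0 and h1 are multihashes the caller already holds;
// the names are illustrative):
//
//	_ = p.StartProviding(false, h0) // provide h0 only if not already provided
//	_ = p.StartProviding(true, h1)  // provide h1 again even if already provided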
func (s *SweepingProvider) StartProviding(force bool, keys ...mh.Multihash) error {
	op := startProvidingOp
	if force {
		op = forceStartProvidingOp
	}
	return s.enqueue(op, keys...)
}

// StopProviding adds the supplied multihashes to the BufferedSweepingProvider
// queue, to stop reproviding the given keys to the DHT swarm.
//
// The node stops being referred to as a provider once the provider records in
// the DHT swarm expire.
//
// Returns immediately after enqueuing the keys; the actual stop operation
// happens asynchronously. Returns an error if the multihashes couldn't be
// enqueued.
func (s *SweepingProvider) StopProviding(keys ...mh.Multihash) error {
	return s.enqueue(stopProvidingOp, keys...)
}

// Clear clears all the keys from the underlying provider's provide queue and
// returns the number of keys that were cleared. Operations still buffered in
// the local datastore-backed queue are not affected.
//
// The keys are not deleted from the keystore, so they will continue to be
// reprovided as scheduled.
func (s *SweepingProvider) Clear() int {
	return s.provider.Clear()
}

// RefreshSchedule scans the KeyStore for any keys that are not currently
// scheduled for reproviding. If such keys are found, it schedules their
// associated keyspace region to be reprovided.
//
// This function doesn't remove prefixes that have no keys from the schedule.
// This is done automatically during the reprovide operation if a region has no
// keys.
//
// Returns an error if the provider is closed or if the node is currently
// Offline (either never bootstrapped, or disconnected for longer than
// `OfflineDelay`). The schedule depends on the network size, hence recent
// network connectivity is essential.
func (s *SweepingProvider) RefreshSchedule() error {
	return s.provider.RefreshSchedule()
}