diff --git a/.cspell.json b/.cspell.json index ce02f35e1a..0d89ece356 100644 --- a/.cspell.json +++ b/.cspell.json @@ -7,5 +7,8 @@ "name": "project", "path": "./.github/dictionary.txt", "addWords": true - }] + }], + "ignorePaths": [ + "packages/gossipsub/**" + ] } diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 9842d4ab44..542da0cb74 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1 +1 @@ -{"packages/config":"1.1.14","packages/connection-encrypter-plaintext":"2.0.29","packages/connection-encrypter-tls":"2.2.7","packages/crypto":"5.1.8","packages/interface":"2.11.0","packages/interface-compliance-tests":"6.5.0","packages/interface-internal":"2.3.19","packages/kad-dht":"15.1.11","packages/keychain":"5.2.9","packages/libp2p":"2.10.0","packages/logger":"5.2.0","packages/metrics-opentelemetry":"1.0.21","packages/metrics-prometheus":"4.3.30","packages/metrics-simple":"1.3.16","packages/multistream-select":"6.0.29","packages/peer-collections":"6.0.35","packages/peer-discovery-bootstrap":"11.0.47","packages/peer-discovery-mdns":"11.0.47","packages/peer-id":"5.1.9","packages/peer-record":"8.0.35","packages/peer-store":"11.2.7","packages/pnet":"2.0.47","packages/protocol-autonat":"2.0.38","packages/protocol-dcutr":"2.0.38","packages/protocol-echo":"2.1.28","packages/protocol-fetch":"3.0.22","packages/protocol-identify":"3.0.39","packages/protocol-perf":"4.0.47","packages/protocol-ping":"2.0.37","packages/pubsub":"10.1.18","packages/pubsub-floodsub":"10.1.46","packages/record":"4.0.7","packages/stream-multiplexer-mplex":"11.0.47","packages/transport-circuit-relay-v2":"3.2.24","packages/transport-memory":"1.1.14","packages/transport-tcp":"10.1.19","packages/transport-webrtc":"5.2.24","packages/transport-websockets":"9.2.19","packages/transport-webtransport":"5.0.51","packages/upnp-nat":"3.1.22","packages/utils":"6.7.2","packages/protocol-autonat-v2":"1.0.1"} 
+{"packages/config":"1.1.14","packages/connection-encrypter-plaintext":"2.0.29","packages/connection-encrypter-tls":"2.2.7","packages/crypto":"5.1.8","packages/interface":"2.11.0","packages/interface-compliance-tests":"6.5.0","packages/interface-internal":"2.3.19","packages/kad-dht":"15.1.11","packages/keychain":"5.2.9","packages/libp2p":"2.10.0","packages/logger":"5.2.0","packages/metrics-opentelemetry":"1.0.21","packages/metrics-prometheus":"4.3.30","packages/metrics-simple":"1.3.16","packages/multistream-select":"6.0.29","packages/peer-collections":"6.0.35","packages/peer-discovery-bootstrap":"11.0.47","packages/peer-discovery-mdns":"11.0.47","packages/peer-id":"5.1.9","packages/peer-record":"8.0.35","packages/peer-store":"11.2.7","packages/pnet":"2.0.47","packages/protocol-autonat":"2.0.38","packages/protocol-dcutr":"2.0.38","packages/protocol-echo":"2.1.28","packages/protocol-fetch":"3.0.22","packages/protocol-identify":"3.0.39","packages/protocol-perf":"4.0.47","packages/protocol-ping":"2.0.37","packages/floodsub":"10.1.46","packages/record":"4.0.7","packages/stream-multiplexer-mplex":"11.0.47","packages/transport-circuit-relay-v2":"3.2.24","packages/transport-memory":"1.1.14","packages/transport-tcp":"10.1.19","packages/transport-webrtc":"5.2.24","packages/transport-websockets":"9.2.19","packages/transport-webtransport":"5.0.51","packages/upnp-nat":"3.1.22","packages/utils":"6.7.2","packages/protocol-autonat-v2":"1.0.1"} diff --git a/.release-please.json b/.release-please.json index 543832af56..7b2db49569 100644 --- a/.release-please.json +++ b/.release-please.json @@ -39,8 +39,7 @@ "packages/protocol-identify": {}, "packages/protocol-perf": {}, "packages/protocol-ping": {}, - "packages/pubsub": {}, - "packages/pubsub-floodsub": {}, + "packages/floodsub": {}, "packages/record": {}, "packages/stream-multiplexer-mplex": {}, "packages/transport-circuit-relay-v2": {}, diff --git a/README.md b/README.md index a78511816c..3201f41554 100644 --- a/README.md +++ 
b/README.md @@ -96,7 +96,7 @@ You can find multiple examples on the [examples repo](https://github.com/libp2p/ - [`/packages/protocol-echo`](https://github.com/libp2p/js-libp2p/tree/main/packages/protocol-echo) Implementation of an Echo protocol - [`/packages/protocol-perf`](https://github.com/libp2p/js-libp2p/tree/main/packages/protocol-perf) Implementation of the Perf protocol - [`/packages/pubsub`](https://github.com/libp2p/js-libp2p/tree/main/packages/pubsub) libp2p pubsub base class -- [`/packages/pubsub-floodsub`](https://github.com/libp2p/js-libp2p/tree/main/packages/pubsub-floodsub) libp2p-floodsub, also known as pubsub-flood or just dumbsub, this implementation of pubsub focused on delivering an API for Publish/Subscribe, but with no CastTree Forming (it just floods the network). +- [`/packages/floodsub`](https://github.com/libp2p/js-libp2p/tree/main/packages/floodsub) libp2p-floodsub, also known as pubsub-flood or just dumbsub, this implementation of pubsub focused on delivering an API for Publish/Subscribe, but with no CastTree Forming (it just floods the network). 
- [`/packages/record`](https://github.com/libp2p/js-libp2p/tree/main/packages/record) libp2p record implementation - [`/packages/stream-multiplexer-mplex`](https://github.com/libp2p/js-libp2p/tree/main/packages/stream-multiplexer-mplex) JavaScript implementation of - [`/packages/transport-tcp`](https://github.com/libp2p/js-libp2p/tree/main/packages/transport-tcp) A TCP transport for libp2p @@ -174,7 +174,7 @@ List of packages currently in existence for libp2p | [`@libp2p/peer-record`](//github.com/libp2p/js-libp2p/tree/main/packages/peer-record) | [![npm](https://img.shields.io/npm/v/%40libp2p%2Fpeer-record.svg?maxAge=86400&style=flat-square)](//npmjs.com/package/@libp2p/peer-record) | [![Deps](https://img.shields.io/librariesio/release/npm/%40libp2p%2Fpeer-record?logo=Libraries.io&logoColor=white&style=flat-square)](//libraries.io/npm/%40libp2p%2Fpeer-record) | [![GitHub CI](https://img.shields.io/github/actions/workflow/status/libp2p/js-libp2p/main.yml?branch=main&label=ci&style=flat-square)](//github.com/libp2p/js-libp2p/actions?query=branch%3Amain+workflow%3Aci+) | [![codecov](https://codecov.io/gh/libp2p/js-libp2p/tree/main/packages/peer-record/branch/main/graph/badge.svg?style=flat-square)](https://codecov.io/gh/libp2p/js-libp2p/tree/main/packages/peer-record) | | **pubsub** | | [`@chainsafe/libp2p-gossipsub`](//github.com/ChainSafe/js-libp2p-gossipsub) | [![npm](https://img.shields.io/npm/v/%40chainsafe%2Flibp2p-gossipsub.svg?maxAge=86400&style=flat-square)](//npmjs.com/package/@chainsafe/libp2p-gossipsub) | [![Deps](https://img.shields.io/librariesio/release/npm/%40chainsafe%2Flibp2p-gossipsub?logo=Libraries.io&logoColor=white&style=flat-square)](//libraries.io/npm/%40chainsafe%2Flibp2p-gossipsub) | [![GitHub CI](https://img.shields.io/github/actions/workflow/status/ChainSafe/js-libp2p-gossipsub/main.yml?branch=master&label=ci&style=flat-square)](//github.com/ChainSafe/js-libp2p-gossipsub/actions?query=branch%3Amaster+workflow%3Aci+) | 
[![codecov](https://codecov.io/gh/ChainSafe/js-libp2p-gossipsub/branch/master/graph/badge.svg?style=flat-square)](https://codecov.io/gh/ChainSafe/js-libp2p-gossipsub) | -| [`@libp2p/floodsub`](//github.com/libp2p/js-libp2p/tree/main/packages/pubsub-floodsub) | [![npm](https://img.shields.io/npm/v/%40libp2p%2Ffloodsub.svg?maxAge=86400&style=flat-square)](//npmjs.com/package/@libp2p/floodsub) | [![Deps](https://img.shields.io/librariesio/release/npm/%40libp2p%2Ffloodsub?logo=Libraries.io&logoColor=white&style=flat-square)](//libraries.io/npm/%40libp2p%2Ffloodsub) | [![GitHub CI](https://img.shields.io/github/actions/workflow/status/libp2p/js-libp2p/main.yml?branch=main&label=ci&style=flat-square)](//github.com/libp2p/js-libp2p/actions?query=branch%3Amain+workflow%3Aci+) | [![codecov](https://codecov.io/gh/libp2p/js-libp2p/tree/main/packages/pubsub-floodsub/branch/main/graph/badge.svg?style=flat-square)](https://codecov.io/gh/libp2p/js-libp2p/tree/main/packages/pubsub-floodsub) | +| [`@libp2p/floodsub`](//github.com/libp2p/js-libp2p/tree/main/packages/floodsub) | [![npm](https://img.shields.io/npm/v/%40libp2p%2Ffloodsub.svg?maxAge=86400&style=flat-square)](//npmjs.com/package/@libp2p/floodsub) | [![Deps](https://img.shields.io/librariesio/release/npm/%40libp2p%2Ffloodsub?logo=Libraries.io&logoColor=white&style=flat-square)](//libraries.io/npm/%40libp2p%2Ffloodsub) | [![GitHub CI](https://img.shields.io/github/actions/workflow/status/libp2p/js-libp2p/main.yml?branch=main&label=ci&style=flat-square)](//github.com/libp2p/js-libp2p/actions?query=branch%3Amain+workflow%3Aci+) | [![codecov](https://codecov.io/gh/libp2p/js-libp2p/tree/main/packages/floodsub/branch/main/graph/badge.svg?style=flat-square)](https://codecov.io/gh/libp2p/js-libp2p/tree/main/packages/floodsub) | ## Used by diff --git a/packages/pubsub-floodsub/.aegir.js b/packages/floodsub/.aegir.js similarity index 100% rename
from packages/pubsub-floodsub/.aegir.js rename to packages/floodsub/.aegir.js diff --git a/packages/pubsub-floodsub/CHANGELOG.md b/packages/floodsub/CHANGELOG.md similarity index 100% rename from packages/pubsub-floodsub/CHANGELOG.md rename to packages/floodsub/CHANGELOG.md diff --git a/packages/pubsub-floodsub/CODE_OF_CONDUCT.md b/packages/floodsub/CODE_OF_CONDUCT.md similarity index 100% rename from packages/pubsub-floodsub/CODE_OF_CONDUCT.md rename to packages/floodsub/CODE_OF_CONDUCT.md diff --git a/packages/pubsub-floodsub/LICENSE-APACHE b/packages/floodsub/LICENSE-APACHE similarity index 100% rename from packages/pubsub-floodsub/LICENSE-APACHE rename to packages/floodsub/LICENSE-APACHE diff --git a/packages/pubsub-floodsub/LICENSE-MIT b/packages/floodsub/LICENSE-MIT similarity index 100% rename from packages/pubsub-floodsub/LICENSE-MIT rename to packages/floodsub/LICENSE-MIT diff --git a/packages/pubsub-floodsub/README.md b/packages/floodsub/README.md similarity index 94% rename from packages/pubsub-floodsub/README.md rename to packages/floodsub/README.md index c4937bf291..8573dd0ea6 100644 --- a/packages/pubsub-floodsub/README.md +++ b/packages/floodsub/README.md @@ -76,8 +76,8 @@ Loading this module through a script tag will make its exports available as `Lib Licensed under either of -- Apache 2.0, ([LICENSE-APACHE](https://github.com/libp2p/js-libp2p/blob/main/packages/pubsub-floodsub/LICENSE-APACHE) / ) -- MIT ([LICENSE-MIT](https://github.com/libp2p/js-libp2p/blob/main/packages/pubsub-floodsub/LICENSE-MIT) / ) +- Apache 2.0, ([LICENSE-APACHE](https://github.com/libp2p/js-libp2p/blob/main/packages/floodsub/LICENSE-APACHE) / ) +- MIT ([LICENSE-MIT](https://github.com/libp2p/js-libp2p/blob/main/packages/floodsub/LICENSE-MIT) / ) # Contribution diff --git a/packages/pubsub-floodsub/img/test-cases.monopic b/packages/floodsub/img/test-cases.monopic similarity index 100% rename from packages/pubsub-floodsub/img/test-cases.monopic rename to 
packages/floodsub/img/test-cases.monopic diff --git a/packages/pubsub-floodsub/img/test-cases.txt b/packages/floodsub/img/test-cases.txt similarity index 100% rename from packages/pubsub-floodsub/img/test-cases.txt rename to packages/floodsub/img/test-cases.txt diff --git a/packages/pubsub-floodsub/package.json b/packages/floodsub/package.json similarity index 87% rename from packages/pubsub-floodsub/package.json rename to packages/floodsub/package.json index 12851edaab..4e666458d5 100644 --- a/packages/pubsub-floodsub/package.json +++ b/packages/floodsub/package.json @@ -3,7 +3,7 @@ "version": "10.1.46", "description": "libp2p-floodsub, also known as pubsub-flood or just dumbsub, this implementation of pubsub focused on delivering an API for Publish/Subscribe, but with no CastTree Forming (it just floods the network).", "license": "Apache-2.0 OR MIT", - "homepage": "https://github.com/libp2p/js-libp2p/tree/main/packages/pubsub-floodsub#readme", + "homepage": "https://github.com/libp2p/js-libp2p/tree/main/packages/floodsub#readme", "repository": { "type": "git", "url": "git+https://github.com/libp2p/js-libp2p.git" @@ -53,22 +53,30 @@ "test:electron-main": "aegir test -t electron-main" }, "dependencies": { + "@libp2p/crypto": "^5.1.8", "@libp2p/interface": "^2.11.0", - "@libp2p/pubsub": "^10.1.18", + "@libp2p/interface-internal": "^2.3.19", + "@libp2p/peer-collections": "^6.0.35", + "@libp2p/peer-id": "^5.1.9", + "@libp2p/utils": "^6.7.2", + "it-length-prefixed": "^10.0.1", + "it-pipe": "^3.0.1", + "it-pushable": "^3.2.3", + "main-event": "^1.0.1", + "multiformats": "^13.4.1", + "p-event": "^7.0.0", + "p-queue": "^8.1.1", "protons-runtime": "^5.6.0", "uint8arraylist": "^2.4.8", "uint8arrays": "^5.1.0" }, "devDependencies": { - "@libp2p/crypto": "^5.1.8", - "@libp2p/interface-internal": "^2.3.19", "@libp2p/logger": "^5.2.0", - "@libp2p/peer-collections": "^6.0.35", - "@libp2p/peer-id": "^5.1.9", "@multiformats/multiaddr": "^13.0.1", "@types/sinon": "^17.0.4", 
"aegir": "^47.0.22", - "multiformats": "^13.4.0", + "delay": "^6.0.0", + "it-all": "^3.0.9", "p-wait-for": "^5.0.2", "protons": "^7.7.0", "sinon": "^21.0.0", diff --git a/packages/pubsub-floodsub/src/cache.ts b/packages/floodsub/src/cache.ts similarity index 100% rename from packages/pubsub-floodsub/src/cache.ts rename to packages/floodsub/src/cache.ts diff --git a/packages/floodsub/src/constants.ts b/packages/floodsub/src/constants.ts new file mode 100644 index 0000000000..44cbcc898e --- /dev/null +++ b/packages/floodsub/src/constants.ts @@ -0,0 +1,5 @@ +/** + * All Pubsub implementations must use this symbol as the name of a property + * with a boolean `true` value + */ +export const pubSubSymbol = Symbol.for('@libp2p/pubsub') diff --git a/packages/pubsub/src/index.ts b/packages/floodsub/src/floodsub.ts similarity index 82% rename from packages/pubsub/src/index.ts rename to packages/floodsub/src/floodsub.ts index a73bb55399..6f992ce983 100644 --- a/packages/pubsub/src/index.ts +++ b/packages/floodsub/src/floodsub.ts @@ -1,62 +1,44 @@ -/** - * @packageDocumentation - * - * A set of components to be extended in order to create a pubsub implementation. 
- * - * @example - * - * ```TypeScript - * import { PubSubBaseProtocol } from '@libp2p/pubsub' - * import type { PubSubRPC, PublishResult, PubSubRPCMessage, PeerId, Message } from '@libp2p/interface' - * import type { Uint8ArrayList } from 'uint8arraylist' - * - * class MyPubsubImplementation extends PubSubBaseProtocol { - * decodeRpc (bytes: Uint8Array | Uint8ArrayList): PubSubRPC { - * throw new Error('Not implemented') - * } - * - * encodeRpc (rpc: PubSubRPC): Uint8Array { - * throw new Error('Not implemented') - * } - * - * encodeMessage (rpc: PubSubRPCMessage): Uint8Array { - * throw new Error('Not implemented') - * } - * - * async publishMessage (sender: PeerId, message: Message): Promise { - * throw new Error('Not implemented') - * } - * } - * ``` - */ - -import { TopicValidatorResult, InvalidMessageError, NotStartedError, InvalidParametersError } from '@libp2p/interface' +import { InvalidMessageError, NotStartedError, InvalidParametersError, serviceCapabilities, serviceDependencies } from '@libp2p/interface' import { PeerMap, PeerSet } from '@libp2p/peer-collections' import { pipe } from 'it-pipe' import { TypedEventEmitter } from 'main-event' import Queue from 'p-queue' -import { PeerStreams as PeerStreamsImpl } from './peer-streams.js' -import { - signMessage, - verifySignature -} from './sign.js' +import { toString as uint8ArrayToString } from 'uint8arrays/to-string' +import { SimpleTimeCache } from './cache.js' +import { pubSubSymbol } from './constants.ts' +import { RPC } from './message/rpc.js' +import { PeerStreams, PeerStreams as PeerStreamsImpl } from './peer-streams.js' +import { signMessage, verifySignature } from './sign.js' import { toMessage, ensureArray, noSignMsgId, msgId, toRpcMessage, randomSeqno } from './utils.js' -import type { PubSub, Message, StrictNoSign, StrictSign, PubSubInit, PubSubEvents, PeerStreams, PubSubRPCMessage, PubSubRPC, PubSubRPCSubscription, SubscriptionChangeData, PublishResult, TopicValidatorFn, ComponentLogger, 
Logger, Connection, PeerId, PrivateKey, Stream, Topology } from '@libp2p/interface' -import type { Registrar } from '@libp2p/interface-internal' +import { protocol, StrictNoSign, TopicValidatorResult, StrictSign } from './index.js' +import type { FloodSubComponents, FloodSubEvents, FloodSubInit, FloodSub as FloodSubInterface, Message, PublishResult, SubscriptionChangeData, TopicValidatorFn } from './index.js' +import type { Logger, Connection, PeerId, Stream, Topology } from '@libp2p/interface' import type { Uint8ArrayList } from 'uint8arraylist' -export interface PubSubComponents { - peerId: PeerId - privateKey: PrivateKey - registrar: Registrar - logger: ComponentLogger +export interface PubSubRPCMessage { + from?: Uint8Array + topic?: string + data?: Uint8Array + sequenceNumber?: Uint8Array + signature?: Uint8Array + key?: Uint8Array +} + +export interface PubSubRPCSubscription { + subscribe?: boolean + topic?: string +} + +export interface PubSubRPC { + subscriptions: PubSubRPCSubscription[] + messages: PubSubRPCMessage[] } /** * PubSubBaseProtocol handles the peers and connections logic for pubsub routers * and specifies the API that pubsub routers should have. 
*/ -export abstract class PubSubBaseProtocol = PubSubEvents> extends TypedEventEmitter implements PubSub { +export class FloodSub extends TypedEventEmitter implements FloodSubInterface { protected log: Logger public started: boolean @@ -92,55 +74,61 @@ export abstract class PubSubBaseProtocol = Pu */ public topicValidators: Map public queue: Queue - public multicodecs: string[] - public components: PubSubComponents + public protocols: string[] + public components: FloodSubComponents private _registrarTopologyIds: string[] | undefined - protected enabled: boolean private readonly maxInboundStreams: number private readonly maxOutboundStreams: number + public seenCache: SimpleTimeCache - constructor (components: PubSubComponents, props: PubSubInit) { + constructor (components: FloodSubComponents, init: FloodSubInit) { super() - const { - multicodecs = [], - globalSignaturePolicy = 'StrictSign', - canRelayMessage = false, - emitSelf = false, - messageProcessingConcurrency = 10, - maxInboundStreams = 1, - maxOutboundStreams = 1 - } = props - - this.log = components.logger.forComponent('libp2p:pubsub') + this.log = components.logger.forComponent('libp2p:floodsub') this.components = components - this.multicodecs = ensureArray(multicodecs) - this.enabled = props.enabled !== false + this.protocols = ensureArray(init.protocols ?? protocol) this.started = false this.topics = new Map() this.subscriptions = new Set() this.peers = new PeerMap() - this.globalSignaturePolicy = globalSignaturePolicy === 'StrictNoSign' ? 'StrictNoSign' : 'StrictSign' - this.canRelayMessage = canRelayMessage - this.emitSelf = emitSelf + this.globalSignaturePolicy = init.globalSignaturePolicy === 'StrictNoSign' ? 'StrictNoSign' : 'StrictSign' + this.canRelayMessage = init.canRelayMessage ?? true + this.emitSelf = init.emitSelf ?? 
false this.topicValidators = new Map() - this.queue = new Queue({ concurrency: messageProcessingConcurrency }) - this.maxInboundStreams = maxInboundStreams - this.maxOutboundStreams = maxOutboundStreams + this.queue = new Queue({ + concurrency: init.messageProcessingConcurrency ?? 10 + }) + this.maxInboundStreams = init.maxInboundStreams ?? 1 + this.maxOutboundStreams = init.maxOutboundStreams ?? 1 + this.seenCache = new SimpleTimeCache({ + validityMs: init?.seenTTL ?? 30000 + }) this._onIncomingStream = this._onIncomingStream.bind(this) this._onPeerConnected = this._onPeerConnected.bind(this) this._onPeerDisconnected = this._onPeerDisconnected.bind(this) } + readonly [pubSubSymbol] = true + + readonly [Symbol.toStringTag] = '@libp2p/floodsub' + + readonly [serviceCapabilities]: string[] = [ + '@libp2p/pubsub' + ] + + readonly [serviceDependencies]: string[] = [ + '@libp2p/identify' + ] + // LIFECYCLE METHODS /** * Register the pubsub protocol onto the libp2p node. */ async start (): Promise { - if (this.started || !this.enabled) { + if (this.started) { return } @@ -149,7 +137,7 @@ export abstract class PubSubBaseProtocol = Pu const registrar = this.components.registrar // Incoming streams // Called after a peer dials us - await Promise.all(this.multicodecs.map(async multicodec => { + await Promise.all(this.protocols.map(async multicodec => { await registrar.handle(multicodec, this._onIncomingStream, { maxInboundStreams: this.maxInboundStreams, maxOutboundStreams: this.maxOutboundStreams @@ -162,7 +150,7 @@ export abstract class PubSubBaseProtocol = Pu onConnect: this._onPeerConnected, onDisconnect: this._onPeerDisconnected } - this._registrarTopologyIds = await Promise.all(this.multicodecs.map(async multicodec => registrar.register(multicodec, topology))) + this._registrarTopologyIds = await Promise.all(this.protocols.map(async multicodec => registrar.register(multicodec, topology))) this.log('started') this.started = true @@ -172,7 +160,7 @@ export abstract class 
PubSubBaseProtocol = Pu * Unregister the pubsub protocol and the streams with other peers will be closed. */ async stop (): Promise { - if (!this.started || !this.enabled) { + if (!this.started) { return } @@ -185,7 +173,7 @@ export abstract class PubSubBaseProtocol = Pu }) } - await Promise.all(this.multicodecs.map(async multicodec => { + await Promise.all(this.protocols.map(async multicodec => { await registrar.unhandle(multicodec) })) @@ -229,12 +217,12 @@ export abstract class PubSubBaseProtocol = Pu this.log('connected %p', peerId) // if this connection is already in use for pubsub, ignore it - if (conn.streams.find(stream => stream.direction === 'outbound' && stream.protocol != null && this.multicodecs.includes(stream.protocol)) != null) { + if (conn.streams.find(stream => stream.direction === 'outbound' && stream.protocol != null && this.protocols.includes(stream.protocol)) != null) { this.log('outbound pubsub streams already present on connection from %p', peerId) return } - const stream = await conn.newStream(this.multicodecs) + const stream = await conn.newStream(this.protocols) if (stream.protocol == null) { stream.abort(new Error('Stream was not multiplexed')) @@ -445,6 +433,16 @@ export abstract class PubSubBaseProtocol = Pu return } + // Check if I've seen the message, if yes, ignore + const seqno = await this.getMsgId(msg) + const msgIdStr = uint8ArrayToString(seqno, 'base64') + + if (this.seenCache.has(msgIdStr)) { + return + } + + this.seenCache.put(msgIdStr, true) + // Ensure the message is valid before processing it try { await this.validate(from, msg) @@ -506,19 +504,25 @@ export abstract class PubSubBaseProtocol = Pu * Decode Uint8Array into an RPC object. * This can be override to use a custom router protobuf. */ - abstract decodeRpc (bytes: Uint8Array | Uint8ArrayList): PubSubRPC + decodeRpc (bytes: Uint8Array | Uint8ArrayList): PubSubRPC { + return RPC.decode(bytes) + } /** * Encode RPC object into a Uint8Array. 
* This can be override to use a custom router protobuf. */ - abstract encodeRpc (rpc: PubSubRPC): Uint8Array + encodeRpc (rpc: PubSubRPC): Uint8Array { + return RPC.encode(rpc) + } /** * Encode RPC object into a Uint8Array. * This can be override to use a custom router protobuf. */ - abstract encodeMessage (rpc: PubSubRPCMessage): Uint8Array + encodeMessage (rpc: PubSubRPCMessage): Uint8Array { + return RPC.Message.encode(rpc) + } /** * Send an rpc object to a peer @@ -700,7 +704,34 @@ export abstract class PubSubBaseProtocol = Pu * `sender` might be this peer, or we might be forwarding a message on behalf of another peer, in which case sender * is the peer we received the message from, which may not be the peer the message was created by. */ - abstract publishMessage (sender: PeerId, message: Message): Promise + async publishMessage (from: PeerId, message: Message): Promise { + const peers = this.getSubscribers(message.topic) + const recipients: PeerId[] = [] + + if (peers == null || peers.length === 0) { + this.log('no peers are subscribed to topic %s', message.topic) + return { recipients } + } + + peers.forEach(id => { + if (this.components.peerId.equals(id)) { + this.log('not sending message on topic %s to myself', message.topic) + return + } + + if (id.equals(from)) { + this.log('not sending message on topic %s to sender %p', message.topic, id) + return + } + + this.log('publish msgs on topics %s %p', message.topic, id) + + recipients.push(id) + this.send(id, { messages: [message] }) + }) + + return { recipients } + } /** * Subscribes to a given topic. 
@@ -729,8 +760,6 @@ export abstract class PubSubBaseProtocol = Pu throw new Error('Pubsub is not started') } - super.removeEventListener(topic) - const wasSubscribed = this.subscriptions.has(topic) this.log('unsubscribe from %s - am subscribed %s', topic, wasSubscribed) diff --git a/packages/interface/src/pubsub.ts b/packages/floodsub/src/index.ts similarity index 72% rename from packages/interface/src/pubsub.ts rename to packages/floodsub/src/index.ts index 257eec8cc3..1dfede8777 100644 --- a/packages/interface/src/pubsub.ts +++ b/packages/floodsub/src/index.ts @@ -1,7 +1,43 @@ -import type { Stream, PublicKey, PeerId } from './index.js' -import type { Pushable } from 'it-pushable' -import type { TypedEventTarget } from 'main-event' -import type { Uint8ArrayList } from 'uint8arraylist' +/** + * @packageDocumentation + * + * > Don't use this module + * + * This module is a naive implementation of pubsub. It broadcasts all messages to all network peers, cannot provide older messages and has no protection against bad actors. + * + * It exists for academic purposes only, you should not use it in production. + * + * Instead please use [gossipsub](https://www.npmjs.com/package/@chainsafe/libp2p-gossipsub) - a more complete implementation which is also compatible with floodsub. + * + * @example Configuring libp2p to use floodsub + * + * ```TypeScript + * import { createLibp2p } from 'libp2p' + * import { floodsub } from '@libp2p/floodsub' + * + * const node = await createLibp2p({ + * services: { + * pubsub: floodsub() + * } + * //... 
other options + * }) + * await node.start() + * + * node.services.pubsub.subscribe('fruit') + * node.services.pubsub.addEventListener('message', (evt) => { + * console.log(evt) + * }) + * + * node.services.pubsub.publish('fruit', new TextEncoder().encode('banana')) + * ``` + */ + +import { pubSubSymbol } from './constants.ts' +import { FloodSub as FloodSubClass } from './floodsub.js' +import type { ComponentLogger, PeerId, PrivateKey, PublicKey, TypedEventTarget } from '@libp2p/interface' +import type { Registrar } from '@libp2p/interface-internal' + +export const protocol = '/floodsub/1.0.0' /** * On the producing side: @@ -45,74 +81,6 @@ export interface UnsignedMessage { export type Message = SignedMessage | UnsignedMessage -export interface PubSubRPCMessage { - from?: Uint8Array - topic?: string - data?: Uint8Array - sequenceNumber?: Uint8Array - signature?: Uint8Array - key?: Uint8Array -} - -export interface PubSubRPCSubscription { - subscribe?: boolean - topic?: string -} - -export interface PubSubRPC { - subscriptions: PubSubRPCSubscription[] - messages: PubSubRPCMessage[] -} - -export interface PeerStreams extends TypedEventTarget { - id: PeerId - protocol: string - outboundStream?: Pushable - inboundStream?: AsyncIterable - isWritable: boolean - - close(): void - write(buf: Uint8Array | Uint8ArrayList): void - attachInboundStream(stream: Stream): AsyncIterable - attachOutboundStream(stream: Stream): Promise> -} - -export interface PubSubInit { - enabled?: boolean - - multicodecs?: string[] - - /** - * defines how signatures should be handled - */ - globalSignaturePolicy?: SignaturePolicy - - /** - * if can relay messages not subscribed - */ - canRelayMessage?: boolean - - /** - * if publish should emit to self, if subscribed - */ - emitSelf?: boolean - - /** - * handle this many incoming pubsub messages concurrently - */ - messageProcessingConcurrency?: number - - /** - * How many parallel incoming streams to allow on the pubsub protocol per-connection - 
*/ - maxInboundStreams?: number - - /** - * How many parallel outgoing streams to allow on the pubsub protocol per-connection - */ - maxOutboundStreams?: number -} - export interface Subscription { topic: string subscribe: boolean @@ -123,7 +91,7 @@ export interface SubscriptionChangeData { subscriptions: Subscription[] } -export interface PubSubEvents { +export interface FloodSubEvents { 'subscription-change': CustomEvent message: CustomEvent } @@ -151,10 +119,22 @@ export interface TopicValidatorFn { (peer: PeerId, message: Message): TopicValidatorResult | Promise } +export interface PeerStreamEvents { + 'stream:inbound': CustomEvent + 'stream:outbound': CustomEvent + close: CustomEvent +} + +export { pubSubSymbol } + /** - * @deprecated This will be removed from `@libp2p/interface` in a future release, pubsub implementations should declare their own types + * Returns true if the passed argument is a PubSub implementation */ -export interface PubSub = PubSubEvents> extends TypedEventTarget { +export function isPubSub (obj?: any): obj is FloodSub { + return Boolean(obj?.[pubSubSymbol]) +} + +export interface FloodSub extends TypedEventTarget { /** * The global signature policy controls whether or not we sill send and receive * signed or unsigned messages. @@ -167,7 +147,7 @@ export interface PubSub = PubSubEvents> exten /** * A list of multicodecs that contain the pubsub protocol name. 
*/ - multicodecs: string[] + protocols: string[] /** * Pubsub routers support message validators per topic, which will validate the message @@ -261,24 +241,57 @@ export interface PubSub = PubSubEvents> exten * await libp2p.pubsub.publish(topic, data) * ``` */ - publish(topic: string, data: Uint8Array): Promise + publish(topic: string, data?: Uint8Array): Promise } -export interface PeerStreamEvents { - 'stream:inbound': CustomEvent - 'stream:outbound': CustomEvent - close: CustomEvent +export interface FloodSubComponents { + peerId: PeerId + privateKey: PrivateKey + registrar: Registrar + logger: ComponentLogger } -/** - * All Pubsub implementations must use this symbol as the name of a property - * with a boolean `true` value - */ -export const pubSubSymbol = Symbol.for('@libp2p/pubsub') +export interface FloodSubInit { + seenTTL?: number -/** - * Returns true if the passed argument is a PubSub implementation - */ -export function isPubSub (obj?: any): obj is PubSub { - return Boolean(obj?.[pubSubSymbol]) + /** + * Override the protocol registered with the registrar + * + * @default ['/floodsub/1.0.0'] + */ + protocols?: string[] + + /** + * defines how signatures should be handled + */ + globalSignaturePolicy?: SignaturePolicy + + /** + * if can relay messages not subscribed + */ + canRelayMessage?: boolean + + /** + * if publish should emit to self, if subscribed + */ + emitSelf?: boolean + + /** + * handle this many incoming pubsub messages concurrently + */ + messageProcessingConcurrency?: number + + /** + * How many parallel incoming streams to allow on the pubsub protocol per-connection + */ + maxInboundStreams?: number + + /** + * How many parallel outgoing streams to allow on the pubsub protocol per-connection + */ + maxOutboundStreams?: number +} + +export function floodsub (init: FloodSubInit = {}): (components: FloodSubComponents) => FloodSub { + return (components: FloodSubComponents) => new FloodSubClass(components, init) } diff --git 
a/packages/pubsub-floodsub/src/message/rpc.proto b/packages/floodsub/src/message/rpc.proto similarity index 100% rename from packages/pubsub-floodsub/src/message/rpc.proto rename to packages/floodsub/src/message/rpc.proto diff --git a/packages/pubsub-floodsub/src/message/rpc.ts b/packages/floodsub/src/message/rpc.ts similarity index 100% rename from packages/pubsub-floodsub/src/message/rpc.ts rename to packages/floodsub/src/message/rpc.ts diff --git a/packages/pubsub/src/peer-streams.ts b/packages/floodsub/src/peer-streams.ts similarity index 97% rename from packages/pubsub/src/peer-streams.ts rename to packages/floodsub/src/peer-streams.ts index a0a1b83a7f..de700185a9 100644 --- a/packages/pubsub/src/peer-streams.ts +++ b/packages/floodsub/src/peer-streams.ts @@ -5,7 +5,8 @@ import { pushable } from 'it-pushable' import { TypedEventEmitter } from 'main-event' import { pEvent } from 'p-event' import { Uint8ArrayList } from 'uint8arraylist' -import type { ComponentLogger, Logger, Stream, PeerId, PeerStreamEvents } from '@libp2p/interface' +import type { PeerStreamEvents } from './index.ts' +import type { ComponentLogger, Logger, Stream, PeerId } from '@libp2p/interface' import type { DecoderOptions as LpDecoderOptions } from 'it-length-prefixed' import type { Pushable } from 'it-pushable' diff --git a/packages/pubsub/src/sign.ts b/packages/floodsub/src/sign.ts similarity index 94% rename from packages/pubsub/src/sign.ts rename to packages/floodsub/src/sign.ts index 3c2dc7f7d0..557fb86bb5 100644 --- a/packages/pubsub/src/sign.ts +++ b/packages/floodsub/src/sign.ts @@ -2,7 +2,9 @@ import { peerIdFromPrivateKey } from '@libp2p/peer-id' import { concat as uint8ArrayConcat } from 'uint8arrays/concat' import { fromString as uint8ArrayFromString } from 'uint8arrays/from-string' import { toRpcMessage } from './utils.js' -import type { PeerId, PrivateKey, PubSubRPCMessage, PublicKey, SignedMessage } from '@libp2p/interface' +import type { PubSubRPCMessage } from 
'./floodsub.ts' +import type { SignedMessage } from './index.ts' +import type { PeerId, PrivateKey, PublicKey } from '@libp2p/interface' export const SignPrefix = uint8ArrayFromString('libp2p-pubsub:') diff --git a/packages/pubsub/src/utils.ts b/packages/floodsub/src/utils.ts similarity index 96% rename from packages/pubsub/src/utils.ts rename to packages/floodsub/src/utils.ts index a6133c1e6b..534419e6b3 100644 --- a/packages/pubsub/src/utils.ts +++ b/packages/floodsub/src/utils.ts @@ -6,7 +6,9 @@ import * as Digest from 'multiformats/hashes/digest' import { sha256 } from 'multiformats/hashes/sha2' import { fromString as uint8ArrayFromString } from 'uint8arrays/from-string' import { toString as uint8ArrayToString } from 'uint8arrays/to-string' -import type { Message, PubSubRPCMessage, PublicKey } from '@libp2p/interface' +import type { PubSubRPCMessage } from './floodsub.ts' +import type { Message } from './index.ts' +import type { PublicKey } from '@libp2p/interface' /** * Generate a random sequence number diff --git a/packages/pubsub/test/emit-self.spec.ts b/packages/floodsub/test/emit-self.spec.ts similarity index 52% rename from packages/pubsub/test/emit-self.spec.ts rename to packages/floodsub/test/emit-self.spec.ts index 10045a4ae0..384ce881bd 100644 --- a/packages/pubsub/test/emit-self.spec.ts +++ b/packages/floodsub/test/emit-self.spec.ts @@ -1,45 +1,44 @@ import { generateKeyPair } from '@libp2p/crypto/keys' +import { start, stop } from '@libp2p/interface' import { defaultLogger } from '@libp2p/logger' import { peerIdFromPrivateKey } from '@libp2p/peer-id' -import { expect } from 'aegir/chai' -import delay from 'delay' +import { stubInterface } from 'sinon-ts' import { fromString as uint8ArrayFromString } from 'uint8arrays/from-string' -import { - MockRegistrar, - PubsubImplementation -} from './utils/index.js' +import { floodsub } from '../src/index.js' +import type { FloodSub } from '../src/index.js' +import type { Registrar } from 
'@libp2p/interface-internal' +import type { StubbedInstance } from 'sinon-ts' -const protocol = '/pubsub/1.0.0' const topic = 'foo' const data = uint8ArrayFromString('bar') -const shouldNotHappen = (): void => expect.fail() describe('emitSelf', () => { - let pubsub: PubsubImplementation + let pubsub: FloodSub + let registrar: StubbedInstance describe('enabled', () => { before(async () => { const privateKey = await generateKeyPair('Ed25519') const peerId = peerIdFromPrivateKey(privateKey) + registrar = stubInterface() - pubsub = new PubsubImplementation({ + pubsub = floodsub({ + emitSelf: true + })({ peerId, privateKey, - registrar: new MockRegistrar(), + registrar, logger: defaultLogger() - }, { - multicodecs: [protocol], - emitSelf: true }) }) before(async () => { - await pubsub.start() + await start(pubsub) pubsub.subscribe(topic) }) after(async () => { - await pubsub.stop() + await stop(pubsub) }) it('should emit to self on publish', async () => { @@ -74,40 +73,4 @@ describe('emitSelf', () => { await promise }) }) - - describe('disabled', () => { - before(async () => { - const privateKey = await generateKeyPair('Ed25519') - const peerId = peerIdFromPrivateKey(privateKey) - - pubsub = new PubsubImplementation({ - peerId, - privateKey, - registrar: new MockRegistrar(), - logger: defaultLogger() - }, { - multicodecs: [protocol], - emitSelf: false - }) - }) - - before(async () => { - await pubsub.start() - pubsub.subscribe(topic) - }) - - after(async () => { - await pubsub.stop() - }) - - it('should not emit to self on publish', async () => { - pubsub.subscribe(topic) - pubsub.addEventListener('message', shouldNotHappen) - - await pubsub.publish(topic, data) - - // Wait 1 second to guarantee that self is not noticed - await delay(1000) - }) - }) }) diff --git a/packages/floodsub/test/fixtures/connection.ts b/packages/floodsub/test/fixtures/connection.ts new file mode 100644 index 0000000000..58a56c061a --- /dev/null +++ 
b/packages/floodsub/test/fixtures/connection.ts @@ -0,0 +1,26 @@ +import { streamPair } from '@libp2p/utils' +import { stubInterface } from 'sinon-ts' +import type { Connection, PeerId } from '@libp2p/interface' + +/** + * Returns two connections: + * + * 1. peerA -> peerB + * 2. peerB -> peerA + */ +export const connectionPair = async (peerA: PeerId, peerB: PeerId): Promise<[Connection, Connection]> => { + const [d0, d1] = await streamPair() + + return [ + stubInterface({ + newStream: async () => d0, + streams: [], + remotePeer: peerB + }), + stubInterface({ + newStream: async () => d1, + streams: [], + remotePeer: peerA + }) + ] +} diff --git a/packages/pubsub-floodsub/test/fixtures/peers.ts b/packages/floodsub/test/fixtures/peers.ts similarity index 100% rename from packages/pubsub-floodsub/test/fixtures/peers.ts rename to packages/floodsub/test/fixtures/peers.ts diff --git a/packages/pubsub-floodsub/test/fixtures/relay.ts b/packages/floodsub/test/fixtures/relay.ts similarity index 100% rename from packages/pubsub-floodsub/test/fixtures/relay.ts rename to packages/floodsub/test/fixtures/relay.ts diff --git a/packages/pubsub-floodsub/test/floodsub.spec.ts b/packages/floodsub/test/floodsub.spec.ts similarity index 93% rename from packages/pubsub-floodsub/test/floodsub.spec.ts rename to packages/floodsub/test/floodsub.spec.ts index 4997ad8b41..73c8fd2b24 100644 --- a/packages/pubsub-floodsub/test/floodsub.spec.ts +++ b/packages/floodsub/test/floodsub.spec.ts @@ -1,11 +1,10 @@ /* eslint-env mocha */ import { generateKeyPair } from '@libp2p/crypto/keys' -import { StrictNoSign, start, stop } from '@libp2p/interface' +import { start, stop } from '@libp2p/interface' import { defaultLogger } from '@libp2p/logger' import { PeerSet } from '@libp2p/peer-collections' import { peerIdFromPrivateKey } from '@libp2p/peer-id' -import { PeerStreams } from '@libp2p/pubsub/peer-streams' import { expect } from 'aegir/chai' import { sha256 } from 'multiformats/hashes/sha2' import 
pWaitFor from 'p-wait-for' @@ -13,8 +12,10 @@ import sinon from 'sinon' import { stubInterface } from 'sinon-ts' import { fromString as uint8ArrayFromString } from 'uint8arrays/from-string' import { toString as uint8ArrayToString } from 'uint8arrays/to-string' -import { floodsub, multicodec } from '../src/index.js' -import type { Message, PubSubRPC } from '@libp2p/interface' +import { floodsub, protocol, StrictNoSign } from '../src/index.js' +import { PeerStreams } from '../src/peer-streams.js' +import type { PubSubRPC } from '../src/floodsub.js' +import type { Message } from '../src/index.js' import type { Registrar } from '@libp2p/interface-internal' import type { StubbedInstance } from 'sinon-ts' @@ -26,7 +27,7 @@ describe('floodsub', () => { let registrar: StubbedInstance before(async () => { - expect(multicodec).to.exist() + expect(protocol).to.exist() const privateKey = await generateKeyPair('Ed25519') const peerId = peerIdFromPrivateKey(privateKey) diff --git a/packages/floodsub/test/lifecycle.spec.ts b/packages/floodsub/test/lifecycle.spec.ts new file mode 100644 index 0000000000..d8e3fd84e2 --- /dev/null +++ b/packages/floodsub/test/lifecycle.spec.ts @@ -0,0 +1,235 @@ +import { generateKeyPair } from '@libp2p/crypto/keys' +import { start, stop } from '@libp2p/interface' +import { defaultLogger } from '@libp2p/logger' +import { peerIdFromPrivateKey } from '@libp2p/peer-id' +import { expect } from 'aegir/chai' +import sinon from 'sinon' +import { stubInterface } from 'sinon-ts' +import { floodsub } from '../src/index.js' +import { connectionPair } from './fixtures/connection.js' +import type { FloodSub } from '../src/index.js' +import type { PeerId } from '@libp2p/interface' +import type { Registrar } from '@libp2p/interface-internal' +import type { StubbedInstance } from 'sinon-ts' + +describe('pubsub base life cycle', () => { + describe('should start and stop properly', () => { + let pubsub: FloodSub + let registrar: StubbedInstance + + beforeEach(async () 
=> { + const privateKey = await generateKeyPair('Ed25519') + const peerId = peerIdFromPrivateKey(privateKey) + + registrar = stubInterface() + registrar.register.resolves(`id-${Math.random()}`) + + pubsub = floodsub()({ + peerId, + privateKey, + registrar, + logger: defaultLogger() + }) + }) + + afterEach(() => { + sinon.restore() + }) + + it('should be able to start and stop', async () => { + await start(pubsub) + expect(registrar.handle).to.have.property('calledOnce', true) + expect(registrar.register).to.have.property('calledOnce', true) + + expect(pubsub.getPeers()).to.be.empty() + + await stop(pubsub) + expect(registrar.unhandle).to.have.property('calledOnce', true) + expect(registrar.unregister).to.have.property('calledOnce', true) + }) + + it('starting should not throw if already started', async () => { + await start(pubsub) + await start(pubsub) + expect(registrar.handle).to.have.property('calledOnce', true) + expect(registrar.register).to.have.property('calledOnce', true) + + await stop(pubsub) + expect(registrar.unhandle).to.have.property('calledOnce', true) + expect(registrar.unregister).to.have.property('calledOnce', true) + }) + + it('stopping should not throw if not started', async () => { + await stop(pubsub) + expect(registrar.handle).to.have.property('calledOnce', false) + expect(registrar.unhandle).to.have.property('calledOnce', false) + expect(registrar.register).to.have.property('calledOnce', false) + expect(registrar.unregister).to.have.property('calledOnce', false) + }) + }) + + describe('should be able to register two nodes', () => { + let pubsubA: FloodSub + let pubsubB: FloodSub + let peerIdA: PeerId + let peerIdB: PeerId + let registrarA: StubbedInstance + let registrarB: StubbedInstance + + // mount pubsub + beforeEach(async () => { + const privateKeyA = await generateKeyPair('Ed25519') + peerIdA = peerIdFromPrivateKey(privateKeyA) + + const privateKeyB = await generateKeyPair('Ed25519') + peerIdB = peerIdFromPrivateKey(privateKeyB) + + 
registrarA = stubInterface() + registrarB = stubInterface() + + pubsubA = floodsub()({ + peerId: peerIdA, + privateKey: privateKeyA, + registrar: registrarA, + logger: defaultLogger() + }) + pubsubB = floodsub()({ + peerId: peerIdB, + privateKey: privateKeyB, + registrar: registrarB, + logger: defaultLogger() + }) + }) + + // start pubsub + beforeEach(async () => { + await start(pubsubA, pubsubB) + + expect(registrarA.handle.calledWith(pubsubA.protocols[0])).to.be.true() + expect(registrarB.handle.calledWith(pubsubB.protocols[0])).to.be.true() + }) + + afterEach(async () => { + sinon.restore() + + await stop(pubsubA, pubsubB) + }) + + it('should handle onConnect as expected', async () => { + const topologyA = registrarA.register.getCall(0).args[1] + const handlerB = registrarB.handle.getCall(0).args[1] + + if (topologyA == null || handlerB == null) { + throw new Error(`No handler registered for ${pubsubA.protocols[0]}`) + } + + const [c0, c1] = await connectionPair(peerIdA, peerIdB) + + // Notify peers of connection + topologyA.onConnect?.(peerIdB, c0) + await handlerB(await c1.newStream([pubsubA.protocols[0]]), c1) + + expect(pubsubA.getPeers()).to.have.lengthOf(1) + expect(pubsubB.getPeers()).to.have.lengthOf(1) + }) + + it('should use the latest connection if onConnect is called more than once', async () => { + const topologyA = registrarA.register.getCall(0).args[1] + const handlerB = registrarB.handle.getCall(0).args[1] + + if (topologyA == null || handlerB == null) { + throw new Error(`No handler registered for ${pubsubA.protocols[0]}`) + } + + // Notify peers of connection + const [c0, c1] = await connectionPair(peerIdA, peerIdB) + const [c2] = await connectionPair(peerIdA, peerIdB) + + sinon.spy(c0, 'newStream') + + topologyA.onConnect?.(peerIdB, c0) + handlerB(await c1.newStream(pubsubA.protocols[0]), c1) + expect(c0.newStream).to.have.property('callCount', 1) + + // @ts-expect-error _removePeer is a protected method + sinon.spy(pubsubA, '_removePeer') + + 
sinon.spy(c2, 'newStream') + + await topologyA?.onConnect?.(peerIdB, c2) + // newStream invocation takes place in a resolved promise + expect(c2.newStream).to.have.property('callCount', 1) + + // @ts-expect-error _removePeer is a protected method + expect(pubsubA._removePeer).to.have.property('callCount', 0) + + // Verify the first stream was closed + // @ts-expect-error .returnValues is a sinon property + const { stream: firstStream } = await c0.newStream.returnValues[0] + try { + await firstStream.sink(['test']) + } catch (err: any) { + expect(err).to.exist() + return + } + expect.fail('original stream should have ended') + }) + + it('should handle newStream errors in onConnect', async () => { + const topologyA = registrarA.register.getCall(0).args[1] + const handlerB = registrarB.handle.getCall(0).args[1] + + if (topologyA == null || handlerB == null) { + throw new Error(`No handler registered for ${pubsubA.protocols[0]}`) + } + + // Notify peers of connection + const [c0, c1] = await connectionPair(peerIdA, peerIdB) + const error = new Error('new stream error') + sinon.stub(c0, 'newStream').throws(error) + + topologyA.onConnect?.(peerIdB, c0) + handlerB(await c1.newStream(pubsubA.protocols[0]), c1) + + expect(c0.newStream).to.have.property('callCount', 1) + }) + + it('should handle onDisconnect as expected', async () => { + const topologyA = registrarA.register.getCall(0).args[1] + const topologyB = registrarB.register.getCall(0).args[1] + const handlerB = registrarB.handle.getCall(0).args[1] + + if (topologyA == null || handlerB == null) { + throw new Error(`No handler registered for ${pubsubA.protocols[0]}`) + } + + // Notify peers of connection + const [c0, c1] = await connectionPair(peerIdA, peerIdB) + + topologyA.onConnect?.(peerIdB, c0) + await handlerB(await c1.newStream(pubsubA.protocols[0]), c1) + + // Notify peers of disconnect + topologyA?.onDisconnect?.(peerIdB) + topologyB?.onDisconnect?.(peerIdA) + + expect(pubsubA.getPeers()).to.be.empty() + 
expect(pubsubB.getPeers()).to.be.empty() + }) + + it('should handle onDisconnect for unknown peers', () => { + const topologyA = registrarA.register.getCall(0).args[1] + + if (topologyA == null) { + throw new Error(`No handler registered for ${pubsubA.protocols[0]}`) + } + + expect(pubsubA.getPeers()).to.be.empty() + + // Notice peers of disconnect + topologyA?.onDisconnect?.(peerIdB) + + expect(pubsubA.getPeers()).to.be.empty() + }) + }) +}) diff --git a/packages/pubsub/test/message.spec.ts b/packages/floodsub/test/message.spec.ts similarity index 54% rename from packages/pubsub/test/message.spec.ts rename to packages/floodsub/test/message.spec.ts index 8c7130be80..35f6535683 100644 --- a/packages/pubsub/test/message.spec.ts +++ b/packages/floodsub/test/message.spec.ts @@ -4,29 +4,30 @@ import { defaultLogger } from '@libp2p/logger' import { peerIdFromPrivateKey } from '@libp2p/peer-id' import { expect } from 'aegir/chai' import sinon from 'sinon' +import { stubInterface } from 'sinon-ts' import { fromString as uint8ArrayFromString } from 'uint8arrays/from-string' +import { floodsub } from '../src/index.js' import { randomSeqno } from '../src/utils.js' -import { - MockRegistrar, - PubsubImplementation -} from './utils/index.js' -import type { PeerId, Message } from '@libp2p/interface' +import type { FloodSub } from '../src/index.js' +import type { PeerId } from '@libp2p/interface' +import type { Registrar } from '@libp2p/interface-internal' +import type { StubbedInstance } from 'sinon-ts' describe('pubsub base messages', () => { let peerId: PeerId - let pubsub: PubsubImplementation + let pubsub: FloodSub + let registrar: StubbedInstance before(async () => { const privateKey = await generateKeyPair('Ed25519') peerId = peerIdFromPrivateKey(privateKey) + registrar = stubInterface() - pubsub = new PubsubImplementation({ + pubsub = floodsub()({ peerId, privateKey, - registrar: new MockRegistrar(), + registrar, logger: defaultLogger() - }, { - multicodecs: 
['/pubsub/1.0.0'] }) }) @@ -42,9 +43,11 @@ describe('pubsub base messages', () => { sequenceNumber: randomSeqno() } - const signedMessage = await pubsub.buildMessage(message) + // @ts-expect-error private method + const signedMessage = await pubsub['buildMessage'](message) - await expect(pubsub.validate(peerId, signedMessage)).to.eventually.not.be.rejected() + // @ts-expect-error private method + await expect(pubsub['validate'](peerId, signedMessage)).to.eventually.not.be.rejected() }) it('validate with StrictNoSign will reject a message with from, signature, key, seqno present', async () => { @@ -57,26 +60,28 @@ describe('pubsub base messages', () => { sinon.stub(pubsub, 'globalSignaturePolicy').value('StrictSign') - const signedMessage = await pubsub.buildMessage(message) + // @ts-expect-error private method + const signedMessage = await pubsub['buildMessage'](message) if (signedMessage.type === 'unsigned') { throw new Error('Message was not signed') } sinon.stub(pubsub, 'globalSignaturePolicy').value('StrictNoSign') - await expect(pubsub.validate(peerId, signedMessage)).to.eventually.be.rejected() - // @ts-expect-error this field is not optional + // @ts-expect-error private method + await expect(pubsub['validate'](peerId, signedMessage)).to.eventually.be.rejected() delete signedMessage.from - await expect(pubsub.validate(peerId, signedMessage)).to.eventually.be.rejected() - // @ts-expect-error this field is not optional + // @ts-expect-error private method + await expect(pubsub['validate'](peerId, signedMessage)).to.eventually.be.rejected() delete signedMessage.signature - await expect(pubsub.validate(peerId, signedMessage)).to.eventually.be.rejected() - // @ts-expect-error this field is not optional + // @ts-expect-error private method + await expect(pubsub['validate'](peerId, signedMessage)).to.eventually.be.rejected() delete signedMessage.key - await expect(pubsub.validate(peerId, signedMessage)).to.eventually.be.rejected() - // @ts-expect-error this field 
is not optional + // @ts-expect-error private method + await expect(pubsub['validate'](peerId, signedMessage)).to.eventually.be.rejected() delete signedMessage.sequenceNumber - await expect(pubsub.validate(peerId, signedMessage)).to.eventually.not.be.rejected() + // @ts-expect-error private method + await expect(pubsub['validate'](peerId, signedMessage)).to.eventually.not.be.rejected() }) it('validate with StrictNoSign will validate a message without a signature, key, and seqno', async () => { @@ -89,8 +94,10 @@ describe('pubsub base messages', () => { sinon.stub(pubsub, 'globalSignaturePolicy').value('StrictNoSign') - const signedMessage = await pubsub.buildMessage(message) - await expect(pubsub.validate(peerId, signedMessage)).to.eventually.not.be.rejected() + // @ts-expect-error private method + const signedMessage = await pubsub['buildMessage'](message) + // @ts-expect-error private method + await expect(pubsub['validate'](peerId, signedMessage)).to.eventually.not.be.rejected() }) it('validate with StrictSign requires a signature', async () => { @@ -101,6 +108,7 @@ describe('pubsub base messages', () => { topic: 'test-topic' } - await expect(pubsub.validate(peerId, message)).to.be.rejectedWith(Error, 'Signing required and no signature was present') + // @ts-expect-error private method + await expect(pubsub['validate'](peerId, message)).to.be.rejectedWith(Error, 'Signing required and no signature was present') }) }) diff --git a/packages/pubsub/test/peer-streams.spec.ts b/packages/floodsub/test/peer-streams.spec.ts similarity index 100% rename from packages/pubsub/test/peer-streams.spec.ts rename to packages/floodsub/test/peer-streams.spec.ts diff --git a/packages/pubsub/test/pubsub.spec.ts b/packages/floodsub/test/pubsub.spec.ts similarity index 68% rename from packages/pubsub/test/pubsub.spec.ts rename to packages/floodsub/test/pubsub.spec.ts index 817172787c..1c68d6df24 100644 --- a/packages/pubsub/test/pubsub.spec.ts +++ 
b/packages/floodsub/test/pubsub.spec.ts @@ -6,48 +6,54 @@ import { PeerSet } from '@libp2p/peer-collections' import { peerIdFromPrivateKey } from '@libp2p/peer-id' import { expect } from 'aegir/chai' import delay from 'delay' -import pDefer from 'p-defer' import pWaitFor from 'p-wait-for' import sinon from 'sinon' +import { stubInterface } from 'sinon-ts' import { fromString as uint8ArrayFromString } from 'uint8arrays/from-string' +import { floodsub } from '../src/index.js' import { PeerStreams } from '../src/peer-streams.js' import { noSignMsgId } from '../src/utils.js' -import { - MockRegistrar, - connectionPair, - PubsubImplementation -} from './utils/index.js' -import type { PeerId, Message, PubSubRPC } from '@libp2p/interface' - -const protocol = '/pubsub/1.0.0' +import { connectionPair } from './fixtures/connection.js' +import type { PubSubRPC } from '../src/floodsub.js' +import type { FloodSub, FloodSubComponents, Message } from '../src/index.js' +import type { PeerId } from '@libp2p/interface' +import type { Registrar } from '@libp2p/interface-internal' +import type { StubbedInstance } from 'sinon-ts' + const topic = 'test-topic' const message = uint8ArrayFromString('hello') describe('pubsub base implementation', () => { describe('publish', () => { - let pubsub: PubsubImplementation + let pubsub: FloodSub + let registrar: StubbedInstance + let components: FloodSubComponents beforeEach(async () => { const privateKey = await generateKeyPair('Ed25519') const peerId = peerIdFromPrivateKey(privateKey) - - pubsub = new PubsubImplementation({ + registrar = stubInterface() + components = { peerId, privateKey, - registrar: new MockRegistrar(), + registrar, logger: defaultLogger() - }, { - multicodecs: [protocol], + } + + pubsub = floodsub({ emitSelf: true - }) + })(components) }) - afterEach(async () => { await pubsub.stop() }) + afterEach(async () => { + await stop(pubsub) + }) it('calls _publish for router to forward messages', async () => { + // @ts-expect-error 
private method sinon.spy(pubsub, 'publishMessage') - await pubsub.start() + await start(pubsub) await pubsub.publish(topic, message) // event dispatch is async @@ -61,9 +67,10 @@ describe('pubsub base implementation', () => { }) it('should sign messages on publish', async () => { + // @ts-expect-error private method const publishMessageSpy = sinon.spy(pubsub, 'publishMessage') - await pubsub.start() + await start(pubsub) await pubsub.publish(topic, message) // event dispatch is async @@ -74,13 +81,14 @@ describe('pubsub base implementation', () => { // Get the first message sent to _publish, and validate it const signedMessage: Message = publishMessageSpy.getCall(0).lastArg - await expect(pubsub.validate(pubsub.components.peerId, signedMessage)).to.eventually.be.undefined() + // @ts-expect-error private method + await expect(pubsub['validate'](components.peerId, signedMessage)).to.eventually.be.undefined() }) it('calls publishes messages twice', async () => { let count = 0 - await pubsub.start() + await start(pubsub) pubsub.subscribe(topic) pubsub.addEventListener('message', evt => { @@ -102,38 +110,45 @@ describe('pubsub base implementation', () => { describe('subscribe', () => { describe('basics', () => { - let pubsub: PubsubImplementation + let pubsub: FloodSub + let registrar: StubbedInstance + let components: FloodSubComponents beforeEach(async () => { const privateKey = await generateKeyPair('Ed25519') const peerId = peerIdFromPrivateKey(privateKey) - - pubsub = new PubsubImplementation({ + registrar = stubInterface() + components = { peerId, privateKey, - registrar: new MockRegistrar(), + registrar, logger: defaultLogger() - }, { - multicodecs: [protocol] - }) - await pubsub.start() + } + + pubsub = floodsub()(components) + + await start(pubsub) }) - afterEach(async () => { await pubsub.stop() }) + afterEach(async () => { + await stop(pubsub) + }) it('should add subscription', () => { pubsub.subscribe(topic) - expect(pubsub.subscriptions.size).to.eql(1) - 
expect(pubsub.subscriptions.has(topic)).to.be.true() + expect(pubsub.getTopics()).to.have.lengthOf(1) + expect(pubsub.getTopics()).to.include(topic) }) }) describe('two nodes', () => { - let pubsubA: PubsubImplementation, pubsubB: PubsubImplementation - let peerIdA: PeerId, peerIdB: PeerId - let registrarA: MockRegistrar - let registrarB: MockRegistrar + let pubsubA: FloodSub + let pubsubB: FloodSub + let peerIdA: PeerId + let peerIdB: PeerId + let registrarA: StubbedInstance + let registrarB: StubbedInstance beforeEach(async () => { const privateKeyA = await generateKeyPair('Ed25519') @@ -142,41 +157,40 @@ describe('pubsub base implementation', () => { const privateKeyB = await generateKeyPair('Ed25519') peerIdB = peerIdFromPrivateKey(privateKeyB) - registrarA = new MockRegistrar() - registrarB = new MockRegistrar() + registrarA = stubInterface() + registrarB = stubInterface() - pubsubA = new PubsubImplementation({ + pubsubA = floodsub()({ peerId: peerIdA, privateKey: privateKeyA, registrar: registrarA, logger: defaultLogger() - }, { - multicodecs: [protocol] }) - pubsubB = new PubsubImplementation({ + pubsubB = floodsub()({ peerId: peerIdB, privateKey: privateKeyB, registrar: registrarB, logger: defaultLogger() - }, { - multicodecs: [protocol] }) // start pubsub and connect nodes await start(pubsubA, pubsubB) - const topologyA = registrarA.getTopologies(protocol)[0] - const handlerB = registrarB.getHandler(protocol) + expect(registrarA.register.calledWith(pubsubA.protocols[0])).to.be.true() + const topologyA = registrarA.register.getCall(0).args[1] + + expect(registrarB.handle.calledWith(pubsubA.protocols[0])).to.be.true() + const handlerB = registrarB.handle.getCall(0).args[1] if (topologyA == null || handlerB == null) { - throw new Error(`No handler registered for ${protocol}`) + throw new Error(`No handler registered for ${pubsubA.protocols[0]}`) } // Notify peers of connection const [c0, c1] = await connectionPair(peerIdA, peerIdB) 
topologyA.onConnect?.(peerIdB, c0) - await handlerB.handler(await c1.newStream(protocol), c1) + await handlerB(await c1.newStream(pubsubA.protocols[0]), c1) }) afterEach(async () => { @@ -184,14 +198,15 @@ describe('pubsub base implementation', () => { }) it('should send subscribe message to connected peers', async () => { - sinon.spy(pubsubA, 'send') - sinon.spy(pubsubB, 'processRpcSubOpt') + // @ts-expect-error private method + const sendSpy = sinon.spy(pubsubA, 'send') + // @ts-expect-error private method + const processRpcSubOptSpy = sinon.spy(pubsubB, 'processRpcSubOpt') pubsubA.subscribe(topic) // Should send subscriptions to a peer - // @ts-expect-error .callCount is a added by sinon - expect(pubsubA.send.callCount).to.equal(1) + expect(sendSpy.callCount).to.equal(1) // Other peer should receive subscription message await pWaitFor(() => { @@ -200,50 +215,50 @@ describe('pubsub base implementation', () => { return subscribers.length === 1 }) - // @ts-expect-error .callCount is a added by sinon - expect(pubsubB.processRpcSubOpt.callCount).to.equal(1) + expect(processRpcSubOptSpy.callCount).to.equal(1) }) }) }) describe('unsubscribe', () => { describe('basics', () => { - let pubsub: PubsubImplementation + let pubsub: FloodSub beforeEach(async () => { const privateKey = await generateKeyPair('Ed25519') const peerId = peerIdFromPrivateKey(privateKey) - pubsub = new PubsubImplementation({ + pubsub = floodsub()({ peerId, privateKey, - registrar: new MockRegistrar(), + registrar: stubInterface(), logger: defaultLogger() - }, { - multicodecs: [protocol] }) - await pubsub.start() + + await start(pubsub) }) - afterEach(async () => { await pubsub.stop() }) + afterEach(async () => { await stop(pubsub) }) it('should remove all subscriptions for a topic', () => { pubsub.subscribe(topic) pubsub.subscribe(topic) - expect(pubsub.subscriptions.size).to.eql(1) + expect(pubsub.getTopics()).to.have.lengthOf(1) pubsub.unsubscribe(topic) - 
expect(pubsub.subscriptions.size).to.eql(0) + expect(pubsub.getTopics()).to.have.lengthOf(0) }) }) describe('two nodes', () => { - let pubsubA: PubsubImplementation, pubsubB: PubsubImplementation - let peerIdA: PeerId, peerIdB: PeerId - let registrarA: MockRegistrar - let registrarB: MockRegistrar + let pubsubA: FloodSub + let pubsubB: FloodSub + let peerIdA: PeerId + let peerIdB: PeerId + let registrarA: StubbedInstance + let registrarB: StubbedInstance beforeEach(async () => { const privateKeyA = await generateKeyPair('Ed25519') @@ -252,57 +267,52 @@ describe('pubsub base implementation', () => { const privateKeyB = await generateKeyPair('Ed25519') peerIdB = peerIdFromPrivateKey(privateKeyB) - registrarA = new MockRegistrar() - registrarB = new MockRegistrar() + registrarA = stubInterface() + registrarB = stubInterface() - pubsubA = new PubsubImplementation({ + pubsubA = floodsub()({ peerId: peerIdA, privateKey: privateKeyA, registrar: registrarA, logger: defaultLogger() - }, { - multicodecs: [protocol] }) - pubsubB = new PubsubImplementation({ + pubsubB = floodsub()({ peerId: peerIdB, privateKey: privateKeyB, registrar: registrarB, logger: defaultLogger() - }, { - multicodecs: [protocol] }) }) // start pubsub and connect nodes beforeEach(async () => { - await Promise.all([ - pubsubA.start(), - pubsubB.start() - ]) + await start(pubsubA, pubsubB) - const topologyA = registrarA.getTopologies(protocol)[0] - const handlerB = registrarB.getHandler(protocol) + expect(registrarA.register.calledWith(pubsubA.protocols[0])).to.be.true() + const topologyA = registrarA.register.getCall(0).args[1] + + expect(registrarB.handle.calledWith(pubsubA.protocols[0])).to.be.true() + const handlerB = registrarB.handle.getCall(0).args[1] if (topologyA == null || handlerB == null) { - throw new Error(`No handler registered for ${protocol}`) + throw new Error(`No handler registered for ${pubsubA.protocols[0]}`) } // Notify peers of connection const [c0, c1] = await 
connectionPair(peerIdA, peerIdB) topologyA.onConnect?.(peerIdB, c0) - await handlerB.handler(await c1.newStream(protocol), c1) + await handlerB(await c1.newStream(pubsubA.protocols[0]), c1) }) afterEach(async () => { - await Promise.all([ - pubsubA.stop(), - pubsubB.stop() - ]) + await stop(pubsubA, pubsubB) }) it('should send unsubscribe message to connected peers', async () => { + // @ts-expect-error private method const pubsubASendSpy = sinon.spy(pubsubA, 'send') + // @ts-expect-error private method const pubsubBProcessRpcSubOptSpy = sinon.spy(pubsubB, 'processRpcSubOpt') pubsubA.subscribe(topic) @@ -336,6 +346,7 @@ describe('pubsub base implementation', () => { }) it('should not send unsubscribe message to connected peers if not subscribed', () => { + // @ts-expect-error private method const pubsubASendSpy = sinon.spy(pubsubA, 'send') // Unsubscribe @@ -348,24 +359,24 @@ }) describe('getTopics', () => { - let pubsub: PubsubImplementation + let pubsub: FloodSub beforeEach(async () => { const privateKey = await generateKeyPair('Ed25519') const peerId = peerIdFromPrivateKey(privateKey) - pubsub = new PubsubImplementation({ + pubsub = floodsub()({ peerId, privateKey, - registrar: new MockRegistrar(), + registrar: stubInterface(), logger: defaultLogger() - }, { - multicodecs: [protocol] }) - await pubsub.start() + await start(pubsub) }) - afterEach(async () => { await pubsub.stop() }) + afterEach(async () => { + await stop(pubsub) + }) it('returns the subscribed topics', () => { let subsTopics = pubsub.getTopics() @@ -381,23 +392,21 @@ describe('pubsub base implementation', () => { describe('getSubscribers', () => { let peerId: PeerId - let pubsub: PubsubImplementation + let pubsub: FloodSub beforeEach(async () => { const privateKey = await generateKeyPair('Ed25519') peerId = peerIdFromPrivateKey(privateKey) - pubsub = new PubsubImplementation({ + pubsub = floodsub()({ peerId, privateKey, - registrar: new 
MockRegistrar(), + registrar: stubInterface(), logger: defaultLogger() - }, { - multicodecs: [protocol] }) }) - afterEach(async () => { await pubsub.stop() }) + afterEach(async () => { await stop(pubsub) }) it('should fail if pubsub is not started', () => { const topic = 'test-topic' @@ -414,7 +423,7 @@ describe('pubsub base implementation', () => { it('should fail if no topic is provided', async () => { // start pubsub - await pubsub.start() + await start(pubsub) try { // @ts-expect-error invalid params @@ -431,7 +440,7 @@ describe('pubsub base implementation', () => { const topic = 'test-topic' // start pubsub - await pubsub.start() + await start(pubsub) let peersSubscribed = pubsub.getSubscribers(topic) expect(peersSubscribed).to.be.empty() @@ -445,8 +454,10 @@ describe('pubsub base implementation', () => { const set = new PeerSet() set.add(id) - pubsub.topics.set(topic, set) - pubsub.peers.set(peer.id, peer) + // @ts-expect-error private method + pubsub['topics'].set(topic, set) + // @ts-expect-error private method + pubsub['peers'].set(peer.id, peer) peersSubscribed = pubsub.getSubscribers(topic) @@ -456,28 +467,28 @@ describe('pubsub base implementation', () => { }) describe('verification', () => { - let pubsub: PubsubImplementation + let pubsub: FloodSub const data = uint8ArrayFromString('bar') beforeEach(async () => { const privateKey = await generateKeyPair('Ed25519') const peerId = peerIdFromPrivateKey(privateKey) - pubsub = new PubsubImplementation({ + pubsub = floodsub()({ peerId, privateKey, - registrar: new MockRegistrar(), + registrar: stubInterface(), logger: defaultLogger() - }, { - multicodecs: [protocol] }) - await pubsub.start() + await start(pubsub) }) - afterEach(async () => { await pubsub.stop() }) + afterEach(async () => { await stop(pubsub) }) it('should drop unsigned messages', async () => { + // @ts-expect-error private method const publishSpy = sinon.spy(pubsub, 'publishMessage') + // @ts-expect-error private method sinon.spy(pubsub, 
'validate') const peerStream = new PeerStreams({ @@ -498,7 +509,8 @@ describe('pubsub base implementation', () => { pubsub.subscribe(topic) - await pubsub.processRpc(peerStream.id, peerStream, rpc) + // @ts-expect-error private method + await pubsub['processRpc'](peerStream.id, peerStream, rpc) // message should not be delivered await delay(1000) @@ -509,8 +521,10 @@ describe('pubsub base implementation', () => { it('should not drop unsigned messages if strict signing is disabled', async () => { pubsub.globalSignaturePolicy = 'StrictNoSign' + // @ts-expect-error private method const publishSpy = sinon.spy(pubsub, 'publishMessage') - sinon.spy(pubsub, 'validate') + // @ts-expect-error private method + const validateSpy = sinon.spy(pubsub, 'validate') const peerStream = new PeerStreams({ logger: defaultLogger() @@ -530,7 +544,7 @@ describe('pubsub base implementation', () => { pubsub.subscribe(topic) - const deferred = pDefer() + const deferred = Promise.withResolvers() pubsub.addEventListener('message', (evt) => { if (evt.detail.topic === topic) { @@ -538,12 +552,13 @@ describe('pubsub base implementation', () => { } }) - await pubsub.processRpc(peerStream.id, peerStream, rpc) + // @ts-expect-error private method + await pubsub['processRpc'](peerStream.id, peerStream, rpc) // await message delivery await deferred.promise - expect(pubsub.validate).to.have.property('callCount', 1) + expect(validateSpy).to.have.property('callCount', 1) expect(publishSpy).to.have.property('callCount', 1) }) }) diff --git a/packages/pubsub/test/sign.spec.ts b/packages/floodsub/test/sign.spec.ts similarity index 86% rename from packages/pubsub/test/sign.spec.ts rename to packages/floodsub/test/sign.spec.ts index c29b634233..c79606d732 100644 --- a/packages/pubsub/test/sign.spec.ts +++ b/packages/floodsub/test/sign.spec.ts @@ -3,14 +3,16 @@ import { peerIdFromPrivateKey } from '@libp2p/peer-id' import { expect } from 'aegir/chai' import { concat as uint8ArrayConcat } from 
'uint8arrays/concat' import { fromString as uint8ArrayFromString } from 'uint8arrays/from-string' +import { RPC } from '../src/message/rpc.js' import { signMessage, SignPrefix, verifySignature } from '../src/sign.js' import { randomSeqno, toRpcMessage } from '../src/utils.js' -import { RPC } from './message/rpc.js' -import type { PeerId, PrivateKey, PubSubRPCMessage } from '@libp2p/interface' +import type { PubSubRPCMessage } from '../src/floodsub.js' +import type { Message } from '../src/index.ts' +import type { PeerId, PrivateKey } from '@libp2p/interface' function encodeMessage (message: PubSubRPCMessage): Uint8Array { return RPC.Message.encode(message) @@ -26,7 +28,8 @@ describe('message signing', () => { }) it('should be able to sign and verify a message', async () => { - const message = { + // @ts-expect-error incomplete implementation + const message: Message = { type: 'signed', from: peerId, data: uint8ArrayFromString('hello'), @@ -34,11 +37,11 @@ describe('message signing', () => { topic: 'test-topic' } - // @ts-expect-error missing fields const bytesToSign = uint8ArrayConcat([SignPrefix, RPC.Message.encode(toRpcMessage(message)).subarray()]) const expectedSignature = await privateKey.sign(bytesToSign) + // @ts-expect-error missing fields const signedMessage = await signMessage(privateKey, message, encodeMessage) // Check the signature and public key @@ -57,7 +60,8 @@ describe('message signing', () => { const secPrivateKey = await generateKeyPair('secp256k1') const secPeerId = peerIdFromPrivateKey(secPrivateKey) - const message = { + // @ts-expect-error incomplete implementation + const message: Message = { type: 'signed', from: secPeerId, data: uint8ArrayFromString('hello'), @@ -65,14 +69,14 @@ describe('message signing', () => { topic: 'test-topic' } - // @ts-expect-error missing fields const bytesToSign = uint8ArrayConcat([SignPrefix, RPC.Message.encode(toRpcMessage(message)).subarray()]) const expectedSignature = await secPrivateKey.sign(bytesToSign) + 
// @ts-expect-error required field const signedMessage = await signMessage(secPrivateKey, message, encodeMessage) // Check the signature and public key expect(signedMessage.signature).to.eql(expectedSignature) - // @ts-expect-error field is required + // @ts-expect-error required field signedMessage.key = undefined // Verify the signature @@ -84,7 +88,8 @@ describe('message signing', () => { }) it('should be able to extract the public key from the message', async () => { - const message = { + // @ts-expect-error incomplete implementation + const message: Message = { type: 'signed', from: peerId, data: uint8ArrayFromString('hello'), @@ -92,9 +97,9 @@ describe('message signing', () => { topic: 'test-topic' } - // @ts-expect-error missing fields const bytesToSign = uint8ArrayConcat([SignPrefix, RPC.Message.encode(toRpcMessage(message)).subarray()]) const expectedSignature = await privateKey.sign(bytesToSign) + // @ts-expect-error missing fields const signedMessage = await signMessage(privateKey, message, encodeMessage) // Check the signature and public key diff --git a/packages/pubsub/test/topic-validators.spec.ts b/packages/floodsub/test/topic-validators.spec.ts similarity index 79% rename from packages/pubsub/test/topic-validators.spec.ts rename to packages/floodsub/test/topic-validators.spec.ts index c224b126f2..5040e5e31f 100644 --- a/packages/pubsub/test/topic-validators.spec.ts +++ b/packages/floodsub/test/topic-validators.spec.ts @@ -1,23 +1,22 @@ import { generateKeyPair } from '@libp2p/crypto/keys' -import { TopicValidatorResult } from '@libp2p/interface' +import { start } from '@libp2p/interface' import { defaultLogger } from '@libp2p/logger' import { peerIdFromPrivateKey } from '@libp2p/peer-id' import { expect } from 'aegir/chai' import pWaitFor from 'p-wait-for' import sinon from 'sinon' +import { stubInterface } from 'sinon-ts' import { equals as uint8ArrayEquals } from 'uint8arrays/equals' import { fromString as uint8ArrayFromString } from 
'uint8arrays/from-string' +import { floodsub, TopicValidatorResult } from '../src/index.js' import { PeerStreams } from '../src/peer-streams.js' -import { - MockRegistrar, - PubsubImplementation -} from './utils/index.js' -import type { PubSubRPC, PeerId } from '@libp2p/interface' - -const protocol = '/pubsub/1.0.0' +import type { PubSubRPC } from '../src/floodsub.js' +import type { FloodSub } from '../src/index.js' +import type { PeerId } from '@libp2p/interface' +import type { Registrar } from '@libp2p/interface-internal' describe('topic validators', () => { - let pubsub: PubsubImplementation + let pubsub: FloodSub let otherPeerId: PeerId beforeEach(async () => { @@ -25,17 +24,16 @@ describe('topic validators', () => { const peerId = peerIdFromPrivateKey(privateKey) otherPeerId = peerIdFromPrivateKey(await generateKeyPair('Ed25519')) - pubsub = new PubsubImplementation({ + pubsub = floodsub({ + globalSignaturePolicy: 'StrictNoSign' + })({ peerId, privateKey, - registrar: new MockRegistrar(), + registrar: stubInterface(), logger: defaultLogger() - }, { - multicodecs: [protocol], - globalSignaturePolicy: 'StrictNoSign' }) - await pubsub.start() + await start(pubsub) }) afterEach(() => { @@ -44,7 +42,8 @@ describe('topic validators', () => { it('should filter messages by topic validator', async () => { // use publishMessage.callCount() to see if a message is valid or not - sinon.spy(pubsub, 'publishMessage') + // @ts-expect-error private method + const publishMessageSpy = sinon.spy(pubsub, 'publishMessage') // @ts-expect-error not all fields are implemented in return value sinon.stub(pubsub.peers, 'get').returns({}) const filteredTopic = 't' @@ -72,6 +71,7 @@ describe('topic validators', () => { // process valid message pubsub.subscribe(filteredTopic) + // @ts-expect-error private method void pubsub.processRpc(peer.id, peer, validRpc) // @ts-expect-error .callCount is a property added by sinon @@ -86,6 +86,7 @@ describe('topic validators', () => { }] } + // 
@ts-expect-error private method void pubsub.processRpc(peer.id, peer, invalidRpc) // @ts-expect-error .callCount is a property added by sinon @@ -105,10 +106,10 @@ describe('topic validators', () => { } // process previously invalid message, now is valid + // @ts-expect-error private method void pubsub.processRpc(peer.id, peer, invalidRpc2) pubsub.unsubscribe(filteredTopic) - // @ts-expect-error .callCount is a property added by sinon - await pWaitFor(() => pubsub.publishMessage.callCount === 2) + await pWaitFor(() => publishMessageSpy.callCount === 2) }) }) diff --git a/packages/pubsub/test/utils.spec.ts b/packages/floodsub/test/utils.spec.ts similarity index 97% rename from packages/pubsub/test/utils.spec.ts rename to packages/floodsub/test/utils.spec.ts index 1cea0f4fbf..b2a7f6d014 100644 --- a/packages/pubsub/test/utils.spec.ts +++ b/packages/floodsub/test/utils.spec.ts @@ -2,7 +2,8 @@ import { generateKeyPair, publicKeyToProtobuf } from '@libp2p/crypto/keys' import { peerIdFromPrivateKey, peerIdFromString } from '@libp2p/peer-id' import { expect } from 'aegir/chai' import * as utils from '../src/utils.js' -import type { Message, PubSubRPCMessage } from '@libp2p/interface' +import type { PubSubRPCMessage } from '../src/floodsub.js' +import type { Message } from '../src/index.js' describe('utils', () => { it('randomSeqno', () => { diff --git a/packages/pubsub-floodsub/tsconfig.json b/packages/floodsub/tsconfig.json similarity index 91% rename from packages/pubsub-floodsub/tsconfig.json rename to packages/floodsub/tsconfig.json index d1cb65cfe1..ed3f23741e 100644 --- a/packages/pubsub-floodsub/tsconfig.json +++ b/packages/floodsub/tsconfig.json @@ -25,9 +25,6 @@ }, { "path": "../peer-id" - }, - { - "path": "../pubsub" } ] } diff --git a/packages/pubsub-floodsub/typedoc.json b/packages/floodsub/typedoc.json similarity index 100% rename from packages/pubsub-floodsub/typedoc.json rename to packages/floodsub/typedoc.json diff --git a/packages/gossipsub/.aegir.js 
b/packages/gossipsub/.aegir.js new file mode 100644 index 0000000000..b3d7d69e84 --- /dev/null +++ b/packages/gossipsub/.aegir.js @@ -0,0 +1,6 @@ +/** @type {import('aegir').PartialOptions} */ +export default { + build: { + bundlesizeMax: '85KB' + } +} diff --git a/packages/gossipsub/README.md b/packages/gossipsub/README.md new file mode 100644 index 0000000000..6fdff2ec7b --- /dev/null +++ b/packages/gossipsub/README.md @@ -0,0 +1,85 @@ +# js-libp2p-gossipsub + +[![](https://img.shields.io/badge/made%20by-ChainSafe-blue.svg?style=flat-square)](https://chainsafe.io/) +[![Travis CI](https://flat.badgen.net/travis/ChainSafe/gossipsub-js)](https://travis-ci.com/ChainSafe/gossipsub-js) +![ES Version](https://img.shields.io/badge/ES-2017-yellow) +![Node Version](https://img.shields.io/badge/node-10.x-green) + +## Table of Contents + +- [js-libp2p-gossipsub](#js-libp2p-gossipsub) + - [Lead Maintainer](#lead-maintainer) + - [Table of Contents](#table-of-contents) + - [Specs](#specs) + - [Install](#install) + - [Usage](#usage) + - [API](#api) + - [Create a gossipsub implementation](#create-a-gossipsub-implementation) + - [Contribute](#contribute) + - [License](#license) + +## Specs + +Gossipsub is an implementation of pubsub based on meshsub and floodsub. You can read the specification [here](https://github.com/libp2p/specs/tree/master/pubsub/gossipsub). + +`libp2p-gossipsub` currently implements the [`v1.1`](https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.1.md) of the spec. + +## Install + +`npm install @chainsafe/libp2p-gossipsub` + +## Usage + +```javascript +import { gossipsub } from '@chainsafe/libp2p-gossipsub' + + +const libp2p = await createLibp2p({ + // ... 
+ services: { + pubsub: gossipsub(options) + } +}); + +libp2p.services.pubsub.addEventListener('message', (message) => { + console.log(`${message.detail.topic}:`, new TextDecoder().decode(message.detail.data)) +}) + +libp2p.services.pubsub.subscribe('fruit') + +libp2p.services.pubsub.publish('fruit', new TextEncoder().encode('banana')) +``` + +## API + +### Create a gossipsub implementation + +```js +const options = {…} +const gossipsub = gossipsub(options)(libp2p) +``` + +Options is an optional object with the following key-value pairs: + +- **`emitSelf`**: boolean identifying whether the node should emit to self on publish, in the event of the topic being subscribed (defaults to **false**). +- **`gossipIncoming`**: boolean identifying if incoming messages on a subscribed topic should be automatically gossiped (defaults to **true**). +- **`fallbackToFloodsub`**: boolean identifying whether the node should fallback to the floodsub protocol, if another connecting peer does not support gossipsub (defaults to **true**). +- **`floodPublish`**: boolean identifying if self-published messages should be sent to all peers, (defaults to **true**). +- **`doPX`**: boolean identifying whether PX is enabled; this should be enabled in bootstrappers and other well connected/trusted nodes (defaults to **false**). +- **`msgIdFn`**: a function with signature `(message) => string` defining the message id given a message, used internally to deduplicate gossip (defaults to `(message) => message.from + message.seqno.toString('hex')`) +- **`signMessages`**: boolean identifying if we want to sign outgoing messages or not (default: `true`) +- **`strictSigning`**: boolean identifying if message signing is required for incoming messages or not (default: `true`) +- **`messageCache`**: optional, a customized `MessageCache` instance, see the implementation for the interface. +- **`scoreParams`**: optional, a customized peer score parameters Object. 
+- **`scoreThresholds`**: optional, a customized peer score thresholds Object. +- **`directPeers`**: optional, an array of `AddrInfo` of peers with which we will maintain direct connections. + +For the remaining API, see [@libp2p/interface-pubsub](https://github.com/libp2p/js-libp2p-interfaces/tree/master/packages/interface-pubsub). + +## Contribute + +This module is actively under development. Please check out the issues and submit PRs! + +## License + +MIT © ChainSafe Systems diff --git a/packages/gossipsub/package.json b/packages/gossipsub/package.json new file mode 100644 index 0000000000..5b8dd8bf56 --- /dev/null +++ b/packages/gossipsub/package.json @@ -0,0 +1,142 @@ +{ + "name": "@libp2p/gossipsub", + "version": "14.1.1", + "description": "A typescript implementation of gossipsub", + "files": [ + "src", + "dist", + "!dist/test", + "!**/*.tsbuildinfo" + ], + "type": "module", + "types": "dist/src/index.d.ts", + "exports": { + ".": { + "types": "./dist/src/index.d.ts", + "import": "./dist/src/index.js" + }, + "./metrics": { + "types": "./dist/src/metrics.d.ts", + "import": "./dist/src/metrics.js" + }, + "./message": { + "types": "./dist/src/message/index.d.ts", + "import": "./dist/src/message/index.js" + }, + "./score": { + "types": "./dist/src/score/index.d.ts", + "import": "./dist/src/score/index.js" + }, + "./types": { + "types": "./dist/src/types.d.ts", + "import": "./dist/src/types.js" + } + }, + "typesVersions": { + "*": { + "*": [ + "*", + "dist/src/*", + "dist/src/*/index" + ] + } + }, + "scripts": { + "lint": "aegir lint", + "release": "aegir release --no-types", + "build": "aegir build", + "generate": "protons ./src/message/rpc.proto", + "pretest": "npm run build", + "pretest:e2e": "npm run build", + "benchmark": "yarn benchmark:files 'test/benchmark/**/*.test.ts'", + "benchmark:files": "NODE_OPTIONS='--max-old-space-size=4096 --loader=ts-node/esm' benchmark --config .benchrc.yaml --defaultBranch master", + "test": "aegir test -f 
'./dist/test/*.spec.js'", + "test:unit": "aegir test -f './dist/test/unit/*.test.js' --target node", + "test:e2e": "aegir test -f './dist/test/e2e/*.spec.js'", + "test:browser": "npm run test -- --target browser" + }, + "repository": { + "type": "git", + "url": "git+https://github.com/ChainSafe/js-libp2p-gossipsub.git" + }, + "keywords": [ + "libp2p", + "pubsub", + "gossip" + ], + "author": "Cayman Nava", + "license": "Apache-2.0", + "bugs": { + "url": "https://github.com/ChainSafe/js-libp2p-gossipsub/issues" + }, + "homepage": "https://github.com/ChainSafe/js-libp2p-gossipsub#readme", + "dependencies": { + "@libp2p/crypto": "^5.0.0", + "@libp2p/interface": "^2.0.0", + "@libp2p/interface-internal": "^2.0.0", + "@libp2p/peer-id": "^5.0.0", + "@libp2p/pubsub": "^10.0.0", + "@multiformats/multiaddr": "^13.0.1", + "denque": "^2.1.0", + "it-length-prefixed": "^10.0.1", + "it-pipe": "^3.0.1", + "it-pushable": "^3.2.3", + "multiformats": "^13.0.1", + "protons-runtime": "^5.5.0", + "uint8arraylist": "^2.4.8", + "uint8arrays": "^5.0.1" + }, + "devDependencies": { + "@chainsafe/as-sha256": "^1.2.0", + "@dapplion/benchmark": "^1.0.0", + "@libp2p/floodsub": "^10.0.0", + "@libp2p/interface-compliance-tests": "^6.0.0", + "@libp2p/logger": "^5.0.0", + "@libp2p/peer-store": "^11.0.0", + "@types/node": "^22.18.1", + "@types/sinon": "^17.0.3", + "abortable-iterator": "^5.1.0", + "aegir": "^47.0.21", + "datastore-core": "^10.0.0", + "delay": "^6.0.0", + "it-all": "^3.0.6", + "mkdirp": "^3.0.1", + "p-defer": "^4.0.0", + "p-event": "^6.0.0", + "p-retry": "^7.0.0", + "p-wait-for": "^5.0.2", + "protons": "^7.5.0", + "sinon": "^21.0.0", + "sinon-ts": "^2.0.0", + "time-cache": "^0.3.0" + }, + "engines": { + "npm": ">=8.7.0" + }, + "eslintConfig": { + "extends": "ipfs", + "ignorePatterns": [ + "src/message/rpc*", + "!.aegir.js" + ] + }, + "contributors": [ + "Cayman ", + "Vasco Santos ", + "Mikerah ", + "Tuyen Nguyen ", + "Alex Potsides ", + "Marin Petrunić ", + "Lion - dapplion 
<35266934+dapplion@users.noreply.github.com>", + "Gregory Markou <16929357+GregTheGreek@users.noreply.github.com>", + "Alan Shaw ", + "Tuyen ", + "Jacob Heun ", + "Patrick Michot ", + "chainsafe ", + "Hugo Dias ", + "Franck Royer ", + "ChainSafe " + ], + "sideEffects": false +} diff --git a/packages/gossipsub/src/config.ts b/packages/gossipsub/src/config.ts new file mode 100644 index 0000000000..4fe6c112b8 --- /dev/null +++ b/packages/gossipsub/src/config.ts @@ -0,0 +1,31 @@ +export interface GossipsubOptsSpec { + /** D sets the optimal degree for a Gossipsub topic mesh. */ + D: number + /** Dlo sets the lower bound on the number of peers we keep in a Gossipsub topic mesh. */ + Dlo: number + /** Dhi sets the upper bound on the number of peers we keep in a Gossipsub topic mesh. */ + Dhi: number + /** Dscore affects how peers are selected when pruning a mesh due to over subscription. */ + Dscore: number + /** Dout sets the quota for the number of outbound connections to maintain in a topic mesh. */ + Dout: number + /** + * Dlazy affects the minimum number of peers we will emit gossip to at each + * heartbeat. + */ + Dlazy: number + /** heartbeatInterval is the time between heartbeats in milliseconds */ + heartbeatInterval: number + /** + * fanoutTTL controls how long we keep track of the fanout state. If it's been + * fanoutTTL milliseconds since we've published to a topic that we're not subscribed to, + * we'll delete the fanout map for that topic. 
+ */ + fanoutTTL: number + /** mcacheLength is the number of windows to retain full messages for IWANT responses */ + mcacheLength: number + /** mcacheGossip is the number of windows to gossip about */ + mcacheGossip: number + /** seenTTL is the number of milliseconds to retain message IDs in the seen cache */ + seenTTL: number +} diff --git a/packages/gossipsub/src/constants.ts b/packages/gossipsub/src/constants.ts new file mode 100644 index 0000000000..dc5ef80124 --- /dev/null +++ b/packages/gossipsub/src/constants.ts @@ -0,0 +1,261 @@ +export const second = 1000 +export const minute = 60 * second + +// Protocol identifiers + +export const FloodsubID = '/floodsub/1.0.0' + +/** + * The protocol ID for version 1.0.0 of the Gossipsub protocol + * It is advertised along with GossipsubIDv11 for backwards compatability + */ +export const GossipsubIDv10 = '/meshsub/1.0.0' + +/** + * The protocol ID for version 1.1.0 of the Gossipsub protocol + * See the spec for details about how v1.1.0 compares to v1.0.0: + * https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.1.md + */ +export const GossipsubIDv11 = '/meshsub/1.1.0' + +/** + * The protocol ID for version 1.2.0 of the Gossipsub protocol + * See the spec for details about how v1.2.0 compares to v1.1.0: + * https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.2.md + */ +export const GossipsubIDv12 = '/meshsub/1.2.0' + +// Overlay parameters + +/** + * GossipsubD sets the optimal degree for a Gossipsub topic mesh. For example, if GossipsubD == 6, + * each peer will want to have about six peers in their mesh for each topic they're subscribed to. + * GossipsubD should be set somewhere between GossipsubDlo and GossipsubDhi. + */ +export const GossipsubD = 6 + +/** + * GossipsubDlo sets the lower bound on the number of peers we keep in a Gossipsub topic mesh. + * If we have fewer than GossipsubDlo peers, we will attempt to graft some more into the mesh at + * the next heartbeat. 
+ */ +export const GossipsubDlo = 4 + +/** + * GossipsubDhi sets the upper bound on the number of peers we keep in a Gossipsub topic mesh. + * If we have more than GossipsubDhi peers, we will select some to prune from the mesh at the next heartbeat. + */ +export const GossipsubDhi = 12 + +/** + * GossipsubDscore affects how peers are selected when pruning a mesh due to over subscription. + * At least GossipsubDscore of the retained peers will be high-scoring, while the remainder are + * chosen randomly. + */ +export const GossipsubDscore = 4 + +/** + * GossipsubDout sets the quota for the number of outbound connections to maintain in a topic mesh. + * When the mesh is pruned due to over subscription, we make sure that we have outbound connections + * to at least GossipsubDout of the survivor peers. This prevents sybil attackers from overwhelming + * our mesh with incoming connections. + * + * GossipsubDout must be set below GossipsubDlo, and must not exceed GossipsubD / 2. + */ +export const GossipsubDout = 2 + +// Gossip parameters + +/** + * GossipsubHistoryLength controls the size of the message cache used for gossip. + * The message cache will remember messages for GossipsubHistoryLength heartbeats. + */ +export const GossipsubHistoryLength = 5 + +/** + * GossipsubHistoryGossip controls how many cached message ids we will advertise in + * IHAVE gossip messages. When asked for our seen message IDs, we will return + * only those from the most recent GossipsubHistoryGossip heartbeats. The slack between + * GossipsubHistoryGossip and GossipsubHistoryLength allows us to avoid advertising messages + * that will be expired by the time they're requested. + * + * GossipsubHistoryGossip must be less than or equal to GossipsubHistoryLength to + * avoid a runtime panic. + */ +export const GossipsubHistoryGossip = 3 + +/** + * GossipsubDlazy affects how many peers we will emit gossip to at each heartbeat. 
+ * We will send gossip to at least GossipsubDlazy peers outside our mesh. The actual + * number may be more, depending on GossipsubGossipFactor and how many peers we're + * connected to. + */ +export const GossipsubDlazy = 6 + +/** + * GossipsubGossipFactor affects how many peers we will emit gossip to at each heartbeat. + * We will send gossip to GossipsubGossipFactor * (total number of non-mesh peers), or + * GossipsubDlazy, whichever is greater. + */ +export const GossipsubGossipFactor = 0.25 + +/** + * GossipsubGossipRetransmission controls how many times we will allow a peer to request + * the same message id through IWANT gossip before we start ignoring them. This is designed + * to prevent peers from spamming us with requests and wasting our resources. + */ +export const GossipsubGossipRetransmission = 3 + +// Heartbeat interval + +/** + * GossipsubHeartbeatInitialDelay is the short delay before the heartbeat timer begins + * after the router is initialized. + */ +export const GossipsubHeartbeatInitialDelay = 100 + +/** + * GossipsubHeartbeatInterval controls the time between heartbeats. + */ +export const GossipsubHeartbeatInterval = second + +/** + * GossipsubFanoutTTL controls how long we keep track of the fanout state. If it's been + * GossipsubFanoutTTL since we've published to a topic that we're not subscribed to, + * we'll delete the fanout map for that topic. + */ +export const GossipsubFanoutTTL = minute + +/** + * GossipsubPrunePeers controls the number of peers to include in prune Peer eXchange. + * When we prune a peer that's eligible for PX (has a good score, etc), we will try to + * send them signed peer records for up to GossipsubPrunePeers other peers that we + * know of. + */ +export const GossipsubPrunePeers = 16 + +/** + * GossipsubPruneBackoff controls the backoff time for pruned peers. This is how long + * a peer must wait before attempting to graft into our mesh again after being pruned. 
+ * When pruning a peer, we send them our value of GossipsubPruneBackoff so they know + * the minimum time to wait. Peers running older versions may not send a backoff time, + * so if we receive a prune message without one, we will wait at least GossipsubPruneBackoff + * before attempting to re-graft. + */ +export const GossipsubPruneBackoff = minute + +/** + * Backoff to use when unsuscribing from a topic. Should not resubscribe to this topic before it expired. + */ +export const GossipsubUnsubscribeBackoff = 10 * second + +/** + * GossipsubPruneBackoffTicks is the number of heartbeat ticks for attempting to prune expired + * backoff timers. + */ +export const GossipsubPruneBackoffTicks = 15 + +/** + * GossipsubConnectors controls the number of active connection attempts for peers obtained through PX. + */ +export const GossipsubConnectors = 8 + +/** + * GossipsubMaxPendingConnections sets the maximum number of pending connections for peers attempted through px. + */ +export const GossipsubMaxPendingConnections = 128 + +/** + * GossipsubConnectionTimeout controls the timeout for connection attempts. + */ +export const GossipsubConnectionTimeout = 30 * second + +/** + * GossipsubDirectConnectTicks is the number of heartbeat ticks for attempting to reconnect direct peers + * that are not currently connected. + */ +export const GossipsubDirectConnectTicks = 300 + +/** + * GossipsubDirectConnectInitialDelay is the initial delay before opening connections to direct peers + */ +export const GossipsubDirectConnectInitialDelay = second + +/** + * GossipsubOpportunisticGraftTicks is the number of heartbeat ticks for attempting to improve the mesh + * with opportunistic grafting. 
Every GossipsubOpportunisticGraftTicks we will attempt to select some + * high-scoring mesh peers to replace lower-scoring ones, if the median score of our mesh peers falls + * below a threshold + */ +export const GossipsubOpportunisticGraftTicks = 60 + +/** + * GossipsubOpportunisticGraftPeers is the number of peers to opportunistically graft. + */ +export const GossipsubOpportunisticGraftPeers = 2 + +/** + * If a GRAFT comes before GossipsubGraftFloodThreshold has elapsed since the last PRUNE, + * then there is an extra score penalty applied to the peer through P7. + */ +export const GossipsubGraftFloodThreshold = 10 * second + +/** + * GossipsubMaxIHaveLength is the maximum number of messages to include in an IHAVE message. + * Also controls the maximum number of IHAVE ids we will accept and request with IWANT from a + * peer within a heartbeat, to protect from IHAVE floods. You should adjust this value from the + * default if your system is pushing more than 5000 messages in GossipsubHistoryGossip heartbeats; + * with the defaults this is 1666 messages/s. + */ +export const GossipsubMaxIHaveLength = 5000 + +/** + * GossipsubMaxIHaveMessages is the maximum number of IHAVE messages to accept from a peer within a heartbeat. + */ +export const GossipsubMaxIHaveMessages = 10 + +/** + * Time to wait for a message requested through IWANT following an IHAVE advertisement. + * If the message is not received within this window, a broken promise is declared and + * the router may apply bahavioural penalties. 
+ */ +export const GossipsubIWantFollowupTime = 3 * second + +/** + * Time in milliseconds to keep message ids in the seen cache + */ +export const GossipsubSeenTTL = 2 * minute + +export const TimeCacheDuration = 120 * 1000 + +export const ERR_TOPIC_VALIDATOR_REJECT = 'ERR_TOPIC_VALIDATOR_REJECT' +export const ERR_TOPIC_VALIDATOR_IGNORE = 'ERR_TOPIC_VALIDATOR_IGNORE' + +/** + * If peer score is better than this, we accept messages from this peer + * within ACCEPT_FROM_WHITELIST_DURATION_MS from the last time computing score. + */ +export const ACCEPT_FROM_WHITELIST_THRESHOLD_SCORE = 0 + +/** + * If peer score >= ACCEPT_FROM_WHITELIST_THRESHOLD_SCORE, accept up to this + * number of messages from that peer. + */ +export const ACCEPT_FROM_WHITELIST_MAX_MESSAGES = 128 + +/** + * If peer score >= ACCEPT_FROM_WHITELIST_THRESHOLD_SCORE, accept messages from + * this peer up to this time duration. + */ +export const ACCEPT_FROM_WHITELIST_DURATION_MS = 1000 + +/** + * The default MeshMessageDeliveriesWindow to be used in metrics. 
+ */ +export const DEFAULT_METRIC_MESH_MESSAGE_DELIVERIES_WINDOWS = 1000 + +/** Wait for 1 more heartbeats before clearing a backoff */ +export const BACKOFF_SLACK = 1 + +export const GossipsubIdontwantMinDataSize = 512 +export const GossipsubIdontwantMaxMessages = 512 diff --git a/packages/gossipsub/src/errors.ts b/packages/gossipsub/src/errors.ts new file mode 100644 index 0000000000..19723cf29a --- /dev/null +++ b/packages/gossipsub/src/errors.ts @@ -0,0 +1,17 @@ +export class InvalidPeerScoreParamsError extends Error { + static name = 'InvalidPeerScoreParamsError' + + constructor (message = 'Invalid peer score params') { + super(message) + this.name = 'InvalidPeerScoreParamsError' + } +} + +export class InvalidPeerScoreThresholdsError extends Error { + static name = 'InvalidPeerScoreThresholdsError' + + constructor (message = 'Invalid peer score thresholds') { + super(message) + this.name = 'InvalidPeerScoreThresholdsError' + } +} diff --git a/packages/gossipsub/src/gossipsub.ts b/packages/gossipsub/src/gossipsub.ts new file mode 100644 index 0000000000..69f5b55ce0 --- /dev/null +++ b/packages/gossipsub/src/gossipsub.ts @@ -0,0 +1,3061 @@ +import { TypedEventEmitter, serviceCapabilities, serviceDependencies } from '@libp2p/interface' +import { peerIdFromMultihash, peerIdFromString } from '@libp2p/peer-id' +import { encode } from 'it-length-prefixed' +import { pipe } from 'it-pipe' +import { pushable } from 'it-pushable' +import * as Digest from 'multiformats/hashes/digest' +import * as constants from './constants.js' +import { + ACCEPT_FROM_WHITELIST_DURATION_MS, + ACCEPT_FROM_WHITELIST_MAX_MESSAGES, + ACCEPT_FROM_WHITELIST_THRESHOLD_SCORE, + BACKOFF_SLACK +} from './constants.js' +import { StrictNoSign, StrictSign, TopicValidatorResult } from './index.ts' +import { defaultDecodeRpcLimits } from './message/decodeRpc.js' +import { RPC } from './message/rpc.js' +import { MessageCache } from './message-cache.js' +import { + ChurnReason, + getMetrics, + 
IHaveIgnoreReason, + InclusionReason, + + ScorePenalty + +} from './metrics.js' +import { + PeerScore, + + createPeerScoreParams, + createPeerScoreThresholds + +} from './score/index.js' +import { computeAllPeersScoreWeights } from './score/scoreMetrics.js' +import { InboundStream, OutboundStream } from './stream.js' +import { IWantTracer } from './tracer.js' +import { + + ValidateError, + + MessageStatus, + RejectReason, + + rejectReasonFromAcceptance + +} from './types.js' +import { buildRawMessage, validateToRawMessage } from './utils/buildRawMessage.js' +import { createGossipRpc, ensureControl } from './utils/create-gossip-rpc.js' +import { shuffle, messageIdToString } from './utils/index.js' +import { msgIdFnStrictNoSign, msgIdFnStrictSign } from './utils/msgIdFn.js' +import { multiaddrToIPStr } from './utils/multiaddr.js' +import { getPublishConfigFromPeerId } from './utils/publishConfig.js' +import { removeFirstNItemsFromSet, removeItemsFromSet } from './utils/set.js' +import { SimpleTimeCache } from './utils/time-cache.js' +import type { GossipSubComponents, GossipSubEvents, GossipsubMessage, GossipsubOpts, MeshPeer, Message, PublishResult, SubscriptionChangeData, TopicValidatorFn } from './index.ts' +import type { DecodeRPCLimits } from './message/decodeRpc.js' +import type { MessageCacheRecord } from './message-cache.js' +import type { Metrics, ToSendGroupCount } from './metrics.js' +import type { PeerScoreParams, PeerScoreThresholds, PeerScoreStatsDump } from './score/index.js' +import type { MsgIdFn, PublishConfig, TopicStr, MsgIdStr, PeerIdStr, RejectReasonObj, FastMsgIdFn, DataTransform, MsgIdToStrFn, MessageId, PublishOpts } from './types.js' +import type { + Connection, Stream, PeerId, Peer, + Logger, + Topology, + TypedEventTarget, + MessageStreamDirection +} from '@libp2p/interface' +import type { Multiaddr } from '@multiformats/multiaddr' +import type { Uint8ArrayList } from 'uint8arraylist' + +enum GossipStatusCode { + started, + stopped +} + 
+type GossipStatus = + | { + code: GossipStatusCode.started + registrarTopologyIds: string[] + heartbeatTimeout: ReturnType + hearbeatStartMs: number + } + | { + code: GossipStatusCode.stopped + } + +interface GossipOptions extends GossipsubOpts { + scoreParams: PeerScoreParams + scoreThresholds: PeerScoreThresholds +} + +interface AcceptFromWhitelistEntry { + /** number of messages accepted since recomputing the peer's score */ + messagesAccepted: number + /** have to recompute score after this time */ + acceptUntil: number +} + +type ReceivedMessageResult = + | { code: MessageStatus.duplicate, msgIdStr: MsgIdStr } + | ({ code: MessageStatus.invalid, msgIdStr?: MsgIdStr } & RejectReasonObj) + | { code: MessageStatus.valid, messageId: MessageId, msg: Message } + +export class GossipSub extends TypedEventEmitter implements TypedEventTarget { + /** + * The signature policy to follow by default + */ + public readonly globalSignaturePolicy: typeof StrictSign | typeof StrictNoSign + public protocols: string[] = [constants.GossipsubIDv12, constants.GossipsubIDv11, constants.GossipsubIDv10] + + private publishConfig: PublishConfig | undefined + + private readonly dataTransform: DataTransform | undefined + + // State + + public readonly peers = new Map() + public readonly streamsInbound = new Map() + public readonly streamsOutbound = new Map() + + /** Ensures outbound streams are created sequentially */ + private outboundInflightQueue = pushable<{ peerId: PeerId, connection: Connection }>({ objectMode: true }) + + /** Direct peers */ + public readonly direct = new Set() + + /** Floodsub peers */ + private readonly floodsubPeers = new Set() + + /** Cache of seen messages */ + private readonly seenCache: SimpleTimeCache + + /** + * Map of peer id and AcceptRequestWhileListEntry + */ + private readonly acceptFromWhitelist = new Map() + + /** + * Map of topics to which peers are subscribed to + */ + private readonly topics = new Map>() + + /** + * List of our subscriptions + 
*/ + private readonly subscriptions = new Set() + + /** + * Map of topic meshes + * topic => peer id set + */ + public readonly mesh = new Map>() + + /** + * Map of topics to set of peers. These mesh peers are the ones to which we are publishing without a topic membership + * topic => peer id set + */ + public readonly fanout = new Map>() + + /** + * Map of last publish time for fanout topics + * topic => last publish time + */ + private readonly fanoutLastpub = new Map() + + /** + * Map of pending messages to gossip + * peer id => control messages + */ + public readonly gossip = new Map() + + /** + * Map of control messages + * peer id => control message + */ + public readonly control = new Map() + + /** + * Number of IHAVEs received from peer in the last heartbeat + */ + private readonly peerhave = new Map() + + /** Number of messages we have asked from peer in the last heartbeat */ + private readonly iasked = new Map() + + /** Prune backoff map */ + private readonly backoff = new Map>() + + /** + * Connection direction cache, marks peers with outbound connections + * peer id => direction + */ + private readonly outbound = new Map() + private readonly msgIdFn: MsgIdFn + + /** + * A fast message id function used for internal message de-duplication + */ + private readonly fastMsgIdFn: FastMsgIdFn | undefined + + private readonly msgIdToStrFn: MsgIdToStrFn + + /** Maps fast message-id to canonical message-id */ + private readonly fastMsgIdCache: SimpleTimeCache | undefined + + /** + * Short term cache for published message ids. This is used for penalizing peers sending + * our own messages back if the messages are anonymous or use a random author. + */ + private readonly publishedMessageIds: SimpleTimeCache + + /** + * A message cache that contains the messages for last few heartbeat ticks + */ + private readonly mcache: MessageCache + + /** Peer score tracking */ + public readonly score: PeerScore + + /** + * Custom validator function per topic. 
+ * Must return or resolve quickly (< 100ms) to prevent causing penalties for late messages. + * If you need to apply validation that may require longer times use `asyncValidation` option and callback the + * validation result through `Gossipsub.reportValidationResult` + */ + public readonly topicValidators = new Map() + + /** + * Make this protected so child class may want to redirect to its own log. + */ + protected readonly log: Logger + + /** + * Number of heartbeats since the beginning of time + * This allows us to amortize some resource cleanup -- eg: backoff cleanup + */ + private heartbeatTicks = 0 + + /** + * Tracks IHAVE/IWANT promises broken by peers + */ + readonly gossipTracer: IWantTracer + + /** + * Tracks IDONTWANT messages received by peers in the current heartbeat + */ + private readonly idontwantCounts = new Map() + + /** + * Tracks IDONTWANT messages received by peers and the heartbeat they were received in + * + * idontwants are stored for `mcacheLength` heartbeats before being pruned, + * so this map is bounded by peerCount * idontwantMaxMessages * mcacheLength + */ + private readonly idontwants = new Map>() + + private readonly components: GossipSubComponents + + private directPeerInitial: ReturnType | null = null + + public static multicodec: string = constants.GossipsubIDv12 + + // Options + readonly opts: Required + private readonly decodeRpcLimits: DecodeRPCLimits + + private readonly metrics: Metrics | null + private status: GossipStatus = { code: GossipStatusCode.stopped } + private readonly maxInboundStreams?: number + private readonly maxOutboundStreams?: number + private readonly runOnLimitedConnection?: boolean + private readonly allowedTopics: Set | null + + private heartbeatTimer: { + _intervalId: ReturnType | undefined + runPeriodically(fn: () => void, period: number): void + cancel(): void + } | null = null + + constructor (components: GossipSubComponents, options: Partial = {}) { + super() + + const opts = { + 
fallbackToFloodsub: true, + floodPublish: true, + batchPublish: false, + tagMeshPeers: true, + doPX: false, + directPeers: [], + D: constants.GossipsubD, + Dlo: constants.GossipsubDlo, + Dhi: constants.GossipsubDhi, + Dscore: constants.GossipsubDscore, + Dout: constants.GossipsubDout, + Dlazy: constants.GossipsubDlazy, + heartbeatInterval: constants.GossipsubHeartbeatInterval, + fanoutTTL: constants.GossipsubFanoutTTL, + mcacheLength: constants.GossipsubHistoryLength, + mcacheGossip: constants.GossipsubHistoryGossip, + seenTTL: constants.GossipsubSeenTTL, + gossipsubIWantFollowupMs: constants.GossipsubIWantFollowupTime, + prunePeers: constants.GossipsubPrunePeers, + pruneBackoff: constants.GossipsubPruneBackoff, + unsubcribeBackoff: constants.GossipsubUnsubscribeBackoff, + graftFloodThreshold: constants.GossipsubGraftFloodThreshold, + opportunisticGraftPeers: constants.GossipsubOpportunisticGraftPeers, + opportunisticGraftTicks: constants.GossipsubOpportunisticGraftTicks, + directConnectTicks: constants.GossipsubDirectConnectTicks, + gossipFactor: constants.GossipsubGossipFactor, + idontwantMinDataSize: constants.GossipsubIdontwantMinDataSize, + idontwantMaxMessages: constants.GossipsubIdontwantMaxMessages, + ...options, + scoreParams: createPeerScoreParams(options.scoreParams), + scoreThresholds: createPeerScoreThresholds(options.scoreThresholds) + } + + this.components = components + this.decodeRpcLimits = opts.decodeRpcLimits ?? defaultDecodeRpcLimits + + this.globalSignaturePolicy = opts.globalSignaturePolicy ?? StrictSign + + // Also wants to get notified of peers connected using floodsub + if (opts.fallbackToFloodsub) { + this.protocols.push(constants.FloodsubID) + } + + // From pubsub + this.log = components.logger.forComponent(opts.debugName ?? 
'libp2p:gossipsub') + + // Gossipsub + + this.opts = opts as Required + this.direct = new Set(opts.directPeers.map((p) => p.id.toString())) + this.seenCache = new SimpleTimeCache({ validityMs: opts.seenTTL }) + this.publishedMessageIds = new SimpleTimeCache({ validityMs: opts.seenTTL }) + + if (options.msgIdFn != null) { + // Use custom function + this.msgIdFn = options.msgIdFn + } else { + switch (this.globalSignaturePolicy) { + case StrictSign: + this.msgIdFn = msgIdFnStrictSign + break + case StrictNoSign: + this.msgIdFn = msgIdFnStrictNoSign + break + default: + throw new Error(`Invalid globalSignaturePolicy: ${this.globalSignaturePolicy}`) + } + } + + if (options.fastMsgIdFn != null) { + this.fastMsgIdFn = options.fastMsgIdFn + this.fastMsgIdCache = new SimpleTimeCache({ validityMs: opts.seenTTL }) + } + + // By default, gossipsub only provide a browser friendly function to convert Uint8Array message id to string. + this.msgIdToStrFn = options.msgIdToStrFn ?? messageIdToString + + this.mcache = options.messageCache ?? 
new MessageCache(opts.mcacheGossip, opts.mcacheLength, this.msgIdToStrFn) + + if (options.dataTransform != null) { + this.dataTransform = options.dataTransform + } + + if (options.metricsRegister != null) { + if (options.metricsTopicStrToLabel == null) { + throw Error('Must set metricsTopicStrToLabel with metrics') + } + + // in theory, each topic has its own meshMessageDeliveriesWindow param + // however in lodestar, we configure it mostly the same so just pick the max of positive ones + // (some topics have meshMessageDeliveriesWindow as 0) + const maxMeshMessageDeliveriesWindowMs = Math.max( + ...Object.values(opts.scoreParams.topics).map((topicParam) => topicParam.meshMessageDeliveriesWindow), + constants.DEFAULT_METRIC_MESH_MESSAGE_DELIVERIES_WINDOWS + ) + + const metrics = getMetrics(options.metricsRegister, options.metricsTopicStrToLabel, { + gossipPromiseExpireSec: this.opts.gossipsubIWantFollowupMs / 1000, + behaviourPenaltyThreshold: opts.scoreParams.behaviourPenaltyThreshold, + maxMeshMessageDeliveriesWindowSec: maxMeshMessageDeliveriesWindowMs / 1000 + }) + + metrics.mcacheSize.addCollect(() => { this.onScrapeMetrics(metrics) }) + for (const protocol of this.protocols) { + metrics.protocolsEnabled.set({ protocol }, 1) + } + + this.metrics = metrics + } else { + this.metrics = null + } + + this.gossipTracer = new IWantTracer(this.opts.gossipsubIWantFollowupMs, this.msgIdToStrFn, this.metrics) + + /** + * libp2p + */ + this.score = new PeerScore(this.opts.scoreParams, this.metrics, this.components.logger, { + scoreCacheValidityMs: opts.heartbeatInterval + }) + + this.maxInboundStreams = options.maxInboundStreams + this.maxOutboundStreams = options.maxOutboundStreams + this.runOnLimitedConnection = options.runOnLimitedConnection + + this.allowedTopics = (opts.allowedTopics != null) ? 
new Set(opts.allowedTopics) : null + } + + readonly [Symbol.toStringTag] = '@chainsafe/libp2p-gossipsub' + + readonly [serviceCapabilities]: string[] = [ + '@libp2p/pubsub' + ] + + readonly [serviceDependencies]: string[] = [ + '@libp2p/identify' + ] + + getPeers (): PeerId[] { + return [...this.peers.values()] + } + + isStarted (): boolean { + return this.status.code === GossipStatusCode.started + } + + // LIFECYCLE METHODS + + /** + * Mounts the gossipsub protocol onto the libp2p node and sends our + * our subscriptions to every peer connected + */ + async start (): Promise { + // From pubsub + if (this.isStarted()) { + return + } + + this.log('starting') + + this.publishConfig = getPublishConfigFromPeerId(this.globalSignaturePolicy, this.components.peerId, this.components.privateKey) + + // Create the outbound inflight queue + // This ensures that outbound stream creation happens sequentially + this.outboundInflightQueue = pushable({ objectMode: true }) + pipe(this.outboundInflightQueue, async (source) => { + for await (const { peerId, connection } of source) { + await this.createOutboundStream(peerId, connection) + } + }).catch((e) => { this.log.error('outbound inflight queue error', e) }) + + // set direct peer addresses in the address book + await Promise.all( + this.opts.directPeers.map(async (p) => { + await this.components.peerStore.merge(p.id, { + multiaddrs: p.addrs + }) + }) + ) + + const registrar = this.components.registrar + // Incoming streams + // Called after a peer dials us + await Promise.all( + this.protocols.map(async (protocol) => + registrar.handle(protocol, this.onIncomingStream.bind(this), { + maxInboundStreams: this.maxInboundStreams, + maxOutboundStreams: this.maxOutboundStreams, + runOnLimitedConnection: this.runOnLimitedConnection + }) + ) + ) + + // # How does Gossipsub interact with libp2p? 
Rough guide from Mar 2022 + // + // ## Setup: + // Gossipsub requests libp2p to callback, TBD + // + // `this.libp2p.handle()` registers a handler for `/meshsub/1.1.0` and other Gossipsub protocols + // The handler callback is registered in libp2p Upgrader.protocols map. + // + // Upgrader receives an inbound connection from some transport and (`Upgrader.upgradeInbound`): + // - Adds encryption (NOISE in our case) + // - Multiplex stream + // - Create a muxer and register that for each new stream call Upgrader.protocols handler + // + // ## Topology + // - new instance of Topology (unlinked to libp2p) with handlers + // - registar.register(topology) + + // register protocol with topology + // Topology callbacks called on connection manager changes + const topology: Topology = { + onConnect: this.onPeerConnected.bind(this), + onDisconnect: this.onPeerDisconnected.bind(this), + notifyOnLimitedConnection: this.runOnLimitedConnection + } + const registrarTopologyIds = await Promise.all( + this.protocols.map(async (protocol) => registrar.register(protocol, topology)) + ) + + // Schedule to start heartbeat after `GossipsubHeartbeatInitialDelay` + const heartbeatTimeout = setTimeout(this.runHeartbeat, constants.GossipsubHeartbeatInitialDelay) + // Then, run heartbeat every `heartbeatInterval` offset by `GossipsubHeartbeatInitialDelay` + + this.status = { + code: GossipStatusCode.started, + registrarTopologyIds, + heartbeatTimeout, + hearbeatStartMs: Date.now() + constants.GossipsubHeartbeatInitialDelay + } + + this.score.start() + // connect to direct peers + this.directPeerInitial = setTimeout(() => { + Promise.resolve() + .then(async () => { + await Promise.all(Array.from(this.direct).map(async (id) => this.connect(id))) + }) + .catch((err) => { + this.log(err) + }) + }, constants.GossipsubDirectConnectInitialDelay) + + if (this.opts.tagMeshPeers) { + this.addEventListener('gossipsub:graft', this.tagMeshPeer) + this.addEventListener('gossipsub:prune', 
this.untagMeshPeer) + } + + this.log('started') + } + + /** + * Unmounts the gossipsub protocol and shuts down every connection + */ + async stop (): Promise { + this.log('stopping') + // From pubsub + + if (this.status.code !== GossipStatusCode.started) { + return + } + + const { registrarTopologyIds } = this.status + this.status = { code: GossipStatusCode.stopped } + + if (this.opts.tagMeshPeers) { + this.removeEventListener('gossipsub:graft', this.tagMeshPeer) + this.removeEventListener('gossipsub:prune', this.untagMeshPeer) + } + + // unregister protocol and handlers + const registrar = this.components.registrar + await Promise.all(this.protocols.map(async (protocol) => registrar.unhandle(protocol))) + registrarTopologyIds.forEach((id) => { registrar.unregister(id) }) + + this.outboundInflightQueue.end() + + const closePromises = [] + for (const outboundStream of this.streamsOutbound.values()) { + closePromises.push(outboundStream.close()) + } + this.streamsOutbound.clear() + + for (const inboundStream of this.streamsInbound.values()) { + closePromises.push(inboundStream.close()) + } + this.streamsInbound.clear() + + await Promise.all(closePromises) + + this.peers.clear() + this.subscriptions.clear() + + // Gossipsub + + if (this.heartbeatTimer != null) { + this.heartbeatTimer.cancel() + this.heartbeatTimer = null + } + + this.score.stop() + + this.mesh.clear() + this.fanout.clear() + this.fanoutLastpub.clear() + this.gossip.clear() + this.control.clear() + this.peerhave.clear() + this.iasked.clear() + this.backoff.clear() + this.outbound.clear() + this.gossipTracer.clear() + this.seenCache.clear() + if (this.fastMsgIdCache != null) { this.fastMsgIdCache.clear() } + if (this.directPeerInitial != null) { clearTimeout(this.directPeerInitial) } + this.idontwantCounts.clear() + this.idontwants.clear() + + this.log('stopped') + } + + /** FOR DEBUG ONLY - Dump peer stats for all peers. 
Data is cloned, safe to mutate */ + dumpPeerScoreStats (): PeerScoreStatsDump { + return this.score.dumpPeerScoreStats() + } + + /** + * On an inbound stream opened + */ + private onIncomingStream (stream: Stream, connection: Connection): void { + if (!this.isStarted()) { + return + } + + const peerId = connection.remotePeer + // add peer to router + this.addPeer(peerId, connection.direction, connection.remoteAddr) + // create inbound stream + this.createInboundStream(peerId, stream) + // attempt to create outbound stream + this.outboundInflightQueue.push({ peerId, connection }) + } + + /** + * Registrar notifies an established connection with pubsub protocol + */ + private onPeerConnected (peerId: PeerId, connection: Connection): void { + this.metrics?.newConnectionCount.inc({ status: connection.status }) + // libp2p may emit a closed connection and never issue peer:disconnect event + // see https://github.com/ChainSafe/js-libp2p-gossipsub/issues/398 + if (!this.isStarted() || connection.status !== 'open') { + return + } + + this.addPeer(peerId, connection.direction, connection.remoteAddr) + this.outboundInflightQueue.push({ peerId, connection }) + } + + /** + * Registrar notifies a closing connection with pubsub protocol + */ + private onPeerDisconnected (peerId: PeerId): void { + this.log('connection ended %p', peerId) + this.removePeer(peerId) + } + + private async createOutboundStream (peerId: PeerId, connection: Connection): Promise { + if (!this.isStarted()) { + return + } + + const id = peerId.toString() + + if (!this.peers.has(id)) { + return + } + + // TODO make this behavior more robust + // This behavior is different than for inbound streams + // If an outbound stream already exists, don't create a new stream + if (this.streamsOutbound.has(id)) { + return + } + + try { + const stream = new OutboundStream( + await connection.newStream(this.protocols, { + runOnLimitedConnection: this.runOnLimitedConnection + }), + (e) => { this.log.error('outbound pipe 
error', e) }, + { maxBufferSize: this.opts.maxOutboundBufferSize } + ) + + this.log('create outbound stream %p', peerId) + + this.streamsOutbound.set(id, stream) + + const protocol = stream.protocol + if (protocol === constants.FloodsubID) { + this.floodsubPeers.add(id) + } + this.metrics?.peersPerProtocol.inc({ protocol }, 1) + + // Immediately send own subscriptions via the newly attached stream + if (this.subscriptions.size > 0) { + this.log('send subscriptions to', id) + this.sendSubscriptions(id, Array.from(this.subscriptions), true) + } + } catch (e) { + this.log.error('createOutboundStream error', e) + } + } + + private createInboundStream (peerId: PeerId, stream: Stream): void { + if (!this.isStarted()) { + return + } + + const id = peerId.toString() + + if (!this.peers.has(id)) { + return + } + + // TODO make this behavior more robust + // This behavior is different than for outbound streams + // If a peer initiates a new inbound connection + // we assume that one is the new canonical inbound stream + const priorInboundStream = this.streamsInbound.get(id) + if (priorInboundStream !== undefined) { + this.log('replacing existing inbound steam %s', id) + priorInboundStream.close().catch((err) => { this.log.error(err) }) + } + + this.log('create inbound stream %s', id) + + const inboundStream = new InboundStream(stream, { maxDataLength: this.opts.maxInboundDataLength }) + this.streamsInbound.set(id, inboundStream) + + this.pipePeerReadStream(peerId, inboundStream.source).catch((err) => { this.log(err) }) + } + + /** + * Add a peer to the router + */ + private addPeer (peerId: PeerId, direction: MessageStreamDirection, addr: Multiaddr): void { + const id = peerId.toString() + + if (!this.peers.has(id)) { + this.peers.set(id, peerId) + + // Add to peer scoring + this.score.addPeer(id) + const currentIP = multiaddrToIPStr(addr) + if (currentIP !== null) { + this.score.addIP(id, currentIP) + } else { + this.log('Added peer has no IP in current address %s %s', id, 
addr.toString()) + } + + // track the connection direction. Don't allow to unset outbound + if (!this.outbound.has(id)) { + this.outbound.set(id, direction === 'outbound') + } + } + } + + /** + * Removes a peer from the router + */ + private removePeer (peerId: PeerId): void { + const id = peerId.toString() + + if (!this.peers.has(id)) { + return + } + + // delete peer + this.log('delete peer %p', peerId) + this.peers.delete(id) + + const outboundStream = this.streamsOutbound.get(id) + const inboundStream = this.streamsInbound.get(id) + + if (outboundStream != null) { + this.metrics?.peersPerProtocol.inc({ protocol: outboundStream.protocol }, -1) + } + + // close streams + outboundStream?.close().catch((err) => { this.log.error(err) }) + inboundStream?.close().catch((err) => { this.log.error(err) }) + + // remove streams + this.streamsOutbound.delete(id) + this.streamsInbound.delete(id) + + // remove peer from topics map + for (const peers of this.topics.values()) { + peers.delete(id) + } + + // Remove this peer from the mesh + for (const [topicStr, peers] of this.mesh) { + if (peers.delete(id)) { + this.metrics?.onRemoveFromMesh(topicStr, ChurnReason.Dc, 1) + } + } + + // Remove this peer from the fanout + for (const peers of this.fanout.values()) { + peers.delete(id) + } + + // Remove from floodsubPeers + this.floodsubPeers.delete(id) + // Remove from gossip mapping + this.gossip.delete(id) + // Remove from control mapping + this.control.delete(id) + // Remove from outbound tracking + this.outbound.delete(id) + // Remove from idontwant tracking + this.idontwantCounts.delete(id) + this.idontwants.delete(id) + + // Remove from peer scoring + this.score.removePeer(id) + + this.acceptFromWhitelist.delete(id) + } + + // API METHODS + + get started (): boolean { + return this.status.code === GossipStatusCode.started + } + + /** + * Get the peer-ids in a topic mesh + */ + getMeshPeers (topic: TopicStr): PeerIdStr[] { + const peersInTopic = this.mesh.get(topic) + return 
(peersInTopic != null) ? Array.from(peersInTopic) : [] + } + + /** + * Get a list of the peer-ids that are subscribed to one topic. + */ + getSubscribers (topic: TopicStr): PeerId[] { + const peersInTopic = this.topics.get(topic) + return ((peersInTopic != null) ? Array.from(peersInTopic) : []).map((str) => this.peers.get(str) ?? peerIdFromString(str)) + } + + /** + * Get the list of topics which the peer is subscribed to. + */ + getTopics (): TopicStr[] { + return Array.from(this.subscriptions) + } + + // TODO: Reviewing Pubsub API + + // MESSAGE METHODS + + /** + * Responsible for processing each RPC message received by other peers. + */ + private async pipePeerReadStream (peerId: PeerId, stream: AsyncIterable): Promise { + try { + await pipe(stream, async (source) => { + for await (const data of source) { + try { + // TODO: Check max gossip message size, before decodeRpc() + const rpcBytes = data.subarray() + // Note: This function may throw, it must be wrapped in a try {} catch {} to prevent closing the stream. + // TODO: What should we do if the entire RPC is invalid? 
+ const rpc = RPC.decode(rpcBytes, { + limits: { + subscriptions: this.decodeRpcLimits.maxSubscriptions, + messages: this.decodeRpcLimits.maxMessages, + control$: { + ihave: this.decodeRpcLimits.maxIhaveMessageIDs, + iwant: this.decodeRpcLimits.maxIwantMessageIDs, + graft: this.decodeRpcLimits.maxControlMessages, + prune: this.decodeRpcLimits.maxControlMessages, + prune$: { + peers: this.decodeRpcLimits.maxPeerInfos + }, + idontwant: this.decodeRpcLimits.maxControlMessages, + idontwant$: { + messageIDs: this.decodeRpcLimits.maxIdontwantMessageIDs + } + } + } + }) + + this.metrics?.onRpcRecv(rpc, rpcBytes.length) + + // Since processRpc may be overridden entirely in unsafe ways, + // the simplest/safest option here is to wrap in a function and capture all errors + // to prevent a top-level unhandled exception + // This processing of rpc messages should happen without awaiting full validation/execution of prior messages + if (this.opts.awaitRpcHandler) { + try { + await this.handleReceivedRpc(peerId, rpc) + } catch (err) { + this.metrics?.onRpcRecvError() + this.log(err) + } + } else { + this.handleReceivedRpc(peerId, rpc).catch((err) => { + this.metrics?.onRpcRecvError() + this.log(err) + }) + } + } catch (e) { + this.metrics?.onRpcDataError() + this.log(e as Error) + } + } + }) + } catch (err) { + this.metrics?.onPeerReadStreamError() + this.handlePeerReadStreamError(err as Error, peerId) + } + } + + /** + * Handle error when read stream pipe throws, less for functional use and more + * for testing purposes to spy on the error handling + */ + private handlePeerReadStreamError (err: Error, peerId: PeerId): void { + this.log.error(err) + this.onPeerDisconnected(peerId) + } + + /** + * Handles an rpc request from a peer + */ + public async handleReceivedRpc (from: PeerId, rpc: RPC): Promise { + // Check if peer is graylisted in which case we ignore the event + if (!this.acceptFrom(from.toString())) { + this.log('received message from unacceptable peer %p', from) 
+ this.metrics?.rpcRecvNotAccepted.inc() + return + } + + const subscriptions = (rpc.subscriptions != null) ? rpc.subscriptions.length : 0 + const messages = (rpc.messages != null) ? rpc.messages.length : 0 + let ihave = 0 + let iwant = 0 + let graft = 0 + let prune = 0 + if (rpc.control != null) { + if (rpc.control.ihave != null) { ihave = rpc.control.ihave.length } + if (rpc.control.iwant != null) { iwant = rpc.control.iwant.length } + if (rpc.control.graft != null) { graft = rpc.control.graft.length } + if (rpc.control.prune != null) { prune = rpc.control.prune.length } + } + this.log( + `rpc.from ${from.toString()} subscriptions ${subscriptions} messages ${messages} ihave ${ihave} iwant ${iwant} graft ${graft} prune ${prune}` + ) + + // Handle received subscriptions + if ((rpc.subscriptions != null) && rpc.subscriptions.length > 0) { + // update peer subscriptions + + const subscriptions: Array<{ topic: TopicStr, subscribe: boolean }> = [] + + rpc.subscriptions.forEach((subOpt) => { + const topic = subOpt.topic + const subscribe = subOpt.subscribe === true + + if (topic != null) { + if ((this.allowedTopics != null) && !this.allowedTopics.has(topic)) { + // Not allowed: subscription data-structures are not bounded by topic count + // TODO: Should apply behaviour penalties? + return + } + + this.handleReceivedSubscription(from, topic, subscribe) + + subscriptions.push({ topic, subscribe }) + } + }) + + this.safeDispatchEvent('subscription-change', { + detail: { peerId: from, subscriptions } + }) + } + + // Handle messages + // TODO: (up to limit) + for (const message of rpc.messages) { + if ((this.allowedTopics != null) && !this.allowedTopics.has(message.topic)) { + // Not allowed: message cache data-structures are not bounded by topic count + // TODO: Should apply behaviour penalties? 
+ continue + } + + const handleReceivedMessagePromise = this.handleReceivedMessage(from, message) + // Should never throw, but handle just in case + .catch((err) => { + this.metrics?.onMsgRecvError(message.topic) + this.log(err) + }) + + if (this.opts.awaitRpcMessageHandler) { + await handleReceivedMessagePromise + } + } + + // Handle control messages + if (rpc.control != null) { + await this.handleControlMessage(from.toString(), rpc.control) + } + } + + /** + * Handles a subscription change from a peer + */ + private handleReceivedSubscription (from: PeerId, topic: TopicStr, subscribe: boolean): void { + this.log('subscription update from %p topic %s', from, topic) + + let topicSet = this.topics.get(topic) + if (topicSet == null) { + topicSet = new Set() + this.topics.set(topic, topicSet) + } + + if (subscribe) { + // subscribe peer to new topic + topicSet.add(from.toString()) + } else { + // unsubscribe from existing topic + topicSet.delete(from.toString()) + } + + // TODO: rust-libp2p has A LOT more logic here + } + + /** + * Handles a newly received message from an RPC. + * May forward to all peers in the mesh. + */ + private async handleReceivedMessage (from: PeerId, rpcMsg: RPC.Message): Promise { + this.metrics?.onMsgRecvPreValidation(rpcMsg.topic) + + const validationResult = await this.validateReceivedMessage(from, rpcMsg) + + this.metrics?.onPrevalidationResult(rpcMsg.topic, validationResult.code) + + const validationCode = validationResult.code + switch (validationCode) { + case MessageStatus.duplicate: + // Report the duplicate + this.score.duplicateMessage(from.toString(), validationResult.msgIdStr, rpcMsg.topic) + // due to the collision of fastMsgIdFn, 2 different messages may end up the same fastMsgId + // so we need to also mark the duplicate message as delivered or the promise is not resolved + // and peer gets penalized. 
See https://github.com/ChainSafe/js-libp2p-gossipsub/pull/385 + this.gossipTracer.deliverMessage(validationResult.msgIdStr, true) + this.mcache.observeDuplicate(validationResult.msgIdStr, from.toString()) + return + + case MessageStatus.invalid: + // invalid messages received + // metrics.register_invalid_message(&raw_message.topic) + // Tell peer_score about reject + // Reject the original source, and any duplicates we've seen from other peers. + if (validationResult.msgIdStr != null) { + const msgIdStr = validationResult.msgIdStr + this.score.rejectMessage(from.toString(), msgIdStr, rpcMsg.topic, validationResult.reason) + this.gossipTracer.rejectMessage(msgIdStr, validationResult.reason) + } else { + this.score.rejectInvalidMessage(from.toString(), rpcMsg.topic) + } + + this.metrics?.onMsgRecvInvalid(rpcMsg.topic, validationResult) + return + + case MessageStatus.valid: + // Tells score that message arrived (but is maybe not fully validated yet). + // Consider the message as delivered for gossip promises. 
+ this.score.validateMessage(validationResult.messageId.msgIdStr) + this.gossipTracer.deliverMessage(validationResult.messageId.msgIdStr) + + // Add the message to our memcache + // if no validation is required, mark the message as validated + this.mcache.put(validationResult.messageId, rpcMsg, !this.opts.asyncValidation) + + // Dispatch the message to the user if we are subscribed to the topic + if (this.subscriptions.has(rpcMsg.topic)) { + const isFromSelf = this.components.peerId.equals(from) + + if (!isFromSelf || this.opts.emitSelf) { + super.dispatchEvent( + new CustomEvent('gossipsub:message', { + detail: { + propagationSource: from, + msgId: validationResult.messageId.msgIdStr, + msg: validationResult.msg + } + }) + ) + // TODO: Add option to switch between emit per topic or all messages in one + super.dispatchEvent(new CustomEvent('message', { detail: validationResult.msg })) + } + } + + // Forward the message to mesh peers, if no validation is required + // If asyncValidation is ON, expect the app layer to call reportMessageValidationResult(), then forward + if (!this.opts.asyncValidation) { + // TODO: in rust-libp2p + // .forward_msg(&msg_id, raw_message, Some(propagation_source)) + this.forwardMessage(validationResult.messageId.msgIdStr, rpcMsg, from.toString()) + } + break + default: + throw new Error(`Invalid validation result: ${validationCode}`) + } + } + + /** + * Handles a newly received message from an RPC. + * May forward to all peers in the mesh. + */ + private async validateReceivedMessage ( + propagationSource: PeerId, + rpcMsg: RPC.Message + ): Promise { + // Fast message ID stuff + const fastMsgIdStr = this.fastMsgIdFn?.(rpcMsg) + const msgIdCached = fastMsgIdStr !== undefined ? this.fastMsgIdCache?.get(fastMsgIdStr) : undefined + + if (msgIdCached != null) { + // This message has been seen previously. 
Ignore it + return { code: MessageStatus.duplicate, msgIdStr: msgIdCached } + } + + // Perform basic validation on message and convert to RawGossipsubMessage for fastMsgIdFn() + const validationResult = await validateToRawMessage(this.globalSignaturePolicy, rpcMsg) + + if (!validationResult.valid) { + return { code: MessageStatus.invalid, reason: RejectReason.Error, error: validationResult.error } + } + + const msg = validationResult.message + + // Try and perform the data transform to the message. If it fails, consider it invalid. + try { + if (this.dataTransform != null) { + msg.data = this.dataTransform.inboundTransform(rpcMsg.topic, msg.data) + } + } catch (e) { + this.log('Invalid message, transform failed', e) + return { code: MessageStatus.invalid, reason: RejectReason.Error, error: ValidateError.TransformFailed } + } + + // TODO: Check if message is from a blacklisted source or propagation origin + // - Reject any message from a blacklisted peer + // - Also reject any message that originated from a blacklisted peer + // - reject messages claiming to be from ourselves but not locally published + + // Calculate the message id on the transformed data. + const msgId = await this.msgIdFn(msg) + const msgIdStr = this.msgIdToStrFn(msgId) + const messageId = { msgId, msgIdStr } + + // Add the message to the duplicate caches + if (fastMsgIdStr !== undefined && (this.fastMsgIdCache != null)) { + const collision = this.fastMsgIdCache.put(fastMsgIdStr, msgIdStr) + if (collision) { + this.metrics?.fastMsgIdCacheCollision.inc() + } + } + + if (this.seenCache.has(msgIdStr)) { + return { code: MessageStatus.duplicate, msgIdStr } + } else { + this.seenCache.put(msgIdStr) + } + + // possibly send IDONTWANTs to mesh peers + if ((rpcMsg.data?.length ?? 
0) >= this.opts.idontwantMinDataSize) { + this.sendIDontWants(msgId, rpcMsg.topic, propagationSource.toString()) + } + + // (Optional) Provide custom validation here with dynamic validators per topic + // NOTE: This custom topicValidator() must resolve fast (< 100ms) to allow scores + // to not penalize peers for long validation times. + const topicValidator = this.topicValidators.get(rpcMsg.topic) + if (topicValidator != null) { + let acceptance: TopicValidatorResult + // Use try {} catch {} in case topicValidator() is synchronous + try { + acceptance = await topicValidator(propagationSource, msg) + } catch (e) { + const errCode = (e as { code: string }).code + if (errCode === constants.ERR_TOPIC_VALIDATOR_IGNORE) { acceptance = TopicValidatorResult.Ignore } + if (errCode === constants.ERR_TOPIC_VALIDATOR_REJECT) { acceptance = TopicValidatorResult.Reject } else { acceptance = TopicValidatorResult.Ignore } + } + + if (acceptance !== TopicValidatorResult.Accept) { + return { code: MessageStatus.invalid, reason: rejectReasonFromAcceptance(acceptance), msgIdStr } + } + } + + return { code: MessageStatus.valid, messageId, msg } + } + + /** + * Return score of a peer. + */ + getScore (peerId: PeerIdStr): number { + return this.score.score(peerId) + } + + /** + * Send an rpc object to a peer with subscriptions + */ + private sendSubscriptions (toPeer: PeerIdStr, topics: string[], subscribe: boolean): void { + this.sendRpc(toPeer, { + subscriptions: topics.map((topic) => ({ topic, subscribe })), + messages: [] + }) + } + + /** + * Handles an rpc control message from a peer + */ + private async handleControlMessage (id: PeerIdStr, controlMsg: RPC.ControlMessage): Promise { + if (controlMsg === undefined) { + return + } + + const iwant = (controlMsg.ihave?.length > 0) ? this.handleIHave(id, controlMsg.ihave) : [] + const ihave = (controlMsg.iwant?.length > 0) ? this.handleIWant(id, controlMsg.iwant) : [] + const prune = (controlMsg.graft?.length > 0) ? 
await this.handleGraft(id, controlMsg.graft) : [] + ;(controlMsg.prune?.length > 0) && (await this.handlePrune(id, controlMsg.prune)) + ;(controlMsg.idontwant?.length > 0) && this.handleIdontwant(id, controlMsg.idontwant) + + if ((iwant.length === 0) && (ihave.length === 0) && (prune.length === 0)) { + return + } + + const sent = this.sendRpc(id, createGossipRpc(ihave, { iwant, prune })) + const iwantMessageIds = iwant[0]?.messageIDs + if (iwantMessageIds != null) { + if (sent) { + this.gossipTracer.addPromise(id, iwantMessageIds) + } else { + this.metrics?.iwantPromiseUntracked.inc(1) + } + } + } + + /** + * Whether to accept a message from a peer + */ + public acceptFrom (id: PeerIdStr): boolean { + if (this.direct.has(id)) { + return true + } + + const now = Date.now() + const entry = this.acceptFromWhitelist.get(id) + + if ((entry != null) && entry.messagesAccepted < ACCEPT_FROM_WHITELIST_MAX_MESSAGES && entry.acceptUntil >= now) { + entry.messagesAccepted += 1 + return true + } + + const score = this.score.score(id) + if (score >= ACCEPT_FROM_WHITELIST_THRESHOLD_SCORE) { + // peer is unlikely to be able to drop its score to `graylistThreshold` + // after 128 messages or 1s + this.acceptFromWhitelist.set(id, { + messagesAccepted: 0, + acceptUntil: now + ACCEPT_FROM_WHITELIST_DURATION_MS + }) + } else { + this.acceptFromWhitelist.delete(id) + } + + return score >= this.opts.scoreThresholds.graylistThreshold + } + + /** + * Handles IHAVE messages + */ + private handleIHave (id: PeerIdStr, ihave: RPC.ControlIHave[]): RPC.ControlIWant[] { + if (ihave.length === 0) { + return [] + } + + // we ignore IHAVE gossip from any peer whose score is below the gossips threshold + const score = this.score.score(id) + if (score < this.opts.scoreThresholds.gossipThreshold) { + this.log('IHAVE: ignoring peer %s with score below threshold [ score = %d ]', id, score) + this.metrics?.ihaveRcvIgnored.inc({ reason: IHaveIgnoreReason.LowScore }) + return [] + } + + // IHAVE flood 
protection + const peerhave = (this.peerhave.get(id) ?? 0) + 1 + this.peerhave.set(id, peerhave) + if (peerhave > constants.GossipsubMaxIHaveMessages) { + this.log( + 'IHAVE: peer %s has advertised too many times (%d) within this heartbeat interval; ignoring', + id, + peerhave + ) + this.metrics?.ihaveRcvIgnored.inc({ reason: IHaveIgnoreReason.MaxIhave }) + return [] + } + + const iasked = this.iasked.get(id) ?? 0 + if (iasked >= constants.GossipsubMaxIHaveLength) { + this.log('IHAVE: peer %s has already advertised too many messages (%d); ignoring', id, iasked) + this.metrics?.ihaveRcvIgnored.inc({ reason: IHaveIgnoreReason.MaxIasked }) + return [] + } + + // string msgId => msgId + const iwant = new Map() + + ihave.forEach(({ topicID, messageIDs }) => { + if (topicID == null || (messageIDs == null) || !this.mesh.has(topicID)) { + return + } + + let idonthave = 0 + + messageIDs.forEach((msgId) => { + const msgIdStr = this.msgIdToStrFn(msgId) + if (!this.seenCache.has(msgIdStr)) { + iwant.set(msgIdStr, msgId) + idonthave++ + } + }) + + this.metrics?.onIhaveRcv(topicID, messageIDs.length, idonthave) + }) + + if (iwant.size === 0) { + return [] + } + + let iask = iwant.size + if (iask + iasked > constants.GossipsubMaxIHaveLength) { + iask = constants.GossipsubMaxIHaveLength - iasked + } + + this.log('IHAVE: Asking for %d out of %d messages from %s', iask, iwant.size, id) + + let iwantList = Array.from(iwant.values()) + // ask in random order + shuffle(iwantList) + + // truncate to the messages we are actually asking for and update the iasked counter + iwantList = iwantList.slice(0, iask) + this.iasked.set(id, iasked + iask) + + // do not add gossipTracer promise here until a successful sendRpc() + + return [ + { + messageIDs: iwantList + } + ] + } + + /** + * Handles IWANT messages + * Returns messages to send back to peer + */ + private handleIWant (id: PeerIdStr, iwant: RPC.ControlIWant[]): RPC.Message[] { + if (iwant.length === 0) { + return [] + } + + // we don't 
respond to IWANT requests from any per whose score is below the gossip threshold + const score = this.score.score(id) + if (score < this.opts.scoreThresholds.gossipThreshold) { + this.log('IWANT: ignoring peer %s with score below threshold [score = %d]', id, score) + return [] + } + + const ihave = new Map() + const iwantByTopic = new Map() + let iwantDonthave = 0 + + iwant.forEach(({ messageIDs }) => { + messageIDs?.forEach((msgId) => { + const msgIdStr = this.msgIdToStrFn(msgId) + const entry = this.mcache.getWithIWantCount(msgIdStr, id) + if (entry == null) { + iwantDonthave++ + return + } + + iwantByTopic.set(entry.msg.topic, 1 + (iwantByTopic.get(entry.msg.topic) ?? 0)) + + if (entry.count > constants.GossipsubGossipRetransmission) { + this.log('IWANT: Peer %s has asked for message %s too many times: ignoring request', id, msgId) + return + } + + ihave.set(msgIdStr, entry.msg) + }) + }) + + this.metrics?.onIwantRcv(iwantByTopic, iwantDonthave) + + if (ihave.size === 0) { + this.log('IWANT: Could not provide any wanted messages to %s', id) + return [] + } + + this.log('IWANT: Sending %d messages to %s', ihave.size, id) + + return Array.from(ihave.values()) + } + + /** + * Handles Graft messages + */ + private async handleGraft (id: PeerIdStr, graft: RPC.ControlGraft[]): Promise { + const prune: TopicStr[] = [] + const score = this.score.score(id) + const now = Date.now() + let doPX = this.opts.doPX + + graft.forEach(({ topicID }) => { + if (topicID == null) { + return + } + + const peersInMesh = this.mesh.get(topicID) + if (peersInMesh == null) { + // don't do PX when there is an unknown topic to avoid leaking our peers + doPX = false + // spam hardening: ignore GRAFTs for unknown topics + return + } + + // check if peer is already in the mesh; if so do nothing + if (peersInMesh.has(id)) { + return + } + + const backoffExpiry = this.backoff.get(topicID)?.get(id) + + // This if/else chain contains the various cases of valid (and semi-valid) GRAFTs + // Most of 
these cases result in a PRUNE immediately being sent in response + + // we don't GRAFT to/from direct peers; complain loudly if this happens + if (this.direct.has(id)) { + this.log('GRAFT: ignoring request from direct peer %s', id) + // this is possibly a bug from a non-reciprical configuration; send a PRUNE + prune.push(topicID) + // but don't px + doPX = false + + // make sure we are not backing off that peer + } else if (typeof backoffExpiry === 'number' && now < backoffExpiry) { + this.log('GRAFT: ignoring backed off peer %s', id) + // add behavioral penalty + this.score.addPenalty(id, 1, ScorePenalty.GraftBackoff) + // no PX + doPX = false + // check the flood cutoff -- is the GRAFT coming too fast? + const floodCutoff = backoffExpiry + this.opts.graftFloodThreshold - this.opts.pruneBackoff + if (now < floodCutoff) { + // extra penalty + this.score.addPenalty(id, 1, ScorePenalty.GraftBackoff) + } + // refresh the backoff + this.addBackoff(id, topicID) + prune.push(topicID) + + // check the score + } else if (score < 0) { + // we don't GRAFT peers with negative score + this.log('GRAFT: ignoring peer %s with negative score: score=%d, topic=%s', id, score, topicID) + // we do send them PRUNE however, because it's a matter of protocol correctness + prune.push(topicID) + // but we won't PX to them + doPX = false + // add/refresh backoff so that we don't reGRAFT too early even if the score decays + this.addBackoff(id, topicID) + + // check the number of mesh peers; if it is at (or over) Dhi, we only accept grafts + // from peers with outbound connections; this is a defensive check to restrict potential + // mesh takeover attacks combined with love bombing + } else if (peersInMesh.size >= this.opts.Dhi && !(this.outbound.get(id) ?? 
false)) { + prune.push(topicID) + this.addBackoff(id, topicID) + + // valid graft + } else { + this.log('GRAFT: Add mesh link from %s in %s', id, topicID) + this.score.graft(id, topicID) + peersInMesh.add(id) + + this.metrics?.onAddToMesh(topicID, InclusionReason.Subscribed, 1) + } + + this.safeDispatchEvent('gossipsub:graft', { detail: { peerId: id, topic: topicID, direction: 'inbound' } }) + }) + + if (prune.length === 0) { + return [] + } + + const onUnsubscribe = false + return Promise.all(prune.map(async (topic) => this.makePrune(id, topic, doPX, onUnsubscribe))) + } + + /** + * Handles Prune messages + */ + private async handlePrune (id: PeerIdStr, prune: RPC.ControlPrune[]): Promise { + const score = this.score.score(id) + + for (const { topicID, backoff, peers } of prune) { + if (topicID == null) { + continue + } + + const peersInMesh = this.mesh.get(topicID) + if (peersInMesh == null) { + return + } + + this.log('PRUNE: Remove mesh link to %s in %s', id, topicID) + this.score.prune(id, topicID) + if (peersInMesh.has(id)) { + peersInMesh.delete(id) + this.metrics?.onRemoveFromMesh(topicID, ChurnReason.Prune, 1) + } + + // is there a backoff specified by the peer? if so obey it + if (typeof backoff === 'number' && backoff > 0) { + this.doAddBackoff(id, topicID, backoff * 1000) + } else { + this.addBackoff(id, topicID) + } + + // PX + if ((peers != null) && (peers.length > 0)) { + // we ignore PX from peers with insufficient scores + if (score < this.opts.scoreThresholds.acceptPXThreshold) { + this.log( + 'PRUNE: ignoring PX from peer %s with insufficient score [score = %d, topic = %s]', + id, + score, + topicID + ) + } else { + await this.pxConnect(peers) + } + } + + this.safeDispatchEvent('gossipsub:prune', { detail: { peerId: id, topic: topicID, direction: 'inbound' } }) + } + } + + private handleIdontwant (id: PeerIdStr, idontwant: RPC.ControlIDontWant[]): void { + let idontwantCount = this.idontwantCounts.get(id) ?? 
0 + // return early if we have already received too many IDONTWANT messages from the peer + if (idontwantCount >= this.opts.idontwantMaxMessages) { + return + } + const startIdontwantCount = idontwantCount + + let idontwants = this.idontwants.get(id) + if (idontwants == null) { + idontwants = new Map() + this.idontwants.set(id, idontwants) + } + let idonthave = 0 + // eslint-disable-next-line no-labels + out: for (const { messageIDs } of idontwant) { + for (const msgId of messageIDs) { + if (idontwantCount >= this.opts.idontwantMaxMessages) { + // eslint-disable-next-line no-labels + break out + } + idontwantCount++ + + const msgIdStr = this.msgIdToStrFn(msgId) + idontwants.set(msgIdStr, this.heartbeatTicks) + if (!this.mcache.msgs.has(msgIdStr)) { idonthave++ } + } + } + this.idontwantCounts.set(id, idontwantCount) + const total = idontwantCount - startIdontwantCount + this.metrics?.onIdontwantRcv(total, idonthave) + } + + /** + * Add standard backoff log for a peer in a topic + */ + private addBackoff (id: PeerIdStr, topic: TopicStr): void { + this.doAddBackoff(id, topic, this.opts.pruneBackoff) + } + + /** + * Add backoff expiry interval for a peer in a topic + * + * @param id + * @param topic + * @param intervalMs - backoff duration in milliseconds + */ + private doAddBackoff (id: PeerIdStr, topic: TopicStr, intervalMs: number): void { + let backoff = this.backoff.get(topic) + if (backoff == null) { + backoff = new Map() + this.backoff.set(topic, backoff) + } + const expire = Date.now() + intervalMs + const existingExpire = backoff.get(id) ?? 
0 + if (existingExpire < expire) { + backoff.set(id, expire) + } + } + + /** + * Apply penalties from broken IHAVE/IWANT promises + */ + private applyIwantPenalties (): void { + this.gossipTracer.getBrokenPromises().forEach((count, p) => { + this.log("peer %s didn't follow up in %d IWANT requests; adding penalty", p, count) + this.score.addPenalty(p, count, ScorePenalty.BrokenPromise) + }) + } + + /** + * Clear expired backoff expiries + */ + private clearBackoff (): void { + // we only clear once every GossipsubPruneBackoffTicks ticks to avoid iterating over the maps too much + if (this.heartbeatTicks % constants.GossipsubPruneBackoffTicks !== 0) { + return + } + + const now = Date.now() + this.backoff.forEach((backoff, topic) => { + backoff.forEach((expire, id) => { + // add some slack time to the expiration, see https://github.com/libp2p/specs/pull/289 + if (expire + BACKOFF_SLACK * this.opts.heartbeatInterval < now) { + backoff.delete(id) + } + }) + if (backoff.size === 0) { + this.backoff.delete(topic) + } + }) + } + + /** + * Maybe reconnect to direct peers + */ + private async directConnect (): Promise { + const toconnect: string[] = [] + this.direct.forEach((id) => { + if (!this.streamsOutbound.has(id)) { + toconnect.push(id) + } + }) + + await Promise.all(toconnect.map(async (id) => this.connect(id))) + } + + /** + * Maybe attempt connection given signed peer records + */ + private async pxConnect (peers: RPC.PeerInfo[]): Promise { + if (peers.length > this.opts.prunePeers) { + shuffle(peers) + peers = peers.slice(0, this.opts.prunePeers) + } + const toconnect: string[] = [] + + await Promise.all( + peers.map(async (pi) => { + if (pi.peerID == null) { + return + } + + const peer = peerIdFromMultihash(Digest.decode(pi.peerID)) + const p = peer.toString() + + if (this.peers.has(p)) { + return + } + + if (pi.signedPeerRecord == null) { + toconnect.push(p) + return + } + + // The peer sent us a signed record + // This is not a record from the peer who sent the 
record, but another peer who is connected with it + // Ensure that it is valid + try { + if (!(await this.components.peerStore.consumePeerRecord(pi.signedPeerRecord, { + expectedPeer: peer + }))) { + this.log('bogus peer record obtained through px: could not add peer record to address book') + return + } + toconnect.push(p) + } catch (e) { + this.log('bogus peer record obtained through px: invalid signature or not a peer record') + } + }) + ) + + if (toconnect.length === 0) { + return + } + + await Promise.all(toconnect.map(async (id) => this.connect(id))) + } + + /** + * Connect to a peer using the gossipsub protocol + */ + private async connect (id: PeerIdStr): Promise { + this.log('Initiating connection with %s', id) + const peerId = peerIdFromString(id) + const connection = await this.components.connectionManager.openConnection(peerId) + for (const protocol of this.protocols) { + for (const topology of this.components.registrar.getTopologies(protocol)) { + topology.onConnect?.(peerId, connection) + } + } + } + + /** + * Subscribes to a topic + */ + subscribe (topic: TopicStr): void { + if (this.status.code !== GossipStatusCode.started) { + throw new Error('Pubsub has not started') + } + + if (!this.subscriptions.has(topic)) { + this.subscriptions.add(topic) + + for (const peerId of this.peers.keys()) { + this.sendSubscriptions(peerId, [topic], true) + } + } + + this.join(topic) + } + + /** + * Unsubscribe to a topic + */ + unsubscribe (topic: TopicStr): void { + if (this.status.code !== GossipStatusCode.started) { + throw new Error('Pubsub is not started') + } + + const wasSubscribed = this.subscriptions.delete(topic) + + this.log('unsubscribe from %s - am subscribed %s', topic, wasSubscribed) + + if (wasSubscribed) { + for (const peerId of this.peers.keys()) { + this.sendSubscriptions(peerId, [topic], false) + } + } + + this.leave(topic) + } + + /** + * Join topic + */ + private join (topic: TopicStr): void { + if (this.status.code !== 
GossipStatusCode.started) { + throw new Error('Gossipsub has not started') + } + + // if we are already in the mesh, return + if (this.mesh.has(topic)) { + return + } + + this.log('JOIN %s', topic) + this.metrics?.onJoin(topic) + + const toAdd = new Set() + const backoff = this.backoff.get(topic) + + // check if we have mesh_n peers in fanout[topic] and add them to the mesh if we do, + // removing the fanout entry. + const fanoutPeers = this.fanout.get(topic) + if (fanoutPeers != null) { + // Remove fanout entry and the last published time + this.fanout.delete(topic) + this.fanoutLastpub.delete(topic) + + // remove explicit peers, peers with negative scores, and backoffed peers + fanoutPeers.forEach((id) => { + if (!this.direct.has(id) && this.score.score(id) >= 0 && backoff?.has(id) !== true) { + toAdd.add(id) + } + }) + + this.metrics?.onAddToMesh(topic, InclusionReason.Fanout, toAdd.size) + } + + // check if we need to get more peers, which we randomly select + if (toAdd.size < this.opts.D) { + const fanoutCount = toAdd.size + const newPeers = this.getRandomGossipPeers( + topic, + this.opts.D, + (id: PeerIdStr): boolean => + // filter direct peers and peers with negative score + !toAdd.has(id) && !this.direct.has(id) && this.score.score(id) >= 0 && backoff?.has(id) !== true + ) + + newPeers.forEach((peer) => { + toAdd.add(peer) + }) + + this.metrics?.onAddToMesh(topic, InclusionReason.Random, toAdd.size - fanoutCount) + } + + this.mesh.set(topic, toAdd) + + toAdd.forEach((id) => { + this.log('JOIN: Add mesh link to %s in %s', id, topic) + this.sendGraft(id, topic) + + // rust-libp2p + // - peer_score.graft() + // - Self::control_pool_add() + // - peer_added_to_mesh() + }) + } + + /** + * Leave topic + */ + private leave (topic: TopicStr): void { + if (this.status.code !== GossipStatusCode.started) { + throw new Error('Gossipsub has not started') + } + + this.log('LEAVE %s', topic) + this.metrics?.onLeave(topic) + + // Send PRUNE to mesh peers + const meshPeers = 
this.mesh.get(topic) + if (meshPeers != null) { + Promise.all( + Array.from(meshPeers).map(async (id) => { + this.log('LEAVE: Remove mesh link to %s in %s', id, topic) + await this.sendPrune(id, topic) + }) + ).catch((err) => { + this.log('Error sending prunes to mesh peers', err) + }) + this.mesh.delete(topic) + } + } + + private selectPeersToForward (topic: TopicStr, propagationSource?: PeerIdStr, excludePeers?: Set): Set { + const tosend = new Set() + + // Add explicit peers + const peersInTopic = this.topics.get(topic) + if (peersInTopic != null) { + this.direct.forEach((peer) => { + if (peersInTopic.has(peer) && propagationSource !== peer && !(excludePeers?.has(peer) ?? false)) { + tosend.add(peer) + } + }) + + // As of Mar 2022, spec + golang-libp2p include this while rust-libp2p does not + // rust-libp2p: https://github.com/libp2p/rust-libp2p/blob/6cc3b4ec52c922bfcf562a29b5805c3150e37c75/protocols/gossipsub/src/behaviour.rs#L2693 + // spec: https://github.com/libp2p/specs/blob/10712c55ab309086a52eec7d25f294df4fa96528/pubsub/gossipsub/gossipsub-v1.0.md?plain=1#L361 + this.floodsubPeers.forEach((peer) => { + if ( + peersInTopic.has(peer) && + propagationSource !== peer && + !(excludePeers?.has(peer) ?? false) && + this.score.score(peer) >= this.opts.scoreThresholds.publishThreshold + ) { + tosend.add(peer) + } + }) + } + + // add mesh peers + const meshPeers = this.mesh.get(topic) + if ((meshPeers != null) && meshPeers.size > 0) { + meshPeers.forEach((peer) => { + if (propagationSource !== peer && !(excludePeers?.has(peer) ?? 
false)) { + tosend.add(peer) + } + }) + } + + return tosend + } + + private selectPeersToPublish (topic: TopicStr): { + tosend: Set + tosendCount: ToSendGroupCount + } { + const tosend = new Set() + const tosendCount: ToSendGroupCount = { + direct: 0, + floodsub: 0, + mesh: 0, + fanout: 0 + } + + const peersInTopic = this.topics.get(topic) + if (peersInTopic != null) { + // flood-publish behavior + // send to direct peers and _all_ peers meeting the publishThreshold + if (this.opts.floodPublish) { + peersInTopic.forEach((id) => { + if (this.direct.has(id)) { + tosend.add(id) + tosendCount.direct++ + } else if (this.score.score(id) >= this.opts.scoreThresholds.publishThreshold) { + tosend.add(id) + tosendCount.floodsub++ + } + }) + } else { + // non-flood-publish behavior + // send to direct peers, subscribed floodsub peers + // and some mesh peers above publishThreshold + + // direct peers (if subscribed) + this.direct.forEach((id) => { + if (peersInTopic.has(id)) { + tosend.add(id) + tosendCount.direct++ + } + }) + + // floodsub peers + // Note: if there are no floodsub peers, we save a loop through peersInTopic Map + this.floodsubPeers.forEach((id) => { + if (peersInTopic.has(id) && this.score.score(id) >= this.opts.scoreThresholds.publishThreshold) { + tosend.add(id) + tosendCount.floodsub++ + } + }) + + // Gossipsub peers handling + const meshPeers = this.mesh.get(topic) + if ((meshPeers != null) && meshPeers.size > 0) { + meshPeers.forEach((peer) => { + tosend.add(peer) + tosendCount.mesh++ + }) + + // We want to publish to at least `D` peers. 
+ // If there are insufficient peers in the mesh, publish to other topic peers + if (meshPeers.size < this.opts.D) { + // pick additional topic peers above the publishThreshold + const topicPeers = this.getRandomGossipPeers(topic, this.opts.D - meshPeers.size, (id) => { + return !meshPeers.has(id) && !this.direct.has(id) && !this.floodsubPeers.has(id) && this.score.score(id) >= this.opts.scoreThresholds.publishThreshold + }) + + topicPeers.forEach((peer) => { + tosend.add(peer) + tosendCount.mesh++ + }) + } + } else { + // We are not in the mesh for topic, use fanout peers + + const fanoutPeers = this.fanout.get(topic) + if ((fanoutPeers != null) && fanoutPeers.size > 0) { + fanoutPeers.forEach((peer) => { + tosend.add(peer) + tosendCount.fanout++ + }) + } else { + // We have no fanout peers, select mesh_n of them and add them to the fanout + + // If we are not in the fanout, then pick peers in topic above the publishThreshold + const newFanoutPeers = this.getRandomGossipPeers(topic, this.opts.D, (id) => { + return this.score.score(id) >= this.opts.scoreThresholds.publishThreshold + }) + + // eslint-disable-next-line max-depth + if (newFanoutPeers.size > 0) { + this.fanout.set(topic, newFanoutPeers) + + newFanoutPeers.forEach((peer) => { + tosend.add(peer) + tosendCount.fanout++ + }) + } + } + + // We are publishing to fanout peers - update the time we published + this.fanoutLastpub.set(topic, Date.now()) + } + } + } + + return { tosend, tosendCount } + } + + /** + * Forwards a message from our peers. 
+ * + * For messages published by us (the app layer), this class uses `publish` + */ + private forwardMessage ( + msgIdStr: string, + rawMsg: RPC.Message, + propagationSource?: PeerIdStr, + excludePeers?: Set + ): void { + // message is fully validated inform peer_score + if (propagationSource != null) { + this.score.deliverMessage(propagationSource, msgIdStr, rawMsg.topic) + } + + const tosend = this.selectPeersToForward(rawMsg.topic, propagationSource, excludePeers) + + // Note: Don't throw if tosend is empty, we can have a mesh with a single peer + + // forward the message to peers + tosend.forEach((id) => { + // sendRpc may mutate RPC message on piggyback, create a new message for each peer + this.sendRpc(id, createGossipRpc([rawMsg])) + }) + + this.metrics?.onForwardMsg(rawMsg.topic, tosend.size) + } + + /** + * App layer publishes a message to peers, return number of peers this message is published to + * Note: `async` due to crypto only if `StrictSign`, otherwise it's a sync fn. + * + * For messages not from us, this class uses `forwardMessage`. + */ + async publish (topic: TopicStr, data: Uint8Array, opts?: PublishOpts): Promise { + const startMs = Date.now() + const transformedData = (this.dataTransform != null) ? this.dataTransform.outboundTransform(topic, data) : data + + if (this.publishConfig == null) { + throw Error('PublishError.Uninitialized') + } + + // Prepare raw message with user's publishConfig + const { raw: rawMsg, msg } = await buildRawMessage(this.publishConfig, topic, data, transformedData) + + // calculate the message id from the un-transformed data + const msgId = await this.msgIdFn(msg) + const msgIdStr = this.msgIdToStrFn(msgId) + + // Current publish opt takes precedence global opts, while preserving false value + const ignoreDuplicatePublishError = opts?.ignoreDuplicatePublishError ?? this.opts.ignoreDuplicatePublishError + + if (this.seenCache.has(msgIdStr)) { + // This message has already been seen. 
We don't re-publish messages that have already + // been published on the network. + if (ignoreDuplicatePublishError) { + this.metrics?.onPublishDuplicateMsg(topic) + return { recipients: [] } + } + throw Error('PublishError.Duplicate') + } + + const { tosend, tosendCount } = this.selectPeersToPublish(topic) + const willSendToSelf = this.opts.emitSelf && this.subscriptions.has(topic) + + // Current publish opt takes precedence global opts, while preserving false value + const allowPublishToZeroTopicPeers = opts?.allowPublishToZeroTopicPeers ?? this.opts.allowPublishToZeroTopicPeers + + if (tosend.size === 0 && !allowPublishToZeroTopicPeers && !willSendToSelf) { + throw Error('PublishError.NoPeersSubscribedToTopic') + } + + // If the message isn't a duplicate and we have sent it to some peers add it to the + // duplicate cache and memcache. + this.seenCache.put(msgIdStr) + // all published messages are valid + this.mcache.put({ msgId, msgIdStr }, rawMsg, true) + // Consider the message as delivered for gossip promises. + this.gossipTracer.deliverMessage(msgIdStr) + + // If the message is anonymous or has a random author add it to the published message ids cache. + this.publishedMessageIds.put(msgIdStr) + + const batchPublish = opts?.batchPublish ?? this.opts.batchPublish + const rpc = createGossipRpc([rawMsg]) + if (batchPublish) { + this.sendRpcInBatch(tosend, rpc) + } else { + // Send to set of peers aggregated from direct, mesh, fanout + for (const id of tosend) { + // sendRpc may mutate RPC message on piggyback, create a new message for each peer + const sent = this.sendRpc(id, rpc) + + // did not actually send the message + if (!sent) { + tosend.delete(id) + } + } + } + + const durationMs = Date.now() - startMs + this.metrics?.onPublishMsg( + topic, + tosendCount, + tosend.size, + rawMsg.data != null ? 
rawMsg.data.length : 0, + durationMs + ) + + // Dispatch the message to the user if we are subscribed to the topic + if (willSendToSelf) { + tosend.add(this.components.peerId.toString()) + + super.dispatchEvent( + new CustomEvent('gossipsub:message', { + detail: { + propagationSource: this.components.peerId, + msgId: msgIdStr, + msg + } + }) + ) + // TODO: Add option to switch between emit per topic or all messages in one + super.dispatchEvent(new CustomEvent('message', { detail: msg })) + } + + return { + recipients: Array.from(tosend.values()).map((str) => this.peers.get(str) ?? peerIdFromString(str)) + } + } + + /** + * Send the same data in batch to tosend list without considering cached control messages + * This is not only faster but also avoid allocating memory for each peer + * see https://github.com/ChainSafe/js-libp2p-gossipsub/issues/344 + */ + private sendRpcInBatch (tosend: Set, rpc: RPC): void { + const rpcBytes = RPC.encode(rpc) + const prefixedData = encode.single(rpcBytes) + for (const id of tosend) { + const outboundStream = this.streamsOutbound.get(id) + if (outboundStream == null) { + this.log(`Cannot send RPC to ${id} as there is no open stream to it available`) + tosend.delete(id) + continue + } + try { + outboundStream.pushPrefixed(prefixedData) + } catch (e) { + tosend.delete(id) + this.log.error(`Cannot send rpc to ${id}`, e) + } + + this.metrics?.onRpcSent(rpc, rpcBytes.length) + } + } + + /** + * This function should be called when `asyncValidation` is `true` after + * the message got validated by the caller. Messages are stored in the `mcache` and + * validation is expected to be fast enough that the messages should still exist in the cache. + * There are three possible validation outcomes and the outcome is given in acceptance. + * + * If acceptance = `MessageAcceptance.Accept` the message will get propagated to the + * network. 
The `propagation_source` parameter indicates who the message was received by and + * will not be forwarded back to that peer. + * + * If acceptance = `MessageAcceptance.Reject` the message will be deleted from the memcache + * and the P₄ penalty will be applied to the `propagationSource`. + * + * If acceptance = `MessageAcceptance.Ignore` the message will be deleted from the memcache + * but no P₄ penalty will be applied. + * + * This function will return true if the message was found in the cache and false if was not + * in the cache anymore. + * + * This should only be called once per message. + */ + reportMessageValidationResult (msgId: MsgIdStr, propagationSource: PeerIdStr, acceptance: TopicValidatorResult): void { + let cacheEntry: MessageCacheRecord | null + + if (acceptance === TopicValidatorResult.Accept) { + cacheEntry = this.mcache.validate(msgId) + + if (cacheEntry != null) { + const { message: rawMsg, originatingPeers } = cacheEntry + // message is fully validated inform peer_score + this.score.deliverMessage(propagationSource, msgId, rawMsg.topic) + + this.forwardMessage(msgId, cacheEntry.message, propagationSource, originatingPeers) + } + // else, Message not in cache. Ignoring forwarding + } else { + // Not valid + cacheEntry = this.mcache.remove(msgId) + + if (cacheEntry != null) { + const rejectReason = rejectReasonFromAcceptance(acceptance) + const { message: rawMsg, originatingPeers } = cacheEntry + + // Tell peer_score about reject + // Reject the original source, and any duplicates we've seen from other peers. + this.score.rejectMessage(propagationSource, msgId, rawMsg.topic, rejectReason) + for (const peer of originatingPeers) { + this.score.rejectMessage(peer, msgId, rawMsg.topic, rejectReason) + } + } + // else, Message not in cache. 
Ignoring forwarding + } + + const firstSeenTimestampMs = this.score.messageFirstSeenTimestampMs(msgId) + this.metrics?.onReportValidation(cacheEntry, acceptance, firstSeenTimestampMs) + } + + /** + * Sends a GRAFT message to a peer + */ + private sendGraft (id: PeerIdStr, topic: string): void { + const graft = [ + { + topicID: topic + } + ] + const out = createGossipRpc([], { graft }) + this.sendRpc(id, out) + } + + /** + * Sends a PRUNE message to a peer + */ + private async sendPrune (id: PeerIdStr, topic: string): Promise { + // this is only called from leave() function + const onUnsubscribe = true + const prune = [await this.makePrune(id, topic, this.opts.doPX, onUnsubscribe)] + const out = createGossipRpc([], { prune }) + this.sendRpc(id, out) + } + + private sendIDontWants (msgId: Uint8Array, topic: string, source: PeerIdStr): void { + const ids = this.mesh.get(topic) + if (ids == null) { + return + } + + // don't send IDONTWANT to: + // - the source + // - peers that don't support v1.2 + const tosend = new Set(ids) + tosend.delete(source) + for (const id of tosend) { + if (this.streamsOutbound.get(id)?.protocol !== constants.GossipsubIDv12) { + tosend.delete(id) + } + } + + const idontwantRpc = createGossipRpc([], { idontwant: [{ messageIDs: [msgId] }] }) + this.sendRpcInBatch(tosend, idontwantRpc) + } + + /** + * Send an rpc object to a peer + */ + private sendRpc (id: PeerIdStr, rpc: RPC): boolean { + const outboundStream = this.streamsOutbound.get(id) + if (outboundStream == null) { + this.log(`Cannot send RPC to ${id} as there is no open stream to it available`) + return false + } + + // piggyback control message retries + const ctrl = this.control.get(id) + if (ctrl != null) { + this.piggybackControl(id, rpc, ctrl) + this.control.delete(id) + } + + // piggyback gossip + const ihave = this.gossip.get(id) + if (ihave != null) { + this.piggybackGossip(id, rpc, ihave) + this.gossip.delete(id) + } + + const rpcBytes = RPC.encode(rpc) + try { + 
outboundStream.push(rpcBytes) + } catch (e) { + this.log.error(`Cannot send rpc to ${id}`, e) + + // if the peer had control messages or gossip, re-attach + if (ctrl != null) { + this.control.set(id, ctrl) + } + if (ihave != null) { + this.gossip.set(id, ihave) + } + + return false + } + + this.metrics?.onRpcSent(rpc, rpcBytes.length) + + if (rpc.control?.graft != null) { + for (const topic of rpc.control?.graft) { + if (topic.topicID != null) { + this.safeDispatchEvent('gossipsub:graft', { detail: { peerId: id, topic: topic.topicID, direction: 'outbound' } }) + } + } + } + if (rpc.control?.prune != null) { + for (const topic of rpc.control?.prune) { + if (topic.topicID != null) { + this.safeDispatchEvent('gossipsub:prune', { detail: { peerId: id, topic: topic.topicID, direction: 'outbound' } }) + } + } + } + + return true + } + + /** Mutates `outRpc` adding graft and prune control messages */ + public piggybackControl (id: PeerIdStr, outRpc: RPC, ctrl: RPC.ControlMessage): void { + const rpc = ensureControl(outRpc) + for (const graft of ctrl.graft) { + if (graft.topicID != null && (this.mesh.get(graft.topicID)?.has(id) ?? false)) { + rpc.control.graft.push(graft) + } + } + + for (const prune of ctrl.prune) { + if (prune.topicID != null && !(this.mesh.get(prune.topicID)?.has(id) ?? 
false)) { + rpc.control.prune.push(prune) + } + } + } + + /** Mutates `outRpc` adding ihave control messages */ + private piggybackGossip (id: PeerIdStr, outRpc: RPC, ihave: RPC.ControlIHave[]): void { + const rpc = ensureControl(outRpc) + rpc.control.ihave = ihave + } + + /** + * Send graft and prune messages + * + * @param tograft - peer id => topic[] + * @param toprune - peer id => topic[] + */ + private async sendGraftPrune ( + tograft: Map, + toprune: Map, + noPX: Map + ): Promise { + const doPX = this.opts.doPX + const onUnsubscribe = false + for (const [id, topics] of tograft) { + const graft = topics.map((topicID) => ({ topicID })) + let prune: RPC.ControlPrune[] = [] + // If a peer also has prunes, process them now + const pruning = toprune.get(id) + if (pruning != null) { + prune = await Promise.all( + pruning.map( + async (topicID) => this.makePrune(id, topicID, doPX && !(noPX.get(id) ?? false), onUnsubscribe) + ) + ) + toprune.delete(id) + } + + this.sendRpc(id, createGossipRpc([], { graft, prune })) + } + for (const [id, topics] of toprune) { + const prune = await Promise.all( + topics.map( + async (topicID) => this.makePrune(id, topicID, doPX && !(noPX.get(id) ?? false), onUnsubscribe) + ) + ) + this.sendRpc(id, createGossipRpc([], { prune })) + } + } + + /** + * Emits gossip - Send IHAVE messages to a random set of gossip peers + */ + private emitGossip (peersToGossipByTopic: Map>): void { + const gossipIDsByTopic = this.mcache.getGossipIDs(new Set(peersToGossipByTopic.keys())) + for (const [topic, peersToGossip] of peersToGossipByTopic) { + this.doEmitGossip(topic, peersToGossip, gossipIDsByTopic.get(topic) ?? 
[]) + } + } + + /** + * Send gossip messages to GossipFactor peers above threshold with a minimum of D_lazy + * Peers are randomly selected from the heartbeat which exclude mesh + fanout peers + * We also exclude direct peers, as there is no reason to emit gossip to them + * + * @param topic + * @param candidateToGossip - peers to gossip + * @param messageIDs - message ids to gossip + */ + private doEmitGossip (topic: string, candidateToGossip: Set, messageIDs: Uint8Array[]): void { + if (messageIDs.length === 0) { + return + } + + // shuffle to emit in random order + shuffle(messageIDs) + + // if we are emitting more than GossipsubMaxIHaveLength ids, truncate the list + if (messageIDs.length > constants.GossipsubMaxIHaveLength) { + // we do the truncation (with shuffling) per peer below + this.log('too many messages for gossip; will truncate IHAVE list (%d messages)', messageIDs.length) + } + + if (candidateToGossip.size === 0) { return } + let target = this.opts.Dlazy + const gossipFactor = this.opts.gossipFactor + const factor = gossipFactor * candidateToGossip.size + let peersToGossip: Set | PeerIdStr[] = candidateToGossip + if (factor > target) { + target = factor + } + if (target > peersToGossip.size) { + target = peersToGossip.size + } else { + // only shuffle if needed + peersToGossip = shuffle(Array.from(peersToGossip)).slice(0, target) + } + + // Emit the IHAVE gossip to the selected peers up to the target + peersToGossip.forEach((id) => { + let peerMessageIDs = messageIDs + if (messageIDs.length > constants.GossipsubMaxIHaveLength) { + // shuffle and slice message IDs per peer so that we emit a different set for each peer + // we have enough reduncancy in the system that this will significantly increase the message + // coverage when we do truncate + peerMessageIDs = shuffle(peerMessageIDs.slice()).slice(0, constants.GossipsubMaxIHaveLength) + } + this.pushGossip(id, { + topicID: topic, + messageIDs: peerMessageIDs + }) + }) + } + + /** + * Flush gossip 
and control messages + */ + private flush (): void { + // send gossip first, which will also piggyback control + for (const [peer, ihave] of this.gossip.entries()) { + this.gossip.delete(peer) + this.sendRpc(peer, createGossipRpc([], { ihave })) + } + // send the remaining control messages + for (const [peer, control] of this.control.entries()) { + this.control.delete(peer) + const out = createGossipRpc([], { graft: control.graft, prune: control.prune }) + this.sendRpc(peer, out) + } + } + + /** + * Adds new IHAVE messages to pending gossip + */ + private pushGossip (id: PeerIdStr, controlIHaveMsgs: RPC.ControlIHave): void { + this.log('Add gossip to %s', id) + const gossip = this.gossip.get(id) ?? [] + this.gossip.set(id, gossip.concat(controlIHaveMsgs)) + } + + /** + * Make a PRUNE control message for a peer in a topic + */ + private async makePrune ( + id: PeerIdStr, + topic: string, + doPX: boolean, + onUnsubscribe: boolean + ): Promise { + this.score.prune(id, topic) + if (this.streamsOutbound.get(id)?.protocol === constants.GossipsubIDv10) { + // Gossipsub v1.0 -- no backoff, the peer won't be able to parse it anyway + return { + topicID: topic, + peers: [] + } + } + // backoff is measured in seconds + // GossipsubPruneBackoff and GossipsubUnsubscribeBackoff are measured in milliseconds + // The protobuf has it as a uint64 + const backoffMs = onUnsubscribe ? 
this.opts.unsubcribeBackoff : this.opts.pruneBackoff + const backoff = backoffMs / 1000 + this.doAddBackoff(id, topic, backoffMs) + + if (!doPX) { + return { + topicID: topic, + peers: [], + backoff + } + } + + // select peers for Peer eXchange + const peers = this.getRandomGossipPeers(topic, this.opts.prunePeers, (xid) => { + return xid !== id && this.score.score(xid) >= 0 + }) + const px = await Promise.all( + Array.from(peers).map(async (peerId) => { + // see if we have a signed record to send back; if we don't, just send + // the peer ID and let the pruned peer find them in the DHT -- we can't trust + // unsigned address records through PX anyways + // Finding signed records in the DHT is not supported at the time of writing in js-libp2p + const id = this.peers.get(peerId) ?? peerIdFromString(peerId) + let peerInfo: Peer | undefined + + try { + peerInfo = await this.components.peerStore.get(id) + } catch (err: any) { + if (err.name !== 'NotFoundError') { + throw err + } + } + + return { + peerID: id.toMultihash().bytes, + signedPeerRecord: peerInfo?.peerRecordEnvelope + } + }) + ) + return { + topicID: topic, + peers: px, + backoff + } + } + + private readonly runHeartbeat = (): void => { + const timer = this.metrics?.heartbeatDuration.startTimer() + + this.heartbeat() + .catch((err) => { + this.log('Error running heartbeat', err) + }) + .finally(() => { + if (timer != null) { + timer() + } + + // Schedule the next run if still in started status + if (this.status.code === GossipStatusCode.started) { + // Clear previous timeout before overwriting `status.heartbeatTimeout`, it should be completed tho. + clearTimeout(this.status.heartbeatTimeout) + + // NodeJS setInterval function is innexact, calls drift by a few miliseconds on each call. + // To run the heartbeat precisely setTimeout() must be used recomputing the delay on every loop. 
+ let msToNextHeartbeat = + this.opts.heartbeatInterval - ((Date.now() - this.status.hearbeatStartMs) % this.opts.heartbeatInterval) + + // If too close to next heartbeat, skip one + if (msToNextHeartbeat < this.opts.heartbeatInterval * 0.25) { + msToNextHeartbeat += this.opts.heartbeatInterval + this.metrics?.heartbeatSkipped.inc() + } + + this.status.heartbeatTimeout = setTimeout(this.runHeartbeat, msToNextHeartbeat) + } + }) + } + + /** + * Maintains the mesh and fanout maps in gossipsub. + */ + public async heartbeat (): Promise { + const { D, Dlo, Dhi, Dscore, Dout, fanoutTTL } = this.opts + + this.heartbeatTicks++ + + // cache scores throught the heartbeat + const scores = new Map() + const getScore = (id: string): number => { + let s = scores.get(id) + if (s === undefined) { + s = this.score.score(id) + scores.set(id, s) + } + return s + } + + // peer id => topic[] + const tograft = new Map() + // peer id => topic[] + const toprune = new Map() + // peer id => don't px + const noPX = new Map() + + // clean up expired backoffs + this.clearBackoff() + + // clean up peerhave/iasked counters + this.peerhave.clear() + this.metrics?.cacheSize.set({ cache: 'iasked' }, this.iasked.size) + this.iasked.clear() + + // apply IWANT request penalties + this.applyIwantPenalties() + + // clean up IDONTWANT counters + this.idontwantCounts.clear() + + // clean up old tracked IDONTWANTs + for (const idontwants of this.idontwants.values()) { + for (const [msgId, heartbeatTick] of idontwants) { + if (this.heartbeatTicks - heartbeatTick >= this.opts.mcacheLength) { + idontwants.delete(msgId) + } + } + } + + // ensure direct peers are connected + if (this.heartbeatTicks % this.opts.directConnectTicks === 0) { + // we only do this every few ticks to allow pending connections to complete and account for restarts/downtime + await this.directConnect() + } + + // EXTRA: Prune caches + this.fastMsgIdCache?.prune() + this.seenCache.prune() + this.gossipTracer.prune() + 
this.publishedMessageIds.prune() + + /** + * Instead of calling getRandomGossipPeers multiple times to: + * + get more mesh peers + * + more outbound peers + * + oppportunistic grafting + * + emitGossip + * + * We want to loop through the topic peers only a single time and prepare gossip peers for all topics to improve the performance + */ + + const peersToGossipByTopic = new Map>() + // maintain the mesh for topics we have joined + // eslint-disable-next-line complexity + this.mesh.forEach((peers, topic) => { + const peersInTopic = this.topics.get(topic) + const candidateMeshPeers = new Set() + const peersToGossip = new Set() + peersToGossipByTopic.set(topic, peersToGossip) + + if (peersInTopic != null) { + const shuffledPeers = shuffle(Array.from(peersInTopic)) + const backoff = this.backoff.get(topic) + for (const id of shuffledPeers) { + const peerStreams = this.streamsOutbound.get(id) + if ( + (peerStreams != null) && + this.protocols.includes(peerStreams.protocol) && + !peers.has(id) && + !this.direct.has(id) + ) { + const score = getScore(id) + if (backoff?.has(id) !== true && score >= 0) { candidateMeshPeers.add(id) } + // instead of having to find gossip peers after heartbeat which require another loop + // we prepare peers to gossip in a topic within heartbeat to improve performance + if (score >= this.opts.scoreThresholds.gossipThreshold) { peersToGossip.add(id) } + } + } + } + + // prune/graft helper functions (defined per topic) + const prunePeer = (id: PeerIdStr, reason: ChurnReason): void => { + this.log('HEARTBEAT: Remove mesh link to %s in %s', id, topic) + // no need to update peer score here as we do it in makePrune + // add prune backoff record + this.addBackoff(id, topic) + // remove peer from mesh + peers.delete(id) + // after pruning a peer from mesh, we want to gossip topic to it if its score meet the gossip threshold + if (getScore(id) >= this.opts.scoreThresholds.gossipThreshold) { peersToGossip.add(id) } + 
this.metrics?.onRemoveFromMesh(topic, reason, 1) + // add to toprune + const topics = toprune.get(id) + if (topics == null) { + toprune.set(id, [topic]) + } else { + topics.push(topic) + } + } + + const graftPeer = (id: PeerIdStr, reason: InclusionReason): void => { + this.log('HEARTBEAT: Add mesh link to %s in %s', id, topic) + // update peer score + this.score.graft(id, topic) + // add peer to mesh + peers.add(id) + // when we add a new mesh peer, we don't want to gossip messages to it + peersToGossip.delete(id) + this.metrics?.onAddToMesh(topic, reason, 1) + // add to tograft + const topics = tograft.get(id) + if (topics == null) { + tograft.set(id, [topic]) + } else { + topics.push(topic) + } + } + + // drop all peers with negative score, without PX + peers.forEach((id) => { + const score = getScore(id) + + // Record the score + + if (score < 0) { + this.log('HEARTBEAT: Prune peer %s with negative score: score=%d, topic=%s', id, score, topic) + prunePeer(id, ChurnReason.BadScore) + noPX.set(id, true) + } + }) + + // do we have enough peers? + if (peers.size < Dlo) { + const ineed = D - peers.size + // slice up to first `ineed` items and remove them from candidateMeshPeers + // same to `const newMeshPeers = candidateMeshPeers.slice(0, ineed)` + const newMeshPeers = removeFirstNItemsFromSet(candidateMeshPeers, ineed) + + newMeshPeers.forEach((p) => { + graftPeer(p, InclusionReason.NotEnough) + }) + } + + // do we have to many peers? + if (peers.size > Dhi) { + let peersArray = Array.from(peers) + // sort by score + peersArray.sort((a, b) => getScore(b) - getScore(a)) + // We keep the first D_score peers by score and the remaining up to D randomly + // under the constraint that we keep D_out peers in the mesh (if we have that many) + peersArray = peersArray.slice(0, Dscore).concat(shuffle(peersArray.slice(Dscore))) + + // count the outbound peers we are keeping + let outbound = 0 + peersArray.slice(0, D).forEach((p) => { + if (this.outbound.get(p) ?? 
false) { + outbound++ + } + }) + + // if it's less than D_out, bubble up some outbound peers from the random selection + if (outbound < Dout) { + const rotate = (i: number): void => { + // rotate the peersArray to the right and put the ith peer in the front + const p = peersArray[i] + for (let j = i; j > 0; j--) { + peersArray[j] = peersArray[j - 1] + } + peersArray[0] = p + } + + // first bubble up all outbound peers already in the selection to the front + if (outbound > 0) { + let ihave = outbound + for (let i = 1; i < D && ihave > 0; i++) { + // eslint-disable-next-line max-depth + if (this.outbound.get(peersArray[i]) ?? false) { + rotate(i) + ihave-- + } + } + } + + // now bubble up enough outbound peers outside the selection to the front + let ineed = D - outbound + for (let i = D; i < peersArray.length && ineed > 0; i++) { + if (this.outbound.get(peersArray[i]) ?? false) { + rotate(i) + ineed-- + } + } + } + + // prune the excess peers + peersArray.slice(D).forEach((p) => { + prunePeer(p, ChurnReason.Excess) + }) + } + + // do we have enough outbound peers? + if (peers.size >= Dlo) { + // count the outbound peers we have + let outbound = 0 + peers.forEach((p) => { + if (this.outbound.get(p) ?? false) { + outbound++ + } + }) + + // if it's less than D_out, select some peers with outbound connections and graft them + if (outbound < Dout) { + const ineed = Dout - outbound + const newMeshPeers = removeItemsFromSet(candidateMeshPeers, ineed, (id) => this.outbound.get(id) === true) + + newMeshPeers.forEach((p) => { + graftPeer(p, InclusionReason.Outbound) + }) + } + } + + // should we try to improve the mesh with opportunistic grafting? + if (this.heartbeatTicks % this.opts.opportunisticGraftTicks === 0 && peers.size > 1) { + // Opportunistic grafting works as follows: we check the median score of peers in the + // mesh; if this score is below the opportunisticGraftThreshold, we select a few peers at + // random with score over the median. 
+ // The intention is to (slowly) improve an underperforming mesh by introducing good + // scoring peers that may have been gossiping at us. This allows us to get out of sticky + // situations where we are stuck with poor peers and also recover from churn of good peers. + + // now compute the median peer score in the mesh + const peersList = Array.from(peers).sort((a, b) => getScore(a) - getScore(b)) + const medianIndex = Math.floor(peers.size / 2) + const medianScore = getScore(peersList[medianIndex]) + + // if the median score is below the threshold, select a better peer (if any) and GRAFT + if (medianScore < this.opts.scoreThresholds.opportunisticGraftThreshold) { + const ineed = this.opts.opportunisticGraftPeers + const newMeshPeers = removeItemsFromSet(candidateMeshPeers, ineed, (id) => getScore(id) > medianScore) + for (const id of newMeshPeers) { + this.log('HEARTBEAT: Opportunistically graft peer %s on topic %s', id, topic) + graftPeer(id, InclusionReason.Opportunistic) + } + } + } + }) + + // expire fanout for topics we haven't published to in a while + const now = Date.now() + this.fanoutLastpub.forEach((lastpb, topic) => { + if (lastpb + fanoutTTL < now) { + this.fanout.delete(topic) + this.fanoutLastpub.delete(topic) + } + }) + + // maintain our fanout for topics we are publishing but we have not joined + this.fanout.forEach((fanoutPeers, topic) => { + // checks whether our peers are still in the topic and have a score above the publish threshold + const topicPeers = this.topics.get(topic) + fanoutPeers.forEach((id) => { + if (!(topicPeers?.has(id) ?? false) || getScore(id) < this.opts.scoreThresholds.publishThreshold) { + fanoutPeers.delete(id) + } + }) + + const peersInTopic = this.topics.get(topic) + const candidateFanoutPeers = [] + // the fanout map contains topics to which we are not subscribed. 
+ const peersToGossip = new Set() + peersToGossipByTopic.set(topic, peersToGossip) + + if (peersInTopic != null) { + const shuffledPeers = shuffle(Array.from(peersInTopic)) + for (const id of shuffledPeers) { + const peerStreams = this.streamsOutbound.get(id) + if ( + (peerStreams != null) && + this.protocols.includes(peerStreams.protocol) && + !fanoutPeers.has(id) && + !this.direct.has(id) + ) { + const score = getScore(id) + if (score >= this.opts.scoreThresholds.publishThreshold) { candidateFanoutPeers.push(id) } + // instead of having to find gossip peers after heartbeat which require another loop + // we prepare peers to gossip in a topic within heartbeat to improve performance + if (score >= this.opts.scoreThresholds.gossipThreshold) { peersToGossip.add(id) } + } + } + } + + // do we need more peers? + if (fanoutPeers.size < D) { + const ineed = D - fanoutPeers.size + candidateFanoutPeers.slice(0, ineed).forEach((id) => { + fanoutPeers.add(id) + peersToGossip?.delete(id) + }) + } + }) + + this.emitGossip(peersToGossipByTopic) + + // send coalesced GRAFT/PRUNE messages (will piggyback gossip) + await this.sendGraftPrune(tograft, toprune, noPX) + + // flush pending gossip that wasn't piggybacked above + this.flush() + + // advance the message history window + this.mcache.shift() + + this.dispatchEvent(new CustomEvent('gossipsub:heartbeat')) + } + + /** + * Given a topic, returns up to count peers subscribed to that topic + * that pass an optional filter function + * + * @param topic + * @param count + * @param filter - a function to filter acceptable peers + */ + private getRandomGossipPeers ( + topic: string, + count: number, + filter: (id: string) => boolean = () => true + ): Set { + const peersInTopic = this.topics.get(topic) + + if (peersInTopic == null) { + return new Set() + } + + // Adds all peers using our protocol + // that also pass the filter function + let peers: string[] = [] + peersInTopic.forEach((id) => { + const peerStreams = 
this.streamsOutbound.get(id) + if (peerStreams == null) { + return + } + if (this.protocols.includes(peerStreams.protocol) && filter(id)) { + peers.push(id) + } + }) + + // Pseudo-randomly shuffles peers + peers = shuffle(peers) + if (count > 0 && peers.length > count) { + peers = peers.slice(0, count) + } + + return new Set(peers) + } + + private onScrapeMetrics (metrics: Metrics): void { + /* Data structure sizes */ + metrics.mcacheSize.set(this.mcache.size) + metrics.mcacheNotValidatedCount.set(this.mcache.notValidatedCount) + // Arbitrary size + metrics.cacheSize.set({ cache: 'direct' }, this.direct.size) + metrics.cacheSize.set({ cache: 'seenCache' }, this.seenCache.size) + metrics.cacheSize.set({ cache: 'fastMsgIdCache' }, this.fastMsgIdCache?.size ?? 0) + metrics.cacheSize.set({ cache: 'publishedMessageIds' }, this.publishedMessageIds.size) + metrics.cacheSize.set({ cache: 'mcache' }, this.mcache.size) + metrics.cacheSize.set({ cache: 'score' }, this.score.size) + metrics.cacheSize.set({ cache: 'gossipTracer.promises' }, this.gossipTracer.size) + metrics.cacheSize.set({ cache: 'gossipTracer.requests' }, this.gossipTracer.requestMsByMsgSize) + // Bounded by topic + metrics.cacheSize.set({ cache: 'topics' }, this.topics.size) + metrics.cacheSize.set({ cache: 'subscriptions' }, this.subscriptions.size) + metrics.cacheSize.set({ cache: 'mesh' }, this.mesh.size) + metrics.cacheSize.set({ cache: 'fanout' }, this.fanout.size) + // Bounded by peer + metrics.cacheSize.set({ cache: 'peers' }, this.peers.size) + metrics.cacheSize.set({ cache: 'streamsOutbound' }, this.streamsOutbound.size) + metrics.cacheSize.set({ cache: 'streamsInbound' }, this.streamsInbound.size) + metrics.cacheSize.set({ cache: 'acceptFromWhitelist' }, this.acceptFromWhitelist.size) + metrics.cacheSize.set({ cache: 'gossip' }, this.gossip.size) + metrics.cacheSize.set({ cache: 'control' }, this.control.size) + metrics.cacheSize.set({ cache: 'peerhave' }, this.peerhave.size) + 
metrics.cacheSize.set({ cache: 'outbound' }, this.outbound.size) + + // 2D nested data structure + let backoffSize = 0 + const now = Date.now() + metrics.connectedPeersBackoffSec.reset() + for (const backoff of this.backoff.values()) { + backoffSize += backoff.size + for (const [peer, expiredMs] of backoff.entries()) { + if (this.peers.has(peer)) { + metrics.connectedPeersBackoffSec.observe(Math.max(0, expiredMs - now) / 1000) + } + } + } + metrics.cacheSize.set({ cache: 'backoff' }, backoffSize) + + let idontwantsCount = 0 + for (const idontwant of this.idontwants.values()) { + idontwantsCount += idontwant.size + } + metrics.cacheSize.set({ cache: 'idontwants' }, idontwantsCount) + + // Peer counts + + for (const [topicStr, peers] of this.topics) { + metrics.topicPeersCount.set({ topicStr }, peers.size) + } + + for (const [topicStr, peers] of this.mesh) { + metrics.meshPeerCounts.set({ topicStr }, peers.size) + } + + // Peer scores + + const scores: number[] = [] + const scoreByPeer = new Map() + metrics.behaviourPenalty.reset() + + for (const peerIdStr of this.peers.keys()) { + const score = this.score.score(peerIdStr) + scores.push(score) + scoreByPeer.set(peerIdStr, score) + metrics.behaviourPenalty.observe(this.score.peerStats.get(peerIdStr)?.behaviourPenalty ?? 0) + } + + metrics.registerScores(scores, this.opts.scoreThresholds) + + // Breakdown score per mesh topicLabel + + metrics.registerScorePerMesh(this.mesh, scoreByPeer) + + // Breakdown on each score weight + + const sw = computeAllPeersScoreWeights( + this.peers.keys(), + this.score.peerStats, + this.score.params, + this.score.peerIPs, + metrics.topicStrToLabel + ) + + metrics.registerScoreWeights(sw) + } + + private readonly tagMeshPeer = (evt: CustomEvent): void => { + const { peerId, topic } = evt.detail + this.components.peerStore.merge(this.peers.get(peerId) ?? 
peerIdFromString(peerId), { + tags: { + [topic]: { + value: 100 + } + } + }).catch((err) => { this.log.error('Error tagging peer %s with topic %s', peerId, topic, err) }) + } + + private readonly untagMeshPeer = (evt: CustomEvent): void => { + const { peerId, topic } = evt.detail + this.components.peerStore.merge(this.peers.get(peerId) ?? peerIdFromString(peerId), { + tags: { + [topic]: undefined + } + }).catch((err) => { this.log.error('Error untagging peer %s with topic %s', peerId, topic, err) }) + } +} diff --git a/packages/gossipsub/src/index.ts b/packages/gossipsub/src/index.ts new file mode 100644 index 0000000000..70dc4d7344 --- /dev/null +++ b/packages/gossipsub/src/index.ts @@ -0,0 +1,404 @@ +import * as constants from './constants.js' +import { GossipSub as GossipSubClass } from './gossipsub.ts' +import { MessageCache } from './message-cache.js' +import type { GossipsubOptsSpec } from './config.js' +import type { DecodeRPCLimits } from './message/decodeRpc.js' +import type { MetricsRegister, TopicStrToLabel } from './metrics.js' +import type { PeerScoreParams, PeerScoreThresholds } from './score/index.js' +import type { MsgIdFn, MsgIdStr, FastMsgIdFn, AddrInfo, DataTransform, MsgIdToStrFn } from './types.js' +import type { + PeerId, PeerStore, + ComponentLogger, + PrivateKey, + PublicKey, + TypedEventTarget, + MessageStreamDirection +} from '@libp2p/interface' +import type { ConnectionManager, Registrar } from '@libp2p/interface-internal' + +/** + * On the producing side: + * - Build messages with the signature, key (from may be enough for certain inlineable public key types), from and seqno fields. + * + * On the consuming side: + * - Enforce the fields to be present, reject otherwise. + * - Propagate only if the fields are valid and signature can be verified, reject otherwise. + */ +export const StrictSign = 'StrictSign' + +/** + * On the producing side: + * - Build messages without the signature, key, from and seqno fields. 
+ * - The corresponding protobuf key-value pairs are absent from the marshaled message, not just empty. + * + * On the consuming side: + * - Enforce the fields to be absent, reject otherwise. + * - Propagate only if the fields are absent, reject otherwise. + * - A message_id function will not be able to use the above fields, and should instead rely on the data field. A commonplace strategy is to calculate a hash. + */ +export const StrictNoSign = 'StrictNoSign' + +export type SignaturePolicy = typeof StrictSign | typeof StrictNoSign + +export enum TopicValidatorResult { + /** + * The message is considered valid, and it should be delivered and forwarded to the network + */ + Accept = 'accept', + /** + * The message is neither delivered nor forwarded to the network + */ + Ignore = 'ignore', + /** + * The message is considered invalid, and it should be rejected + */ + Reject = 'reject' +} + +export interface SignedMessage { + type: 'signed' + from: PeerId + topic: string + data: Uint8Array + sequenceNumber: bigint + signature: Uint8Array + key: PublicKey +} + +export interface UnsignedMessage { + type: 'unsigned' + topic: string + data: Uint8Array +} + +export type Message = SignedMessage | UnsignedMessage + +export interface PublishResult { + recipients: PeerId[] +} + +export interface Subscription { + topic: string + subscribe: boolean +} + +export interface SubscriptionChangeData { + peerId: PeerId + subscriptions: Subscription[] +} + +export interface TopicValidatorFn { + (peer: PeerId, message: Message): TopicValidatorResult | Promise +} + +export const multicodec: string = constants.GossipsubIDv12 + +export interface GossipsubOpts extends GossipsubOptsSpec { + /** if dial should fallback to floodsub */ + fallbackToFloodsub: boolean + /** if self-published messages should be sent to all peers */ + floodPublish: boolean + /** serialize message once and send to all peers without control messages */ + batchPublish: boolean + /** whether PX is enabled; this should be 
enabled in bootstrappers and other well connected/trusted nodes. */ + doPX: boolean + /** peers with which we will maintain direct connections */ + directPeers: AddrInfo[] + /** + * If true will not forward messages to mesh peers until reportMessageValidationResult() is called. + * Messages will be cached in mcache for some time after which they are evicted. Calling + * reportMessageValidationResult() after the message is dropped from mcache won't forward the message. + */ + asyncValidation: boolean + /** + * Do not throw `PublishError.NoPeersSubscribedToTopic` error if there are no + * peers listening on the topic. + * + * N.B. if you sent this option to true, and you publish a message on a topic + * with no peers listening on that topic, no other network node will ever + * receive the message. + */ + allowPublishToZeroTopicPeers: boolean + /** Do not throw `PublishError.Duplicate` if publishing duplicate messages */ + ignoreDuplicatePublishError: boolean + /** For a single stream, await processing each RPC before processing the next */ + awaitRpcHandler: boolean + /** For a single RPC, await processing each message before processing the next */ + awaitRpcMessageHandler: boolean + + /** message id function */ + msgIdFn: MsgIdFn + /** fast message id function */ + fastMsgIdFn: FastMsgIdFn + /** Uint8Array message id to string function */ + msgIdToStrFn: MsgIdToStrFn + /** override the default MessageCache */ + messageCache: MessageCache + /** peer score parameters */ + scoreParams: Partial + /** peer score thresholds */ + scoreThresholds: Partial + /** customize GossipsubIWantFollowupTime in order not to apply IWANT penalties */ + gossipsubIWantFollowupMs: number + + /** override constants for fine tuning */ + prunePeers?: number + pruneBackoff?: number + unsubcribeBackoff?: number + graftFloodThreshold?: number + opportunisticGraftPeers?: number + opportunisticGraftTicks?: number + directConnectTicks?: number + + dataTransform?: DataTransform + metricsRegister?: 
MetricsRegister | null + metricsTopicStrToLabel?: TopicStrToLabel + + // Debug + /** Prefix tag for debug logs */ + debugName?: string + + /** + * Specify the maximum number of inbound gossipsub protocol + * streams that are allowed to be open concurrently + */ + maxInboundStreams?: number + + /** + * Specify the maximum number of outbound gossipsub protocol + * streams that are allowed to be open concurrently + */ + maxOutboundStreams?: number + + /** + * Pass true to run on limited connections - data or time-limited + * connections that may be closed at any time such as circuit relay + * connections. + * + * @default false + */ + runOnLimitedConnection?: boolean + + /** + * Specify max buffer size in bytes for OutboundStream. + * If full it will throw and reject sending any more data. + */ + maxOutboundBufferSize?: number + + /** + * Specify max size to skip decoding messages whose data + * section exceeds this size. + * + */ + maxInboundDataLength?: number + + /** + * If provided, only allow topics in this list + */ + allowedTopics?: string[] | Set + + /** + * Limits to bound protobuf decoding + */ + decodeRpcLimits?: DecodeRPCLimits + + /** + * If true, will utilize the libp2p connection manager tagging system to prune/graft connections to peers, defaults to true + */ + tagMeshPeers: boolean + + /** + * Specify what percent of peers to send gossip to. If the percent results in + * a number smaller than `Dlazy`, `Dlazy` will be used instead. 
+ * + * It should be a number between 0 and 1, with a reasonable default of 0.25 + */ + gossipFactor: number + + /** + * The minimum message size in bytes to be considered for sending IDONTWANT messages + * + * @default 512 + */ + idontwantMinDataSize?: number + + /** + * The maximum number of IDONTWANT messages per heartbeat per peer + * + * @default 512 + */ + idontwantMaxMessages?: number + + /** + * Override the protocol registered with the registrar + * + * @default ['/floodsub/1.0.0'] + */ + protocols?: string[] + + /** + * defines how signatures should be handled + */ + globalSignaturePolicy?: SignaturePolicy + + /** + * if can relay messages not subscribed + */ + canRelayMessage?: boolean + + /** + * if publish should emit to self, if subscribed + */ + emitSelf?: boolean + + /** + * handle this many incoming pubsub messages concurrently + */ + messageProcessingConcurrency?: number +} + +export interface GossipsubMessage { + propagationSource: PeerId + msgId: MsgIdStr + msg: Message +} + +export interface MeshPeer { + peerId: string + topic: string + direction: MessageStreamDirection +} + +export interface GossipSubEvents { + 'subscription-change': CustomEvent + message: CustomEvent + 'gossipsub:heartbeat': CustomEvent + 'gossipsub:message': CustomEvent + 'gossipsub:graft': CustomEvent + 'gossipsub:prune': CustomEvent +} + +export interface GossipSubComponents { + privateKey: PrivateKey + peerId: PeerId + peerStore: PeerStore + registrar: Registrar + connectionManager: ConnectionManager + logger: ComponentLogger +} + +export interface GossipSub extends TypedEventTarget { + /** + * The global signature policy controls whether or not we sill send and receive + * signed or unsigned messages. + * + * Signed messages prevent spoofing message senders and should be preferred to + * using unsigned messages. + */ + globalSignaturePolicy: typeof StrictSign | typeof StrictNoSign + + /** + * A list of multicodecs that contain the pubsub protocol name. 
+ */ + protocols: string[] + + /** + * Pubsub routers support message validators per topic, which will validate the message + * before its propagations. They are stored in a map where keys are the topic name and + * values are the validators. + * + * @example + * + * ```TypeScript + * const topic = 'topic' + * const validateMessage = (msgTopic, msg) => { + * const input = uint8ArrayToString(msg.data) + * const validInputs = ['a', 'b', 'c'] + * + * if (!validInputs.includes(input)) { + * throw new Error('no valid input received') + * } + * } + * libp2p.pubsub.topicValidators.set(topic, validateMessage) + * ``` + */ + topicValidators: Map + + getPeers(): PeerId[] + + /** + * Gets a list of topics the node is subscribed to. + * + * ```TypeScript + * const topics = libp2p.pubsub.getTopics() + * ``` + */ + getTopics(): string[] + + /** + * Subscribes to a pubsub topic. + * + * @example + * + * ```TypeScript + * const topic = 'topic' + * const handler = (msg) => { + * if (msg.topic === topic) { + * // msg.data - pubsub data received + * } + * } + * + * libp2p.pubsub.addEventListener('message', handler) + * libp2p.pubsub.subscribe(topic) + * ``` + */ + subscribe(topic: string): void + + /** + * Unsubscribes from a pubsub topic. + * + * @example + * + * ```TypeScript + * const topic = 'topic' + * const handler = (msg) => { + * // msg.data - pubsub data received + * } + * + * libp2p.pubsub.removeEventListener(topic handler) + * libp2p.pubsub.unsubscribe(topic) + * ``` + */ + unsubscribe(topic: string): void + + /** + * Gets a list of the PeerIds that are subscribed to one topic. + * + * @example + * + * ```TypeScript + * const peerIds = libp2p.pubsub.getSubscribers(topic) + * ``` + */ + getSubscribers(topic: string): PeerId[] + + /** + * Publishes messages to the given topic. 
+ * + * @example + * + * ```TypeScript + * const topic = 'topic' + * const data = uint8ArrayFromString('data') + * + * await libp2p.pubsub.publish(topic, data) + * ``` + */ + publish(topic: string, data?: Uint8Array): Promise +} + +export function gossipsub ( + init: Partial = {} +): (components: GossipSubComponents) => GossipSub { + return (components: GossipSubComponents) => new GossipSubClass(components, init) +} diff --git a/packages/gossipsub/src/message-cache.ts b/packages/gossipsub/src/message-cache.ts new file mode 100644 index 0000000000..a382373ef6 --- /dev/null +++ b/packages/gossipsub/src/message-cache.ts @@ -0,0 +1,196 @@ +import type { RPC } from './message/rpc.js' +import type { MessageId, MsgIdStr, PeerIdStr, TopicStr, MsgIdToStrFn } from './types.js' + +export type CacheEntry = MessageId & { + topic: TopicStr +} + +export type MessageCacheRecord = Pick + +interface MessageCacheEntry { + message: RPC.Message + /** + * Tracks if the message has been validated by the app layer and thus forwarded + */ + validated: boolean + /** + * Tracks peers that sent this message before it has been validated by the app layer + */ + originatingPeers: Set + /** + * For every message and peer the number of times this peer asked for the message + */ + iwantCounts: Map +} + +export class MessageCache { + msgs = new Map() + + msgIdToStrFn: MsgIdToStrFn + + history: CacheEntry[][] = [] + + /** Track with accounting of messages in the mcache that are not yet validated */ + notValidatedCount = 0 + + /** + * Holds history of messages in timebounded history arrays + */ + constructor ( + /** + * The number of indices in the cache history used for gossiping. That means that a message + * won't get gossiped anymore when shift got called `gossip` many times after inserting the + * message in the cache. 
+ */ + private readonly gossip: number, + historyCapacity: number, + msgIdToStrFn: MsgIdToStrFn + ) { + this.msgIdToStrFn = msgIdToStrFn + for (let i = 0; i < historyCapacity; i++) { + this.history[i] = [] + } + } + + get size (): number { + return this.msgs.size + } + + /** + * Adds a message to the current window and the cache + * Returns true if the message is not known and is inserted in the cache + */ + put (messageId: MessageId, msg: RPC.Message, validated = false): boolean { + const { msgIdStr } = messageId + // Don't add duplicate entries to the cache. + if (this.msgs.has(msgIdStr)) { + return false + } + + this.msgs.set(msgIdStr, { + message: msg, + validated, + originatingPeers: new Set(), + iwantCounts: new Map() + }) + + this.history[0].push({ ...messageId, topic: msg.topic }) + + if (!validated) { + this.notValidatedCount++ + } + + return true + } + + observeDuplicate (msgId: MsgIdStr, fromPeerIdStr: PeerIdStr): void { + const entry = this.msgs.get(msgId) + + if ( + (entry != null) && + // if the message is already validated, we don't need to store extra peers sending us + // duplicates as the message has already been forwarded + !entry.validated + ) { + entry.originatingPeers.add(fromPeerIdStr) + } + } + + /** + * Retrieves a message from the cache by its ID, if it is still present + */ + get (msgId: Uint8Array): RPC.Message | undefined { + return this.msgs.get(this.msgIdToStrFn(msgId))?.message + } + + /** + * Increases the iwant count for the given message by one and returns the message together + * with the iwant if the message exists. + */ + getWithIWantCount (msgIdStr: string, p: string): { msg: RPC.Message, count: number } | null { + const msg = this.msgs.get(msgIdStr) + if (msg == null) { + return null + } + + const count = (msg.iwantCounts.get(p) ?? 
0) + 1 + msg.iwantCounts.set(p, count) + + return { msg: msg.message, count } + } + + /** + * Retrieves a list of message IDs for a set of topics + */ + getGossipIDs (topics: Set): Map { + const msgIdsByTopic = new Map() + for (let i = 0; i < this.gossip; i++) { + this.history[i].forEach((entry) => { + const msg = this.msgs.get(entry.msgIdStr) + if ((msg?.validated ?? false) && topics.has(entry.topic)) { + let msgIds = msgIdsByTopic.get(entry.topic) + if (msgIds == null) { + msgIds = [] + msgIdsByTopic.set(entry.topic, msgIds) + } + msgIds.push(entry.msgId) + } + }) + } + + return msgIdsByTopic + } + + /** + * Gets a message with msgId and tags it as validated. + * This function also returns the known peers that have sent us this message. This is used to + * prevent us sending redundant messages to peers who have already propagated it. + */ + validate (msgId: MsgIdStr): MessageCacheRecord | null { + const entry = this.msgs.get(msgId) + if (entry == null) { + return null + } + + if (!entry.validated) { + this.notValidatedCount-- + } + + const { message, originatingPeers } = entry + entry.validated = true + // Clear the known peers list (after a message is validated, it is forwarded and we no + // longer need to store the originating peers). 
+ entry.originatingPeers = new Set() + return { message, originatingPeers } + } + + /** + * Shifts the current window, discarding messages older than this.history.length of the cache + */ + shift (): void { + const lastCacheEntries = this.history[this.history.length - 1] + lastCacheEntries.forEach((cacheEntry) => { + const entry = this.msgs.get(cacheEntry.msgIdStr) + if (entry != null) { + this.msgs.delete(cacheEntry.msgIdStr) + if (!entry.validated) { + this.notValidatedCount-- + } + } + }) + + this.history.pop() + this.history.unshift([]) + } + + remove (msgId: MsgIdStr): MessageCacheRecord | null { + const entry = this.msgs.get(msgId) + if (entry == null) { + return null + } + + // Keep the message on the history vector, it will be dropped on a shift() + this.msgs.delete(msgId) + return entry + } +} diff --git a/packages/gossipsub/src/message/decodeRpc.ts b/packages/gossipsub/src/message/decodeRpc.ts new file mode 100644 index 0000000000..7b9ecd99a5 --- /dev/null +++ b/packages/gossipsub/src/message/decodeRpc.ts @@ -0,0 +1,19 @@ +export interface DecodeRPCLimits { + maxSubscriptions: number + maxMessages: number + maxIhaveMessageIDs: number + maxIwantMessageIDs: number + maxIdontwantMessageIDs: number + maxControlMessages: number + maxPeerInfos: number +} + +export const defaultDecodeRpcLimits: DecodeRPCLimits = { + maxSubscriptions: Infinity, + maxMessages: Infinity, + maxIhaveMessageIDs: Infinity, + maxIwantMessageIDs: Infinity, + maxIdontwantMessageIDs: Infinity, + maxControlMessages: Infinity, + maxPeerInfos: Infinity +} diff --git a/packages/gossipsub/src/message/index.ts b/packages/gossipsub/src/message/index.ts new file mode 100644 index 0000000000..d1e0e3604b --- /dev/null +++ b/packages/gossipsub/src/message/index.ts @@ -0,0 +1 @@ +export * from './rpc.js' diff --git a/packages/gossipsub/src/message/rpc.proto b/packages/gossipsub/src/message/rpc.proto new file mode 100644 index 0000000000..efe8fafe37 --- /dev/null +++ 
b/packages/gossipsub/src/message/rpc.proto @@ -0,0 +1,58 @@ +syntax = "proto3"; + +message RPC { + repeated SubOpts subscriptions = 1; + repeated Message messages = 2; + optional ControlMessage control = 3; + + message SubOpts { + optional bool subscribe = 1; // subscribe or unsubcribe + optional string topic = 2; + } + + message Message { + optional bytes from = 1; + optional bytes data = 2; + optional bytes seqno = 3; + string topic = 4; + optional bytes signature = 5; + optional bytes key = 6; + } + + message ControlMessage { + repeated ControlIHave ihave = 1; + repeated ControlIWant iwant = 2; + repeated ControlGraft graft = 3; + repeated ControlPrune prune = 4; + repeated ControlIDontWant idontwant = 5; + } + + message ControlIHave { + optional string topicID = 1; + repeated bytes messageIDs = 2; + } + + message ControlIWant { + repeated bytes messageIDs = 1; + } + + message ControlGraft { + optional string topicID = 1; + } + + message ControlPrune { + optional string topicID = 1; + repeated PeerInfo peers = 2; + optional uint64 backoff = 3 [jstype = JS_NUMBER]; + } + + message PeerInfo { + optional bytes peerID = 1; + optional bytes signedPeerRecord = 2; + } + + message ControlIDontWant { + repeated bytes messageIDs = 1; + } + +} \ No newline at end of file diff --git a/packages/gossipsub/src/message/rpc.ts b/packages/gossipsub/src/message/rpc.ts new file mode 100644 index 0000000000..eb018dd6e1 --- /dev/null +++ b/packages/gossipsub/src/message/rpc.ts @@ -0,0 +1,848 @@ +/* eslint-disable complexity */ + +import { decodeMessage, encodeMessage, MaxLengthError, message } from 'protons-runtime' +import type { Codec, DecodeOptions } from 'protons-runtime' +import type { Uint8ArrayList } from 'uint8arraylist' + +export interface RPC { + subscriptions: RPC.SubOpts[] + messages: RPC.Message[] + control?: RPC.ControlMessage +} + +export namespace RPC { + export interface SubOpts { + subscribe?: boolean + topic?: string + } + + export namespace SubOpts { + let _codec: 
Codec + + export const codec = (): Codec => { + if (_codec == null) { + _codec = message((obj, w, opts = {}) => { + if (opts.lengthDelimited !== false) { + w.fork() + } + + if (obj.subscribe != null) { + w.uint32(8) + w.bool(obj.subscribe) + } + + if (obj.topic != null) { + w.uint32(18) + w.string(obj.topic) + } + + if (opts.lengthDelimited !== false) { + w.ldelim() + } + }, (reader, length, opts = {}) => { + const obj: any = {} + + const end = length == null ? reader.len : reader.pos + length + + while (reader.pos < end) { + const tag = reader.uint32() + + switch (tag >>> 3) { + case 1: { + obj.subscribe = reader.bool() + break + } + case 2: { + obj.topic = reader.string() + break + } + default: { + reader.skipType(tag & 7) + break + } + } + } + + return obj + }) + } + + return _codec + } + + export const encode = (obj: Partial): Uint8Array => { + return encodeMessage(obj, SubOpts.codec()) + } + + export const decode = (buf: Uint8Array | Uint8ArrayList, opts?: DecodeOptions): SubOpts => { + return decodeMessage(buf, SubOpts.codec(), opts) + } + } + + export interface Message { + from?: Uint8Array + data?: Uint8Array + seqno?: Uint8Array + topic: string + signature?: Uint8Array + key?: Uint8Array + } + + export namespace Message { + let _codec: Codec + + export const codec = (): Codec => { + if (_codec == null) { + _codec = message((obj, w, opts = {}) => { + if (opts.lengthDelimited !== false) { + w.fork() + } + + if (obj.from != null) { + w.uint32(10) + w.bytes(obj.from) + } + + if (obj.data != null) { + w.uint32(18) + w.bytes(obj.data) + } + + if (obj.seqno != null) { + w.uint32(26) + w.bytes(obj.seqno) + } + + if ((obj.topic != null && obj.topic !== '')) { + w.uint32(34) + w.string(obj.topic) + } + + if (obj.signature != null) { + w.uint32(42) + w.bytes(obj.signature) + } + + if (obj.key != null) { + w.uint32(50) + w.bytes(obj.key) + } + + if (opts.lengthDelimited !== false) { + w.ldelim() + } + }, (reader, length, opts = {}) => { + const obj: any = { + topic: 
'' + } + + const end = length == null ? reader.len : reader.pos + length + + while (reader.pos < end) { + const tag = reader.uint32() + + switch (tag >>> 3) { + case 1: { + obj.from = reader.bytes() + break + } + case 2: { + obj.data = reader.bytes() + break + } + case 3: { + obj.seqno = reader.bytes() + break + } + case 4: { + obj.topic = reader.string() + break + } + case 5: { + obj.signature = reader.bytes() + break + } + case 6: { + obj.key = reader.bytes() + break + } + default: { + reader.skipType(tag & 7) + break + } + } + } + + return obj + }) + } + + return _codec + } + + export const encode = (obj: Partial): Uint8Array => { + return encodeMessage(obj, Message.codec()) + } + + export const decode = (buf: Uint8Array | Uint8ArrayList, opts?: DecodeOptions): Message => { + return decodeMessage(buf, Message.codec(), opts) + } + } + + export interface ControlMessage { + ihave: RPC.ControlIHave[] + iwant: RPC.ControlIWant[] + graft: RPC.ControlGraft[] + prune: RPC.ControlPrune[] + idontwant: RPC.ControlIDontWant[] + } + + export namespace ControlMessage { + let _codec: Codec + + export const codec = (): Codec => { + if (_codec == null) { + _codec = message((obj, w, opts = {}) => { + if (opts.lengthDelimited !== false) { + w.fork() + } + + if (obj.ihave != null) { + for (const value of obj.ihave) { + w.uint32(10) + RPC.ControlIHave.codec().encode(value, w) + } + } + + if (obj.iwant != null) { + for (const value of obj.iwant) { + w.uint32(18) + RPC.ControlIWant.codec().encode(value, w) + } + } + + if (obj.graft != null) { + for (const value of obj.graft) { + w.uint32(26) + RPC.ControlGraft.codec().encode(value, w) + } + } + + if (obj.prune != null) { + for (const value of obj.prune) { + w.uint32(34) + RPC.ControlPrune.codec().encode(value, w) + } + } + + if (obj.idontwant != null) { + for (const value of obj.idontwant) { + w.uint32(42) + RPC.ControlIDontWant.codec().encode(value, w) + } + } + + if (opts.lengthDelimited !== false) { + w.ldelim() + } + }, (reader, 
length, opts = {}) => { + const obj: any = { + ihave: [], + iwant: [], + graft: [], + prune: [], + idontwant: [] + } + + const end = length == null ? reader.len : reader.pos + length + + while (reader.pos < end) { + const tag = reader.uint32() + + switch (tag >>> 3) { + case 1: { + if (opts.limits?.ihave != null && obj.ihave.length === opts.limits.ihave) { + throw new MaxLengthError('Decode error - map field "ihave" had too many elements') + } + + obj.ihave.push(RPC.ControlIHave.codec().decode(reader, reader.uint32(), { + limits: opts.limits?.ihave$ + })) + break + } + case 2: { + if (opts.limits?.iwant != null && obj.iwant.length === opts.limits.iwant) { + throw new MaxLengthError('Decode error - map field "iwant" had too many elements') + } + + obj.iwant.push(RPC.ControlIWant.codec().decode(reader, reader.uint32(), { + limits: opts.limits?.iwant$ + })) + break + } + case 3: { + if (opts.limits?.graft != null && obj.graft.length === opts.limits.graft) { + throw new MaxLengthError('Decode error - map field "graft" had too many elements') + } + + obj.graft.push(RPC.ControlGraft.codec().decode(reader, reader.uint32(), { + limits: opts.limits?.graft$ + })) + break + } + case 4: { + if (opts.limits?.prune != null && obj.prune.length === opts.limits.prune) { + throw new MaxLengthError('Decode error - map field "prune" had too many elements') + } + + obj.prune.push(RPC.ControlPrune.codec().decode(reader, reader.uint32(), { + limits: opts.limits?.prune$ + })) + break + } + case 5: { + if (opts.limits?.idontwant != null && obj.idontwant.length === opts.limits.idontwant) { + throw new MaxLengthError('Decode error - map field "idontwant" had too many elements') + } + + obj.idontwant.push(RPC.ControlIDontWant.codec().decode(reader, reader.uint32(), { + limits: opts.limits?.idontwant$ + })) + break + } + default: { + reader.skipType(tag & 7) + break + } + } + } + + return obj + }) + } + + return _codec + } + + export const encode = (obj: Partial): Uint8Array => { + return 
encodeMessage(obj, ControlMessage.codec()) + } + + export const decode = (buf: Uint8Array | Uint8ArrayList, opts?: DecodeOptions): ControlMessage => { + return decodeMessage(buf, ControlMessage.codec(), opts) + } + } + + export interface ControlIHave { + topicID?: string + messageIDs: Uint8Array[] + } + + export namespace ControlIHave { + let _codec: Codec + + export const codec = (): Codec => { + if (_codec == null) { + _codec = message((obj, w, opts = {}) => { + if (opts.lengthDelimited !== false) { + w.fork() + } + + if (obj.topicID != null) { + w.uint32(10) + w.string(obj.topicID) + } + + if (obj.messageIDs != null) { + for (const value of obj.messageIDs) { + w.uint32(18) + w.bytes(value) + } + } + + if (opts.lengthDelimited !== false) { + w.ldelim() + } + }, (reader, length, opts = {}) => { + const obj: any = { + messageIDs: [] + } + + const end = length == null ? reader.len : reader.pos + length + + while (reader.pos < end) { + const tag = reader.uint32() + + switch (tag >>> 3) { + case 1: { + obj.topicID = reader.string() + break + } + case 2: { + if (opts.limits?.messageIDs != null && obj.messageIDs.length === opts.limits.messageIDs) { + throw new MaxLengthError('Decode error - map field "messageIDs" had too many elements') + } + + obj.messageIDs.push(reader.bytes()) + break + } + default: { + reader.skipType(tag & 7) + break + } + } + } + + return obj + }) + } + + return _codec + } + + export const encode = (obj: Partial): Uint8Array => { + return encodeMessage(obj, ControlIHave.codec()) + } + + export const decode = (buf: Uint8Array | Uint8ArrayList, opts?: DecodeOptions): ControlIHave => { + return decodeMessage(buf, ControlIHave.codec(), opts) + } + } + + export interface ControlIWant { + messageIDs: Uint8Array[] + } + + export namespace ControlIWant { + let _codec: Codec + + export const codec = (): Codec => { + if (_codec == null) { + _codec = message((obj, w, opts = {}) => { + if (opts.lengthDelimited !== false) { + w.fork() + } + + if 
(obj.messageIDs != null) { + for (const value of obj.messageIDs) { + w.uint32(10) + w.bytes(value) + } + } + + if (opts.lengthDelimited !== false) { + w.ldelim() + } + }, (reader, length, opts = {}) => { + const obj: any = { + messageIDs: [] + } + + const end = length == null ? reader.len : reader.pos + length + + while (reader.pos < end) { + const tag = reader.uint32() + + switch (tag >>> 3) { + case 1: { + if (opts.limits?.messageIDs != null && obj.messageIDs.length === opts.limits.messageIDs) { + throw new MaxLengthError('Decode error - map field "messageIDs" had too many elements') + } + + obj.messageIDs.push(reader.bytes()) + break + } + default: { + reader.skipType(tag & 7) + break + } + } + } + + return obj + }) + } + + return _codec + } + + export const encode = (obj: Partial): Uint8Array => { + return encodeMessage(obj, ControlIWant.codec()) + } + + export const decode = (buf: Uint8Array | Uint8ArrayList, opts?: DecodeOptions): ControlIWant => { + return decodeMessage(buf, ControlIWant.codec(), opts) + } + } + + export interface ControlGraft { + topicID?: string + } + + export namespace ControlGraft { + let _codec: Codec + + export const codec = (): Codec => { + if (_codec == null) { + _codec = message((obj, w, opts = {}) => { + if (opts.lengthDelimited !== false) { + w.fork() + } + + if (obj.topicID != null) { + w.uint32(10) + w.string(obj.topicID) + } + + if (opts.lengthDelimited !== false) { + w.ldelim() + } + }, (reader, length, opts = {}) => { + const obj: any = {} + + const end = length == null ? 
reader.len : reader.pos + length + + while (reader.pos < end) { + const tag = reader.uint32() + + switch (tag >>> 3) { + case 1: { + obj.topicID = reader.string() + break + } + default: { + reader.skipType(tag & 7) + break + } + } + } + + return obj + }) + } + + return _codec + } + + export const encode = (obj: Partial): Uint8Array => { + return encodeMessage(obj, ControlGraft.codec()) + } + + export const decode = (buf: Uint8Array | Uint8ArrayList, opts?: DecodeOptions): ControlGraft => { + return decodeMessage(buf, ControlGraft.codec(), opts) + } + } + + export interface ControlPrune { + topicID?: string + peers: RPC.PeerInfo[] + backoff?: number + } + + export namespace ControlPrune { + let _codec: Codec + + export const codec = (): Codec => { + if (_codec == null) { + _codec = message((obj, w, opts = {}) => { + if (opts.lengthDelimited !== false) { + w.fork() + } + + if (obj.topicID != null) { + w.uint32(10) + w.string(obj.topicID) + } + + if (obj.peers != null) { + for (const value of obj.peers) { + w.uint32(18) + RPC.PeerInfo.codec().encode(value, w) + } + } + + if (obj.backoff != null) { + w.uint32(24) + w.uint64Number(obj.backoff) + } + + if (opts.lengthDelimited !== false) { + w.ldelim() + } + }, (reader, length, opts = {}) => { + const obj: any = { + peers: [] + } + + const end = length == null ? 
reader.len : reader.pos + length + + while (reader.pos < end) { + const tag = reader.uint32() + + switch (tag >>> 3) { + case 1: { + obj.topicID = reader.string() + break + } + case 2: { + if (opts.limits?.peers != null && obj.peers.length === opts.limits.peers) { + throw new MaxLengthError('Decode error - map field "peers" had too many elements') + } + + obj.peers.push(RPC.PeerInfo.codec().decode(reader, reader.uint32(), { + limits: opts.limits?.peers$ + })) + break + } + case 3: { + obj.backoff = reader.uint64Number() + break + } + default: { + reader.skipType(tag & 7) + break + } + } + } + + return obj + }) + } + + return _codec + } + + export const encode = (obj: Partial): Uint8Array => { + return encodeMessage(obj, ControlPrune.codec()) + } + + export const decode = (buf: Uint8Array | Uint8ArrayList, opts?: DecodeOptions): ControlPrune => { + return decodeMessage(buf, ControlPrune.codec(), opts) + } + } + + export interface PeerInfo { + peerID?: Uint8Array + signedPeerRecord?: Uint8Array + } + + export namespace PeerInfo { + let _codec: Codec + + export const codec = (): Codec => { + if (_codec == null) { + _codec = message((obj, w, opts = {}) => { + if (opts.lengthDelimited !== false) { + w.fork() + } + + if (obj.peerID != null) { + w.uint32(10) + w.bytes(obj.peerID) + } + + if (obj.signedPeerRecord != null) { + w.uint32(18) + w.bytes(obj.signedPeerRecord) + } + + if (opts.lengthDelimited !== false) { + w.ldelim() + } + }, (reader, length, opts = {}) => { + const obj: any = {} + + const end = length == null ? 
reader.len : reader.pos + length + + while (reader.pos < end) { + const tag = reader.uint32() + + switch (tag >>> 3) { + case 1: { + obj.peerID = reader.bytes() + break + } + case 2: { + obj.signedPeerRecord = reader.bytes() + break + } + default: { + reader.skipType(tag & 7) + break + } + } + } + + return obj + }) + } + + return _codec + } + + export const encode = (obj: Partial): Uint8Array => { + return encodeMessage(obj, PeerInfo.codec()) + } + + export const decode = (buf: Uint8Array | Uint8ArrayList, opts?: DecodeOptions): PeerInfo => { + return decodeMessage(buf, PeerInfo.codec(), opts) + } + } + + export interface ControlIDontWant { + messageIDs: Uint8Array[] + } + + export namespace ControlIDontWant { + let _codec: Codec + + export const codec = (): Codec => { + if (_codec == null) { + _codec = message((obj, w, opts = {}) => { + if (opts.lengthDelimited !== false) { + w.fork() + } + + if (obj.messageIDs != null) { + for (const value of obj.messageIDs) { + w.uint32(10) + w.bytes(value) + } + } + + if (opts.lengthDelimited !== false) { + w.ldelim() + } + }, (reader, length, opts = {}) => { + const obj: any = { + messageIDs: [] + } + + const end = length == null ? 
reader.len : reader.pos + length + + while (reader.pos < end) { + const tag = reader.uint32() + + switch (tag >>> 3) { + case 1: { + if (opts.limits?.messageIDs != null && obj.messageIDs.length === opts.limits.messageIDs) { + throw new MaxLengthError('Decode error - map field "messageIDs" had too many elements') + } + + obj.messageIDs.push(reader.bytes()) + break + } + default: { + reader.skipType(tag & 7) + break + } + } + } + + return obj + }) + } + + return _codec + } + + export const encode = (obj: Partial): Uint8Array => { + return encodeMessage(obj, ControlIDontWant.codec()) + } + + export const decode = (buf: Uint8Array | Uint8ArrayList, opts?: DecodeOptions): ControlIDontWant => { + return decodeMessage(buf, ControlIDontWant.codec(), opts) + } + } + + let _codec: Codec + + export const codec = (): Codec => { + if (_codec == null) { + _codec = message((obj, w, opts = {}) => { + if (opts.lengthDelimited !== false) { + w.fork() + } + + if (obj.subscriptions != null) { + for (const value of obj.subscriptions) { + w.uint32(10) + RPC.SubOpts.codec().encode(value, w) + } + } + + if (obj.messages != null) { + for (const value of obj.messages) { + w.uint32(18) + RPC.Message.codec().encode(value, w) + } + } + + if (obj.control != null) { + w.uint32(26) + RPC.ControlMessage.codec().encode(obj.control, w) + } + + if (opts.lengthDelimited !== false) { + w.ldelim() + } + }, (reader, length, opts = {}) => { + const obj: any = { + subscriptions: [], + messages: [] + } + + const end = length == null ? 
reader.len : reader.pos + length + + while (reader.pos < end) { + const tag = reader.uint32() + + switch (tag >>> 3) { + case 1: { + if (opts.limits?.subscriptions != null && obj.subscriptions.length === opts.limits.subscriptions) { + throw new MaxLengthError('Decode error - map field "subscriptions" had too many elements') + } + + obj.subscriptions.push(RPC.SubOpts.codec().decode(reader, reader.uint32(), { + limits: opts.limits?.subscriptions$ + })) + break + } + case 2: { + if (opts.limits?.messages != null && obj.messages.length === opts.limits.messages) { + throw new MaxLengthError('Decode error - map field "messages" had too many elements') + } + + obj.messages.push(RPC.Message.codec().decode(reader, reader.uint32(), { + limits: opts.limits?.messages$ + })) + break + } + case 3: { + obj.control = RPC.ControlMessage.codec().decode(reader, reader.uint32(), { + limits: opts.limits?.control + }) + break + } + default: { + reader.skipType(tag & 7) + break + } + } + } + + return obj + }) + } + + return _codec + } + + export const encode = (obj: Partial): Uint8Array => { + return encodeMessage(obj, RPC.codec()) + } + + export const decode = (buf: Uint8Array | Uint8ArrayList, opts?: DecodeOptions): RPC => { + return decodeMessage(buf, RPC.codec(), opts) + } +} diff --git a/packages/gossipsub/src/metrics.ts b/packages/gossipsub/src/metrics.ts new file mode 100644 index 0000000000..84ec71e75c --- /dev/null +++ b/packages/gossipsub/src/metrics.ts @@ -0,0 +1,1014 @@ +import { TopicValidatorResult } from './index.ts' +import { + MessageStatus, + + RejectReason + +} from './types.js' +import type { RPC } from './message/rpc.js' +import type { PeerScoreThresholds } from './score/peer-score-thresholds.js' +import type { PeerIdStr, RejectReasonObj, TopicStr, ValidateError } from './types.js' + +/** Topic label as provided in `topicStrToLabel` */ +export type TopicLabel = string +export type TopicStrToLabel = Map + +export enum MessageSource { + forward = 'forward', + publish = 
'publish' +} + +type NoLabels = Record +type LabelsGeneric = Record +type LabelKeys = Extract +interface CollectFn { (metric: Gauge): void } + +export interface Gauge { + inc: NoLabels extends Labels ? (value?: number) => void : (labels: Labels, value?: number) => void + set: NoLabels extends Labels ? (value: number) => void : (labels: Labels, value: number) => void + + addCollect(collectFn: CollectFn): void +} + +export interface Histogram { + startTimer(): () => void + + observe: NoLabels extends Labels ? (value: number) => void : (labels: Labels, value: number) => void + + reset(): void +} + +export interface AvgMinMax { + set: NoLabels extends Labels ? (values: number[]) => void : (labels: Labels, values: number[]) => void +} + +export type GaugeConfig = { + name: string + help: string +} & (NoLabels extends Labels ? { labelNames?: never } : { labelNames: [LabelKeys, ...Array>] }) + +export type HistogramConfig = GaugeConfig & { + buckets?: number[] +} + +export type AvgMinMaxConfig = GaugeConfig + +export interface MetricsRegister { + gauge(config: GaugeConfig): Gauge + histogram(config: HistogramConfig): Histogram + avgMinMax(config: AvgMinMaxConfig): AvgMinMax +} + +export enum InclusionReason { + /** Peer was a fanaout peer. */ + Fanout = 'fanout', + /** Included from random selection. */ + Random = 'random', + /** Peer subscribed. */ + Subscribed = 'subscribed', + /** On heartbeat, peer was included to fill the outbound quota. */ + Outbound = 'outbound', + /** On heartbeat, not enough peers in mesh */ + NotEnough = 'not_enough', + /** On heartbeat opportunistic grafting due to low mesh score */ + Opportunistic = 'opportunistic' +} + +/// Reasons why a peer was removed from the mesh. +export enum ChurnReason { + /// Peer disconnected. + Dc = 'disconnected', + /// Peer had a bad score. + BadScore = 'bad_score', + /// Peer sent a PRUNE. + Prune = 'prune', + /// Too many peers. 
+ Excess = 'excess' +} + +/// Kinds of reasons a peer's score has been penalized +export enum ScorePenalty { + /// A peer grafted before waiting the back-off time. + GraftBackoff = 'graft_backoff', + /// A Peer did not respond to an IWANT request in time. + BrokenPromise = 'broken_promise', + /// A Peer did not send enough messages as expected. + MessageDeficit = 'message_deficit', + /// Too many peers under one IP address. + IPColocation = 'IP_colocation' +} + +export enum IHaveIgnoreReason { + LowScore = 'low_score', + MaxIhave = 'max_ihave', + MaxIasked = 'max_iasked' +} + +export enum ScoreThreshold { + graylist = 'graylist', + publish = 'publish', + gossip = 'gossip', + mesh = 'mesh' +} + +export type PeersByScoreThreshold = Record + +export interface ToSendGroupCount { + direct: number + floodsub: number + mesh: number + fanout: number +} + +export interface ToAddGroupCount { + fanout: number + random: number +} + +export type PromiseDeliveredStats = + | { expired: false, requestedCount: number, maxDeliverMs: number } + | { expired: true, maxDeliverMs: number } + +export interface TopicScoreWeights { p1w: T, p2w: T, p3w: T, p3bw: T, p4w: T } +export interface ScoreWeights { + byTopic: Map> + p5w: T + p6w: T + p7w: T + score: T +} + +export type Metrics = ReturnType + +/** + * A collection of metrics used throughout the Gossipsub behaviour. + * NOTE: except for special reasons, do not add more than 1 label for frequent metrics, + * there's a performance penalty as of June 2023. + */ +// eslint-disable-next-line @typescript-eslint/explicit-function-return-type +export function getMetrics ( + register: MetricsRegister, + topicStrToLabel: TopicStrToLabel, + opts: { gossipPromiseExpireSec: number, behaviourPenaltyThreshold: number, maxMeshMessageDeliveriesWindowSec: number } +) { + // Using function style instead of class to prevent having to re-declare all MetricsPrometheus types. 
+ + return { + /* Metrics for static config */ + protocolsEnabled: register.gauge<{ protocol: string }>({ + name: 'gossipsub_protocol', + help: 'Status of enabled protocols', + labelNames: ['protocol'] + }), + + /* Metrics per known topic */ + /** + * Status of our subscription to this topic. This metric allows analyzing other topic metrics + * filtered by our current subscription status. + * = rust-libp2p `topic_subscription_status` + */ + topicSubscriptionStatus: register.gauge<{ topicStr: TopicStr }>({ + name: 'gossipsub_topic_subscription_status', + help: 'Status of our subscription to this topic', + labelNames: ['topicStr'] + }), + /** + * Number of peers subscribed to each topic. This allows us to analyze a topic's behaviour + * regardless of our subscription status. + */ + topicPeersCount: register.gauge<{ topicStr: TopicStr }>({ + name: 'gossipsub_topic_peer_count', + help: 'Number of peers subscribed to each topic', + labelNames: ['topicStr'] + }), + + /* Metrics regarding mesh state */ + /** + * Number of peers in our mesh. This metric should be updated with the count of peers for a + * topic in the mesh regardless of inclusion and churn events. + * = rust-libp2p `mesh_peer_counts` + */ + meshPeerCounts: register.gauge<{ topicStr: TopicStr }>({ + name: 'gossipsub_mesh_peer_count', + help: 'Number of peers in our mesh', + labelNames: ['topicStr'] + }), + /** + * Number of times we include peers in a topic mesh for different reasons. 
+ * = rust-libp2p `mesh_peer_inclusion_events` + */ + meshPeerInclusionEventsFanout: register.gauge<{ topic: TopicLabel }>({ + name: 'gossipsub_mesh_peer_inclusion_events_fanout_total', + help: 'Number of times we include peers in a topic mesh for fanout reasons', + labelNames: ['topic'] + }), + meshPeerInclusionEventsRandom: register.gauge<{ topic: TopicLabel }>({ + name: 'gossipsub_mesh_peer_inclusion_events_random_total', + help: 'Number of times we include peers in a topic mesh for random reasons', + labelNames: ['topic'] + }), + meshPeerInclusionEventsSubscribed: register.gauge<{ topic: TopicLabel }>({ + name: 'gossipsub_mesh_peer_inclusion_events_subscribed_total', + help: 'Number of times we include peers in a topic mesh for subscribed reasons', + labelNames: ['topic'] + }), + meshPeerInclusionEventsOutbound: register.gauge<{ topic: TopicLabel }>({ + name: 'gossipsub_mesh_peer_inclusion_events_outbound_total', + help: 'Number of times we include peers in a topic mesh for outbound reasons', + labelNames: ['topic'] + }), + meshPeerInclusionEventsNotEnough: register.gauge<{ topic: TopicLabel }>({ + name: 'gossipsub_mesh_peer_inclusion_events_not_enough_total', + help: 'Number of times we include peers in a topic mesh for not_enough reasons', + labelNames: ['topic'] + }), + meshPeerInclusionEventsOpportunistic: register.gauge<{ topic: TopicLabel }>({ + name: 'gossipsub_mesh_peer_inclusion_events_opportunistic_total', + help: 'Number of times we include peers in a topic mesh for opportunistic reasons', + labelNames: ['topic'] + }), + meshPeerInclusionEventsUnknown: register.gauge<{ topic: TopicLabel }>({ + name: 'gossipsub_mesh_peer_inclusion_events_unknown_total', + help: 'Number of times we include peers in a topic mesh for unknown reasons', + labelNames: ['topic'] + }), + /** + * Number of times we remove peers in a topic mesh for different reasons. 
+ * = rust-libp2p `mesh_peer_churn_events` + */ + meshPeerChurnEventsDisconnected: register.gauge<{ topic: TopicLabel }>({ + name: 'gossipsub_peer_churn_events_disconnected_total', + help: 'Number of times we remove peers in a topic mesh for disconnected reasons', + labelNames: ['topic'] + }), + meshPeerChurnEventsBadScore: register.gauge<{ topic: TopicLabel }>({ + name: 'gossipsub_peer_churn_events_bad_score_total', + help: 'Number of times we remove peers in a topic mesh for bad_score reasons', + labelNames: ['topic'] + }), + meshPeerChurnEventsPrune: register.gauge<{ topic: TopicLabel }>({ + name: 'gossipsub_peer_churn_events_prune_total', + help: 'Number of times we remove peers in a topic mesh for prune reasons', + labelNames: ['topic'] + }), + meshPeerChurnEventsExcess: register.gauge<{ topic: TopicLabel }>({ + name: 'gossipsub_peer_churn_events_excess_total', + help: 'Number of times we remove peers in a topic mesh for excess reasons', + labelNames: ['topic'] + }), + meshPeerChurnEventsUnknown: register.gauge<{ topic: TopicLabel }>({ + name: 'gossipsub_peer_churn_events_unknown_total', + help: 'Number of times we remove peers in a topic mesh for unknown reasons', + labelNames: ['topic'] + }), + + /* General Metrics */ + /** + * Gossipsub supports floodsub, gossipsub v1.0, v1.1, and v1.2. Peers are classified based + * on which protocol they support. This metric keeps track of the number of peers that are + * connected of each type. + */ + peersPerProtocol: register.gauge<{ protocol: string }>({ + name: 'gossipsub_peers_per_protocol_count', + help: 'Peers connected for each topic', + labelNames: ['protocol'] + }), + /** The time it takes to complete one iteration of the heartbeat. 
*/ + heartbeatDuration: register.histogram({ + name: 'gossipsub_heartbeat_duration_seconds', + help: 'The time it takes to complete one iteration of the heartbeat', + // Should take <10ms, over 1s it's a huge issue that needs debugging, since a heartbeat will be cancelled + buckets: [0.01, 0.1, 1] + }), + /** Heartbeat run took longer than heartbeat interval so next is skipped */ + heartbeatSkipped: register.gauge({ + name: 'gossipsub_heartbeat_skipped', + help: 'Heartbeat run took longer than heartbeat interval so next is skipped' + }), + + /** + * Message validation results for each topic. + * Invalid == Reject? + * = rust-libp2p `invalid_messages`, `accepted_messages`, `ignored_messages`, `rejected_messages` + */ + acceptedMessagesTotal: register.gauge<{ topic: TopicLabel }>({ + name: 'gossipsub_accepted_messages_total', + help: 'Total accepted messages for each topic', + labelNames: ['topic'] + }), + ignoredMessagesTotal: register.gauge<{ topic: TopicLabel }>({ + name: 'gossipsub_ignored_messages_total', + help: 'Total ignored messages for each topic', + labelNames: ['topic'] + }), + rejectedMessagesTotal: register.gauge<{ topic: TopicLabel }>({ + name: 'gossipsub_rejected_messages_total', + help: 'Total rejected messages for each topic', + labelNames: ['topic'] + }), + unknownValidationResultsTotal: register.gauge<{ topic: TopicLabel }>({ + name: 'gossipsub_unknown_validation_results_total', + help: 'Total unknown validation results for each topic', + labelNames: ['topic'] + }), + /** + * When the user validates a message, it tries to re propagate it to its mesh peers. If the + * message expires from the memcache before it can be validated, we count this a cache miss + * and it is an indicator that the memcache size should be increased. 
+ * = rust-libp2p `mcache_misses` + */ + asyncValidationMcacheHit: register.gauge<{ hit: 'hit' | 'miss' }>({ + name: 'gossipsub_async_validation_mcache_hit_total', + help: 'Async validation result reported by the user layer', + labelNames: ['hit'] + }), + + asyncValidationDelayFromFirstSeenSec: register.histogram({ + name: 'gossipsub_async_validation_delay_from_first_seen', + help: 'Async validation report delay from first seen in second', + buckets: [0.01, 0.03, 0.1, 0.3, 1, 3, 10] + }), + + asyncValidationUnknownFirstSeen: register.gauge({ + name: 'gossipsub_async_validation_unknown_first_seen_count_total', + help: 'Async validation report unknown first seen value for message' + }), + + // peer stream + peerReadStreamError: register.gauge({ + name: 'gossipsub_peer_read_stream_err_count_total', + help: 'Peer read stream error' + }), + + // RPC outgoing. Track byte length + data structure sizes + rpcRecvBytes: register.gauge({ name: 'gossipsub_rpc_recv_bytes_total', help: 'RPC recv' }), + rpcRecvCount: register.gauge({ name: 'gossipsub_rpc_recv_count_total', help: 'RPC recv' }), + rpcRecvSubscription: register.gauge({ name: 'gossipsub_rpc_recv_subscription_total', help: 'RPC recv' }), + rpcRecvMessage: register.gauge({ name: 'gossipsub_rpc_recv_message_total', help: 'RPC recv' }), + rpcRecvControl: register.gauge({ name: 'gossipsub_rpc_recv_control_total', help: 'RPC recv' }), + rpcRecvIHave: register.gauge({ name: 'gossipsub_rpc_recv_ihave_total', help: 'RPC recv' }), + rpcRecvIWant: register.gauge({ name: 'gossipsub_rpc_recv_iwant_total', help: 'RPC recv' }), + rpcRecvGraft: register.gauge({ name: 'gossipsub_rpc_recv_graft_total', help: 'RPC recv' }), + rpcRecvPrune: register.gauge({ name: 'gossipsub_rpc_recv_prune_total', help: 'RPC recv' }), + rpcDataError: register.gauge({ name: 'gossipsub_rpc_data_err_count_total', help: 'RPC data error' }), + rpcRecvError: register.gauge({ name: 'gossipsub_rpc_recv_err_count_total', help: 'RPC recv error' }), + + /** Total 
count of RPC dropped because acceptFrom() == false */ + rpcRecvNotAccepted: register.gauge({ + name: 'gossipsub_rpc_rcv_not_accepted_total', + help: 'Total count of RPC dropped because acceptFrom() == false' + }), + + // RPC incoming. Track byte length + data structure sizes + rpcSentBytes: register.gauge({ name: 'gossipsub_rpc_sent_bytes_total', help: 'RPC sent' }), + rpcSentCount: register.gauge({ name: 'gossipsub_rpc_sent_count_total', help: 'RPC sent' }), + rpcSentSubscription: register.gauge({ name: 'gossipsub_rpc_sent_subscription_total', help: 'RPC sent' }), + rpcSentMessage: register.gauge({ name: 'gossipsub_rpc_sent_message_total', help: 'RPC sent' }), + rpcSentControl: register.gauge({ name: 'gossipsub_rpc_sent_control_total', help: 'RPC sent' }), + rpcSentIHave: register.gauge({ name: 'gossipsub_rpc_sent_ihave_total', help: 'RPC sent' }), + rpcSentIWant: register.gauge({ name: 'gossipsub_rpc_sent_iwant_total', help: 'RPC sent' }), + rpcSentGraft: register.gauge({ name: 'gossipsub_rpc_sent_graft_total', help: 'RPC sent' }), + rpcSentPrune: register.gauge({ name: 'gossipsub_rpc_sent_prune_total', help: 'RPC sent' }), + rpcSentIDontWant: register.gauge({ name: 'gossipsub_rpc_sent_idontwant_total', help: 'RPC sent' }), + + // publish message. 
Track peers sent to and bytes + /** Total count of msg published by topic */ + msgPublishCount: register.gauge<{ topic: TopicLabel }>({ + name: 'gossipsub_msg_publish_count_total', + help: 'Total count of msg published by topic', + labelNames: ['topic'] + }), + /** Total count of peers that we publish a msg to */ + msgPublishPeersByTopic: register.gauge<{ topic: TopicLabel }>({ + name: 'gossipsub_msg_publish_peers_total', + help: 'Total count of peers that we publish a msg to', + labelNames: ['topic'] + }), + /** Total count of peers (by group) that we publish a msg to */ + directPeersPublishedTotal: register.gauge<{ topic: TopicLabel }>({ + name: 'gossipsub_direct_peers_published_total', + help: 'Total direct peers that we publish a msg to', + labelNames: ['topic'] + }), + floodsubPeersPublishedTotal: register.gauge<{ topic: TopicLabel }>({ + name: 'gossipsub_floodsub_peers_published_total', + help: 'Total floodsub peers that we publish a msg to', + labelNames: ['topic'] + }), + meshPeersPublishedTotal: register.gauge<{ topic: TopicLabel }>({ + name: 'gossipsub_mesh_peers_published_total', + help: 'Total mesh peers that we publish a msg to', + labelNames: ['topic'] + }), + fanoutPeersPublishedTotal: register.gauge<{ topic: TopicLabel }>({ + name: 'gossipsub_fanout_peers_published_total', + help: 'Total fanout peers that we publish a msg to', + labelNames: ['topic'] + }), + /** Total count of msg publish data.length bytes */ + msgPublishBytes: register.gauge<{ topic: TopicLabel }>({ + name: 'gossipsub_msg_publish_bytes_total', + help: 'Total count of msg publish data.length bytes', + labelNames: ['topic'] + }), + /** Total time in seconds to publish a message */ + msgPublishTime: register.histogram<{ topic: TopicLabel }>({ + name: 'gossipsub_msg_publish_seconds', + help: 'Total time in seconds to publish a message', + buckets: [0.001, 0.002, 0.005, 0.01, 0.1, 0.5, 1], + labelNames: ['topic'] + }), + + /** Total count of msg forwarded by topic */ + msgForwardCount: 
register.gauge<{ topic: TopicLabel }>({ + name: 'gossipsub_msg_forward_count_total', + help: 'Total count of msg forwarded by topic', + labelNames: ['topic'] + }), + /** Total count of peers that we forward a msg to */ + msgForwardPeers: register.gauge<{ topic: TopicLabel }>({ + name: 'gossipsub_msg_forward_peers_total', + help: 'Total count of peers that we forward a msg to', + labelNames: ['topic'] + }), + + /** Total count of recv msgs before any validation */ + msgReceivedPreValidation: register.gauge<{ topic: TopicLabel }>({ + name: 'gossipsub_msg_received_prevalidation_total', + help: 'Total count of recv msgs before any validation', + labelNames: ['topic'] + }), + /** Total count of recv msgs error */ + msgReceivedError: register.gauge<{ topic: TopicLabel }>({ + name: 'gossipsub_msg_received_error_total', + help: 'Total count of recv msgs error', + labelNames: ['topic'] + }), + /** Tracks distribution of recv msgs by duplicate, invalid, valid */ + prevalidationInvalidTotal: register.gauge<{ topic: TopicLabel }>({ + name: 'gossipsub_pre_validation_invalid_total', + help: 'Total count of invalid messages received', + labelNames: ['topic'] + }), + prevalidationValidTotal: register.gauge<{ topic: TopicLabel }>({ + name: 'gossipsub_pre_validation_valid_total', + help: 'Total count of valid messages received', + labelNames: ['topic'] + }), + prevalidationDuplicateTotal: register.gauge<{ topic: TopicLabel }>({ + name: 'gossipsub_pre_validation_duplicate_total', + help: 'Total count of duplicate messages received', + labelNames: ['topic'] + }), + prevalidationUnknownTotal: register.gauge<{ topic: TopicLabel }>({ + name: 'gossipsub_pre_validation_unknown_status_total', + help: 'Total count of unknown_status messages received', + labelNames: ['topic'] + }), + /** Tracks specific reason of invalid */ + msgReceivedInvalid: register.gauge<{ error: RejectReason | ValidateError }>({ + name: 'gossipsub_msg_received_invalid_total', + help: 'Tracks specific reason of 
invalid', + labelNames: ['error'] + }), + msgReceivedInvalidByTopic: register.gauge<{ topic: TopicLabel }>({ + name: 'gossipsub_msg_received_invalid_by_topic_total', + help: 'Tracks specific invalid message by topic', + labelNames: ['topic'] + }), + /** Track duplicate message delivery time */ + duplicateMsgDeliveryDelay: register.histogram<{ topic: TopicLabel }>({ + name: 'gossisub_duplicate_msg_delivery_delay_seconds', + help: 'Time since the 1st duplicated message validated', + labelNames: ['topic'], + buckets: [ + 0.25 * opts.maxMeshMessageDeliveriesWindowSec, + 0.5 * opts.maxMeshMessageDeliveriesWindowSec, + Number(opts.maxMeshMessageDeliveriesWindowSec), + 2 * opts.maxMeshMessageDeliveriesWindowSec, + 4 * opts.maxMeshMessageDeliveriesWindowSec + ] + }), + /** Total count of late msg delivery total by topic */ + duplicateMsgLateDelivery: register.gauge<{ topic: TopicLabel }>({ + name: 'gossisub_duplicate_msg_late_delivery_total', + help: 'Total count of late duplicate message delivery by topic, which triggers P3 penalty', + labelNames: ['topic'] + }), + + duplicateMsgIgnored: register.gauge<{ topic: TopicLabel }>({ + name: 'gossisub_ignored_published_duplicate_msgs_total', + help: 'Total count of published duplicate message ignored by topic', + labelNames: ['topic'] + }), + + /* Metrics related to scoring */ + /** Total times score() is called */ + scoreFnCalls: register.gauge({ + name: 'gossipsub_score_fn_calls_total', + help: 'Total times score() is called' + }), + /** Total times score() call actually computed computeScore(), no cache */ + scoreFnRuns: register.gauge({ + name: 'gossipsub_score_fn_runs_total', + help: 'Total times score() call actually computed computeScore(), no cache' + }), + scoreCachedDelta: register.histogram({ + name: 'gossipsub_score_cache_delta', + help: 'Delta of score between cached values that expired', + buckets: [10, 100, 1000] + }), + /** Current count of peers by score threshold */ + peersByScoreThreshold: register.gauge<{ 
threshold: ScoreThreshold }>({ + name: 'gossipsub_peers_by_score_threshold_count', + help: 'Current count of peers by score threshold', + labelNames: ['threshold'] + }), + score: register.avgMinMax({ + name: 'gossipsub_score', + help: 'Avg min max of gossip scores' + }), + /** + * Separate score weights + * Need to use 2-label metrics in this case to debug the score weights + */ + scoreWeights: register.avgMinMax<{ topic?: TopicLabel, p: string }>({ + name: 'gossipsub_score_weights', + help: 'Separate score weights', + labelNames: ['topic', 'p'] + }), + /** Histogram of the scores for each mesh topic. */ + // TODO: Not implemented + scorePerMesh: register.avgMinMax<{ topic: TopicLabel }>({ + name: 'gossipsub_score_per_mesh', + help: 'Histogram of the scores for each mesh topic', + labelNames: ['topic'] + }), + /** A counter of the kind of penalties being applied to peers. */ + // TODO: Not fully implemented + scoringPenalties: register.gauge<{ penalty: ScorePenalty }>({ + name: 'gossipsub_scoring_penalties_total', + help: 'A counter of the kind of penalties being applied to peers', + labelNames: ['penalty'] + }), + behaviourPenalty: register.histogram({ + name: 'gossipsub_peer_stat_behaviour_penalty', + help: 'Current peer stat behaviour_penalty at each scrape', + buckets: [ + 0.25 * opts.behaviourPenaltyThreshold, + 0.5 * opts.behaviourPenaltyThreshold, + Number(opts.behaviourPenaltyThreshold), + 2 * opts.behaviourPenaltyThreshold, + 4 * opts.behaviourPenaltyThreshold + ] + }), + + // TODO: + // - iasked per peer (on heartbeat) + // - when promise is resolved, track messages from promises + + /** Total received IHAVE messages that we ignore for some reason */ + ihaveRcvIgnored: register.gauge<{ reason: IHaveIgnoreReason }>({ + name: 'gossipsub_ihave_rcv_ignored_total', + help: 'Total received IHAVE messages that we ignore for some reason', + labelNames: ['reason'] + }), + /** Total received IHAVE messages by topic */ + ihaveRcvMsgids: register.gauge<{ topic: 
TopicLabel }>({ + name: 'gossipsub_ihave_rcv_msgids_total', + help: 'Total received IHAVE messages by topic', + labelNames: ['topic'] + }), + /** + * Total messages per topic we don't have. Not actual requests. + * The number of times we have decided that an IWANT control message is required for this + * topic. A very high metric might indicate an underperforming network. + * = rust-libp2p `topic_iwant_msgs` + */ + ihaveRcvNotSeenMsgids: register.gauge<{ topic: TopicLabel }>({ + name: 'gossipsub_ihave_rcv_not_seen_msgids_total', + help: 'Total messages per topic we do not have, not actual requests', + labelNames: ['topic'] + }), + + /** Total received IWANT messages by topic */ + iwantRcvMsgids: register.gauge<{ topic: TopicLabel }>({ + name: 'gossipsub_iwant_rcv_msgids_total', + help: 'Total received IWANT messages by topic', + labelNames: ['topic'] + }), + /** Total requested messageIDs that we don't have */ + iwantRcvDonthaveMsgids: register.gauge({ + name: 'gossipsub_iwant_rcv_dont_have_msgids_total', + help: 'Total requested messageIDs that we do not have' + }), + /** Total received IDONTWANT messages */ + idontwantRcvMsgids: register.gauge({ + name: 'gossipsub_idontwant_rcv_msgids_total', + help: 'Total received IDONTWANT messages' + }), + /** Total received IDONTWANT messageIDs that we don't have */ + idontwantRcvDonthaveMsgids: register.gauge({ + name: 'gossipsub_idontwant_rcv_dont_have_msgids_total', + help: 'Total received IDONTWANT messageIDs that we do not have in mcache' + }), + iwantPromiseStarted: register.gauge({ + name: 'gossipsub_iwant_promise_sent_total', + help: 'Total count of started IWANT promises' + }), + /** Total count of resolved IWANT promises */ + iwantPromiseResolved: register.gauge({ + name: 'gossipsub_iwant_promise_resolved_total', + help: 'Total count of resolved IWANT promises' + }), + /** Total count of resolved IWANT promises from duplicate messages */ + iwantPromiseResolvedFromDuplicate: register.gauge({ + name: 
'gossipsub_iwant_promise_resolved_from_duplicate_total', + help: 'Total count of resolved IWANT promises from duplicate messages' + }), + /** Total count of peers we have asked IWANT promises that are resolved */ + iwantPromiseResolvedPeers: register.gauge({ + name: 'gossipsub_iwant_promise_resolved_peers', + help: 'Total count of peers we have asked IWANT promises that are resolved' + }), + iwantPromiseBroken: register.gauge({ + name: 'gossipsub_iwant_promise_broken', + help: 'Total count of broken IWANT promises' + }), + iwantMessagePruned: register.gauge({ + name: 'gossipsub_iwant_message_pruned', + help: 'Total count of pruned IWANT messages' + }), + /** Histogram of delivery time of resolved IWANT promises */ + iwantPromiseDeliveryTime: register.histogram({ + name: 'gossipsub_iwant_promise_delivery_seconds', + help: 'Histogram of delivery time of resolved IWANT promises', + buckets: [ + 0.5 * opts.gossipPromiseExpireSec, + Number(opts.gossipPromiseExpireSec), + 2 * opts.gossipPromiseExpireSec, + 4 * opts.gossipPromiseExpireSec + ] + }), + iwantPromiseUntracked: register.gauge({ + name: 'gossip_iwant_promise_untracked', + help: 'Total count of untracked IWANT promise' + }), + /** Backoff time */ + connectedPeersBackoffSec: register.histogram({ + name: 'gossipsub_connected_peers_backoff_seconds', + help: 'Backoff time in seconds', + // Using 1 seconds as minimum as that's close to the heartbeat duration, no need for more resolution. + // As per spec, backoff times are 10 seconds for UnsubscribeBackoff and 60 seconds for PruneBackoff. 
+ // Higher values of 60 seconds should not occur, but we add 120 seconds just in case + // https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.1.md#overview-of-new-parameters + buckets: [1, 2, 4, 10, 20, 60, 120] + }), + + /* Data structure sizes */ + /** Unbounded cache sizes */ + cacheSize: register.gauge<{ cache: string }>({ + name: 'gossipsub_cache_size', + help: 'Unbounded cache sizes', + labelNames: ['cache'] + }), + /** Current mcache msg count */ + mcacheSize: register.gauge({ + name: 'gossipsub_mcache_size', + help: 'Current mcache msg count' + }), + mcacheNotValidatedCount: register.gauge({ + name: 'gossipsub_mcache_not_validated_count', + help: 'Current mcache msg count not validated' + }), + + fastMsgIdCacheCollision: register.gauge({ + name: 'gossipsub_fastmsgid_cache_collision_total', + help: 'Total count of key collisions on fastmsgid cache put' + }), + + newConnectionCount: register.gauge<{ status: string }>({ + name: 'gossipsub_new_connection_total', + help: 'Total new connection by status', + labelNames: ['status'] + }), + + topicStrToLabel, + + toTopic (topicStr: TopicStr): TopicLabel { + return this.topicStrToLabel.get(topicStr) ?? topicStr + }, + + /** We joined a topic */ + onJoin (topicStr: TopicStr): void { + this.topicSubscriptionStatus.set({ topicStr }, 1) + this.meshPeerCounts.set({ topicStr }, 0) // Reset count + }, + + /** We left a topic */ + onLeave (topicStr: TopicStr): void { + this.topicSubscriptionStatus.set({ topicStr }, 0) + this.meshPeerCounts.set({ topicStr }, 0) // Reset count + }, + + /** Register the inclusion of peers in our mesh due to some reason. 
*/ + onAddToMesh (topicStr: TopicStr, reason: InclusionReason, count: number): void { + const topic = this.toTopic(topicStr) + switch (reason) { + case InclusionReason.Fanout: + this.meshPeerInclusionEventsFanout.inc({ topic }, count) + break + case InclusionReason.Random: + this.meshPeerInclusionEventsRandom.inc({ topic }, count) + break + case InclusionReason.Subscribed: + this.meshPeerInclusionEventsSubscribed.inc({ topic }, count) + break + case InclusionReason.Outbound: + this.meshPeerInclusionEventsOutbound.inc({ topic }, count) + break + case InclusionReason.NotEnough: + this.meshPeerInclusionEventsNotEnough.inc({ topic }, count) + break + case InclusionReason.Opportunistic: + this.meshPeerInclusionEventsOpportunistic.inc({ topic }, count) + break + default: + this.meshPeerInclusionEventsUnknown.inc({ topic }, count) + break + } + }, + + /** Register the removal of peers in our mesh due to some reason */ + // - remove_peer_from_mesh() + // - heartbeat() Churn::BadScore + // - heartbeat() Churn::Excess + // - on_disconnect() Churn::Ds + onRemoveFromMesh (topicStr: TopicStr, reason: ChurnReason, count: number): void { + const topic = this.toTopic(topicStr) + switch (reason) { + case ChurnReason.Dc: + this.meshPeerChurnEventsDisconnected.inc({ topic }, count) + break + case ChurnReason.BadScore: + this.meshPeerChurnEventsBadScore.inc({ topic }, count) + break + case ChurnReason.Prune: + this.meshPeerChurnEventsPrune.inc({ topic }, count) + break + case ChurnReason.Excess: + this.meshPeerChurnEventsExcess.inc({ topic }, count) + break + default: + this.meshPeerChurnEventsUnknown.inc({ topic }, count) + break + } + }, + + /** + * Update validation result to metrics + * + * @param messageRecord - null means the message's mcache record was not known at the time of acceptance report + */ + onReportValidation ( + messageRecord: { message: { topic: TopicStr } } | null, + acceptance: TopicValidatorResult, + firstSeenTimestampMs: number | null + ): void { + 
this.asyncValidationMcacheHit.inc({ hit: messageRecord != null ? 'hit' : 'miss' }) + + if (messageRecord != null) { + const topic = this.toTopic(messageRecord.message.topic) + switch (acceptance) { + case TopicValidatorResult.Accept: + this.acceptedMessagesTotal.inc({ topic }) + break + case TopicValidatorResult.Ignore: + this.ignoredMessagesTotal.inc({ topic }) + break + case TopicValidatorResult.Reject: + this.rejectedMessagesTotal.inc({ topic }) + break + default: + this.unknownValidationResultsTotal.inc({ topic }) + break + } + } + + if (firstSeenTimestampMs != null) { + this.asyncValidationDelayFromFirstSeenSec.observe((Date.now() - firstSeenTimestampMs) / 1000) + } else { + this.asyncValidationUnknownFirstSeen.inc() + } + }, + + /** + * - in handle_graft() Penalty::GraftBackoff + * - in apply_iwant_penalties() Penalty::BrokenPromise + * - in metric_score() P3 Penalty::MessageDeficit + * - in metric_score() P6 Penalty::IPColocation + */ + onScorePenalty (penalty: ScorePenalty): void { + // Can this be labeled by topic too? 
+ this.scoringPenalties.inc({ penalty }, 1) + }, + + onIhaveRcv (topicStr: TopicStr, ihave: number, idonthave: number): void { + const topic = this.toTopic(topicStr) + this.ihaveRcvMsgids.inc({ topic }, ihave) + this.ihaveRcvNotSeenMsgids.inc({ topic }, idonthave) + }, + + onIwantRcv (iwantByTopic: Map, iwantDonthave: number): void { + for (const [topicStr, iwant] of iwantByTopic) { + const topic = this.toTopic(topicStr) + this.iwantRcvMsgids.inc({ topic }, iwant) + } + + this.iwantRcvDonthaveMsgids.inc(iwantDonthave) + }, + + onIdontwantRcv (idontwant: number, idontwantDonthave: number): void { + this.idontwantRcvMsgids.inc(idontwant) + this.idontwantRcvDonthaveMsgids.inc(idontwantDonthave) + }, + + onForwardMsg (topicStr: TopicStr, tosendCount: number): void { + const topic = this.toTopic(topicStr) + this.msgForwardCount.inc({ topic }, 1) + this.msgForwardPeers.inc({ topic }, tosendCount) + }, + + onPublishMsg ( + topicStr: TopicStr, + tosendGroupCount: ToSendGroupCount, + tosendCount: number, + dataLen: number, + ms: number + ): void { + const topic = this.toTopic(topicStr) + this.msgPublishCount.inc({ topic }, 1) + this.msgPublishBytes.inc({ topic }, tosendCount * dataLen) + this.msgPublishPeersByTopic.inc({ topic }, tosendCount) + this.directPeersPublishedTotal.inc({ topic }, tosendGroupCount.direct) + this.floodsubPeersPublishedTotal.inc({ topic }, tosendGroupCount.floodsub) + this.meshPeersPublishedTotal.inc({ topic }, tosendGroupCount.mesh) + this.fanoutPeersPublishedTotal.inc({ topic }, tosendGroupCount.fanout) + this.msgPublishTime.observe({ topic }, ms / 1000) + }, + + onMsgRecvPreValidation (topicStr: TopicStr): void { + const topic = this.toTopic(topicStr) + this.msgReceivedPreValidation.inc({ topic }, 1) + }, + + onMsgRecvError (topicStr: TopicStr): void { + const topic = this.toTopic(topicStr) + this.msgReceivedError.inc({ topic }, 1) + }, + + onPrevalidationResult (topicStr: TopicStr, status: MessageStatus): void { + const topic = 
this.toTopic(topicStr) + switch (status) { + case MessageStatus.duplicate: + this.prevalidationDuplicateTotal.inc({ topic }) + break + case MessageStatus.invalid: + this.prevalidationInvalidTotal.inc({ topic }) + break + case MessageStatus.valid: + this.prevalidationValidTotal.inc({ topic }) + break + default: + this.prevalidationUnknownTotal.inc({ topic }) + break + } + }, + + onMsgRecvInvalid (topicStr: TopicStr, reason: RejectReasonObj): void { + const topic = this.toTopic(topicStr) + + const error = reason.reason === RejectReason.Error ? reason.error : reason.reason + this.msgReceivedInvalid.inc({ error }, 1) + this.msgReceivedInvalidByTopic.inc({ topic }, 1) + }, + + onDuplicateMsgDelivery (topicStr: TopicStr, deliveryDelayMs: number, isLateDelivery: boolean): void { + const topic = this.toTopic(topicStr) + this.duplicateMsgDeliveryDelay.observe({ topic }, deliveryDelayMs / 1000) + if (isLateDelivery) { + this.duplicateMsgLateDelivery.inc({ topic }, 1) + } + }, + + onPublishDuplicateMsg (topicStr: TopicStr): void { + const topic = this.toTopic(topicStr) + this.duplicateMsgIgnored.inc({ topic }, 1) + }, + + onPeerReadStreamError (): void { + this.peerReadStreamError.inc(1) + }, + + onRpcRecvError (): void { + this.rpcRecvError.inc(1) + }, + + onRpcDataError (): void { + this.rpcDataError.inc(1) + }, + + onRpcRecv (rpc: RPC, rpcBytes: number): void { + this.rpcRecvBytes.inc(rpcBytes) + this.rpcRecvCount.inc(1) + if (rpc.subscriptions != null) { this.rpcRecvSubscription.inc(rpc.subscriptions.length) } + if (rpc.messages != null) { this.rpcRecvMessage.inc(rpc.messages.length) } + if (rpc.control != null) { + this.rpcRecvControl.inc(1) + if (rpc.control.ihave != null) { this.rpcRecvIHave.inc(rpc.control.ihave.length) } + if (rpc.control.iwant != null) { this.rpcRecvIWant.inc(rpc.control.iwant.length) } + if (rpc.control.graft != null) { this.rpcRecvGraft.inc(rpc.control.graft.length) } + if (rpc.control.prune != null) { 
this.rpcRecvPrune.inc(rpc.control.prune.length) } + } + }, + + onRpcSent (rpc: RPC, rpcBytes: number): void { + this.rpcSentBytes.inc(rpcBytes) + this.rpcSentCount.inc(1) + if (rpc.subscriptions != null) { this.rpcSentSubscription.inc(rpc.subscriptions.length) } + if (rpc.messages != null) { this.rpcSentMessage.inc(rpc.messages.length) } + if (rpc.control != null) { + const ihave = rpc.control.ihave?.length ?? 0 + const iwant = rpc.control.iwant?.length ?? 0 + const graft = rpc.control.graft?.length ?? 0 + const prune = rpc.control.prune?.length ?? 0 + const idontwant = rpc.control.idontwant?.length ?? 0 + if (ihave > 0) { this.rpcSentIHave.inc(ihave) } + if (iwant > 0) { this.rpcSentIWant.inc(iwant) } + if (graft > 0) { this.rpcSentGraft.inc(graft) } + if (prune > 0) { this.rpcSentPrune.inc(prune) } + if (idontwant > 0) { this.rpcSentIDontWant.inc(idontwant) } + if (ihave > 0 || iwant > 0 || graft > 0 || prune > 0 || idontwant > 0) { this.rpcSentControl.inc(1) } + } + }, + + registerScores (scores: number[], scoreThresholds: PeerScoreThresholds): void { + let graylist = 0 + let publish = 0 + let gossip = 0 + let mesh = 0 + + for (const score of scores) { + if (score >= scoreThresholds.graylistThreshold) { graylist++ } + if (score >= scoreThresholds.publishThreshold) { publish++ } + if (score >= scoreThresholds.gossipThreshold) { gossip++ } + if (score >= 0) { mesh++ } + } + + this.peersByScoreThreshold.set({ threshold: ScoreThreshold.graylist }, graylist) + this.peersByScoreThreshold.set({ threshold: ScoreThreshold.publish }, publish) + this.peersByScoreThreshold.set({ threshold: ScoreThreshold.gossip }, gossip) + this.peersByScoreThreshold.set({ threshold: ScoreThreshold.mesh }, mesh) + + // Register full score too + this.score.set(scores) + }, + + registerScoreWeights (sw: ScoreWeights): void { + for (const [topic, wsTopic] of sw.byTopic) { + this.scoreWeights.set({ topic, p: 'p1' }, wsTopic.p1w) + this.scoreWeights.set({ topic, p: 'p2' }, wsTopic.p2w) + 
this.scoreWeights.set({ topic, p: 'p3' }, wsTopic.p3w) + this.scoreWeights.set({ topic, p: 'p3b' }, wsTopic.p3bw) + this.scoreWeights.set({ topic, p: 'p4' }, wsTopic.p4w) + } + + this.scoreWeights.set({ p: 'p5' }, sw.p5w) + this.scoreWeights.set({ p: 'p6' }, sw.p6w) + this.scoreWeights.set({ p: 'p7' }, sw.p7w) + }, + + registerScorePerMesh (mesh: Map>, scoreByPeer: Map): void { + const peersPerTopicLabel = new Map>() + + mesh.forEach((peers, topicStr) => { + // Aggregate by known topicLabel or throw to 'unknown'. This prevent too high cardinality + const topicLabel = this.topicStrToLabel.get(topicStr) ?? 'unknown' + let peersInMesh = peersPerTopicLabel.get(topicLabel) + if (peersInMesh == null) { + peersInMesh = new Set() + peersPerTopicLabel.set(topicLabel, peersInMesh) + } + peers.forEach((p) => peersInMesh?.add(p)) + }) + + for (const [topic, peers] of peersPerTopicLabel) { + const meshScores: number[] = [] + peers.forEach((peer) => { + meshScores.push(scoreByPeer.get(peer) ?? 0) + }) + this.scorePerMesh.set({ topic }, meshScores) + } + } + } +} diff --git a/packages/gossipsub/src/score/compute-score.ts b/packages/gossipsub/src/score/compute-score.ts new file mode 100644 index 0000000000..b55dc923ec --- /dev/null +++ b/packages/gossipsub/src/score/compute-score.ts @@ -0,0 +1,98 @@ +import type { PeerScoreParams } from './peer-score-params.js' +import type { PeerStats } from './peer-stats.js' + +export function computeScore ( + peer: string, + pstats: PeerStats, + params: PeerScoreParams, + peerIPs: Map> +): number { + let score = 0 + + // topic stores + Object.entries(pstats.topics).forEach(([topic, tstats]) => { + // the topic parameters + const topicParams = params.topics[topic] + if (topicParams === undefined) { + // we are not scoring this topic + return + } + + let topicScore = 0 + + // P1: time in Mesh + if (tstats.inMesh) { + let p1 = tstats.meshTime / topicParams.timeInMeshQuantum + if (p1 > topicParams.timeInMeshCap) { + p1 = topicParams.timeInMeshCap + 
} + topicScore += p1 * topicParams.timeInMeshWeight + } + + // P2: first message deliveries + let p2 = tstats.firstMessageDeliveries + if (p2 > topicParams.firstMessageDeliveriesCap) { + p2 = topicParams.firstMessageDeliveriesCap + } + topicScore += p2 * topicParams.firstMessageDeliveriesWeight + + // P3: mesh message deliveries + if ( + tstats.meshMessageDeliveriesActive && + tstats.meshMessageDeliveries < topicParams.meshMessageDeliveriesThreshold + ) { + const deficit = topicParams.meshMessageDeliveriesThreshold - tstats.meshMessageDeliveries + const p3 = deficit * deficit + topicScore += p3 * topicParams.meshMessageDeliveriesWeight + } + + // P3b: + // NOTE: the weight of P3b is negative (validated in validateTopicScoreParams) so this detracts + const p3b = tstats.meshFailurePenalty + topicScore += p3b * topicParams.meshFailurePenaltyWeight + + // P4: invalid messages + // NOTE: the weight of P4 is negative (validated in validateTopicScoreParams) so this detracts + const p4 = tstats.invalidMessageDeliveries * tstats.invalidMessageDeliveries + topicScore += p4 * topicParams.invalidMessageDeliveriesWeight + + // update score, mixing with topic weight + score += topicScore * topicParams.topicWeight + }) + + // apply the topic score cap, if any + if (params.topicScoreCap > 0 && score > params.topicScoreCap) { + score = params.topicScoreCap + } + + // P5: application-specific score + const p5 = params.appSpecificScore(peer) + score += p5 * params.appSpecificWeight + + // P6: IP colocation factor + pstats.knownIPs.forEach((ip) => { + if (params.IPColocationFactorWhitelist.has(ip)) { + return + } + + // P6 has a cliff (IPColocationFactorThreshold) + // It's only applied if at least that many peers are connected to us from that source IP addr. + // It is quadratic, and the weight is negative (validated in validatePeerScoreParams) + const peersInIP = peerIPs.get(ip) + const numPeersInIP = (peersInIP != null) ? 
peersInIP.size : 0 + if (numPeersInIP > params.IPColocationFactorThreshold) { + const surplus = numPeersInIP - params.IPColocationFactorThreshold + const p6 = surplus * surplus + score += p6 * params.IPColocationFactorWeight + } + }) + + // P7: behavioural pattern penalty + if (pstats.behaviourPenalty > params.behaviourPenaltyThreshold) { + const excess = pstats.behaviourPenalty - params.behaviourPenaltyThreshold + const p7 = excess * excess + score += p7 * params.behaviourPenaltyWeight + } + + return score +} diff --git a/packages/gossipsub/src/score/index.ts b/packages/gossipsub/src/score/index.ts new file mode 100644 index 0000000000..4aa268e445 --- /dev/null +++ b/packages/gossipsub/src/score/index.ts @@ -0,0 +1,3 @@ +export * from './peer-score-params.js' +export * from './peer-score-thresholds.js' +export * from './peer-score.js' diff --git a/packages/gossipsub/src/score/message-deliveries.ts b/packages/gossipsub/src/score/message-deliveries.ts new file mode 100644 index 0000000000..70cce2ac55 --- /dev/null +++ b/packages/gossipsub/src/score/message-deliveries.ts @@ -0,0 +1,95 @@ +import Denque from 'denque' +import { TimeCacheDuration } from '../constants.js' + +export enum DeliveryRecordStatus { + /** + * we don't know (yet) if the message is valid + */ + unknown, + /** + * we know the message is valid + */ + valid, + /** + * we know the message is invalid + */ + invalid, + /** + * we were instructed by the validator to ignore the message + */ + ignored +} + +export interface DeliveryRecord { + status: DeliveryRecordStatus + firstSeenTsMs: number + validated: number + peers: Set +} + +interface DeliveryQueueEntry { + msgId: string + expire: number +} + +/** + * Map of canonical message ID to DeliveryRecord + * + * Maintains an internal queue for efficient gc of old messages + */ +export class MessageDeliveries { + private readonly records: Map + public queue: Denque + + constructor () { + this.records = new Map() + this.queue = new Denque() + } + + 
getRecord (msgIdStr: string): DeliveryRecord | undefined { + return this.records.get(msgIdStr) + } + + ensureRecord (msgIdStr: string): DeliveryRecord { + let drec = this.records.get(msgIdStr) + if (drec != null) { + return drec + } + + // record doesn't exist yet + // create record + drec = { + status: DeliveryRecordStatus.unknown, + firstSeenTsMs: Date.now(), + validated: 0, + peers: new Set() + } + this.records.set(msgIdStr, drec) + + // and add msgId to the queue + const entry: DeliveryQueueEntry = { + msgId: msgIdStr, + expire: Date.now() + TimeCacheDuration + } + this.queue.push(entry) + + return drec + } + + gc (): void { + const now = Date.now() + // queue is sorted by expiry time + // remove expired messages, remove from queue until first un-expired message found + let head = this.queue.peekFront() + while ((head != null) && head.expire < now) { + this.records.delete(head.msgId) + this.queue.shift() + head = this.queue.peekFront() + } + } + + clear (): void { + this.records.clear() + this.queue.clear() + } +} diff --git a/packages/gossipsub/src/score/peer-score-params.ts b/packages/gossipsub/src/score/peer-score-params.ts new file mode 100644 index 0000000000..8048d28150 --- /dev/null +++ b/packages/gossipsub/src/score/peer-score-params.ts @@ -0,0 +1,316 @@ +import { InvalidPeerScoreParamsError } from '../errors.js' + +// This file defines PeerScoreParams and TopicScoreParams interfaces +// as well as constructors, default constructors, and validation functions +// for these interfaces + +export interface PeerScoreParams { + /** + * Score parameters per topic. + */ + topics: Record + + /** + * Aggregate topic score cap; this limits the total contribution of topics towards a positive + * score. It must be positive (or 0 for no cap). + */ + topicScoreCap: number + + /** + * P5: Application-specific peer scoring + */ + appSpecificScore(p: string): number + appSpecificWeight: number + + /** + * P6: IP-colocation factor. 
+ * The parameter has an associated counter which counts the number of peers with the same IP. + * If the number of peers in the same IP exceeds IPColocationFactorThreshold, then the value + * is the square of the difference, ie (PeersInSameIP - IPColocationThreshold)^2. + * If the number of peers in the same IP is less than the threshold, then the value is 0. + * The weight of the parameter MUST be negative, unless you want to disable for testing. + * Note: In order to simulate many IPs in a managable manner when testing, you can set the weight to 0 + * thus disabling the IP colocation penalty. + */ + IPColocationFactorWeight: number + IPColocationFactorThreshold: number + IPColocationFactorWhitelist: Set + + /** + * P7: behavioural pattern penalties. + * This parameter has an associated counter which tracks misbehaviour as detected by the + * router. The router currently applies penalties for the following behaviors: + * - attempting to re-graft before the prune backoff time has elapsed. + * - not following up in IWANT requests for messages advertised with IHAVE. + * + * The value of the parameter is the square of the counter, which decays with BehaviourPenaltyDecay. + * The weight of the parameter MUST be negative (or zero to disable). + */ + behaviourPenaltyWeight: number + behaviourPenaltyThreshold: number + behaviourPenaltyDecay: number + + /** + * the decay interval for parameter counters. + */ + decayInterval: number + + /** + * counter value below which it is considered 0. + */ + decayToZero: number + + /** + * time to remember counters for a disconnected peer. + */ + retainScore: number +} + +export interface TopicScoreParams { + /** + * The weight of the topic. + */ + topicWeight: number + + /** + * P1: time in the mesh + * This is the time the peer has ben grafted in the mesh. + * The value of the parameter is the time/TimeInMeshQuantum, capped by TimeInMeshCap + * The weight of the parameter MUST be positive (or zero to disable). 
+ */ + timeInMeshWeight: number + timeInMeshQuantum: number + timeInMeshCap: number + + /** + * P2: first message deliveries + * This is the number of message deliveries in the topic. + * The value of the parameter is a counter, decaying with FirstMessageDeliveriesDecay, and capped + * by FirstMessageDeliveriesCap. + * The weight of the parameter MUST be positive (or zero to disable). + */ + firstMessageDeliveriesWeight: number + firstMessageDeliveriesDecay: number + firstMessageDeliveriesCap: number + + /** + * P3: mesh message deliveries + * This is the number of message deliveries in the mesh, within the MeshMessageDeliveriesWindow of + * message validation; deliveries during validation also count and are retroactively applied + * when validation succeeds. + * This window accounts for the minimum time before a hostile mesh peer trying to game the score + * could replay back a valid message we just sent them. + * It effectively tracks first and near-first deliveries, ie a message seen from a mesh peer + * before we have forwarded it to them. + * The parameter has an associated counter, decaying with MeshMessageDeliveriesDecay. + * If the counter exceeds the threshold, its value is 0. + * If the counter is below the MeshMessageDeliveriesThreshold, the value is the square of + * the deficit, ie (MessageDeliveriesThreshold - counter)^2 + * The penalty is only activated after MeshMessageDeliveriesActivation time in the mesh. + * The weight of the parameter MUST be negative (or zero to disable). + */ + meshMessageDeliveriesWeight: number + meshMessageDeliveriesDecay: number + meshMessageDeliveriesCap: number + meshMessageDeliveriesThreshold: number + meshMessageDeliveriesWindow: number + meshMessageDeliveriesActivation: number + + /** + * P3b: sticky mesh propagation failures + * This is a sticky penalty that applies when a peer gets pruned from the mesh with an active + * mesh message delivery penalty. 
+ * The weight of the parameter MUST be negative (or zero to disable) + */ + meshFailurePenaltyWeight: number + meshFailurePenaltyDecay: number + + /** + * P4: invalid messages + * This is the number of invalid messages in the topic. + * The value of the parameter is the square of the counter, decaying with + * InvalidMessageDeliveriesDecay. + * The weight of the parameter MUST be negative (or zero to disable). + */ + invalidMessageDeliveriesWeight: number + invalidMessageDeliveriesDecay: number +} + +export const defaultPeerScoreParams: PeerScoreParams = { + topics: {}, + topicScoreCap: 10.0, + appSpecificScore: () => 0.0, + appSpecificWeight: 10.0, + IPColocationFactorWeight: -5.0, + IPColocationFactorThreshold: 10.0, + IPColocationFactorWhitelist: new Set(), + behaviourPenaltyWeight: -10.0, + behaviourPenaltyThreshold: 0.0, + behaviourPenaltyDecay: 0.2, + decayInterval: 1000.0, + decayToZero: 0.1, + retainScore: 3600 * 1000 +} + +export const defaultTopicScoreParams: TopicScoreParams = { + topicWeight: 0.5, + timeInMeshWeight: 1, + timeInMeshQuantum: 1, + timeInMeshCap: 3600, + + firstMessageDeliveriesWeight: 1, + firstMessageDeliveriesDecay: 0.5, + firstMessageDeliveriesCap: 2000, + + meshMessageDeliveriesWeight: -1, + meshMessageDeliveriesDecay: 0.5, + meshMessageDeliveriesCap: 100, + meshMessageDeliveriesThreshold: 20, + meshMessageDeliveriesWindow: 10, + meshMessageDeliveriesActivation: 5000, + + meshFailurePenaltyWeight: -1, + meshFailurePenaltyDecay: 0.5, + + invalidMessageDeliveriesWeight: -1, + invalidMessageDeliveriesDecay: 0.3 +} + +export function createPeerScoreParams (p: Partial = {}): PeerScoreParams { + return { + ...defaultPeerScoreParams, + ...p, + topics: (p.topics != null) + ? 
Object.entries(p.topics).reduce>((topics, [topic, topicScoreParams]) => { + topics[topic] = createTopicScoreParams(topicScoreParams) + return topics + }, {}) + : {} + } +} + +export function createTopicScoreParams (p: Partial = {}): TopicScoreParams { + return { + ...defaultTopicScoreParams, + ...p + } +} + +// peer score parameter validation +export function validatePeerScoreParams (p: PeerScoreParams): void { + for (const [topic, params] of Object.entries(p.topics)) { + try { + validateTopicScoreParams(params) + } catch (e) { + throw new InvalidPeerScoreParamsError(`invalid score parameters for topic ${topic}: ${(e as Error).message}`) + } + } + + // check that the topic score is 0 or something positive + if (p.topicScoreCap < 0) { + throw new InvalidPeerScoreParamsError('invalid topic score cap; must be positive (or 0 for no cap)') + } + + // check that we have an app specific score; the weight can be anything (but expected positive) + if (p.appSpecificScore === null || p.appSpecificScore === undefined) { + throw new InvalidPeerScoreParamsError('missing application specific score function') + } + + // check the IP colocation factor + if (p.IPColocationFactorWeight > 0) { + throw new InvalidPeerScoreParamsError('invalid IPColocationFactorWeight; must be negative (or 0 to disable)') + } + if (p.IPColocationFactorWeight !== 0 && p.IPColocationFactorThreshold < 1) { + throw new InvalidPeerScoreParamsError('invalid IPColocationFactorThreshold; must be at least 1') + } + + // check the behaviour penalty + if (p.behaviourPenaltyWeight > 0) { + throw new InvalidPeerScoreParamsError('invalid BehaviourPenaltyWeight; must be negative (or 0 to disable)') + } + if (p.behaviourPenaltyWeight !== 0 && (p.behaviourPenaltyDecay <= 0 || p.behaviourPenaltyDecay >= 1)) { + throw new InvalidPeerScoreParamsError('invalid BehaviourPenaltyDecay; must be between 0 and 1') + } + + // check the decay parameters + if (p.decayInterval < 1000) { + throw new 
InvalidPeerScoreParamsError('invalid DecayInterval; must be at least 1s') + } + if (p.decayToZero <= 0 || p.decayToZero >= 1) { + throw new InvalidPeerScoreParamsError('invalid DecayToZero; must be between 0 and 1') + } + + // no need to check the score retention; a value of 0 means that we don't retain scores +} + +// eslint-disable-next-line complexity +export function validateTopicScoreParams (p: TopicScoreParams): void { + // make sure we have a sane topic weight + if (p.topicWeight < 0) { + throw new InvalidPeerScoreParamsError('invalid topic weight; must be >= 0') + } + + // check P1 + if (p.timeInMeshQuantum === 0) { + throw new InvalidPeerScoreParamsError('invalid TimeInMeshQuantum; must be non zero') + } + if (p.timeInMeshWeight < 0) { + throw new InvalidPeerScoreParamsError('invalid TimeInMeshWeight; must be positive (or 0 to disable)') + } + if (p.timeInMeshWeight !== 0 && p.timeInMeshQuantum <= 0) { + throw new InvalidPeerScoreParamsError('invalid TimeInMeshQuantum; must be positive') + } + if (p.timeInMeshWeight !== 0 && p.timeInMeshCap <= 0) { + throw new InvalidPeerScoreParamsError('invalid TimeInMeshCap; must be positive') + } + + // check P2 + if (p.firstMessageDeliveriesWeight < 0) { + throw new InvalidPeerScoreParamsError('invallid FirstMessageDeliveriesWeight; must be positive (or 0 to disable)') + } + if ( + p.firstMessageDeliveriesWeight !== 0 && + (p.firstMessageDeliveriesDecay <= 0 || p.firstMessageDeliveriesDecay >= 1) + ) { + throw new InvalidPeerScoreParamsError('invalid FirstMessageDeliveriesDecay; must be between 0 and 1') + } + if (p.firstMessageDeliveriesWeight !== 0 && p.firstMessageDeliveriesCap <= 0) { + throw new InvalidPeerScoreParamsError('invalid FirstMessageDeliveriesCap; must be positive') + } + + // check P3 + if (p.meshMessageDeliveriesWeight > 0) { + throw new InvalidPeerScoreParamsError('invalid MeshMessageDeliveriesWeight; must be negative (or 0 to disable)') + } + if (p.meshMessageDeliveriesWeight !== 0 && 
(p.meshMessageDeliveriesDecay <= 0 || p.meshMessageDeliveriesDecay >= 1)) { + throw new InvalidPeerScoreParamsError('invalid MeshMessageDeliveriesDecay; must be between 0 and 1') + } + if (p.meshMessageDeliveriesWeight !== 0 && p.meshMessageDeliveriesCap <= 0) { + throw new InvalidPeerScoreParamsError('invalid MeshMessageDeliveriesCap; must be positive') + } + if (p.meshMessageDeliveriesWeight !== 0 && p.meshMessageDeliveriesThreshold <= 0) { + throw new InvalidPeerScoreParamsError('invalid MeshMessageDeliveriesThreshold; must be positive') + } + if (p.meshMessageDeliveriesWindow < 0) { + throw new InvalidPeerScoreParamsError('invalid MeshMessageDeliveriesWindow; must be non-negative') + } + if (p.meshMessageDeliveriesWeight !== 0 && p.meshMessageDeliveriesActivation < 1000) { + throw new InvalidPeerScoreParamsError('invalid MeshMessageDeliveriesActivation; must be at least 1s') + } + + // check P3b + if (p.meshFailurePenaltyWeight > 0) { + throw new InvalidPeerScoreParamsError('invalid MeshFailurePenaltyWeight; must be negative (or 0 to disable)') + } + if (p.meshFailurePenaltyWeight !== 0 && (p.meshFailurePenaltyDecay <= 0 || p.meshFailurePenaltyDecay >= 1)) { + throw new InvalidPeerScoreParamsError('invalid MeshFailurePenaltyDecay; must be between 0 and 1') + } + + // check P4 + if (p.invalidMessageDeliveriesWeight > 0) { + throw new InvalidPeerScoreParamsError('invalid InvalidMessageDeliveriesWeight; must be negative (or 0 to disable)') + } + if (p.invalidMessageDeliveriesDecay <= 0 || p.invalidMessageDeliveriesDecay >= 1) { + throw new InvalidPeerScoreParamsError('invalid InvalidMessageDeliveriesDecay; must be between 0 and 1') + } +} diff --git a/packages/gossipsub/src/score/peer-score-thresholds.ts b/packages/gossipsub/src/score/peer-score-thresholds.ts new file mode 100644 index 0000000000..b50225b0c0 --- /dev/null +++ b/packages/gossipsub/src/score/peer-score-thresholds.ts @@ -0,0 +1,70 @@ +import { InvalidPeerScoreThresholdsError } from '../errors.js' + 
+// This file defines PeerScoreThresholds interface +// as well as a constructor, default constructor, and validation function +// for this interface + +export interface PeerScoreThresholds { + /** + * gossipThreshold is the score threshold below which gossip propagation is supressed; + * should be negative. + */ + gossipThreshold: number + + /** + * publishThreshold is the score threshold below which we shouldn't publish when using flood + * publishing (also applies to fanout and floodsub peers); should be negative and <= GossipThreshold. + */ + publishThreshold: number + + /** + * graylistThreshold is the score threshold below which message processing is supressed altogether, + * implementing an effective graylist according to peer score; should be negative and <= PublisThreshold. + */ + graylistThreshold: number + + /** + * acceptPXThreshold is the score threshold below which PX will be ignored; this should be positive + * and limited to scores attainable by bootstrappers and other trusted nodes. + */ + acceptPXThreshold: number + + /** + * opportunisticGraftThreshold is the median mesh score threshold before triggering opportunistic + * grafting; this should have a small positive value. 
+ */ + opportunisticGraftThreshold: number +} + +export const defaultPeerScoreThresholds: PeerScoreThresholds = { + gossipThreshold: -10, + publishThreshold: -50, + graylistThreshold: -80, + acceptPXThreshold: 10, + opportunisticGraftThreshold: 20 +} + +export function createPeerScoreThresholds (p: Partial = {}): PeerScoreThresholds { + return { + ...defaultPeerScoreThresholds, + ...p + } +} + +export function validatePeerScoreThresholds (p: PeerScoreThresholds): void { + if (p.gossipThreshold > 0) { + throw new InvalidPeerScoreThresholdsError('invalid gossip threshold; it must be <= 0') + } + if (p.publishThreshold > 0 || p.publishThreshold > p.gossipThreshold) { + throw new InvalidPeerScoreThresholdsError('invalid publish threshold; it must be <= 0 and <= gossip threshold') + } + if (p.graylistThreshold > 0 || p.graylistThreshold > p.publishThreshold) { + throw new InvalidPeerScoreThresholdsError('invalid graylist threshold; it must be <= 0 and <= publish threshold') + } + if (p.acceptPXThreshold < 0) { + throw new InvalidPeerScoreThresholdsError('invalid accept PX threshold; it must be >= 0') + } + if (p.opportunisticGraftThreshold < 0) { + throw new InvalidPeerScoreThresholdsError('invalid opportunistic grafting threshold; it must be >= 0') + } +} diff --git a/packages/gossipsub/src/score/peer-score.ts b/packages/gossipsub/src/score/peer-score.ts new file mode 100644 index 0000000000..e755fcd6d0 --- /dev/null +++ b/packages/gossipsub/src/score/peer-score.ts @@ -0,0 +1,565 @@ +import { RejectReason } from '../types.js' +import { MapDef } from '../utils/set.js' +import { computeScore } from './compute-score.js' +import { MessageDeliveries, DeliveryRecordStatus } from './message-deliveries.js' +import { validatePeerScoreParams } from './peer-score-params.js' +import type { MsgIdStr, PeerIdStr, TopicStr, IPStr } from '../types.js' +import type { PeerScoreParams } from './peer-score-params.js' +import type { PeerStats, TopicStats } from './peer-stats.js' +import 
type { Metrics, ScorePenalty } from '../metrics.js' +import type { ComponentLogger, Logger } from '@libp2p/interface' + +interface PeerScoreOpts { + /** + * Miliseconds to cache computed score per peer + */ + scoreCacheValidityMs: number + + computeScore?: typeof computeScore +} + +interface ScoreCacheEntry { + /** The cached score */ + score: number + /** Unix timestamp in miliseconds, the time after which the cached score for a peer is no longer valid */ + cacheUntil: number +} + +export type PeerScoreStatsDump = Record + +export class PeerScore { + /** + * Per-peer stats for score calculation + */ + readonly peerStats = new Map() + /** + * IP colocation tracking; maps IP => set of peers. + */ + readonly peerIPs = new MapDef>(() => new Set()) + /** + * Cache score up to decayInterval if topic stats are unchanged. + */ + readonly scoreCache = new Map() + /** + * Recent message delivery timing/participants + */ + readonly deliveryRecords = new MessageDeliveries() + + _backgroundInterval?: ReturnType + + private readonly scoreCacheValidityMs: number + private readonly computeScore: typeof computeScore + private readonly log: Logger + + constructor (readonly params: PeerScoreParams, private readonly metrics: Metrics | null, componentLogger: ComponentLogger, opts: PeerScoreOpts) { + validatePeerScoreParams(params) + this.scoreCacheValidityMs = opts.scoreCacheValidityMs + this.computeScore = opts.computeScore ?? 
computeScore + this.log = componentLogger.forComponent('libp2p:gossipsub:score') + } + + get size (): number { + return this.peerStats.size + } + + /** + * Start PeerScore instance + */ + start (): void { + if (this._backgroundInterval != null) { + this.log('Peer score already running') + return + } + this._backgroundInterval = setInterval(() => { this.background() }, this.params.decayInterval) + this.log('started') + } + + /** + * Stop PeerScore instance + */ + stop (): void { + if (this._backgroundInterval == null) { + this.log('Peer score already stopped') + return + } + clearInterval(this._backgroundInterval) + delete this._backgroundInterval + this.peerIPs.clear() + this.peerStats.clear() + this.deliveryRecords.clear() + this.log('stopped') + } + + /** + * Periodic maintenance + */ + background (): void { + this.refreshScores() + this.deliveryRecords.gc() + } + + dumpPeerScoreStats (): PeerScoreStatsDump { + return Object.fromEntries(Array.from(this.peerStats.entries()).map(([peer, stats]) => [peer, stats])) + } + + messageFirstSeenTimestampMs (msgIdStr: MsgIdStr): number | null { + const drec = this.deliveryRecords.getRecord(msgIdStr) + return (drec != null) ? drec.firstSeenTsMs : null + } + + /** + * Decays scores, and purges score records for disconnected peers once their expiry has elapsed. + */ + public refreshScores (): void { + const now = Date.now() + const decayToZero = this.params.decayToZero + + this.peerStats.forEach((pstats, id) => { + if (!pstats.connected) { + // has the retention period expired? + if (now > pstats.expire) { + // yes, throw it away (but clean up the IP tracking first) + this.removeIPsForPeer(id, pstats.knownIPs) + this.peerStats.delete(id) + this.scoreCache.delete(id) + } + + // we don't decay retained scores, as the peer is not active. + // this way the peer cannot reset a negative score by simply disconnecting and reconnecting, + // unless the retention period has elapsed. 
+ // similarly, a well behaved peer does not lose its score by getting disconnected. + return + } + + Object.entries(pstats.topics).forEach(([topic, tstats]) => { + const tparams = this.params.topics[topic] + if (tparams === undefined) { + // we are not scoring this topic + // should be unreachable, we only add scored topics to pstats + return + } + + // decay counters + tstats.firstMessageDeliveries *= tparams.firstMessageDeliveriesDecay + if (tstats.firstMessageDeliveries < decayToZero) { + tstats.firstMessageDeliveries = 0 + } + + tstats.meshMessageDeliveries *= tparams.meshMessageDeliveriesDecay + if (tstats.meshMessageDeliveries < decayToZero) { + tstats.meshMessageDeliveries = 0 + } + + tstats.meshFailurePenalty *= tparams.meshFailurePenaltyDecay + if (tstats.meshFailurePenalty < decayToZero) { + tstats.meshFailurePenalty = 0 + } + + tstats.invalidMessageDeliveries *= tparams.invalidMessageDeliveriesDecay + if (tstats.invalidMessageDeliveries < decayToZero) { + tstats.invalidMessageDeliveries = 0 + } + + // update mesh time and activate mesh message delivery parameter if need be + if (tstats.inMesh) { + tstats.meshTime = now - tstats.graftTime + if (tstats.meshTime > tparams.meshMessageDeliveriesActivation) { + tstats.meshMessageDeliveriesActive = true + } + } + }) + + // decay P7 counter + pstats.behaviourPenalty *= this.params.behaviourPenaltyDecay + if (pstats.behaviourPenalty < decayToZero) { + pstats.behaviourPenalty = 0 + } + }) + } + + /** + * Return the score for a peer + */ + score (id: PeerIdStr): number { + this.metrics?.scoreFnCalls.inc() + + const pstats = this.peerStats.get(id) + if (pstats == null) { + return 0 + } + + const now = Date.now() + const cacheEntry = this.scoreCache.get(id) + + // Found cached score within validity period + if ((cacheEntry != null) && cacheEntry.cacheUntil > now) { + return cacheEntry.score + } + + this.metrics?.scoreFnRuns.inc() + + const score = this.computeScore(id, pstats, this.params, this.peerIPs) + const 
cacheUntil = now + this.scoreCacheValidityMs + + if (cacheEntry != null) { + this.metrics?.scoreCachedDelta.observe(Math.abs(score - cacheEntry.score)) + cacheEntry.score = score + cacheEntry.cacheUntil = cacheUntil + } else { + this.scoreCache.set(id, { score, cacheUntil }) + } + + return score + } + + /** + * Apply a behavioural penalty to a peer + */ + addPenalty (id: PeerIdStr, penalty: number, penaltyLabel: ScorePenalty): void { + const pstats = this.peerStats.get(id) + if (pstats != null) { + pstats.behaviourPenalty += penalty + this.metrics?.onScorePenalty(penaltyLabel) + } + } + + addPeer (id: PeerIdStr): void { + // create peer stats (not including topic stats for each topic to be scored) + // topic stats will be added as needed + const pstats: PeerStats = { + connected: true, + expire: 0, + topics: {}, + knownIPs: new Set(), + behaviourPenalty: 0 + } + this.peerStats.set(id, pstats) + } + + /** Adds a new IP to a peer, if the peer is not known the update is ignored */ + addIP (id: PeerIdStr, ip: string): void { + const pstats = this.peerStats.get(id) + if (pstats != null) { + pstats.knownIPs.add(ip) + } + + this.peerIPs.getOrDefault(ip).add(id) + } + + /** Remove peer association with IP */ + removeIP (id: PeerIdStr, ip: string): void { + const pstats = this.peerStats.get(id) + if (pstats != null) { + pstats.knownIPs.delete(ip) + } + + const peersWithIP = this.peerIPs.get(ip) + if (peersWithIP != null) { + peersWithIP.delete(id) + if (peersWithIP.size === 0) { + this.peerIPs.delete(ip) + } + } + } + + removePeer (id: PeerIdStr): void { + const pstats = this.peerStats.get(id) + if (pstats == null) { + return + } + + // decide whether to retain the score; this currently only retains non-positive scores + // to dissuade attacks on the score function. 
+ if (this.score(id) > 0) { + this.removeIPsForPeer(id, pstats.knownIPs) + this.peerStats.delete(id) + return + } + + // furthermore, when we decide to retain the score, the firstMessageDelivery counters are + // reset to 0 and mesh delivery penalties applied. + Object.entries(pstats.topics).forEach(([topic, tstats]) => { + tstats.firstMessageDeliveries = 0 + + const threshold = this.params.topics[topic].meshMessageDeliveriesThreshold + if (tstats.inMesh && tstats.meshMessageDeliveriesActive && tstats.meshMessageDeliveries < threshold) { + const deficit = threshold - tstats.meshMessageDeliveries + tstats.meshFailurePenalty += deficit * deficit + } + + tstats.inMesh = false + tstats.meshMessageDeliveriesActive = false + }) + + pstats.connected = false + pstats.expire = Date.now() + this.params.retainScore + } + + /** Handles scoring functionality as a peer GRAFTs to a topic. */ + graft (id: PeerIdStr, topic: TopicStr): void { + const pstats = this.peerStats.get(id) + if (pstats != null) { + const tstats = this.getPtopicStats(pstats, topic) + if (tstats != null) { + // if we are scoring the topic, update the mesh status. + tstats.inMesh = true + tstats.graftTime = Date.now() + tstats.meshTime = 0 + tstats.meshMessageDeliveriesActive = false + } + } + } + + /** Handles scoring functionality as a peer PRUNEs from a topic. 
*/ + prune (id: PeerIdStr, topic: TopicStr): void { + const pstats = this.peerStats.get(id) + if (pstats != null) { + const tstats = this.getPtopicStats(pstats, topic) + if (tstats != null) { + // sticky mesh delivery rate failure penalty + const threshold = this.params.topics[topic].meshMessageDeliveriesThreshold + if (tstats.meshMessageDeliveriesActive && tstats.meshMessageDeliveries < threshold) { + const deficit = threshold - tstats.meshMessageDeliveries + tstats.meshFailurePenalty += deficit * deficit + } + tstats.meshMessageDeliveriesActive = false + tstats.inMesh = false + + // TODO: Consider clearing score cache on important penalties + // this.scoreCache.delete(id) + } + } + } + + validateMessage (msgIdStr: MsgIdStr): void { + this.deliveryRecords.ensureRecord(msgIdStr) + } + + deliverMessage (from: PeerIdStr, msgIdStr: MsgIdStr, topic: TopicStr): void { + this.markFirstMessageDelivery(from, topic) + + const drec = this.deliveryRecords.ensureRecord(msgIdStr) + const now = Date.now() + + // defensive check that this is the first delivery trace -- delivery status should be unknown + if (drec.status !== DeliveryRecordStatus.unknown) { + this.log( + 'unexpected delivery: message from %s was first seen %s ago and has delivery status %s', + from, + now - drec.firstSeenTsMs, + DeliveryRecordStatus[drec.status] + ) + return + } + + // mark the message as valid and reward mesh peers that have already forwarded it to us + drec.status = DeliveryRecordStatus.valid + drec.validated = now + drec.peers.forEach((p) => { + // this check is to make sure a peer can't send us a message twice and get a double count + // if it is a first delivery. + if (p !== from.toString()) { + this.markDuplicateMessageDelivery(p, topic) + } + }) + } + + /** + * Similar to `rejectMessage` except does not require the message id or reason for an invalid message. 
+ */ + rejectInvalidMessage (from: PeerIdStr, topic: TopicStr): void { + this.markInvalidMessageDelivery(from, topic) + } + + rejectMessage (from: PeerIdStr, msgIdStr: MsgIdStr, topic: TopicStr, reason: RejectReason): void { + // eslint-disable-next-line default-case + switch (reason) { + // these messages are not tracked, but the peer is penalized as they are invalid + case RejectReason.Error: + this.markInvalidMessageDelivery(from, topic) + return + + // we ignore those messages, so do nothing. + case RejectReason.Blacklisted: + return + + // the rest are handled after record creation + } + + const drec = this.deliveryRecords.ensureRecord(msgIdStr) + + // defensive check that this is the first rejection -- delivery status should be unknown + if (drec.status !== DeliveryRecordStatus.unknown) { + this.log( + 'unexpected rejection: message from %s was first seen %s ago and has delivery status %d', + from, + Date.now() - drec.firstSeenTsMs, + DeliveryRecordStatus[drec.status] + ) + return + } + + if (reason === RejectReason.Ignore) { + // we were explicitly instructed by the validator to ignore the message but not penalize the peer + drec.status = DeliveryRecordStatus.ignored + drec.peers.clear() + return + } + + // mark the message as invalid and penalize peers that have already forwarded it. 
+ drec.status = DeliveryRecordStatus.invalid + + this.markInvalidMessageDelivery(from, topic) + drec.peers.forEach((p) => { + this.markInvalidMessageDelivery(p, topic) + }) + + // release the delivery time tracking map to free some memory early + drec.peers.clear() + } + + duplicateMessage (from: PeerIdStr, msgIdStr: MsgIdStr, topic: TopicStr): void { + const drec = this.deliveryRecords.ensureRecord(msgIdStr) + + if (drec.peers.has(from)) { + // we have already seen this duplicate + return + } + + // eslint-disable-next-line default-case + switch (drec.status) { + case DeliveryRecordStatus.unknown: + // the message is being validated; track the peer delivery and wait for + // the Deliver/Reject/Ignore notification. + drec.peers.add(from) + break + + case DeliveryRecordStatus.valid: + // mark the peer delivery time to only count a duplicate delivery once. + drec.peers.add(from) + this.markDuplicateMessageDelivery(from, topic, drec.validated) + break + + case DeliveryRecordStatus.invalid: + // we no longer track delivery time + this.markInvalidMessageDelivery(from, topic) + break + + case DeliveryRecordStatus.ignored: + // the message was ignored; do nothing (we don't know if it was valid) + break + } + } + + /** + * Increments the "invalid message deliveries" counter for all scored topics the message is published in. + */ + public markInvalidMessageDelivery (from: PeerIdStr, topic: TopicStr): void { + const pstats = this.peerStats.get(from) + if (pstats != null) { + const tstats = this.getPtopicStats(pstats, topic) + if (tstats != null) { + tstats.invalidMessageDeliveries += 1 + } + } + } + + /** + * Increments the "first message deliveries" counter for all scored topics the message is published in, + * as well as the "mesh message deliveries" counter, if the peer is in the mesh for the topic. 
+ * Messages already known (with the seenCache) are counted with markDuplicateMessageDelivery() + */ + public markFirstMessageDelivery (from: PeerIdStr, topic: TopicStr): void { + const pstats = this.peerStats.get(from) + if (pstats != null) { + const tstats = this.getPtopicStats(pstats, topic) + if (tstats != null) { + let cap = this.params.topics[topic].firstMessageDeliveriesCap + tstats.firstMessageDeliveries = Math.min(cap, tstats.firstMessageDeliveries + 1) + + if (tstats.inMesh) { + cap = this.params.topics[topic].meshMessageDeliveriesCap + tstats.meshMessageDeliveries = Math.min(cap, tstats.meshMessageDeliveries + 1) + } + } + } + } + + /** + * Increments the "mesh message deliveries" counter for messages we've seen before, + * as long the message was received within the P3 window. + */ + public markDuplicateMessageDelivery (from: PeerIdStr, topic: TopicStr, validatedTime?: number): void { + const pstats = this.peerStats.get(from) + if (pstats != null) { + const now = validatedTime !== undefined ? Date.now() : 0 + + const tstats = this.getPtopicStats(pstats, topic) + + if (tstats != null && tstats.inMesh) { + const tparams = this.params.topics[topic] + + // check against the mesh delivery window -- if the validated time is passed as 0, then + // the message was received before we finished validation and thus falls within the mesh + // delivery window. + if (validatedTime !== undefined) { + const deliveryDelayMs = now - validatedTime + const isLateDelivery = deliveryDelayMs > tparams.meshMessageDeliveriesWindow + this.metrics?.onDuplicateMsgDelivery(topic, deliveryDelayMs, isLateDelivery) + + if (isLateDelivery) { + return + } + } + + const cap = tparams.meshMessageDeliveriesCap + tstats.meshMessageDeliveries = Math.min(cap, tstats.meshMessageDeliveries + 1) + } + } + } + + /** + * Removes an IP list from the tracking list for a peer. 
+ */ + private removeIPsForPeer (id: PeerIdStr, ipsToRemove: Set): void { + for (const ipToRemove of ipsToRemove) { + const peerSet = this.peerIPs.get(ipToRemove) + if (peerSet != null) { + peerSet.delete(id) + if (peerSet.size === 0) { + this.peerIPs.delete(ipToRemove) + } + } + } + } + + /** + * Returns topic stats if they exist, otherwise if the supplied parameters score the + * topic, inserts the default stats and returns a reference to those. If neither apply, returns None. + */ + private getPtopicStats (pstats: PeerStats, topic: TopicStr): TopicStats | null { + let topicStats: TopicStats | undefined = pstats.topics[topic] + + if (topicStats !== undefined) { + return topicStats + } + + if (this.params.topics[topic] !== undefined) { + topicStats = { + inMesh: false, + graftTime: 0, + meshTime: 0, + firstMessageDeliveries: 0, + meshMessageDeliveries: 0, + meshMessageDeliveriesActive: false, + meshFailurePenalty: 0, + invalidMessageDeliveries: 0 + } + pstats.topics[topic] = topicStats + + return topicStats + } + + return null + } +} diff --git a/packages/gossipsub/src/score/peer-stats.ts b/packages/gossipsub/src/score/peer-stats.ts new file mode 100644 index 0000000000..3401168561 --- /dev/null +++ b/packages/gossipsub/src/score/peer-stats.ts @@ -0,0 +1,33 @@ +import type { TopicStr } from '../types.js' + +export interface PeerStats { + /** true if the peer is currently connected */ + connected: boolean + /** expiration time of the score stats for disconnected peers */ + expire: number + /** per topic stats */ + topics: Record + /** IP tracking; store as set for easy processing */ + knownIPs: Set + /** behavioural pattern penalties (applied by the router) */ + behaviourPenalty: number +} + +export interface TopicStats { + /** true if the peer is in the mesh */ + inMesh: boolean + /** time when the peer was (last) GRAFTed; valid only when in mesh */ + graftTime: number + /** time in mesh (updated during refresh/decay to avoid calling gettimeofday on every score 
invocation) */ + meshTime: number + /** first message deliveries */ + firstMessageDeliveries: number + /** mesh message deliveries */ + meshMessageDeliveries: number + /** true if the peer has been enough time in the mesh to activate mess message deliveries */ + meshMessageDeliveriesActive: boolean + /** sticky mesh rate failure penalty counter */ + meshFailurePenalty: number + /** invalid message counter */ + invalidMessageDeliveries: number +} diff --git a/packages/gossipsub/src/score/scoreMetrics.ts b/packages/gossipsub/src/score/scoreMetrics.ts new file mode 100644 index 0000000000..7291ef86dd --- /dev/null +++ b/packages/gossipsub/src/score/scoreMetrics.ts @@ -0,0 +1,215 @@ +import type { PeerScoreParams } from './peer-score-params.js' +import type { PeerStats } from './peer-stats.js' + +type TopicLabel = string +type TopicStr = string +type TopicStrToLabel = Map + +export interface TopicScoreWeights { + p1w: T + p2w: T + p3w: T + p3bw: T + p4w: T +} +export interface ScoreWeights { + byTopic: Map> + p5w: T + p6w: T + p7w: T + score: T +} + +export function computeScoreWeights ( + peer: string, + pstats: PeerStats, + params: PeerScoreParams, + peerIPs: Map>, + topicStrToLabel: TopicStrToLabel +): ScoreWeights { + let score = 0 + + const byTopic = new Map>() + + // topic stores + Object.entries(pstats.topics).forEach(([topic, tstats]) => { + // the topic parameters + // Aggregate by known topicLabel or throw to 'unknown'. This prevent too high cardinality + const topicLabel = topicStrToLabel.get(topic) ?? 
'unknown' + const topicParams = params.topics[topic] + if (topicParams === undefined) { + // we are not scoring this topic + return + } + + let topicScores = byTopic.get(topicLabel) + if (topicScores == null) { + topicScores = { + p1w: 0, + p2w: 0, + p3w: 0, + p3bw: 0, + p4w: 0 + } + byTopic.set(topicLabel, topicScores) + } + + let p1w = 0 + let p2w = 0 + let p3w = 0 + let p3bw = 0 + let p4w = 0 + + // P1: time in Mesh + if (tstats.inMesh) { + const p1 = Math.max(tstats.meshTime / topicParams.timeInMeshQuantum, topicParams.timeInMeshCap) + p1w += p1 * topicParams.timeInMeshWeight + } + + // P2: first message deliveries + let p2 = tstats.firstMessageDeliveries + if (p2 > topicParams.firstMessageDeliveriesCap) { + p2 = topicParams.firstMessageDeliveriesCap + } + p2w += p2 * topicParams.firstMessageDeliveriesWeight + + // P3: mesh message deliveries + if ( + tstats.meshMessageDeliveriesActive && + tstats.meshMessageDeliveries < topicParams.meshMessageDeliveriesThreshold + ) { + const deficit = topicParams.meshMessageDeliveriesThreshold - tstats.meshMessageDeliveries + const p3 = deficit * deficit + p3w += p3 * topicParams.meshMessageDeliveriesWeight + } + + // P3b: + // NOTE: the weight of P3b is negative (validated in validateTopicScoreParams) so this detracts + const p3b = tstats.meshFailurePenalty + p3bw += p3b * topicParams.meshFailurePenaltyWeight + + // P4: invalid messages + // NOTE: the weight of P4 is negative (validated in validateTopicScoreParams) so this detracts + const p4 = tstats.invalidMessageDeliveries * tstats.invalidMessageDeliveries + p4w += p4 * topicParams.invalidMessageDeliveriesWeight + + // update score, mixing with topic weight + score += (p1w + p2w + p3w + p3bw + p4w) * topicParams.topicWeight + + topicScores.p1w += p1w + topicScores.p2w += p2w + topicScores.p3w += p3w + topicScores.p3bw += p3bw + topicScores.p4w += p4w + }) + + // apply the topic score cap, if any + if (params.topicScoreCap > 0 && score > params.topicScoreCap) { + score = 
params.topicScoreCap + + // Proportionally apply cap to all individual contributions + const capF = params.topicScoreCap / score + for (const ws of byTopic.values()) { + ws.p1w *= capF + ws.p2w *= capF + ws.p3w *= capF + ws.p3bw *= capF + ws.p4w *= capF + } + } + + let p5w = 0 + let p6w = 0 + let p7w = 0 + + // P5: application-specific score + const p5 = params.appSpecificScore(peer) + p5w += p5 * params.appSpecificWeight + + // P6: IP colocation factor + pstats.knownIPs.forEach((ip) => { + if (params.IPColocationFactorWhitelist.has(ip)) { + return + } + + // P6 has a cliff (IPColocationFactorThreshold) + // It's only applied if at least that many peers are connected to us from that source IP addr. + // It is quadratic, and the weight is negative (validated in validatePeerScoreParams) + const peersInIP = peerIPs.get(ip) + const numPeersInIP = (peersInIP != null) ? peersInIP.size : 0 + if (numPeersInIP > params.IPColocationFactorThreshold) { + const surplus = numPeersInIP - params.IPColocationFactorThreshold + const p6 = surplus * surplus + p6w += p6 * params.IPColocationFactorWeight + } + }) + + // P7: behavioural pattern penalty + const p7 = pstats.behaviourPenalty * pstats.behaviourPenalty + p7w += p7 * params.behaviourPenaltyWeight + + score += p5w + p6w + p7w + + return { + byTopic, + p5w, + p6w, + p7w, + score + } +} + +export function computeAllPeersScoreWeights ( + peerIdStrs: Iterable, + peerStats: Map, + params: PeerScoreParams, + peerIPs: Map>, + topicStrToLabel: TopicStrToLabel +): ScoreWeights { + const sw: ScoreWeights = { + byTopic: new Map(), + p5w: [], + p6w: [], + p7w: [], + score: [] + } + + for (const peerIdStr of peerIdStrs) { + const pstats = peerStats.get(peerIdStr) + if (pstats != null) { + const swPeer = computeScoreWeights(peerIdStr, pstats, params, peerIPs, topicStrToLabel) + + for (const [topic, swPeerTopic] of swPeer.byTopic) { + let swTopic = sw.byTopic.get(topic) + if (swTopic == null) { + swTopic = { + p1w: [], + p2w: [], + p3w: [], + 
p3bw: [], + p4w: [] + } + sw.byTopic.set(topic, swTopic) + } + + swTopic.p1w.push(swPeerTopic.p1w) + swTopic.p2w.push(swPeerTopic.p2w) + swTopic.p3w.push(swPeerTopic.p3w) + swTopic.p3bw.push(swPeerTopic.p3bw) + swTopic.p4w.push(swPeerTopic.p4w) + } + + sw.p5w.push(swPeer.p5w) + sw.p6w.push(swPeer.p6w) + sw.p7w.push(swPeer.p7w) + sw.score.push(swPeer.score) + } else { + sw.p5w.push(0) + sw.p6w.push(0) + sw.p7w.push(0) + sw.score.push(0) + } + } + + return sw +} diff --git a/packages/gossipsub/src/stream.ts b/packages/gossipsub/src/stream.ts new file mode 100644 index 0000000000..d434ba582c --- /dev/null +++ b/packages/gossipsub/src/stream.ts @@ -0,0 +1,79 @@ +import { pipe } from '@libp2p/utils' +import { encode, decode } from 'it-length-prefixed' +import type { AbortOptions, Stream } from '@libp2p/interface' +import type { Uint8ArrayList } from 'uint8arraylist' + +interface OutboundStreamOpts { + /** Max size in bytes for pushable buffer. If full, will throw on .push */ + maxBufferSize?: number +} + +interface InboundStreamOpts { + /** Max size in bytes for reading messages from the stream */ + maxDataLength?: number +} + +export class OutboundStream { + constructor (private readonly rawStream: Stream, errCallback: (e: Error) => void, opts: OutboundStreamOpts) { + if (opts.maxBufferSize != null) { + rawStream.maxWriteBufferLength = opts.maxBufferSize + } + + rawStream.addEventListener('close', (evt) => { + if (evt.error != null) { + errCallback(evt.error) + } + }) + } + + get protocol (): string { + return this.rawStream.protocol + } + + async push (data: Uint8Array): Promise { + return this.pushPrefixed(encode.single(data)) + } + + /** + * Same to push() but this is prefixed data so no need to encode length prefixed again + */ + pushPrefixed (data: Uint8ArrayList): void { + // TODO: backpressure + this.rawStream.send(data) + } + + async close (options?: AbortOptions): Promise { + await this.rawStream.close(options) + .catch(err => { + this.rawStream.abort(err) + 
}) + } +} + +export class InboundStream { + public readonly source: AsyncIterable + + private readonly rawStream: Stream + private readonly closeController: AbortController + + constructor (rawStream: Stream, opts: InboundStreamOpts = {}) { + this.rawStream = rawStream + this.closeController = new AbortController() + + this.closeController.signal.addEventListener('abort', () => { + rawStream.close() + .catch(err => { + rawStream.abort(err) + }) + }) + + this.source = pipe( + this.rawStream, + (source) => decode(source, opts) + ) + } + + async close (): Promise { + this.closeController.abort() + } +} diff --git a/packages/gossipsub/src/tracer.ts b/packages/gossipsub/src/tracer.ts new file mode 100644 index 0000000000..6637c54b9f --- /dev/null +++ b/packages/gossipsub/src/tracer.ts @@ -0,0 +1,177 @@ +import { RejectReason } from './types.js' +import type { Metrics } from './metrics.js' +import type { MsgIdStr, MsgIdToStrFn, PeerIdStr } from './types.js' + +/** + * IWantTracer is an internal tracer that tracks IWANT requests in order to penalize + * peers who don't follow up on IWANT requests after an IHAVE advertisement. + * The tracking of promises is probabilistic to avoid using too much memory. + * + * Note: Do not confuse these 'promises' with JS Promise objects. + * These 'promises' are merely expectations of a peer's behavior. + */ +export class IWantTracer { + /** + * Promises to deliver a message + * Map per message id, per peer, promise expiration time + */ + private readonly promises = new Map>() + /** + * First request time by msgId. Used for metrics to track expire times. 
+ * Necessary to know if peers are actually breaking promises or simply sending them a bit later + */ + private readonly requestMsByMsg = new Map() + private readonly requestMsByMsgExpire: number + + constructor ( + private readonly gossipsubIWantFollowupMs: number, + private readonly msgIdToStrFn: MsgIdToStrFn, + private readonly metrics: Metrics | null + ) { + this.requestMsByMsgExpire = 10 * gossipsubIWantFollowupMs + } + + get size (): number { + return this.promises.size + } + + get requestMsByMsgSize (): number { + return this.requestMsByMsg.size + } + + /** + * Track a promise to deliver a message from a list of msgIds we are requesting + */ + addPromise (from: PeerIdStr, msgIds: Uint8Array[]): void { + // pick msgId randomly from the list + const ix = Math.floor(Math.random() * msgIds.length) + const msgId = msgIds[ix] + const msgIdStr = this.msgIdToStrFn(msgId) + + let expireByPeer = this.promises.get(msgIdStr) + if (expireByPeer == null) { + expireByPeer = new Map() + this.promises.set(msgIdStr, expireByPeer) + } + + const now = Date.now() + + // If a promise for this message id and peer already exists we don't update the expiry + if (!expireByPeer.has(from)) { + expireByPeer.set(from, now + this.gossipsubIWantFollowupMs) + + if (this.metrics != null) { + this.metrics.iwantPromiseStarted.inc(1) + if (!this.requestMsByMsg.has(msgIdStr)) { + this.requestMsByMsg.set(msgIdStr, now) + } + } + } + } + + /** + * Returns the number of broken promises for each peer who didn't follow up on an IWANT request. + * + * This should be called not too often relative to the expire times, since it iterates over the whole data. + */ + getBrokenPromises (): Map { + const now = Date.now() + const result = new Map() + + let brokenPromises = 0 + + this.promises.forEach((expireByPeer, msgId) => { + expireByPeer.forEach((expire, p) => { + // the promise has been broken + if (expire < now) { + // add 1 to result + result.set(p, (result.get(p) ?? 
0) + 1) + // delete from tracked promises + expireByPeer.delete(p) + // for metrics + brokenPromises++ + } + }) + // clean up empty promises for a msgId + if (expireByPeer.size === 0) { + this.promises.delete(msgId) + } + }) + + this.metrics?.iwantPromiseBroken.inc(brokenPromises) + + return result + } + + /** + * Someone delivered a message, stop tracking promises for it + */ + deliverMessage (msgIdStr: MsgIdStr, isDuplicate = false): void { + this.trackMessage(msgIdStr) + + const expireByPeer = this.promises.get(msgIdStr) + + // Expired promise, check requestMsByMsg + if (expireByPeer != null) { + this.promises.delete(msgIdStr) + + if (this.metrics != null) { + this.metrics.iwantPromiseResolved.inc(1) + if (isDuplicate) { this.metrics.iwantPromiseResolvedFromDuplicate.inc(1) } + this.metrics.iwantPromiseResolvedPeers.inc(expireByPeer.size) + } + } + } + + /** + * A message got rejected, so we can stop tracking promises and let the score penalty apply from invalid message delivery, + * unless its an obviously invalid message. + */ + rejectMessage (msgIdStr: MsgIdStr, reason: RejectReason): void { + this.trackMessage(msgIdStr) + + // A message got rejected, so we can stop tracking promises and let the score penalty apply. 
+ // With the expection of obvious invalid messages + switch (reason) { + case RejectReason.Error: + return + default: + break + } + + this.promises.delete(msgIdStr) + } + + clear (): void { + this.promises.clear() + } + + prune (): void { + const maxMs = Date.now() - this.requestMsByMsgExpire + let count = 0 + + for (const [k, v] of this.requestMsByMsg.entries()) { + if (v < maxMs) { + // messages that stay too long in the requestMsByMsg map, delete + this.requestMsByMsg.delete(k) + count++ + } else { + // recent messages, keep them + // sort by insertion order + break + } + } + + this.metrics?.iwantMessagePruned.inc(count) + } + + private trackMessage (msgIdStr: MsgIdStr): void { + if (this.metrics != null) { + const requestMs = this.requestMsByMsg.get(msgIdStr) + if (requestMs !== undefined) { + this.metrics.iwantPromiseDeliveryTime.observe((Date.now() - requestMs) / 1000) + this.requestMsByMsg.delete(msgIdStr) + } + } + } +} diff --git a/packages/gossipsub/src/types.ts b/packages/gossipsub/src/types.ts new file mode 100644 index 0000000000..025085b26d --- /dev/null +++ b/packages/gossipsub/src/types.ts @@ -0,0 +1,178 @@ +import { TopicValidatorResult } from './index.ts' +import type { Message } from './index.ts' +import type { RPC } from './message/rpc.js' +import type { PrivateKey, PeerId } from '@libp2p/interface' +import type { Multiaddr } from '@multiformats/multiaddr' + +export type MsgIdStr = string +export type PeerIdStr = string +export type TopicStr = string +export type IPStr = string + +export interface AddrInfo { + id: PeerId + addrs: Multiaddr[] +} + +/** + * Compute a local non-spec'ed msg-id for faster de-duplication of seen messages. + * Used exclusively for a local seen_cache + */ +export interface FastMsgIdFn { (msg: RPC.Message): string | number } + +/** + * By default, gossipsub only provide a browser friendly function to convert Uint8Array message id to string. + * Application could use this option to provide a more efficient function. 
+ */ +export interface MsgIdToStrFn { (msgId: Uint8Array): string } + +/** + * Compute spec'ed msg-id. Used for IHAVE / IWANT messages + */ +export interface MsgIdFn { + (msg: Message): Promise | Uint8Array +} + +export interface DataTransform { + /** + * Takes the data published by peers on a topic and transforms the data. + * Should be the reverse of outboundTransform(). Example: + * - `inboundTransform()`: decompress snappy payload + * - `outboundTransform()`: compress snappy payload + */ + inboundTransform(topic: TopicStr, data: Uint8Array): Uint8Array + + /** + * Takes the data to be published (a topic and associated data) transforms the data. The + * transformed data will then be used to create a `RawGossipsubMessage` to be sent to peers. + */ + outboundTransform(topic: TopicStr, data: Uint8Array): Uint8Array +} + +export enum SignaturePolicy { + /** + * On the producing side: + * - Build messages with the signature, key (from may be enough for certain inlineable public key types), from and seqno fields. + * + * On the consuming side: + * - Enforce the fields to be present, reject otherwise. + * - Propagate only if the fields are valid and signature can be verified, reject otherwise. + */ + StrictSign = 'StrictSign', + /** + * On the producing side: + * - Build messages without the signature, key, from and seqno fields. + * - The corresponding protobuf key-value pairs are absent from the marshalled message, not just empty. + * + * On the consuming side: + * - Enforce the fields to be absent, reject otherwise. + * - Propagate only if the fields are absent, reject otherwise. + * - A message_id function will not be able to use the above fields, and should instead rely on the data field. A commonplace strategy is to calculate a hash. + */ + StrictNoSign = 'StrictNoSign' +} + +export interface PublishOpts { + /** + * Do not throw `PublishError.NoPeersSubscribedToTopic` error if there are no + * peers listening on the topic. + * + * N.B. 
if you sent this option to true, and you publish a message on a topic + * with no peers listening on that topic, no other network node will ever + * receive the message. + */ + allowPublishToZeroTopicPeers?: boolean + ignoreDuplicatePublishError?: boolean + /** serialize message once and send to all peers without control messages */ + batchPublish?: boolean +} + +export enum PublishConfigType { + Signing, + Anonymous +} + +export type PublishConfig = + | { + type: PublishConfigType.Signing + author: PeerId + key: Uint8Array + privateKey: PrivateKey + } + | { type: PublishConfigType.Anonymous } + +export type RejectReasonObj = + | { reason: RejectReason.Error, error: ValidateError } + | { reason: Exclude } + +export enum RejectReason { + /** + * The message failed the configured validation during decoding. + * SelfOrigin is considered a ValidationError + */ + Error = 'error', + /** + * Custom validator fn reported status IGNORE. + */ + Ignore = 'ignore', + /** + * Custom validator fn reported status REJECT. + */ + Reject = 'reject', + /** + * The peer that sent the message OR the source from field is blacklisted. + * Causes messages to be ignored, not penalized, neither do score record creation. + */ + Blacklisted = 'blacklisted' +} + +export enum ValidateError { + /// The message has an invalid signature, + InvalidSignature = 'invalid_signature', + /// The sequence number was the incorrect size + InvalidSeqno = 'invalid_seqno', + /// The PeerId was invalid + InvalidPeerId = 'invalid_peerid', + /// Signature existed when validation has been sent to + /// [`crate::behaviour::MessageAuthenticity::Anonymous`]. + SignaturePresent = 'signature_present', + /// Sequence number existed when validation has been sent to + /// [`crate::behaviour::MessageAuthenticity::Anonymous`]. + SeqnoPresent = 'seqno_present', + /// Message source existed when validation has been sent to + /// [`crate::behaviour::MessageAuthenticity::Anonymous`]. 
+ FromPresent = 'from_present', + /// The data transformation failed. + TransformFailed = 'transform_failed' +} + +export enum MessageStatus { + duplicate = 'duplicate', + invalid = 'invalid', + valid = 'valid' +} + +/** + * Store both Uint8Array and string message id so that we don't have to convert data between the two. + * See https://github.com/ChainSafe/js-libp2p-gossipsub/pull/274 + */ +export interface MessageId { + msgId: Uint8Array + msgIdStr: MsgIdStr +} + +/** + * Typesafe conversion of MessageAcceptance -> RejectReason. TS ensures all values covered + */ +export function rejectReasonFromAcceptance ( + acceptance: Exclude +): RejectReason.Ignore | RejectReason.Reject { + switch (acceptance) { + case TopicValidatorResult.Ignore: + return RejectReason.Ignore + case TopicValidatorResult.Reject: + return RejectReason.Reject + default: + throw new Error('Unreachable') + } +} diff --git a/packages/gossipsub/src/utils/buildRawMessage.ts b/packages/gossipsub/src/utils/buildRawMessage.ts new file mode 100644 index 0000000000..51e97d009e --- /dev/null +++ b/packages/gossipsub/src/utils/buildRawMessage.ts @@ -0,0 +1,174 @@ +import { randomBytes } from '@libp2p/crypto' +import { publicKeyFromProtobuf } from '@libp2p/crypto/keys' +import { peerIdFromMultihash } from '@libp2p/peer-id' +import * as Digest from 'multiformats/hashes/digest' +import { concat as uint8ArrayConcat } from 'uint8arrays/concat' +import { fromString as uint8ArrayFromString } from 'uint8arrays/from-string' +import { toString as uint8ArrayToString } from 'uint8arrays/to-string' +import { StrictSign, StrictNoSign } from '../index.ts' +import { RPC } from '../message/rpc.js' +import { PublishConfigType, ValidateError } from '../types.js' +import type { Message } from '../index.ts' +import type { PublishConfig, TopicStr } from '../types.js' +import type { PublicKey, PeerId } from '@libp2p/interface' + +export const SignPrefix = uint8ArrayFromString('libp2p-pubsub:') + +export interface 
RawMessageAndMessage { + raw: RPC.Message + msg: Message +} + +export async function buildRawMessage ( + publishConfig: PublishConfig, + topic: TopicStr, + originalData: Uint8Array, + transformedData: Uint8Array +): Promise { + switch (publishConfig.type) { + case PublishConfigType.Signing: { + const rpcMsg: RPC.Message = { + from: publishConfig.author.toMultihash().bytes, + data: transformedData, + seqno: randomBytes(8), + topic, + signature: undefined, // Exclude signature field for signing + key: undefined // Exclude key field for signing + } + + // Get the message in bytes, and prepend with the pubsub prefix + // the signature is over the bytes "libp2p-pubsub:" + const bytes = uint8ArrayConcat([SignPrefix, RPC.Message.encode(rpcMsg)]) + + rpcMsg.signature = await publishConfig.privateKey.sign(bytes) + rpcMsg.key = publishConfig.key + + const msg: Message = { + type: 'signed', + from: publishConfig.author, + data: originalData, + sequenceNumber: BigInt(`0x${uint8ArrayToString(rpcMsg.seqno ?? 
new Uint8Array(0), 'base16')}`), + topic, + signature: rpcMsg.signature, + key: publicKeyFromProtobuf(rpcMsg.key) + } + return { + raw: rpcMsg, + msg + } + } + + case PublishConfigType.Anonymous: { + return { + raw: { + from: undefined, + data: transformedData, + seqno: undefined, + topic, + signature: undefined, + key: undefined + }, + msg: { + type: 'unsigned', + data: originalData, + topic + } + } + } + + default: + throw new Error('Unreachable') + } +} + +export type ValidationResult = { valid: true, message: Message } | { valid: false, error: ValidateError } + +export async function validateToRawMessage ( + signaturePolicy: typeof StrictNoSign | typeof StrictSign, + msg: RPC.Message +): Promise { + // If strict-sign, verify all + // If anonymous (no-sign), ensure no preven + + switch (signaturePolicy) { + case StrictNoSign: + if (msg.signature != null) { return { valid: false, error: ValidateError.SignaturePresent } } + if (msg.seqno != null) { return { valid: false, error: ValidateError.SeqnoPresent } } + if (msg.key != null) { return { valid: false, error: ValidateError.FromPresent } } + + return { valid: true, message: { type: 'unsigned', topic: msg.topic, data: msg.data ?? 
new Uint8Array(0) } } + + case StrictSign: { + // Verify seqno + if (msg.seqno == null) { return { valid: false, error: ValidateError.InvalidSeqno } } + if (msg.seqno.length !== 8) { + return { valid: false, error: ValidateError.InvalidSeqno } + } + + if (msg.signature == null) { return { valid: false, error: ValidateError.InvalidSignature } } + if (msg.from == null) { return { valid: false, error: ValidateError.InvalidPeerId } } + + let fromPeerId: PeerId + try { + // TODO: Fix PeerId types + fromPeerId = peerIdFromMultihash(Digest.decode(msg.from)) + } catch (e) { + return { valid: false, error: ValidateError.InvalidPeerId } + } + + // - check from defined + // - transform source to PeerId + // - parse signature + // - get .key, else from source + // - check key == source if present + // - verify sig + + let publicKey: PublicKey + if (msg.key != null) { + publicKey = publicKeyFromProtobuf(msg.key) + // TODO: Should `fromPeerId.pubKey` be optional? + if (fromPeerId.publicKey !== undefined && !publicKey.equals(fromPeerId.publicKey)) { + return { valid: false, error: ValidateError.InvalidPeerId } + } + } else { + if (fromPeerId.publicKey == null) { + return { valid: false, error: ValidateError.InvalidPeerId } + } + publicKey = fromPeerId.publicKey + } + + const rpcMsgPreSign: RPC.Message = { + from: msg.from, + data: msg.data, + seqno: msg.seqno, + topic: msg.topic, + signature: undefined, // Exclude signature field for signing + key: undefined // Exclude key field for signing + } + + // Get the message in bytes, and prepend with the pubsub prefix + // the signature is over the bytes "libp2p-pubsub:" + const bytes = uint8ArrayConcat([SignPrefix, RPC.Message.encode(rpcMsgPreSign)]) + + if (!(await publicKey.verify(bytes, msg.signature))) { + return { valid: false, error: ValidateError.InvalidSignature } + } + + return { + valid: true, + message: { + type: 'signed', + from: fromPeerId, + data: msg.data ?? 
new Uint8Array(0), + sequenceNumber: BigInt(`0x${uint8ArrayToString(msg.seqno, 'base16')}`), + topic: msg.topic, + signature: msg.signature, + key: msg.key != null ? publicKeyFromProtobuf(msg.key) : publicKey + } + } + } + + default: + throw new Error('Unreachable') + } +} diff --git a/packages/gossipsub/src/utils/create-gossip-rpc.ts b/packages/gossipsub/src/utils/create-gossip-rpc.ts new file mode 100644 index 0000000000..efd59fcb1c --- /dev/null +++ b/packages/gossipsub/src/utils/create-gossip-rpc.ts @@ -0,0 +1,34 @@ +import type { RPC } from '../message/rpc.js' + +/** + * Create a gossipsub RPC object + */ +export function createGossipRpc (messages: RPC.Message[] = [], control?: Partial): RPC { + return { + subscriptions: [], + messages, + control: control !== undefined + ? { + graft: control.graft ?? [], + prune: control.prune ?? [], + ihave: control.ihave ?? [], + iwant: control.iwant ?? [], + idontwant: control.idontwant ?? [] + } + : undefined + } +} + +export function ensureControl (rpc: RPC): Required { + if (rpc.control === undefined) { + rpc.control = { + graft: [], + prune: [], + ihave: [], + iwant: [], + idontwant: [] + } + } + + return rpc as Required +} diff --git a/packages/gossipsub/src/utils/index.ts b/packages/gossipsub/src/utils/index.ts new file mode 100644 index 0000000000..40c1ad035b --- /dev/null +++ b/packages/gossipsub/src/utils/index.ts @@ -0,0 +1,3 @@ +export * from './shuffle.js' +export * from './messageIdToString.js' +export { getPublishConfigFromPeerId } from './publishConfig.js' diff --git a/packages/gossipsub/src/utils/messageIdToString.ts b/packages/gossipsub/src/utils/messageIdToString.ts new file mode 100644 index 0000000000..0fd6b9c4f7 --- /dev/null +++ b/packages/gossipsub/src/utils/messageIdToString.ts @@ -0,0 +1,8 @@ +import { toString } from 'uint8arrays/to-string' + +/** + * Browser friendly function to convert Uint8Array message id to base64 string. 
+ */ +export function messageIdToString (msgId: Uint8Array): string { + return toString(msgId, 'base64') +} diff --git a/packages/gossipsub/src/utils/msgIdFn.ts b/packages/gossipsub/src/utils/msgIdFn.ts new file mode 100644 index 0000000000..59b00e749a --- /dev/null +++ b/packages/gossipsub/src/utils/msgIdFn.ts @@ -0,0 +1,24 @@ +import { msgId } from '@libp2p/pubsub/utils' +import { sha256 } from 'multiformats/hashes/sha2' +import type { Message } from '../index.js' + +/** + * Generate a message id, based on the `key` and `seqno` + */ +export function msgIdFnStrictSign (msg: Message): Uint8Array { + if (msg.type !== 'signed') { + throw new Error('expected signed message type') + } + // Should never happen + if (msg.sequenceNumber == null) { throw Error('missing seqno field') } + + // TODO: Should use .from here or key? + return msgId(msg.from.publicKey ?? msg.key, msg.sequenceNumber) +} + +/** + * Generate a message id, based on message `data` + */ +export async function msgIdFnStrictNoSign (msg: Message): Promise { + return sha256.encode(msg.data) +} diff --git a/packages/gossipsub/src/utils/multiaddr.ts b/packages/gossipsub/src/utils/multiaddr.ts new file mode 100644 index 0000000000..ffd5b29f1d --- /dev/null +++ b/packages/gossipsub/src/utils/multiaddr.ts @@ -0,0 +1,19 @@ +import { getNetConfig, isNetworkAddress } from '@libp2p/utils' +import type { Multiaddr } from '@multiformats/multiaddr' + +export function multiaddrToIPStr (multiaddr: Multiaddr): string | null { + if (isNetworkAddress(multiaddr)) { + const config = getNetConfig(multiaddr) + + switch (config.type) { + case 'ip4': + case 'ip6': + + return config.host + default: + break + } + } + + return null +} diff --git a/packages/gossipsub/src/utils/publishConfig.ts b/packages/gossipsub/src/utils/publishConfig.ts new file mode 100644 index 0000000000..4516b7026e --- /dev/null +++ b/packages/gossipsub/src/utils/publishConfig.ts @@ -0,0 +1,33 @@ +import { publicKeyToProtobuf } from '@libp2p/crypto/keys' 
+import { StrictSign, StrictNoSign } from '../index.ts' +import { PublishConfigType } from '../types.js' +import type { PublishConfig } from '../types.js' +import type { PeerId, PrivateKey } from '@libp2p/interface' + +/** + * Prepare a PublishConfig object from a PeerId. + */ +export function getPublishConfigFromPeerId ( + signaturePolicy: typeof StrictSign | typeof StrictNoSign, + peerId: PeerId, + privateKey: PrivateKey +): PublishConfig { + switch (signaturePolicy) { + case StrictSign: { + return { + type: PublishConfigType.Signing, + author: peerId, + key: publicKeyToProtobuf(privateKey.publicKey), + privateKey + } + } + + case StrictNoSign: + return { + type: PublishConfigType.Anonymous + } + + default: + throw new Error(`Unknown signature policy "${signaturePolicy}"`) + } +} diff --git a/packages/gossipsub/src/utils/set.ts b/packages/gossipsub/src/utils/set.ts new file mode 100644 index 0000000000..c5c2187845 --- /dev/null +++ b/packages/gossipsub/src/utils/set.ts @@ -0,0 +1,43 @@ +/** + * Exclude up to `ineed` items from a set if item meets condition `cond` + */ +export function removeItemsFromSet ( + superSet: Set, + ineed: number, + cond: (peer: T) => boolean = () => true +): Set { + const subset = new Set() + if (ineed <= 0) { return subset } + + for (const id of superSet) { + if (subset.size >= ineed) { break } + if (cond(id)) { + subset.add(id) + superSet.delete(id) + } + } + + return subset +} + +/** + * Exclude up to `ineed` items from a set + */ +export function removeFirstNItemsFromSet (superSet: Set, ineed: number): Set { + return removeItemsFromSet(superSet, ineed, () => true) +} + +export class MapDef extends Map { + constructor (private readonly getDefault: () => V) { + super() + } + + getOrDefault (key: K): V { + let value = super.get(key) + if (value === undefined) { + value = this.getDefault() + this.set(key, value) + } + return value + } +} diff --git a/packages/gossipsub/src/utils/shuffle.ts b/packages/gossipsub/src/utils/shuffle.ts new 
file mode 100644 index 0000000000..824a8b5dae --- /dev/null +++ b/packages/gossipsub/src/utils/shuffle.ts @@ -0,0 +1,21 @@ +/** + * Pseudo-randomly shuffles an array + * + * Mutates the input array + */ +export function shuffle (arr: T[]): T[] { + if (arr.length <= 1) { + return arr + } + const randInt = (): number => { + return Math.floor(Math.random() * Math.floor(arr.length)) + } + + for (let i = 0; i < arr.length; i++) { + const j = randInt() + const tmp = arr[i] + arr[i] = arr[j] + arr[j] = tmp + } + return arr +} diff --git a/packages/gossipsub/src/utils/time-cache.ts b/packages/gossipsub/src/utils/time-cache.ts new file mode 100644 index 0000000000..888588550b --- /dev/null +++ b/packages/gossipsub/src/utils/time-cache.ts @@ -0,0 +1,71 @@ +interface SimpleTimeCacheOpts { + validityMs: number +} + +interface CacheValue { + value: T + validUntilMs: number +} + +/** + * This is similar to https://github.com/daviddias/time-cache/blob/master/src/index.js + * for our own need, we don't use lodash throttle to improve performance. + * This gives 4x - 5x performance gain compared to npm TimeCache + */ +export class SimpleTimeCache { + private readonly entries = new Map>() + private readonly validityMs: number + + constructor (opts: SimpleTimeCacheOpts) { + this.validityMs = opts.validityMs + + // allow negative validityMs so that this does not cache anything, spec test compliance.spec.js + // sends duplicate messages and expect peer to receive all. Application likely uses positive validityMs + } + + get size (): number { + return this.entries.size + } + + /** Returns true if there was a key collision and the entry is dropped */ + put (key: string | number, value: T): boolean { + if (this.entries.has(key)) { + // Key collisions break insertion order in the entries cache, which break prune logic. + // prune relies on each iterated entry to have strictly ascending validUntilMs, else it + // won't prune expired entries and SimpleTimeCache will grow unexpectedly. 
+ // As of Oct 2022 NodeJS v16, inserting the same key twice with different value does not + // change the key position in the iterator stream. A unit test asserts this behaviour. + return true + } + + this.entries.set(key, { value, validUntilMs: Date.now() + this.validityMs }) + return false + } + + prune (): void { + const now = Date.now() + + for (const [k, v] of this.entries.entries()) { + if (v.validUntilMs < now) { + this.entries.delete(k) + } else { + // Entries are inserted with strictly ascending validUntilMs. + // Stop early to save iterations + break + } + } + } + + has (key: string): boolean { + return this.entries.has(key) + } + + get (key: string | number): T | undefined { + const value = this.entries.get(key) + return (value != null) && value.validUntilMs >= Date.now() ? value.value : undefined + } + + clear (): void { + this.entries.clear() + } +} diff --git a/packages/gossipsub/test/2-nodes.spec.ts b/packages/gossipsub/test/2-nodes.spec.ts new file mode 100644 index 0000000000..87106f35bd --- /dev/null +++ b/packages/gossipsub/test/2-nodes.spec.ts @@ -0,0 +1,376 @@ +import { start, stop } from '@libp2p/interface' +import { expect } from 'aegir/chai' +import defer from 'p-defer' +import { pEvent } from 'p-event' +import pWaitFor from 'p-wait-for' +import { fromString as uint8ArrayFromString } from 'uint8arrays/from-string' +import { toString as uint8ArrayToString } from 'uint8arrays/to-string' +import { + connectAllPubSubNodes, + connectPubsubNodes, + createComponentsArray + +} from './utils/create-pubsub.js' +import type { GossipSubAndComponents } from './utils/create-pubsub.js' +import type { Message, SubscriptionChangeData } from '../src/index.js' + +const shouldNotHappen = (): never => expect.fail() + +async function nodesArePubSubPeers (node0: GossipSubAndComponents, node1: GossipSubAndComponents, timeout = 60000): Promise { + await pWaitFor( + () => { + const node0SeesNode1 = node0.pubsub + .getPeers() + .map((p) => p.toString()) + 
.includes(node1.components.peerId.toString()) + const node1SeesNode0 = node1.pubsub + .getPeers() + .map((p) => p.toString()) + .includes(node0.components.peerId.toString()) + return node0SeesNode1 && node1SeesNode0 + }, + { + timeout + } + ) +} + +describe('2 nodes', () => { + describe('Pubsub dial', () => { + let nodes: GossipSubAndComponents[] + + // Create pubsub nodes + beforeEach(async () => { + nodes = await createComponentsArray({ number: 2 }) + + await start(...nodes.map(n => n.pubsub)) + }) + + afterEach(async () => { + await stop(...nodes.reduce((acc, curr) => acc.concat(curr.pubsub, ...Object.entries(curr.components)), [])) + }) + + it('Dial from nodeA to nodeB happened with FloodsubID', async () => { + await connectPubsubNodes(nodes[0], nodes[1]) + await nodesArePubSubPeers(nodes[0], nodes[1]) + }) + }) + + describe('basics', () => { + let nodes: GossipSubAndComponents[] + + // Create pubsub nodes + beforeEach(async () => { + nodes = await createComponentsArray({ number: 2 }) + }) + + afterEach(async () => { + await stop(...nodes.reduce((acc, curr) => acc.concat(curr.pubsub, ...Object.entries(curr.components)), [])) + }) + + it('Dial from nodeA to nodeB happened with GossipsubIDv11', async () => { + await connectPubsubNodes(nodes[0], nodes[1]) + await nodesArePubSubPeers(nodes[0], nodes[1]) + }) + }) + + describe('subscription functionality', () => { + let nodes: GossipSubAndComponents[] + + // Create pubsub nodes + beforeEach(async () => { + nodes = await createComponentsArray({ + number: 2, + connected: true + }) + await nodesArePubSubPeers(nodes[0], nodes[1]) + }) + + afterEach(async () => { + await stop(...nodes.reduce((acc, curr) => acc.concat(curr.pubsub, ...Object.entries(curr.components)), [])) + }) + + it('Subscribe to a topic', async () => { + const topic = 'test_topic' + + nodes[0].pubsub.subscribe(topic) + nodes[1].pubsub.subscribe(topic) + + // await subscription change + const [evt0] = await Promise.all([ + pEvent<'subscription-change', 
CustomEvent>(nodes[0].pubsub, 'subscription-change'), + pEvent<'subscription-change', CustomEvent>(nodes[1].pubsub, 'subscription-change') + ]) + + const { peerId: changedPeerId, subscriptions: changedSubs } = evt0.detail + + expect(nodes[0].pubsub.getTopics()).to.include(topic) + expect(nodes[1].pubsub.getTopics()).to.include(topic) + expect(nodes[0].pubsub.getSubscribers(topic).map((p) => p.toString())).to.include( + nodes[1].components.peerId.toString() + ) + expect(nodes[1].pubsub.getSubscribers(topic).map((p) => p.toString())).to.include( + nodes[0].components.peerId.toString() + ) + + expect(changedPeerId.toString()).to.equal(nodes[1].components.peerId.toString()) + expect(changedSubs).to.have.lengthOf(1) + expect(changedSubs[0].topic).to.equal(topic) + expect(changedSubs[0].subscribe).to.equal(true) + + // await heartbeats + await Promise.all([ + pEvent(nodes[0].pubsub, 'gossipsub:heartbeat'), + pEvent(nodes[1].pubsub, 'gossipsub:heartbeat') + ]) + + expect((nodes[0].pubsub).mesh.get(topic)?.has(nodes[1].components.peerId.toString())).to.be.true() + expect((nodes[1].pubsub).mesh.get(topic)?.has(nodes[0].components.peerId.toString())).to.be.true() + }) + }) + + describe('publish functionality', () => { + const topic = 'Z' + let nodes: GossipSubAndComponents[] + + // Create pubsub nodes + beforeEach(async () => { + nodes = await createComponentsArray({ + number: 2, + connected: true + }) + + // Create subscriptions + nodes[0].pubsub.subscribe(topic) + nodes[1].pubsub.subscribe(topic) + + // await subscription change and heartbeat + await Promise.all([ + pEvent(nodes[0].pubsub, 'subscription-change'), + pEvent(nodes[1].pubsub, 'subscription-change'), + pEvent(nodes[0].pubsub, 'gossipsub:heartbeat'), + pEvent(nodes[1].pubsub, 'gossipsub:heartbeat') + ]) + }) + + afterEach(async () => { + await stop(...nodes.reduce((acc, curr) => acc.concat(curr.pubsub, ...Object.entries(curr.components)), [])) + }) + + it('Publish to a topic - nodeA', async () => { + const 
promise = pEvent<'message', CustomEvent>(nodes[1].pubsub, 'message') + nodes[0].pubsub.addEventListener('message', shouldNotHappen) + const data = uint8ArrayFromString('hey') + + await nodes[0].pubsub.publish(topic, data) + + const evt = await promise + + if (evt.detail.type !== 'signed') { + throw new Error('unexpected msg type') + } + expect(evt.detail.data).to.equalBytes(data) + expect(evt.detail.from.toString()).to.equal(nodes[0].components.peerId.toString()) + + nodes[0].pubsub.removeEventListener('message', shouldNotHappen) + }) + + it('Publish to a topic - nodeB', async () => { + const promise = pEvent<'message', CustomEvent>(nodes[0].pubsub, 'message') + nodes[1].pubsub.addEventListener('message', shouldNotHappen) + const data = uint8ArrayFromString('banana') + + await nodes[1].pubsub.publish(topic, data) + + const evt = await promise + + if (evt.detail.type !== 'signed') { + throw new Error('unexpected msg type') + } + expect(evt.detail.data).to.equalBytes(data) + expect(evt.detail.from.toString()).to.equal(nodes[1].components.peerId.toString()) + + nodes[1].pubsub.removeEventListener('message', shouldNotHappen) + }) + + it('Publish 10 msg to a topic', async () => { + let counter = 0 + + nodes[1].pubsub.addEventListener('message', shouldNotHappen) + nodes[0].pubsub.addEventListener('message', receivedMsg) + + const done = defer() + + function receivedMsg (evt: CustomEvent): void { + const msg = evt.detail + + expect(uint8ArrayToString(msg.data)).to.startWith('banana') + + if (msg.type !== 'signed') { + throw new Error('unexpected msg type') + } + expect(msg.from.toString()).to.equal(nodes[1].components.peerId.toString()) + expect(msg.sequenceNumber).to.be.a('BigInt') + expect(msg.topic).to.equal(topic) + + if (++counter === 10) { + nodes[0].pubsub.removeEventListener('message', receivedMsg) + nodes[1].pubsub.removeEventListener('message', shouldNotHappen) + done.resolve() + } + } + + await Promise.all( + Array.from({ length: 10 }).map(async (_, i) => { + 
await nodes[1].pubsub.publish(topic, uint8ArrayFromString(`banana${i}`)) + }) + ) + + await done.promise + }) + }) + + describe('publish after unsubscribe', () => { + const topic = 'Z' + let nodes: GossipSubAndComponents[] + + // Create pubsub nodes + beforeEach(async () => { + nodes = await createComponentsArray({ number: 2, init: { allowPublishToZeroTopicPeers: true } }) + await connectAllPubSubNodes(nodes) + + // Create subscriptions + nodes[0].pubsub.subscribe(topic) + nodes[1].pubsub.subscribe(topic) + + // await subscription change and heartbeat + await Promise.all([ + pEvent(nodes[0].pubsub, 'subscription-change'), + pEvent(nodes[1].pubsub, 'subscription-change') + ]) + await Promise.all([ + pEvent(nodes[0].pubsub, 'gossipsub:heartbeat'), + pEvent(nodes[1].pubsub, 'gossipsub:heartbeat') + ]) + }) + + afterEach(async () => { + await stop(...nodes.reduce((acc, curr) => acc.concat(curr.pubsub, ...Object.entries(curr.components)), [])) + }) + + it('Unsubscribe from a topic', async () => { + nodes[0].pubsub.unsubscribe(topic) + expect(nodes[0].pubsub.getTopics()).to.be.empty() + + const evt = await pEvent<'subscription-change', CustomEvent>( + nodes[1].pubsub, + 'subscription-change' + ) + const { peerId: changedPeerId, subscriptions: changedSubs } = evt.detail + + await pEvent(nodes[1].pubsub, 'gossipsub:heartbeat') + + expect(nodes[1].pubsub.getPeers()).to.have.lengthOf(1) + expect(nodes[1].pubsub.getSubscribers(topic)).to.be.empty() + + expect(changedPeerId.toString()).to.equal(nodes[0].components.peerId.toString()) + expect(changedSubs).to.have.lengthOf(1) + expect(changedSubs[0].topic).to.equal(topic) + expect(changedSubs[0].subscribe).to.equal(false) + }) + + it('Publish to a topic after unsubscribe', async () => { + const promises = [pEvent(nodes[1].pubsub, 'subscription-change'), pEvent(nodes[1].pubsub, 'gossipsub:heartbeat')] + + nodes[0].pubsub.unsubscribe(topic) + + await Promise.all(promises) + + const promise = new Promise((resolve, reject) => { + 
nodes[0].pubsub.addEventListener('message', reject) + + setTimeout(() => { + nodes[0].pubsub.removeEventListener('message', reject) + resolve() + }, 100) + }) + + await nodes[1].pubsub.publish('Z', uint8ArrayFromString('banana')) + await nodes[0].pubsub.publish('Z', uint8ArrayFromString('banana')) + + try { + await promise + } catch (e) { + expect.fail('message should not be received') + } + }) + }) + + describe('nodes send state on connection', () => { + let nodes: GossipSubAndComponents[] + + // Create pubsub nodes + beforeEach(async () => { + nodes = await createComponentsArray({ + number: 2 + }) + + // Make subscriptions prior to new nodes + nodes[0].pubsub.subscribe('Za') + nodes[1].pubsub.subscribe('Zb') + + expect(nodes[0].pubsub.getPeers()).to.be.empty() + expect(nodes[0].pubsub.getTopics()).to.include('Za') + expect(nodes[1].pubsub.getPeers()).to.be.empty() + expect(nodes[1].pubsub.getTopics()).to.include('Zb') + }) + + afterEach(async () => { + await stop(...nodes.reduce((acc, curr) => acc.concat(curr.pubsub, ...Object.entries(curr.components)), [])) + }) + + it('existing subscriptions are sent upon peer connection', async function () { + this.timeout(5000) + + await Promise.all([ + connectPubsubNodes(nodes[0], nodes[1]), + pEvent(nodes[0].pubsub, 'subscription-change'), + pEvent(nodes[1].pubsub, 'subscription-change') + ]) + + expect(nodes[0].pubsub.getTopics()).to.include('Za') + expect(nodes[1].pubsub.getPeers()).to.have.lengthOf(1) + expect(nodes[1].pubsub.getSubscribers('Za').map((p) => p.toString())).to.include( + nodes[0].components.peerId.toString() + ) + + expect(nodes[1].pubsub.getTopics()).to.include('Zb') + expect(nodes[0].pubsub.getPeers()).to.have.lengthOf(1) + expect(nodes[0].pubsub.getSubscribers('Zb').map((p) => p.toString())).to.include( + nodes[1].components.peerId.toString() + ) + }) + }) + + describe('nodes handle stopping', () => { + let nodes: GossipSubAndComponents[] + + // Create pubsub nodes + beforeEach(async () => { + nodes = 
await createComponentsArray({ + number: 2, + connected: true + }) + }) + + afterEach(async () => { + await stop(...nodes.reduce((acc, curr) => acc.concat(curr.pubsub, ...Object.entries(curr.components)), [])) + }) + + it("nodes don't have peers after stopped", async () => { + await stop(...nodes.reduce((acc, curr) => acc.concat(curr.pubsub, ...Object.entries(curr.components)), [])) + expect(nodes[0].pubsub.getPeers()).to.be.empty() + expect(nodes[1].pubsub.getPeers()).to.be.empty() + }) + }) +}) diff --git a/packages/gossipsub/test/accept-from.spec.ts b/packages/gossipsub/test/accept-from.spec.ts new file mode 100644 index 0000000000..7326ef2e02 --- /dev/null +++ b/packages/gossipsub/test/accept-from.spec.ts @@ -0,0 +1,109 @@ +import { generateKeyPair } from '@libp2p/crypto/keys' +import { defaultLogger } from '@libp2p/logger' +import { peerIdFromPrivateKey } from '@libp2p/peer-id' +import { expect } from 'aegir/chai' +import sinon from 'sinon' +import { stubInterface } from 'sinon-ts' +import { GossipSub as GossipSubClass } from '../src/gossipsub.js' +import { fastMsgIdFn } from './utils/msgId.js' +import type { PeerStore } from '@libp2p/interface' +import type { ConnectionManager, Registrar } from '@libp2p/interface-internal' + +const peerA = '16Uiu2HAmMkH6ZLen2tbhiuNCTZLLvrZaDgufNdT5MPjtC9Hr9YNA' + +describe('Gossipsub acceptFrom', () => { + let gossipsub: GossipSubClass + let sandbox: sinon.SinonSandbox + let scoreSpy: sinon.SinonSpy<[id: string], number> + + beforeEach(async () => { + sandbox = sinon.createSandbox() + // not able to use fake timers or tests in browser are suspended + // sandbox.useFakeTimers(Date.now()) + + const privateKey = await generateKeyPair('Ed25519') + const peerId = peerIdFromPrivateKey(privateKey) + gossipsub = new GossipSubClass( + { + privateKey, + peerId, + registrar: stubInterface(), + peerStore: stubInterface(), + connectionManager: stubInterface(), + logger: defaultLogger() + }, + { emitSelf: false, fastMsgIdFn } + ) + + // 
stubbing PeerScore causes some pending issue in firefox browser environment + // we can only spy it + // using scoreSpy.withArgs("peerA").calledOnce causes the pending issue in firefox + // while spy.getCall() is fine + scoreSpy = sandbox.spy(gossipsub.score, 'score') + }) + + afterEach(() => { + sandbox.restore() + }) + + it('should only white list peer with positive score', () => { + // by default the score is 0 + gossipsub.acceptFrom(peerA) + // 1st time, we have to compute score + expect(scoreSpy.getCall(0).args[0]).to.be.equal(peerA) + expect(scoreSpy.getCall(0).returnValue).to.be.equal(0) + expect(scoreSpy.getCall(1)).to.not.be.ok() + // 2nd time, use a cached score since it's white listed + gossipsub.acceptFrom(peerA) + expect(scoreSpy.getCall(1)).to.not.be.ok() + }) + + it('should recompute score after 1s', async () => { + // by default the score is 0 + gossipsub.acceptFrom(peerA) + // 1st time, we have to compute score + expect(scoreSpy.getCall(0).args[0]).to.be.equal(peerA) + expect(scoreSpy.getCall(1)).to.not.be.ok() + gossipsub.acceptFrom(peerA) + // score is cached + expect(scoreSpy.getCall(1)).to.not.be.ok() + + // after 1s + await new Promise((resolve) => setTimeout(resolve, 1001)) + + gossipsub.acceptFrom(peerA) + expect(scoreSpy.getCall(1).args[0]).to.be.equal(peerA) + expect(scoreSpy.getCall(2)).to.not.be.ok() + }) + + it('should recompute score after max messages accepted', () => { + // by default the score is 0 + gossipsub.acceptFrom(peerA) + // 1st time, we have to compute score + expect(scoreSpy.getCall(0).args[0]).to.be.equal(peerA) + expect(scoreSpy.getCall(1)).to.not.be.ok() + + for (let i = 0; i < 128; i++) { + gossipsub.acceptFrom(peerA) + } + expect(scoreSpy.getCall(1)).to.not.be.ok() + + // max messages reached + gossipsub.acceptFrom(peerA) + expect(scoreSpy.getCall(1).args[0]).to.be.equal(peerA) + expect(scoreSpy.getCall(2)).to.not.be.ok() + }) + + // TODO: run this in a unit test setup + // this causes the test to not finish in 
firefox environment + // it.skip('should NOT white list peer with negative score', () => { + // // peerB is not white listed since score is negative + // scoreStub.score.withArgs('peerB').returns(-1) + // gossipsub["acceptFrom"]('peerB') + // // 1st time, we have to compute score + // expect(scoreStub.score.withArgs('peerB').calledOnce).to.be.true() + // // 2nd time, still have to compute score since it's NOT white listed + // gossipsub["acceptFrom"]('peerB') + // expect(scoreStub.score.withArgs('peerB').calledTwice).to.be.true() + // }) +}) diff --git a/packages/gossipsub/test/allowedTopics.spec.ts b/packages/gossipsub/test/allowedTopics.spec.ts new file mode 100644 index 0000000000..2c45d94777 --- /dev/null +++ b/packages/gossipsub/test/allowedTopics.spec.ts @@ -0,0 +1,52 @@ +import { stop } from '@libp2p/interface' +import { expect } from 'aegir/chai' +import { pEvent } from 'p-event' +import { connectAllPubSubNodes, createComponentsArray } from './utils/create-pubsub.js' +import type { GossipSubAndComponents } from './utils/create-pubsub.js' + +describe('gossip / allowedTopics', () => { + let nodes: GossipSubAndComponents[] + + const allowedTopic = 'topic_allowed' + const notAllowedTopic = 'topic_not_allowed' + const allowedTopics = [allowedTopic] + const allTopics = [allowedTopic, notAllowedTopic] + + // Create pubsub nodes + beforeEach(async () => { + nodes = await createComponentsArray({ + number: 2, + connected: false, + init: { + allowedTopics + } + }) + }) + + afterEach(async () => { + await stop(...nodes.reduce((acc, curr) => acc.concat(curr.pubsub, ...Object.entries(curr.components)), [])) + }) + + it('should send gossip to non-mesh peers in topic', async function () { + this.timeout(10 * 1000) + const [nodeA, nodeB] = nodes + + // add subscriptions to each node + for (const topic of allTopics) { + nodeA.pubsub.subscribe(topic) + } + + // every node connected to every other + await Promise.all([ + connectAllPubSubNodes(nodes), + // nodeA should send 
nodeB all its subscriptions on connection + pEvent(nodeB.pubsub, 'subscription-change') + ]) + + const nodeASubscriptions = Array.from((nodeA.pubsub)['subscriptions'].keys()) + expect(nodeASubscriptions).deep.equals(allTopics, 'nodeA.subscriptions should be subcribed to all') + + const nodeBTopics = Array.from((nodeB.pubsub)['topics'].keys()) + expect(nodeBTopics).deep.equals(allowedTopics, 'nodeB.topics should only contain allowedTopics') + }) +}) diff --git a/packages/gossipsub/test/benchmark/asyncIterable.test.ts b/packages/gossipsub/test/benchmark/asyncIterable.test.ts new file mode 100644 index 0000000000..724c202559 --- /dev/null +++ b/packages/gossipsub/test/benchmark/asyncIterable.test.ts @@ -0,0 +1,111 @@ +import { itBench } from '@dapplion/benchmark' +import { abortableSource } from 'abortable-iterator' +import all from 'it-all' +import { pipe } from 'it-pipe' + +describe('abortableSource cost', function () { + const n = 10000 + const bytes = new Uint8Array(200) + const controller = new AbortController() + + async function * bytesSource (): AsyncGenerator { + let i = 0 + while (i++ < n) { + yield bytes + } + } + + for (let k = 0; k < 5; k++) { + itBench({ + id: `async iterate abortable x${k} bytesSource ${n}`, + beforeEach: () => { + let source = bytesSource() + for (let i = 0; i < k; i++) { + source = abortableSource(source, controller.signal) + } + return source + }, + fn: async (source) => { + for await (const chunk of source) { + // eslint-disable-next-line @typescript-eslint/no-unused-expressions + chunk + } + } + }) + } +}) + +describe('pipe extra iterables cost', function () { + const n = 10000 + + async function * numberSource (): AsyncGenerator { + let i = 0 + while (i < n) { + yield i++ + } + } + + async function * numberTransform (source: AsyncIterable): AsyncIterable { + for await (const num of source) { + yield num + 1 + } + } + + itBench({ + id: `async iterate pipe x0 transforms ${n}`, + fn: async () => { + await pipe(numberSource, all) + } 
+ }) + + itBench({ + id: `async iterate pipe x1 transforms ${n}`, + fn: async () => { + await pipe(numberSource, numberTransform, all) + } + }) + + itBench({ + id: `async iterate pipe x2 transforms ${n}`, + fn: async () => { + await pipe( + numberSource, + numberTransform, + numberTransform, + all + ) + } + }) + + itBench({ + id: `async iterate pipe x4 transforms ${n}`, + fn: async () => { + await pipe( + numberSource, + numberTransform, + numberTransform, + numberTransform, + numberTransform, + all + ) + } + }) + + itBench({ + id: `async iterate pipe x8 transforms ${n}`, + fn: async () => { + await pipe( + numberSource, + numberTransform, + numberTransform, + numberTransform, + numberTransform, + numberTransform, + numberTransform, + numberTransform, + numberTransform, + all + ) + } + }) +}) diff --git a/packages/gossipsub/test/benchmark/index.test.ts b/packages/gossipsub/test/benchmark/index.test.ts new file mode 100644 index 0000000000..e2a1daea9a --- /dev/null +++ b/packages/gossipsub/test/benchmark/index.test.ts @@ -0,0 +1,141 @@ +import { itBench } from '@dapplion/benchmark' +import { expect } from 'aegir/chai' +import { fromString as uint8ArrayFromString } from 'uint8arrays/from-string' +import { + connectPubsubNodes, + createComponentsArray, + denseConnect + +} from '../utils/create-pubsub.js' +import { awaitEvents, checkReceivedSubscriptions, checkReceivedSubscription } from '../utils/events.js' +import type { GossipSubAndComponents } from '../utils/create-pubsub.js' + +describe('heartbeat', function () { + const topic = 'foobar' + const numTopic = 70 + const numPeers = 50 + const numPeersPerTopic = 30 + let numLoop = 0 + + const getTopic = (i: number): string => { + return topic + String(i) + } + + const getTopicPeerIndices = (topic: number): number[] => { + // peer 0 join all topics + const peers = [0] + // topic 0 starts from index 1 + // topic 1 starts from index 2... 
+ for (let i = 0; i < numPeersPerTopic - 1; i++) { + const peerIndex = (i + topic + 1) % numPeers + if (peerIndex !== 0) { peers.push(peerIndex) } + } + return peers + } + + /** + * Star topology + * peer 1 + * / + * peer 0 - peer 2 + * \ + * peer 3 + * + * A topic contains peer 0 and some other peers, with numPeersPerTopic = 4 + * + * |Topic| Peers | + * |-----|-----------| + * | 0 | 0, 1, 2, 3| + * | 1 | 0, 2, 3, 4| + */ + itBench({ + id: 'heartbeat', + before: async () => { + const psubs = await createComponentsArray({ + number: numPeers, + init: { + scoreParams: { + IPColocationFactorWeight: 0 + }, + floodPublish: true, + // TODO: why we need to configure this low score + // probably we should tweak topic score params + // is that why we don't have mesh peers? + scoreThresholds: { + gossipThreshold: -10, + publishThreshold: -100, + graylistThreshold: -1000 + } + } + }) + + // build the star + await Promise.all(psubs.slice(1).map(async (ps) => connectPubsubNodes(psubs[0], ps))) + await Promise.all(psubs.map(async (ps) => awaitEvents(ps.pubsub, 'gossipsub:heartbeat', 2))) + + await denseConnect(psubs) + + // make sure psub 0 has `numPeers - 1` peers + expect(psubs[0].pubsub.getPeers().length).to.be.gte( + numPeers - 1, + `peer 0 should have at least ${numPeers - 1} peers` + ) + + const peerIds = psubs.map((psub) => psub.components.peerId.toString()) + for (let topicIndex = 0; topicIndex < numTopic; topicIndex++) { + const topic = getTopic(topicIndex) + psubs.forEach((ps) => { ps.pubsub.subscribe(topic) }) + const peerIndices = getTopicPeerIndices(topicIndex) + const peerIdsOnTopic = peerIndices.map((peerIndex) => peerIds[peerIndex]) + // peer 0 see all subscriptions from other + const subscription = checkReceivedSubscriptions(psubs[0], peerIdsOnTopic, topic) + // other peers should see the subsription from peer 0 to prevent PublishError.InsufficientPeers error + const otherSubscriptions = peerIndices + .slice(1) + .map((peerIndex) => psubs[peerIndex]) + 
.map(async (psub) => checkReceivedSubscription(psub, peerIds[0], topic, 0)) + peerIndices.forEach((peerIndex) => { psubs[peerIndex].pubsub.subscribe(topic) }) + await Promise.all([subscription, ...otherSubscriptions]) + } + + // wait for heartbeats to build mesh + await Promise.all(psubs.map(async (ps) => awaitEvents(ps.pubsub, 'gossipsub:heartbeat', 3))) + + // make sure psubs 0 have at least 10 topic peers and 4 mesh peers for each topic + for (let i = 0; i < numTopic; i++) { + expect((psubs[0].pubsub).getSubscribers(getTopic(i)).length).to.be.gte( + 10, + `psub 0: topic ${i} does not have enough topic peers` + ) + + expect((psubs[0].pubsub).getMeshPeers(getTopic(i)).length).to.be.gte( + 4, + `psub 0: topic ${i} does not have enough mesh peers` + ) + } + + return psubs + }, + beforeEach: async (psubs) => { + numLoop++ + const msg = `its not a flooooood ${numLoop}` + const promises = [] + for (let topicIndex = 0; topicIndex < numTopic; topicIndex++) { + for (const peerIndex of getTopicPeerIndices(topicIndex)) { + promises.push( + psubs[peerIndex].pubsub.publish( + getTopic(topicIndex), + uint8ArrayFromString(psubs[peerIndex].components.peerId.toString() + msg) + ) + ) + } + } + await Promise.all(promises) + + return psubs[0] + }, + fn: async (firstPsub: GossipSubAndComponents) => { + return (firstPsub.pubsub).heartbeat() + } + }) +}) diff --git a/packages/gossipsub/test/benchmark/protobuf.test.ts b/packages/gossipsub/test/benchmark/protobuf.test.ts new file mode 100644 index 0000000000..e94b369a8a --- /dev/null +++ b/packages/gossipsub/test/benchmark/protobuf.test.ts @@ -0,0 +1,53 @@ +import crypto from 'node:crypto' +import { itBench } from '@dapplion/benchmark' +import { RPC } from '../../src/message/rpc.js' + +describe('protobuf', function () { + const testCases: Array<{ name: string, length: number }> = [ + // As of Oct 2023, Attestation length = 281 + { name: 'Attestation', length: 300 }, + // A SignedBeaconBlock could be from 70_000 to 300_000 + { name: 
'SignedBeaconBlock', length: 70_000 }, + { name: 'SignedBeaconBlock', length: 140_000 }, + { name: 'SignedBeaconBlock', length: 210_000 }, + { name: 'SignedBeaconBlock', length: 280_000 } + ] + + for (const { name, length } of testCases) { + const rpc: RPC = { + subscriptions: [], + messages: [ + { + topic: 'topic1', + data: crypto.randomBytes(length), + signature: Uint8Array.from(Array.from({ length: 96 }, () => 100)) + } + ], + control: undefined + } + + const bytes = RPC.encode(rpc) + + const runsFactor = 1000 + + itBench({ + id: `decode ${name} message ${length} bytes`, + fn: () => { + for (let i = 0; i < runsFactor; i++) { + RPC.decode(bytes) + } + }, + runsFactor + }) + + itBench({ + id: `encode ${name} message ${length} bytes`, + fn: () => { + for (let i = 0; i < runsFactor; i++) { + RPC.encode(rpc) + } + }, + runsFactor + }) + } +}) diff --git a/packages/gossipsub/test/benchmark/time-cache.test.ts b/packages/gossipsub/test/benchmark/time-cache.test.ts new file mode 100644 index 0000000000..ef06d8d2de --- /dev/null +++ b/packages/gossipsub/test/benchmark/time-cache.test.ts @@ -0,0 +1,21 @@ +import { itBench } from '@dapplion/benchmark' +// @ts-expect-error no types +import TimeCache from 'time-cache' +import { SimpleTimeCache } from '../../src/utils/time-cache.js' + +// TODO: errors with "Error: root suite not found" +describe('npm TimeCache vs SimpleTimeCache', () => { + const iterations = [1_000_000, 4_000_000, 8_000_000, 16_000_000] + const timeCache = new TimeCache({ validity: 1 }) + const simpleTimeCache = new SimpleTimeCache({ validityMs: 1000 }) + + for (const iteration of iterations) { + itBench(`npm TimeCache.put x${iteration}`, () => { + for (let j = 0; j < iteration; j++) { timeCache.put(String(j)) } + }) + + itBench(`SimpleTimeCache.put x${iteration}`, () => { + for (let j = 0; j < iteration; j++) { simpleTimeCache.put(String(j), true) } + }) + } +}) diff --git a/packages/gossipsub/test/e2e/go-gossipsub.spec.ts 
b/packages/gossipsub/test/e2e/go-gossipsub.spec.ts new file mode 100644 index 0000000000..f8c5548d1c --- /dev/null +++ b/packages/gossipsub/test/e2e/go-gossipsub.spec.ts @@ -0,0 +1,1331 @@ +import { floodsub } from '@libp2p/floodsub' +import { stop } from '@libp2p/interface' +import { expect } from 'aegir/chai' +import delay from 'delay' +import pRetry from 'p-retry' +import pWaitFor from 'p-wait-for' +import { equals as uint8ArrayEquals } from 'uint8arrays/equals' +import { fromString as uint8ArrayFromString } from 'uint8arrays/from-string' +import { GossipsubD } from '../../src/constants.js' +import { TopicValidatorResult } from '../../src/index.js' +import { + sparseConnect, + denseConnect, + connectSome, + createComponentsArray, + createComponents, + connectPubsubNodes + +} from '../utils/create-pubsub.js' +import { awaitEvents, checkReceivedSubscription, checkReceivedSubscriptions } from '../utils/events.js' +import { fastMsgIdFn } from '../utils/index.js' +import type { Message } from '../../src/index.js' +import type { RPC } from '../../src/message/rpc.js' +import type { TopicScoreParams } from '../../src/score/peer-score-params.js' +import type { GossipSubAndComponents } from '../utils/create-pubsub.js' +import type { Libp2pEvents } from '@libp2p/interface' + +/** + * These tests were translated from: + * https://github.com/libp2p/go-libp2p-pubsub/blob/master/gossipsub_test.go + */ + +/** + * Given a topic and data (and debug metadata -- sender index and msg index) + * Return a function (takes a gossipsub (and receiver index)) + * that returns a Promise that awaits the message being received + * and checks that the received message equals the given message + */ +const checkReceivedMessage = + (topic: string, data: Uint8Array, senderIx: number, msgIx: number) => + async (node: GossipSubAndComponents, receiverIx: number) => + new Promise((resolve, reject) => { + const t = setTimeout(() => { + node.pubsub.removeEventListener('message', cb) + reject(new 
Error(`Message never received, sender ${senderIx}, receiver ${receiverIx}, index ${msgIx}`)) + }, 60000) + const cb = (evt: CustomEvent): void => { + const msg = evt.detail + + if (msg.topic !== topic) { + return + } + + if (uint8ArrayEquals(data, msg.data)) { + clearTimeout(t) + node.pubsub.removeEventListener('message', cb) + resolve() + } + } + node.pubsub.addEventListener('message', cb) + }) + +describe('go-libp2p-pubsub gossipsub tests', function () { + // In Github runners it takes ~10sec the longest test + this.timeout(120 * 1000) + this.retries(3) + + let psubs: GossipSubAndComponents[] + + afterEach(async () => { + await stop(...psubs.reduce((acc, curr) => acc.concat(curr.pubsub, ...Object.entries(curr.components)), [])) + }) + + it('test sparse gossipsub', async function () { + // Create 20 gossipsub nodes + // Subscribe to the topic, all nodes + // Sparsely connect the nodes + // Publish 100 messages, each from a random node + // Assert that subscribed nodes receive the message + psubs = await createComponentsArray({ + number: 20, + init: { + floodPublish: false, + batchPublish: true, + scoreParams: { + IPColocationFactorThreshold: 20, + behaviourPenaltyWeight: 0 + } + } + }) + const topic = 'foobar' + psubs.forEach((ps) => { ps.pubsub.subscribe(topic) }) + + await sparseConnect(psubs) + + // wait for heartbeats to build mesh + await Promise.all(psubs.map(async (ps) => awaitEvents(ps.pubsub, 'gossipsub:heartbeat', 2))) + + const sendRecv = [] + for (let i = 0; i < 100; i++) { + const msg = uint8ArrayFromString(`${i} its not a flooooood ${i}`) + + const owner = Math.floor(Math.random() * psubs.length) + const results = Promise.all( + psubs.filter((psub, j) => j !== owner).map(checkReceivedMessage(topic, msg, owner, i)) + ) + sendRecv.push(psubs[owner].pubsub.publish(topic, msg)) + sendRecv.push(results) + } + await Promise.all(sendRecv) + }) + + const batchOpts = [true, false] + for (const batchPublish of batchOpts) { + // eslint-disable-next-line 
no-loop-func + it(`test dense gossipsub batchPublish=${batchPublish}`, async function () { + // Create 20 gossipsub nodes + // Subscribe to the topic, all nodes + // Densely connect the nodes + // Publish 100 messages, each from a random node + // Assert that subscribed nodes receive the message + psubs = await createComponentsArray({ + number: 20, + init: { + floodPublish: false, + batchPublish, + scoreParams: { + IPColocationFactorThreshold: 20, + behaviourPenaltyWeight: 0 + } + } + }) + const topic = 'foobar' + psubs.forEach((ps) => { ps.pubsub.subscribe(topic) }) + + await denseConnect(psubs) + + // wait for heartbeats to build mesh + await Promise.all(psubs.map(async (ps) => awaitEvents(ps.pubsub, 'gossipsub:heartbeat', 2))) + + const sendRecv = [] + for (let i = 0; i < 100; i++) { + const msg = uint8ArrayFromString(`${i} its not a flooooood ${i}`) + const owner = Math.floor(Math.random() * psubs.length) + const results = Promise.all( + psubs.filter((psub, j) => j !== owner).map(checkReceivedMessage(topic, msg, owner, i)) + ) + sendRecv.push(psubs[owner].pubsub.publish(topic, msg)) + sendRecv.push(results) + } + await Promise.all(sendRecv) + }) + } + + it('test gossipsub fanout', async function () { + // Create 20 gossipsub nodes + // Subscribe to the topic, all nodes except the first + // Densely connect the nodes + // Publish 100 messages, each from the first node + // Assert that subscribed nodes receive the message + // Subscribe to the topic, first node + // Publish 100 messages, each from the first node + // Assert that subscribed nodes receive the message + psubs = await createComponentsArray({ + number: 20, + init: { + floodPublish: false, + scoreParams: { + IPColocationFactorThreshold: 20, + behaviourPenaltyWeight: 0 + } + } + }) + const topic = 'foobar' + const promises = psubs.map(async (ps) => awaitEvents(ps.pubsub, 'gossipsub:heartbeat', 2)) + psubs.slice(1).forEach((ps) => { ps.pubsub.subscribe(topic) }) + + await denseConnect(psubs) + + // wait 
for heartbeats to build mesh + await Promise.all(promises) + + let sendRecv = [] + for (let i = 0; i < 100; i++) { + const msg = uint8ArrayFromString(`${i} its not a flooooood ${i}`) + + const owner = 0 + + const results = Promise.all(psubs.slice(1).map(checkReceivedMessage(topic, msg, owner, i))) + await psubs[owner].pubsub.publish(topic, msg) + await results + } + // await Promise.all(sendRecv) + + psubs[0].pubsub.subscribe(topic) + + // wait for a heartbeat + await Promise.all(psubs.map(async (ps) => awaitEvents(ps.pubsub, 'gossipsub:heartbeat', 1))) + + sendRecv = [] + for (let i = 0; i < 100; i++) { + const msg = uint8ArrayFromString(`2nd - ${i} its not a flooooood ${i}`) + + const owner = 0 + + const results = Promise.all( + psubs + .slice(1) + .filter((psub, j) => j !== owner) + .map(checkReceivedMessage(topic, msg, owner, i)) + ) + sendRecv.push(psubs[owner].pubsub.publish(topic, msg)) + sendRecv.push(results) + } + await Promise.all(sendRecv) + }) + + it('test gossipsub fanout maintenance', async function () { + // Create 20 gossipsub nodes + // Subscribe to the topic, all nodes except the first + // Densely connect the nodes + // Publish 100 messages, each from the first node + // Assert that subscribed nodes receive the message + // Unsubscribe to the topic, all nodes except the first + // Resubscribe to the topic, all nodes except the first + // Publish 100 messages, each from the first node + // Assert that the subscribed nodes receive the message + psubs = await createComponentsArray({ + number: 20, + init: { + floodPublish: false, + scoreParams: { + IPColocationFactorThreshold: 20, + behaviourPenaltyWeight: 0 + } + } + }) + const promises = psubs.map(async (ps) => awaitEvents(ps.pubsub, 'gossipsub:heartbeat', 2)) + const topic = 'foobar' + psubs.slice(1).forEach((ps) => { ps.pubsub.subscribe(topic) }) + + await denseConnect(psubs) + + // wait for heartbeats to build mesh + await Promise.all(promises) + let sendRecv: Array> = [] + const sendMessages = 
async (time: number): Promise => { + for (let i = 0; i < 100; i++) { + const msg = uint8ArrayFromString(`${time} ${i} its not a flooooood ${i}`) + + const owner = 0 + + const results = Promise.all( + psubs + .slice(1) + .filter((psub, j) => j !== owner) + .map(checkReceivedMessage(topic, msg, owner, i)) + ) + await psubs[owner].pubsub.publish(topic, msg) + sendRecv.push(results) + } + } + await sendMessages(1) + await Promise.all(sendRecv) + + psubs.slice(1).forEach((ps) => { ps.pubsub.unsubscribe(topic) }) + + // wait for heartbeats + await Promise.all(psubs.map(async (ps) => awaitEvents(ps.pubsub, 'gossipsub:heartbeat', 2))) + psubs.slice(1).forEach((ps) => { ps.pubsub.subscribe(topic) }) + + // wait for heartbeats + await Promise.all(psubs.map(async (ps) => awaitEvents(ps.pubsub, 'gossipsub:heartbeat', 2))) + sendRecv = [] + await sendMessages(2) + await Promise.all(sendRecv) + }) + + it('test gossipsub fanout expiry', async function () { + // Create 10 gossipsub nodes + // Subscribe to the topic, all nodes except the first + // Densely connect the nodes + // Publish 5 messages, each from the first node + // Assert that the subscribed nodes receive every message + // Assert that the first node has fanout peers + // Wait until fanout expiry + // Assert that the first node has no fanout + psubs = await createComponentsArray({ + number: 10, + init: { + scoreParams: { + IPColocationFactorThreshold: 20, + behaviourPenaltyWeight: 0 + }, + floodPublish: false, + fanoutTTL: 1000 + } + }) + const promises = psubs.map(async (ps) => awaitEvents(ps.pubsub, 'gossipsub:heartbeat', 2)) + const topic = 'foobar' + psubs.slice(1).forEach((ps) => { ps.pubsub.subscribe(topic) }) + + await denseConnect(psubs) + + // wait for heartbeats to build mesh + await Promise.all(promises) + + const sendRecv = [] + for (let i = 0; i < 5; i++) { + const msg = uint8ArrayFromString(`${i} its not a flooooood ${i}`) + + const owner = 0 + + const results = Promise.all( + psubs.filter((psub, j) => j 
!== owner).map(checkReceivedMessage(topic, msg, owner, i)) + ) + await psubs[owner].pubsub.publish(topic, msg) + sendRecv.push(results) + } + await Promise.all(sendRecv) + + expect((psubs[0].pubsub).fanout).to.not.be.empty() + + await pWaitFor(async () => { + return (psubs[0].pubsub).fanout.size === 0 + }) + }) + + it('test gossipsub gossip', async function () { + // Create 20 gossipsub nodes + // Subscribe to the topic, all nodes + // Densely connect the nodes + // Publish 100 messages, each from a random node + // Assert that the subscribed nodes receive the message + // Wait a bit between each message so gossip can be interleaved + psubs = await createComponentsArray({ + number: 20, + init: { + scoreParams: { + IPColocationFactorThreshold: 20, + behaviourPenaltyWeight: 0 + } + } + }) + const promises = psubs.map(async (ps) => awaitEvents(ps.pubsub, 'gossipsub:heartbeat', 2)) + const topic = 'foobar' + psubs.forEach((ps) => { ps.pubsub.subscribe(topic) }) + + await denseConnect(psubs) + + // wait for heartbeats to build mesh + await Promise.all(promises) + + for (let i = 0; i < 100; i++) { + const msg = uint8ArrayFromString(`${i} its not a flooooood ${i}`) + const owner = Math.floor(Math.random() * psubs.length) + const results = Promise.all( + psubs.filter((psub, j) => j !== owner).map(checkReceivedMessage(topic, msg, owner, i)) + ) + await psubs[owner].pubsub.publish(topic, msg) + await results + // wait a bit to have some gossip interleaved + await delay(100) + } + // and wait for some gossip flushing + await Promise.all(psubs.map(async (ps) => awaitEvents(ps.pubsub, 'gossipsub:heartbeat', 2))) + }) + + it('test gossipsub gossip propagation', async function () { + // Create 20 gossipsub nodes + // Split into two groups, just a single node shared between + // Densely connect each group to itself + // Subscribe to the topic, first group minus the shared node + // Publish 10 messages, each from the shared node + // Assert that the first group receives the 
messages + // Subscribe to the topic, second group minus the shared node + // Assert that the second group receives the messages (via gossip) + psubs = await createComponentsArray({ + number: 20, + init: { + floodPublish: false, + scoreParams: { + IPColocationFactorThreshold: 20, + behaviourPenaltyWeight: 0 + } + } + }) + const topic = 'foobar' + const group1 = psubs.slice(0, GossipsubD + 1) + const group2 = psubs.slice(GossipsubD + 1) + group2.unshift(psubs[0]) + + await denseConnect(group1) + await denseConnect(group2) + + group1.slice(1).forEach((ps) => { ps.pubsub.subscribe(topic) }) + + // wait for heartbeats to build mesh + await Promise.all(psubs.map(async (ps) => awaitEvents(ps.pubsub, 'gossipsub:heartbeat', 3))) + + const sendRecv: Array> = [] + for (let i = 0; i < 10; i++) { + const msg = uint8ArrayFromString(`${i} its not a flooooood ${i}`) + const owner = 0 + const results = Promise.all(group1.slice(1).map(checkReceivedMessage(topic, msg, owner, i))) + sendRecv.push(psubs[owner].pubsub.publish(topic, msg)) + sendRecv.push(results) + } + await Promise.all(sendRecv) + + await delay(100) + + psubs.slice(GossipsubD + 1).forEach((ps) => { ps.pubsub.subscribe(topic) }) + + const received: Message[][] = Array.from({ length: psubs.length - (GossipsubD + 1) }, () => []) + const results = Promise.all( + group2.slice(1).map( + async (ps, ix) => + new Promise((resolve, reject) => { + const t = setTimeout(() => { reject(new Error('Timed out')) }, 10000) + ps.pubsub.addEventListener('message', (e: CustomEvent) => { + if (e.detail.topic !== topic) { + return + } + + received[ix].push(e.detail) + if (received[ix].length >= 10) { + clearTimeout(t) + resolve() + } + }) + }) + ) + ) + + await results + }) + + it('test gossipsub prune', async function () { + // Create 20 gossipsub nodes + // Subscribe to the topic, all nodes + // Densely connect nodes + // Unsubscribe to the topic, first 5 nodes + // Publish 100 messages, each from a random node + // Assert that the 
subscribed nodes receive every message + psubs = await createComponentsArray({ + number: 20, + init: { + scoreParams: { + IPColocationFactorThreshold: 20 + } + } + }) + const topic = 'foobar' + psubs.forEach((ps) => { ps.pubsub.subscribe(topic) }) + + await denseConnect(psubs) + + // wait for heartbeats to build mesh + await Promise.all(psubs.map(async (ps) => awaitEvents(ps.pubsub, 'gossipsub:heartbeat', 2))) + + // disconnect some peers from the mesh to get some PRUNEs + psubs.slice(0, 5).forEach((ps) => { ps.pubsub.unsubscribe(topic) }) + + // wait a bit to take effect + await Promise.all(psubs.map(async (ps) => awaitEvents(ps.pubsub, 'gossipsub:heartbeat', 2))) + + const sendRecv: Array> = [] + for (let i = 0; i < 100; i++) { + const msg = uint8ArrayFromString(`${i} its not a flooooood ${i}`) + const owner = Math.floor(Math.random() * psubs.length) + const results = Promise.all( + psubs + .slice(5) + .filter((psub, j) => j + 5 !== owner) + .map(checkReceivedMessage(topic, msg, owner, i)) + ) + sendRecv.push(psubs[owner].pubsub.publish(topic, msg)) + sendRecv.push(results) + } + await Promise.all(sendRecv) + }) + + it('test gossipsub graft', async function () { + // Create 20 gossipsub nodes + // Sparsely connect nodes + // Subscribe to the topic, all nodes, waiting for each subscription to propagate first + // Publish 100 messages, each from a random node + // Assert that the subscribed nodes receive every message + psubs = await createComponentsArray({ + number: 20, + init: { + scoreParams: { + IPColocationFactorThreshold: 20, + behaviourPenaltyWeight: 0 + } + } + }) + const topic = 'foobar' + + await sparseConnect(psubs) + + for (const ps of psubs) { + ps.pubsub.subscribe(topic) + // wait for announce to propagate + await delay(100) + } + + await Promise.all(psubs.map(async (ps) => awaitEvents(ps.pubsub, 'gossipsub:heartbeat', 2))) + + const sendRecv = [] + for (let i = 0; i < 100; i++) { + const msg = uint8ArrayFromString(`${i} its not a flooooood ${i}`) + 
const owner = Math.floor(Math.random() * psubs.length) + const results = Promise.all( + psubs.filter((psub, j) => j !== owner).map(checkReceivedMessage(topic, msg, owner, i)) + ) + sendRecv.push(psubs[owner].pubsub.publish(topic, msg)) + sendRecv.push(results) + } + await Promise.all(sendRecv) + }) + + it('test gossipsub remove peer', async function () { + // Create 20 gossipsub nodes + // Subscribe to the topic, all nodes + // Densely connect nodes + // Stop 5 nodes + // Publish 100 messages, each from a random still-started node + // Assert that the subscribed nodes receive every message + psubs = await createComponentsArray({ + number: 20, + init: { + scoreParams: { + IPColocationFactorThreshold: 20 + } + } + }) + const topic = 'foobar' + + await denseConnect(psubs) + + psubs.forEach((ps) => { ps.pubsub.subscribe(topic) }) + + // wait for heartbeats to build mesh + await Promise.all(psubs.map(async (ps) => awaitEvents(ps.pubsub, 'gossipsub:heartbeat', 2))) + + // disconnect some peers to exercise _removePeer paths + afterEach(async () => { + await stop( + ...psubs + .slice(0, 5) + .reduce((acc, curr) => acc.concat(curr.pubsub, ...Object.entries(curr.components)), []) + ) + }) + + const sendRecv = [] + for (let i = 0; i < 100; i++) { + const msg = uint8ArrayFromString(`${i} its not a flooooood ${i}`) + const owner = Math.floor(Math.random() * (psubs.length - 5)) + const results = Promise.all( + psubs + .slice(5) + .filter((psub, j) => j !== owner) + .map(checkReceivedMessage(topic, msg, owner, i)) + ) + sendRecv.push(psubs.slice(5)[owner].pubsub.publish(topic, msg)) + sendRecv.push(results) + } + await Promise.all(sendRecv) + }) + + it('test gossipsub graft prune retry', async function () { + // Create 10 gossipsub nodes + // Densely connect nodes + // Subscribe to 35 topics, all nodes + // Publish a message from each topic, each from a random node + // Assert that the subscribed nodes receive every message + psubs = await createComponentsArray({ + number: 10, + 
init: { + scoreParams: { + IPColocationFactorThreshold: 20 + } + } + }) + const topic = 'foobar' + + await denseConnect(psubs) + + for (let i = 0; i < 35; i++) { + psubs.forEach((ps) => { ps.pubsub.subscribe(`${topic}${i}`) }) + } + + // wait for heartbeats to build mesh + await Promise.all(psubs.map(async (ps) => awaitEvents(ps.pubsub, 'gossipsub:heartbeat', 9))) + + for (let i = 0; i < 35; i++) { + const msg = uint8ArrayFromString(`${i} its not a flooooood ${i}`) + const owner = Math.floor(Math.random() * psubs.length) + const results = Promise.all( + psubs.filter((psub, j) => j !== owner).map(checkReceivedMessage(`${topic}${i}`, msg, owner, i)) + ) + await psubs[owner].pubsub.publish(`${topic}${i}`, msg) + await delay(20) + await results + } + }) + + it.skip('test gossipsub control piggyback', async function () { + // Create 10 gossipsub nodes + // Densely connect nodes + // Subscribe to a 'flood' topic, all nodes + // Publish 10k messages on the flood topic, each from a random node, in the background + // Subscribe to 5 topics, all nodes + // Wait for the flood to stop + // Publish a message to each topic, each from a random node + // Assert that subscribed nodes receive each message + // Publish a message from each topic, each from a random node + // Assert that the subscribed nodes receive every message + psubs = await createComponentsArray({ + number: 10, + init: { + scoreParams: { + IPColocationFactorThreshold: 20, + behaviourPenaltyWeight: 0 + } + } + }) + const topic = 'foobar' + + await denseConnect(psubs) + + const floodTopic = 'flood' + psubs.forEach((ps) => { ps.pubsub.subscribe(floodTopic) }) + + await Promise.all(psubs.map(async (ps) => awaitEvents(ps.pubsub, 'gossipsub:heartbeat', 1))) + + // create a background flood of messages that overloads the queues + const floodOwner = Math.floor(Math.random() * psubs.length) + const floodMsg = uint8ArrayFromString('background flooooood') + const backgroundFlood = Promise.resolve().then(async () => { + for 
(let i = 0; i < 10000; i++) { + await psubs[floodOwner].pubsub.publish(floodTopic, floodMsg) + } + }) + + await delay(20) + + // and subscribe to a bunch of topics in the meantime -- this should + // result in some dropped control messages, with subsequent piggybacking + // in the background flood + for (let i = 0; i < 5; i++) { + psubs.forEach((ps) => { ps.pubsub.subscribe(`${topic}${i}`) }) + } + + // wait for the flood to stop + await backgroundFlood + + // and test that we have functional overlays + const sendRecv: Array> = [] + for (let i = 0; i < 5; i++) { + const msg = uint8ArrayFromString(`${i} its not a flooooood ${i}`) + const owner = Math.floor(Math.random() * psubs.length) + const results = Promise.all( + psubs.filter((psub, j) => j !== owner).map(checkReceivedMessage(`${topic}${i}`, msg, owner, i)) + ) + await psubs[owner].pubsub.publish(`${topic}${i}`, msg) + sendRecv.push(results) + } + await Promise.all(sendRecv) + }) + + it('test mixed gossipsub', async function () { + // Create 20 gossipsub nodes + // Create 10 floodsub nodes + // Subscribe to the topic, all nodes + // Sparsely connect nodes + // Publish 100 messages, each from a random node + // Assert that the subscribed nodes receive every message + const gsubs: GossipSubAndComponents[] = await createComponentsArray({ + number: 20, + init: { + scoreParams: { + IPColocationFactorThreshold: 20, + behaviourPenaltyWeight: 0 + }, + fastMsgIdFn + } + }) + const fsubs = await createComponentsArray({ + number: 10, + pubsub: floodsub as any + }) + psubs = gsubs.concat(fsubs) + + const topic = 'foobar' + psubs.forEach((ps) => { ps.pubsub.subscribe(topic) }) + + await sparseConnect(psubs) + + // wait for heartbeats to build mesh + await Promise.all(gsubs.map(async (ps) => awaitEvents(ps.pubsub, 'gossipsub:heartbeat', 2))) + + const sendRecv = [] + for (let i = 0; i < 100; i++) { + const msg = uint8ArrayFromString(`${i} its not a flooooood ${i}`) + const owner = Math.floor(Math.random() * psubs.length) + 
const results = Promise.all( + psubs.filter((psub, j) => j !== owner).map(checkReceivedMessage(topic, msg, owner, i)) + ) + sendRecv.push(psubs[owner].pubsub.publish(topic, msg)) + sendRecv.push(results) + } + await Promise.all(sendRecv) + }) + + it('test gossipsub multihops', async function () { + // Create 6 gossipsub nodes + // Connect nodes in a line (eg: 0 -> 1 -> 2 -> 3 ...) + // Subscribe to the topic, all nodes + // Publish a message from node 0 + // Assert that the last node receives the message + const numPeers = 6 + psubs = await createComponentsArray({ + number: numPeers, + init: { + scoreParams: { + IPColocationFactorThreshold: 20, + behaviourPenaltyWeight: 0 + } + } + }) + const topic = 'foobar' + + for (let i = 0; i < numPeers - 1; i++) { + await connectPubsubNodes(psubs[i], psubs[i + 1]) + } + const peerIdStrsByIdx: string[][] = [] + for (let i = 0; i < numPeers; i++) { + if (i === 0) { + // first + peerIdStrsByIdx[i] = [psubs[i + 1].components.peerId.toString()] + } else if (i > 0 && i < numPeers - 1) { + // middle + peerIdStrsByIdx[i] = [psubs[i + 1].components.peerId.toString(), psubs[i - 1].components.peerId.toString()] + } else if (i === numPeers - 1) { + // last + peerIdStrsByIdx[i] = [psubs[i - 1].components.peerId.toString()] + } + } + + const subscriptionPromises = psubs.map( + async (psub, i) => checkReceivedSubscriptions(psub, peerIdStrsByIdx[i], topic) + ) + psubs.forEach((ps) => { ps.pubsub.subscribe(topic) }) + + // wait for heartbeats to build mesh + await Promise.all(psubs.map(async (ps) => awaitEvents(ps.pubsub, 'gossipsub:heartbeat', 2))) + await Promise.all(subscriptionPromises) + + const msg = uint8ArrayFromString(`${0} its not a flooooood ${0}`) + const owner = 0 + const results = checkReceivedMessage(topic, msg, owner, 0)(psubs[5], 5) + await psubs[owner].pubsub.publish(topic, msg) + await results + }) + + it('test gossipsub tree topology', async function () { + // Create 10 gossipsub nodes + // Connect nodes in a tree, diagram 
below + // Subscribe to the topic, all nodes + // Assert that the nodes are peered appropriately + // Publish two messages, one from either end of the tree + // Assert that the subscribed nodes receive every message + psubs = await createComponentsArray({ + number: 10, + init: { + scoreParams: { + IPColocationFactorThreshold: 20 + } + } + }) + const topic = 'foobar' + + /* + [0] -> [1] -> [2] -> [3] + | L->[4] + v + [5] -> [6] -> [7] + | + v + [8] -> [9] + */ + const treeTopology = [ + [1, 5], // 0 + [2, 4], // 1 + [3], // 2 + [], // 3 leaf + [], // 4 leaf + [6, 8], // 5 + [7], // 6 + [], // 7 leaf + [9], // 8 + [] // 9 leaf + ] + for (let from = 0; from < treeTopology.length; from++) { + for (const to of treeTopology[from]) { + await connectPubsubNodes(psubs[from], psubs[to]) + } + } + + const getPeerIdStrs = (idx: number): string[] => { + const outbounds = treeTopology[idx] + const inbounds = [] + for (let i = 0; i < treeTopology.length; i++) { + if (treeTopology[i].includes(idx)) { inbounds.push(i) } + } + return Array.from(new Set([...inbounds, ...outbounds])).map((i) => psubs[i].components.peerId.toString()) + } + + const subscriptionPromises = psubs.map( + async (psub, i) => checkReceivedSubscriptions(psub, getPeerIdStrs(i), topic) + ) + psubs.forEach((ps) => { ps.pubsub.subscribe(topic) }) + + // wait for heartbeats to build mesh + await Promise.all(psubs.map(async (ps) => awaitEvents(ps.pubsub, 'gossipsub:heartbeat', 2))) + await Promise.all(subscriptionPromises) + + expect(psubs[0].pubsub.getPeers().map((s) => s.toString())).to.have.members([ + psubs[1].components.peerId.toString(), + psubs[5].components.peerId.toString() + ]) + expect(psubs[1].pubsub.getPeers().map((s) => s.toString())).to.have.members([ + psubs[0].components.peerId.toString(), + psubs[2].components.peerId.toString(), + psubs[4].components.peerId.toString() + ]) + expect(psubs[2].pubsub.getPeers().map((s) => s.toString())).to.have.members([ + psubs[1].components.peerId.toString(), + 
psubs[3].components.peerId.toString() + ]) + + const sendRecv = [] + for (const owner of [9, 3]) { + const msg = uint8ArrayFromString(`${owner} its not a flooooood ${owner}`) + const results = Promise.all( + psubs.filter((psub, j) => j !== owner).map(checkReceivedMessage(topic, msg, owner, owner)) + ) + sendRecv.push(psubs[owner].pubsub.publish(topic, msg)) + sendRecv.push(results) + } + await Promise.all(sendRecv) + }) + + it('test gossipsub star topology with signed peer records', async function () { + // Create 20 gossipsub nodes with lower degrees + // Connect nodes to a center node, with the center having very low degree + // Subscribe to the topic, all nodes + // Assert that all nodes have > 1 connection + // Publish one message per node + // Assert that the subscribed nodes receive every message + psubs = await createComponentsArray({ + number: 20, + init: { + scoreThresholds: { + acceptPXThreshold: 0 + }, + scoreParams: { + IPColocationFactorThreshold: 20 + }, + doPX: true, + D: 4, + Dhi: 5, + Dlo: 3, + Dscore: 3, + prunePeers: 5 + } + }) + + // configure the center of the star with very low D + ;(psubs[0].pubsub).opts.D = 0 + ;(psubs[0].pubsub).opts.Dhi = 0 + ;(psubs[0].pubsub).opts.Dlo = 0 + ;(psubs[0].pubsub).opts.Dscore = 0 + + // build the star + await Promise.all(psubs.slice(1).map(async (ps) => connectPubsubNodes(psubs[0], ps))) + await Promise.all(psubs.map(async (ps) => awaitEvents(ps.pubsub, 'gossipsub:heartbeat', 2))) + + // build the mesh + const topic = 'foobar' + const peerIdStrs = psubs.map((psub) => psub.components.peerId.toString()) + const subscriptionPromise = checkReceivedSubscriptions(psubs[0], peerIdStrs, topic) + psubs.forEach((ps) => { ps.pubsub.subscribe(topic) }) + + // wait a bit for the mesh to build + await Promise.all(psubs.map(async (ps) => awaitEvents(ps.pubsub, 'gossipsub:heartbeat', 15, 25000))) + await subscriptionPromise + + // check that all peers have > 1 connection + psubs.forEach((ps) => { + 
expect(ps.components.connectionManager.getConnections().length).to.be.gt(1) + }) + + // send a message from each peer and assert it was propagated + const sendRecv = [] + for (let i = 0; i < psubs.length; i++) { + const msg = uint8ArrayFromString(`${i} its not a flooooood ${i}`) + const owner = i + const results = Promise.all( + psubs.filter((psub, j) => j !== owner).map(checkReceivedMessage(topic, msg, owner, i)) + ) + sendRecv.push(psubs[owner].pubsub.publish(topic, msg)) + sendRecv.push(results) + } + await Promise.all(sendRecv) + }) + + it('test gossipsub direct peers', async function () { + // Create 3 gossipsub nodes + // 2 and 3 with direct peer connections with each other + // Connect nodes: 2 <- 1 -> 3 + // Assert that the nodes are connected + // Subscribe to the topic, all nodes + // Publish a message from each node + // Assert that all nodes receive the messages + // Disconnect peers + // Assert peers reconnect + // Publish a message from each node + // Assert that all nodes receive the messages + psubs = await Promise.all([ + createComponents({ + init: { + scoreParams: { + IPColocationFactorThreshold: 20 + }, + fastMsgIdFn, + directConnectTicks: 2 + } + }), + createComponents({ + init: { + scoreParams: { + IPColocationFactorThreshold: 20 + }, + fastMsgIdFn, + directConnectTicks: 2 + } + }), + createComponents({ + init: { + scoreParams: { + IPColocationFactorThreshold: 20 + }, + fastMsgIdFn + } + }) + ]) + ;(psubs[1].pubsub).direct.add(psubs[2].components.peerId.toString()) + await connectPubsubNodes(psubs[1], psubs[2]) + ;(psubs[2].pubsub).direct.add(psubs[1].components.peerId.toString()) + await connectPubsubNodes(psubs[2], psubs[1]) + + // each peer connects to 2 other peers + await connectPubsubNodes(psubs[0], psubs[1]) + await connectPubsubNodes(psubs[0], psubs[2]) + + const topic = 'foobar' + const peerIdStrs = psubs.map((libp2p) => libp2p.components.peerId.toString()) + let subscriptionPromises = psubs.map(async (libp2ps) => 
checkReceivedSubscriptions(libp2ps, peerIdStrs, topic)) + psubs.forEach((ps) => { ps.pubsub.subscribe(topic) }) + await Promise.all(psubs.map(async (ps) => awaitEvents(ps.pubsub, 'gossipsub:heartbeat', 1))) + await Promise.all(subscriptionPromises) + + let sendRecv = [] + for (let i = 0; i < 3; i++) { + const msg = uint8ArrayFromString(`${i} its not a flooooood ${i}`) + const owner = i + const results = Promise.all(psubs.filter((_, j) => j !== owner).map(checkReceivedMessage(topic, msg, owner, i))) + sendRecv.push(psubs[owner].pubsub.publish(topic, msg)) + sendRecv.push(results) + } + await Promise.all(sendRecv) + + const connectPromises = [1, 2].map(async (i) => awaitEvents(psubs[i].components.events, 'peer:connect', 1)) + // disconnect the direct peers to test reconnection + // need more time to disconnect/connect/send subscriptions again + subscriptionPromises = [ + checkReceivedSubscription(psubs[1], peerIdStrs[2], topic, 2, 10000), + checkReceivedSubscription(psubs[2], peerIdStrs[1], topic, 1, 10000) + ] + await psubs[1].components.connectionManager.closeConnections(psubs[2].components.peerId) + // TODO remove when https://github.com/libp2p/js-libp2p-interfaces/pull/268 is merged + await psubs[2].components.connectionManager.closeConnections(psubs[1].components.peerId) + + await Promise.all(psubs.map(async (ps) => awaitEvents(ps.pubsub, 'gossipsub:heartbeat', 5))) + await Promise.all(connectPromises) + await Promise.all(subscriptionPromises) + expect(psubs[1].components.connectionManager.getConnections(psubs[2].components.peerId)).to.not.be.empty() + + sendRecv = [] + for (let i = 0; i < 3; i++) { + const msg = uint8ArrayFromString(`2nd - ${i} its not a flooooood ${i}`) + const owner = i + const results = Promise.all( + psubs.filter((psub, j) => j !== owner).map(checkReceivedMessage(topic, msg, owner, i)) + ) + sendRecv.push(psubs[owner].pubsub.publish(topic, msg)) + sendRecv.push(results) + } + await Promise.all(sendRecv) + }) + + it('test gossipsub flood 
publish', async function () { + // Create 30 gossipsub nodes + // Connect in star topology + // Subscribe to the topic, all nodes + // Publish 20 messages, each from the center node + // Assert that the other nodes receive the message + const numPeers = 30 + psubs = await createComponentsArray({ + number: numPeers, + init: { + scoreParams: { + IPColocationFactorThreshold: 30 + } + } + }) + + await Promise.all( + psubs.slice(1).map(async (ps) => { + await connectPubsubNodes(psubs[0], ps) + }) + ) + + const owner = 0 + const psub0 = psubs[owner] + const peerIdStrs = psubs.filter((_, j) => j !== owner).map((psub) => psub.components.peerId.toString()) + // build the (partial, unstable) mesh + const topic = 'foobar' + const subscriptionPromise = checkReceivedSubscriptions(psub0, peerIdStrs, topic) + psubs.forEach((ps) => { ps.pubsub.subscribe(topic) }) + + await Promise.all(psubs.map(async (ps) => awaitEvents(ps.pubsub, 'gossipsub:heartbeat', 1))) + await subscriptionPromise + + // send messages from the star and assert they were received + const sendRecv = [] + for (let i = 0; i < 20; i++) { + const msg = uint8ArrayFromString(`${i} its not a flooooood ${i}`) + const results = Promise.all( + psubs.filter((psub, j) => j !== owner).map(checkReceivedMessage(topic, msg, owner, i)) + ) + sendRecv.push(psubs[owner].pubsub.publish(topic, msg)) + sendRecv.push(results) + } + await Promise.all(sendRecv) + }) + + it('test gossipsub negative score', async function () { + // Create 20 gossipsub nodes, with scoring params to quickly lower node 0's score + // Connect densely + // Subscribe to the topic, all nodes + // Publish 20 messages, each from a different node, collecting all received messages + // Assert that nodes other than 0 should not receive any messages from node 0 + psubs = await createComponentsArray({ + number: 20, + init: { + scoreParams: { + IPColocationFactorThreshold: 30, + appSpecificScore: (p) => (p === psubs[0].components.peerId.toString() ? 
-1000 : 0), + decayInterval: 1000, + decayToZero: 0.01 + }, + scoreThresholds: { + gossipThreshold: -10, + publishThreshold: -100, + graylistThreshold: -1000 + }, + fastMsgIdFn + } + }) + + await denseConnect(psubs) + + const topic = 'foobar' + psubs.forEach((ps) => { ps.pubsub.subscribe(topic) }) + + await Promise.all(psubs.map(async (ps) => awaitEvents(ps.pubsub, 'gossipsub:heartbeat', 3))) + + psubs.slice(1).forEach((ps) => { + ps.pubsub.addEventListener('message', (evt) => { + if (evt.detail.type !== 'signed') { + throw new Error('unexpected message type') + } + expect(evt.detail.from.equals(psubs[0].components.peerId)).to.be.false() + }) + } + ) + + const sendRecv = [] + for (let i = 0; i < 20; i++) { + const msg = uint8ArrayFromString(`${i} its not a flooooood ${i}`) + const owner = i + sendRecv.push(psubs[owner].pubsub.publish(topic, msg)) + } + await Promise.all(sendRecv) + + await Promise.all(psubs.map(async (ps) => awaitEvents(ps.pubsub, 'gossipsub:heartbeat', 2))) + }) + + it('test gossipsub score validator ex', async function () { + // Create 3 gossipsub nodes + // Connect fully + // Register a topic validator on node 0: ignore 1, reject 2 + // Subscribe to the topic, node 0 + // Publish 2 messages, from 1 and 2 + // Assert that 0 received neither message + // Assert that 1's score is 0, 2's score is negative + const topic = 'foobar' + psubs = await createComponentsArray({ + number: 3, + init: { + scoreParams: { + topics: { + [topic]: { + topicWeight: 1, + timeInMeshQuantum: 1000, + invalidMessageDeliveriesWeight: -1, + invalidMessageDeliveriesDecay: 0.9999, + timeInMeshWeight: 0, + timeInMeshCap: 0, + firstMessageDeliveriesWeight: 0, + firstMessageDeliveriesDecay: 0, + firstMessageDeliveriesCap: 0, + meshMessageDeliveriesWeight: 0, + meshMessageDeliveriesDecay: 0, + meshMessageDeliveriesCap: 0, + meshMessageDeliveriesThreshold: 0, + meshMessageDeliveriesWindow: 0, + meshMessageDeliveriesActivation: 0, + meshFailurePenaltyWeight: 0, + 
meshFailurePenaltyDecay: 0 + } + } + } + } + }) + + await connectPubsubNodes(psubs[0], psubs[1]) + await connectPubsubNodes(psubs[1], psubs[2]) + await connectPubsubNodes(psubs[0], psubs[2]) + ;(psubs[0].pubsub).topicValidators.set(topic, async (propagationSource, m) => { + if (propagationSource.equals(psubs[1].components.peerId)) { return TopicValidatorResult.Ignore } + if (propagationSource.equals(psubs[2].components.peerId)) { return TopicValidatorResult.Reject } + throw Error('Unknown PeerId') + }) + + psubs[0].pubsub.subscribe(topic) + + await delay(200) + + psubs[0].pubsub.addEventListener('message', () => expect.fail('node 0 should not receive any messages')) + + const msg = uint8ArrayFromString('its not a flooooood') + await psubs[1].pubsub.publish(topic, msg) + const msg2 = uint8ArrayFromString('2nd - its not a flooooood') + await psubs[2].pubsub.publish(topic, msg2) + + await Promise.all(psubs.map(async (ps) => awaitEvents(ps.pubsub, 'gossipsub:heartbeat', 2))) + + expect((psubs[0].pubsub).score.score(psubs[1].components.peerId.toString())).to.be.eql(0) + expect((psubs[0].pubsub).score.score(psubs[2].components.peerId.toString())).to.be.lt(0) + }) + + it('test gossipsub piggyback control', async function () { + psubs = await createComponentsArray({ number: 2 }) + const otherId = psubs[1].components.peerId.toString() + const psub = psubs[0].pubsub + + const topic1 = 'topic_1' + const topic2 = 'topic_2' + const topic3 = 'topic_3' + psub.mesh.set(topic1, new Set([otherId])) + psub.mesh.set(topic2, new Set()) + + const rpc: RPC = { + subscriptions: [], + messages: [] + } + + const toGraft = (topicID: string): RPC.ControlGraft => ({ topicID }) + const toPrune = (topicID: string): RPC.ControlPrune => ({ topicID, peers: [] }) + + psub.piggybackControl(otherId, rpc, { + graft: [toGraft(topic1), toGraft(topic2), toGraft(topic3)], + prune: [toPrune(topic1), toPrune(topic2), toPrune(topic3)], + ihave: [], + iwant: [], + idontwant: [] + }) + + const expectedRpc: RPC 
= { + subscriptions: [], + messages: [], + control: { + graft: [toGraft(topic1)], + prune: [toPrune(topic2), toPrune(topic3)], + ihave: [], + iwant: [], + idontwant: [] + } + } + + expect(rpc).deep.equals(expectedRpc) + + await psub.stop() + }) + + it('test gossipsub opportunistic grafting', async function () { + // Create 20 nodes + // 6 real gossip nodes, 14 'sybil' nodes, unresponsive nodes + // Connect some of the real nodes + // Connect every sybil to every real node + // Subscribe to the topic, all real nodes + // Publish 300 messages from the real nodes + // Wait for opgraft + // Assert the real peer meshes have at least 2 honest peers + const topic = 'test' + psubs = await createComponentsArray({ + number: 20, + init: { + scoreParams: { + IPColocationFactorThreshold: 50, + decayToZero: 0.01, + topics: { + + [topic]: { + topicWeight: 1, + timeInMeshWeight: 0.00002777, + timeInMeshQuantum: 1000, + timeInMeshCap: 3600, + firstMessageDeliveriesWeight: 100, + firstMessageDeliveriesDecay: 0.99997, + firstMessageDeliveriesCap: 1000, + meshMessageDeliveriesWeight: 0, + invalidMessageDeliveriesDecay: 0.99997 + } as TopicScoreParams + } + }, + scoreThresholds: { + gossipThreshold: -10, + publishThreshold: -100, + graylistThreshold: -10000, + opportunisticGraftThreshold: 1 + } + } + }) + const promises = psubs.map(async (ps) => awaitEvents(ps.pubsub, 'gossipsub:heartbeat', 1)) + const real = psubs.slice(0, 6) + const sybils = psubs.slice(6) + + const connectPromises = real.map( + async (psub) => awaitEvents(psub.components.events, 'peer:connect', 3) + ) + await connectSome(real, 5) + await Promise.all(connectPromises) + sybils.forEach((s) => { + (s.pubsub).handleReceivedRpc = async function () { + // + } + }) + + for (let i = 0; i < sybils.length; i++) { + for (let j = 0; j < real.length; j++) { + await connectPubsubNodes(sybils[i], real[j]) + } + } + + await Promise.all(promises) + + const realPeerIdStrs = real.map((psub) => psub.components.peerId.toString()) + const 
subscriptionPromises = real.map(async (psub) => { + const waitingPeerIdStrs = Array.from(psub.pubsub.getPeers().values()) + .map((p) => p.toString()) + .filter((peerId) => realPeerIdStrs.includes(peerId.toString())) + return checkReceivedSubscriptions(psub, waitingPeerIdStrs, topic) + }) + psubs.forEach((ps) => { ps.pubsub.subscribe(topic) }) + await Promise.all(subscriptionPromises) + + for (let i = 0; i < 300; i++) { + const msg = uint8ArrayFromString(`${i} its not a flooooood ${i}`) + const owner = i % real.length + await psubs[owner].pubsub.publish(topic, msg) + } + + // now wait for opgraft cycles + await Promise.all(psubs.map(async (ps) => awaitEvents(ps.pubsub, 'gossipsub:heartbeat', 7))) + + // check the honest node meshes, they should have at least 3 honest peers each + const realPeerIds = real.map((r) => r.components.peerId.toString()) + + await pRetry( + async () => { + for (const r of real) { + const meshPeers = (r.pubsub).mesh.get(topic) + + if (meshPeers == null) { + throw new Error('meshPeers was null') + } + + let count = 0 + realPeerIds.forEach((p) => { + if (meshPeers.has(p)) { + count++ + } + }) + + if (count < 2) { + await delay(100) + throw new Error('Count was less than 3') + } + } + }, + { retries: 10 } + ) + }) +}) diff --git a/packages/gossipsub/test/floodsub.spec.ts b/packages/gossipsub/test/floodsub.spec.ts new file mode 100644 index 0000000000..61dda6675c --- /dev/null +++ b/packages/gossipsub/test/floodsub.spec.ts @@ -0,0 +1,289 @@ +import { floodsub } from '@libp2p/floodsub' +import { stop } from '@libp2p/interface' +import { expect } from 'aegir/chai' +import delay from 'delay' +import { pEvent } from 'p-event' +import pRetry from 'p-retry' +import { fromString as uint8ArrayFromString } from 'uint8arrays/from-string' +import { connectPubsubNodes, createComponents } from './utils/create-pubsub.js' +import type { GossipSubAndComponents } from './utils/create-pubsub.js' +import type { SubscriptionChangeData, Message } from 
'../src/index.js' + +describe.skip('gossipsub fallbacks to floodsub', () => { + describe('basics', () => { + let nodeGs: GossipSubAndComponents + let nodeFs: GossipSubAndComponents + + beforeEach(async () => { + nodeGs = await createComponents({ + init: { + fallbackToFloodsub: true + } + }) + nodeFs = await createComponents({ + pubsub: floodsub as any + }) + }) + + afterEach(async () => { + await stop( + ...[nodeGs, nodeFs].reduce( + (acc, curr) => acc.concat(curr.pubsub, ...Object.entries(curr.components)), + [] + ) + ) + }) + + it.skip('Dial event happened from nodeGs to nodeFs', async () => { + await connectPubsubNodes(nodeGs, nodeFs) + + await pRetry(() => { + // eslint-disable-next-line max-nested-callbacks + expect(nodeGs.pubsub.getPeers().map((s) => s.toString())).to.include(nodeFs.components.peerId.toString()) + // eslint-disable-next-line max-nested-callbacks + expect(nodeFs.pubsub.getPeers().map((s) => s.toString())).to.include(nodeGs.components.peerId.toString()) + }) + }) + }) + + describe.skip('should not be added if fallback disabled', () => { + let nodeGs: GossipSubAndComponents + let nodeFs: GossipSubAndComponents + + beforeEach(async () => { + nodeGs = await createComponents({ + init: { + fallbackToFloodsub: false + } + }) + nodeFs = await createComponents({ + pubsub: floodsub as any + }) + }) + + afterEach(async () => { + await stop( + ...[nodeGs, nodeFs].reduce( + (acc, curr) => acc.concat(curr.pubsub, ...Object.entries(curr.components)), + [] + ) + ) + }) + + it('Dial event happened from nodeGs to nodeFs, but nodeGs does not support floodsub', async () => { + try { + await connectPubsubNodes(nodeGs, nodeFs) + expect.fail('Dial should not have succeed') + } catch (err) { + expect(err).to.have.property('name', 'UnsupportedProtocolError') + } + }) + }) + + describe('subscription functionality', () => { + let nodeGs: GossipSubAndComponents + let nodeFs: GossipSubAndComponents + + before(async () => { + nodeGs = await createComponents({ + init: { + 
fallbackToFloodsub: true + }, + logPrefix: 'gossipsub-peer' + }) + nodeFs = await createComponents({ + pubsub: floodsub as any, + logPrefix: 'floodsub-peer' + }) + + await connectPubsubNodes(nodeGs, nodeFs) + }) + + afterEach(async () => { + await stop( + ...[nodeGs, nodeFs].reduce((acc, curr) => { + acc.push(curr.pubsub, ...Object.entries(curr.components)) + + return acc + }, []) + ) + }) + + it('Subscribe to a topic', async function () { + this.timeout(10000) + const topic = 'Z' + nodeGs.pubsub.subscribe(topic) + nodeFs.pubsub.subscribe(topic) + + // await subscription change + const [evt] = await Promise.all([ + pEvent<'subscription-change', CustomEvent>(nodeGs.pubsub, 'subscription-change'), + pEvent<'subscription-change', CustomEvent>(nodeFs.pubsub, 'subscription-change') + ]) + const { peerId: changedPeerId, subscriptions: changedSubs } = evt.detail + + expect(nodeGs.pubsub.getTopics()).to.include(topic) + expect(nodeFs.pubsub.getTopics()).to.include(topic) + expect(nodeGs.pubsub.getPeers()).to.have.lengthOf(1) + expect(nodeFs.pubsub.getPeers()).to.have.lengthOf(1) + expect(nodeGs.pubsub.getSubscribers(topic).map((p) => p.toString())).to.include( + nodeFs.components.peerId.toString() + ) + expect(nodeFs.pubsub.getSubscribers(topic).map((p) => p.toString())).to.include( + nodeGs.components.peerId.toString() + ) + + expect(nodeGs.pubsub.getPeers().map((p) => p.toString())).to.include(changedPeerId.toString()) + expect(changedSubs).to.have.lengthOf(1) + expect(changedSubs[0].topic).to.equal(topic) + expect(changedSubs[0].subscribe).to.equal(true) + }) + }) + + describe('publish functionality', () => { + let nodeGs: GossipSubAndComponents + let nodeFs: GossipSubAndComponents + const topic = 'Z' + + beforeEach(async () => { + nodeGs = await createComponents({ + init: { + fallbackToFloodsub: true + } + }) + nodeFs = await createComponents({ + pubsub: floodsub as any + }) + + await connectPubsubNodes(nodeGs, nodeFs) + + nodeGs.pubsub.subscribe(topic) + 
nodeFs.pubsub.subscribe(topic) + + // await subscription change + await Promise.all([pEvent(nodeGs.pubsub, 'subscription-change'), pEvent(nodeFs.pubsub, 'subscription-change')]) + }) + + afterEach(async () => { + await stop( + ...[nodeGs, nodeFs].reduce( + (acc, curr) => acc.concat(curr.pubsub, ...Object.entries(curr.components)), + [] + ) + ) + }) + + const batchPublishOpts = [true, false] + for (const batchPublish of batchPublishOpts) { + // eslint-disable-next-line no-loop-func + it(`Publish to a topic - nodeGs - batchPublish: ${batchPublish}`, async () => { + const promise = pEvent<'message', CustomEvent>(nodeFs.pubsub, 'message') + const data = uint8ArrayFromString('hey') + + await nodeGs.pubsub.publish(topic, data, { batchPublish }) + + const evt = await promise + if (evt.detail.type !== 'signed') { + throw new Error('unexpected message type') + } + expect(evt.detail.data).to.equalBytes(data) + expect(evt.detail.from.toString()).to.be.eql(nodeGs.components.peerId.toString()) + }) + + // eslint-disable-next-line no-loop-func + it(`Publish to a topic - nodeFs - batchPublish: ${batchPublish}`, async () => { + const promise = pEvent<'message', CustomEvent>(nodeGs.pubsub, 'message') + const data = uint8ArrayFromString('banana') + + await nodeFs.pubsub.publish(topic, data, { batchPublish }) + + const evt = await promise + if (evt.detail.type !== 'signed') { + throw new Error('unexpected message type') + } + expect(evt.detail.data).to.equalBytes(data) + expect(evt.detail.from.toString()).to.be.eql(nodeFs.components.peerId.toString()) + }) + } + }) + + describe('publish after unsubscribe', () => { + let nodeGs: GossipSubAndComponents + let nodeFs: GossipSubAndComponents + const topic = 'Z' + + beforeEach(async () => { + nodeGs = await createComponents({ + init: { + fallbackToFloodsub: true + } + }) + nodeFs = await createComponents({ + pubsub: floodsub as any + }) + + await connectPubsubNodes(nodeGs, nodeFs) + + nodeGs.pubsub.subscribe(topic) + 
nodeFs.pubsub.subscribe(topic) + + // await subscription change + await Promise.all([pEvent(nodeGs.pubsub, 'subscription-change'), pEvent(nodeFs.pubsub, 'subscription-change')]) + // allow subscriptions to propagate to the other peer + await delay(10) + }) + + afterEach(async () => { + await stop( + ...[nodeGs, nodeFs].reduce( + (acc, curr) => acc.concat(curr.pubsub, ...Object.entries(curr.components)), + [] + ) + ) + }) + + it('Unsubscribe from a topic', async () => { + const promise = pEvent<'subscription-change', CustomEvent>( + nodeFs.pubsub, + 'subscription-change' + ) + + nodeGs.pubsub.unsubscribe(topic) + expect(nodeGs.pubsub.getTopics()).to.be.empty() + + const evt = await promise + const { peerId: changedPeerId, subscriptions: changedSubs } = evt.detail + + expect(nodeFs.pubsub.getPeers()).to.have.lengthOf(1) + expect(nodeFs.pubsub.getSubscribers(topic)).to.be.empty() + expect(nodeFs.pubsub.getPeers().map((p) => p.toString())).to.include(changedPeerId.toString()) + expect(changedSubs).to.have.lengthOf(1) + expect(changedSubs[0].topic).to.equal(topic) + expect(changedSubs[0].subscribe).to.equal(false) + }) + + it('Publish to a topic after unsubscribe', async () => { + nodeGs.pubsub.unsubscribe(topic) + await pEvent(nodeFs.pubsub, 'subscription-change') + + const promise = new Promise((resolve, reject) => { + nodeGs.pubsub.addEventListener('message', reject, { + once: true + }) + setTimeout(() => { + nodeGs.pubsub.removeEventListener('message', reject) + resolve() + }, 100) + }) + + await nodeFs.pubsub.publish(topic, uint8ArrayFromString('banana')) + await nodeGs.pubsub.publish(topic, uint8ArrayFromString('banana')) + + try { + await promise + } catch (e) { + expect.fail('message should not be received') + } + }) + }) +}) diff --git a/packages/gossipsub/test/gossip.spec.ts b/packages/gossipsub/test/gossip.spec.ts new file mode 100644 index 0000000000..2dd028e172 --- /dev/null +++ b/packages/gossipsub/test/gossip.spec.ts @@ -0,0 +1,405 @@ +import { 
generateKeyPair } from '@libp2p/crypto/keys' +import { stop } from '@libp2p/interface' +import { defaultLogger } from '@libp2p/logger' +import { peerIdFromPrivateKey } from '@libp2p/peer-id' +import { expect } from 'aegir/chai' +import { pEvent } from 'p-event' +import sinon from 'sinon' +import { stubInterface } from 'sinon-ts' +import { concat } from 'uint8arrays' +import { fromString as uint8ArrayFromString } from 'uint8arrays/from-string' +import { GossipsubDhi } from '../src/constants.js' +import { GossipSub as GossipSubClass } from '../src/gossipsub.js' +import { connectAllPubSubNodes, createComponentsArray } from './utils/create-pubsub.js' +import type { GossipSubAndComponents } from './utils/create-pubsub.js' +import type { PeerStore } from '@libp2p/interface' +import type { ConnectionManager, Registrar } from '@libp2p/interface-internal' +import type { SinonStubbedInstance } from 'sinon' + +describe('gossip', () => { + let nodes: GossipSubAndComponents[] + + // Create pubsub nodes + beforeEach(async () => { + nodes = await createComponentsArray({ + number: GossipsubDhi + 2, + connected: false, + init: { + scoreParams: { + IPColocationFactorThreshold: GossipsubDhi + 3 + }, + maxInboundDataLength: 4000000, + allowPublishToZeroTopicPeers: false, + idontwantMaxMessages: 10 + } + }) + }) + + afterEach(async () => { + await stop(...nodes.reduce((acc, curr) => acc.concat(curr.pubsub, ...Object.entries(curr.components)), [])) + }) + + it('should send gossip to non-mesh peers in topic', async function () { + this.timeout(10e4) + const nodeA = nodes[0] + const topic = 'Z' + + const subscriptionPromises = nodes.map(async (n) => pEvent(n.pubsub, 'subscription-change')) + // add subscriptions to each node + nodes.forEach((n) => { n.pubsub.subscribe(topic) }) + + // every node connected to every other + await connectAllPubSubNodes(nodes) + + // wait for subscriptions to be transmitted + await Promise.all(subscriptionPromises) + + // await mesh rebalancing + await 
Promise.all(nodes.map(async (n) => pEvent(n.pubsub, 'gossipsub:heartbeat'))) + + // set spy. NOTE: Forcing private property to be public + const nodeASpy = nodeA.pubsub as Partial as SinonStubbedInstance<{ + pushGossip: GossipSubClass['pushGossip'] + }> + sinon.spy(nodeASpy, 'pushGossip') + + await nodeA.pubsub.publish(topic, uint8ArrayFromString('hey')) + + // gossip happens during the heartbeat + await pEvent(nodeA.pubsub, 'gossipsub:heartbeat') + + const mesh = (nodeA.pubsub).mesh.get(topic) + + if (mesh == null) { + throw new Error('No mesh for topic') + } + + nodeASpy.pushGossip + .getCalls() + .map((call) => call.args[0]) + .forEach((peerId) => { + expect(mesh).to.not.include(peerId) + }) + + // unset spy + nodeASpy.pushGossip.restore() + }) + + it('should send idontwant to peers in topic', async function () { + // This test checks that idontwants and idontwantsCounts are correctly incrmemented + // - idontwantCounts should track the number of idontwant messages received from a peer for a single heartbeat + // - it should increment on receive of idontwant msgs (up to limit) + // - it should be emptied after heartbeat + // - idontwants should track the idontwant messages received from a peer along with the heartbeatId when received + // - it should increment on receive of idontwant msgs (up to limit) + // - it should be emptied after mcacheLength heartbeats + this.timeout(10e4) + const nodeA = nodes[0] + const otherNodes = nodes.slice(1) + const topic = 'Z' + const idontwantMaxMessages = nodeA.pubsub.opts.idontwantMaxMessages + const idontwantMinDataSize = nodeA.pubsub.opts.idontwantMinDataSize + + const subscriptionPromises = nodes.map(async (n) => pEvent(n.pubsub, 'subscription-change')) + // add subscriptions to each node + nodes.forEach((n) => { n.pubsub.subscribe(topic) }) + + // every node connected to every other + await connectAllPubSubNodes(nodes) + + // wait for subscriptions to be transmitted + await Promise.all(subscriptionPromises) + + // await 
mesh rebalancing + await Promise.all(nodes.map(async (n) => pEvent(n.pubsub, 'gossipsub:heartbeat'))) + + // publish a bunch of messages, enough to fill up our idontwant caches + for (let i = 0; i < idontwantMaxMessages * 2; i++) { + const msg = concat([ + uint8ArrayFromString(i.toString()), + new Uint8Array(idontwantMinDataSize) + ]) + await nodeA.pubsub.publish(topic, msg) + } + // track the heartbeat when each node received the last message + + const ticks = otherNodes.map((n) => n.pubsub['heartbeatTicks']) + + // there's no event currently implemented to await, so just wait a bit - flaky :( + // TODO figure out something more robust + await new Promise((resolve) => setTimeout(resolve, 200)) + + // other nodes should have received idontwant messages + // check that idontwants <= GossipsubIdontwantMaxMessages + for (let i = 0; i < otherNodes.length; i++) { + const node = otherNodes[i] + + const currentTick = node.pubsub['heartbeatTicks'] + + const idontwantCounts = node.pubsub['idontwantCounts'] + let minCount = Infinity + let maxCount = 0 + for (const count of idontwantCounts.values()) { + minCount = Math.min(minCount, count) + maxCount = Math.max(maxCount, count) + } + // expect(minCount).to.be.greaterThan(0) + expect(maxCount).to.be.lessThanOrEqual(idontwantMaxMessages) + + const idontwants = node.pubsub['idontwants'] + let minIdontwants = Infinity + let maxIdontwants = 0 + for (const idontwant of idontwants.values()) { + minIdontwants = Math.min(minIdontwants, idontwant.size) + maxIdontwants = Math.max(maxIdontwants, idontwant.size) + } + // expect(minIdontwants).to.be.greaterThan(0) + expect(maxIdontwants).to.be.lessThanOrEqual(idontwantMaxMessages) + + // sanity check that the idontwantCount matches idontwants.size + // only the case if there hasn't been a heartbeat + if (currentTick === ticks[i]) { + expect(minCount).to.be.equal(minIdontwants) + expect(maxCount).to.be.equal(maxIdontwants) + } + } + + await Promise.all(otherNodes.map(async (n) => 
pEvent(n.pubsub, 'gossipsub:heartbeat'))) + + // after a heartbeat + // idontwants are still tracked + // but idontwantCounts have been cleared + for (const node of nodes) { + const idontwantCounts = node.pubsub['idontwantCounts'] + for (const count of idontwantCounts.values()) { + expect(count).to.be.equal(0) + } + + const idontwants = node.pubsub['idontwants'] + let minIdontwants = Infinity + let maxIdontwants = 0 + for (const idontwant of idontwants.values()) { + minIdontwants = Math.min(minIdontwants, idontwant.size) + maxIdontwants = Math.max(maxIdontwants, idontwant.size) + } + // expect(minIdontwants).to.be.greaterThan(0) + expect(maxIdontwants).to.be.lessThanOrEqual(idontwantMaxMessages) + } + }) + + it('Should allow publishing to zero peers if flag is passed', async function () { + this.timeout(10e4) + const nodeA = nodes[0] + const topic = 'Z' + + const publishResult = await nodeA.pubsub.publish(topic, uint8ArrayFromString('hey'), { + allowPublishToZeroTopicPeers: true + }) + + // gossip happens during the heartbeat + await pEvent(nodeA.pubsub, 'gossipsub:heartbeat') + + // should have sent message to peerB + expect(publishResult.recipients).to.deep.equal([]) + }) + + it('should tag peers', async function () { + this.timeout(10e4) + const nodeA = nodes[0] + const nodeB = nodes[1] + const topic = 'Z' + + const twoNodes = [nodeA, nodeB] + + const graftPromises = twoNodes.map(async (n) => pEvent(n.pubsub, 'gossipsub:graft')) + + // add subscriptions to each node + twoNodes.forEach((n) => { n.pubsub.subscribe(topic) }) + + // every node connected to every other + await connectAllPubSubNodes(twoNodes) + + // await grafts + await Promise.all(graftPromises) + + // await mesh rebalancing + await Promise.all(twoNodes.map(async (n) => pEvent(n.pubsub, 'gossipsub:heartbeat'))) + + const peerInfoA = await nodeA.components.peerStore.get(nodeB.components.peerId).catch((e) => undefined) + const peerInfoB = await 
nodeB.components.peerStore.get(nodeA.components.peerId).catch((e) => undefined) + expect(peerInfoA?.tags.get(topic)?.value).to.equal(100) + expect(peerInfoB?.tags.get(topic)?.value).to.equal(100) + }) + + it('should remove the tags upon pruning', async function () { + this.timeout(10e4) + const nodeA = nodes[0] + const nodeB = nodes[1] + const topic = 'Z' + + const twoNodes = [nodeA, nodeB] + + const subscriptionPromises = nodes.map(async (n) => pEvent(n.pubsub, 'subscription-change')) + // add subscriptions to each node + twoNodes.forEach((n) => { n.pubsub.subscribe(topic) }) + + // every node connected to every other + await connectAllPubSubNodes(nodes) + + // await for subscriptions to be transmitted + await Promise.all(subscriptionPromises) + + // await mesh rebalancing + await Promise.all(twoNodes.map(async (n) => pEvent(n.pubsub, 'gossipsub:heartbeat'))) + + twoNodes.forEach((n) => { n.pubsub.unsubscribe(topic) }) + + // await for unsubscriptions to be transmitted + // await mesh rebalancing + await Promise.all(twoNodes.map(async (n) => pEvent(n.pubsub, 'gossipsub:heartbeat'))) + + const peerInfoA = await nodeA.components.peerStore.get(nodeB.components.peerId).catch((e) => undefined) + const peerInfoB = await nodeB.components.peerStore.get(nodeA.components.peerId).catch((e) => undefined) + expect(peerInfoA?.tags.get(topic)).to.be.undefined() + expect(peerInfoB?.tags.get(topic)).to.be.undefined() + }) + + it.skip('should reject incoming messages bigger than maxInboundDataLength limit', async function () { + this.timeout(10e4) + const nodeA = nodes[0] + const nodeB = nodes[1] + + const twoNodes = [nodeA, nodeB] + const topic = 'Z' + const subscriptionPromises = twoNodes.map(async (n) => pEvent(n.pubsub, 'subscription-change')) + // add subscriptions to each node + twoNodes.forEach((n) => { n.pubsub.subscribe(topic) }) + + // every node connected to every other + await connectAllPubSubNodes(twoNodes) + + // wait for subscriptions to be transmitted + await 
Promise.all(subscriptionPromises) + + // await mesh rebalancing + await Promise.all(twoNodes.map(async (n) => pEvent(n.pubsub, 'gossipsub:heartbeat'))) + + // set spy. NOTE: Forcing private property to be public + const nodeBSpy = nodeB.pubsub as Partial as SinonStubbedInstance<{ + handlePeerReadStreamError: GossipSubClass['handlePeerReadStreamError'] + }> + sinon.spy(nodeBSpy, 'handlePeerReadStreamError') + + // This should lead to handlePeerReadStreamError at nodeB + await nodeA.pubsub.publish(topic, new Uint8Array(5000000)) + await pEvent(nodeA.pubsub, 'gossipsub:heartbeat') + const expectedError = nodeBSpy.handlePeerReadStreamError.getCalls()[0]?.args[0] + expect(expectedError).to.have.property('name', 'InvalidDataLengthError') + + // unset spy + nodeBSpy.handlePeerReadStreamError.restore() + }) + + it('should send piggyback control into other sent messages', async function () { + this.timeout(10e4) + const nodeA = nodes[0] + const topic = 'Z' + + const promises = nodes.map(async (n) => pEvent(n.pubsub, 'subscription-change')) + // add subscriptions to each node + nodes.forEach((n) => { n.pubsub.subscribe(topic) }) + + // every node connected to every other + await connectAllPubSubNodes(nodes) + + // wait for subscriptions to be transmitted + await Promise.all(promises) + + // await nodeA mesh rebalancing + await pEvent(nodeA.pubsub, 'gossipsub:heartbeat') + + const mesh = (nodeA.pubsub).mesh.get(topic) + + if (mesh == null) { + throw new Error('No mesh for topic') + } + + if (mesh.size === 0) { + throw new Error('Topic mesh was empty') + } + + const peerB = Array.from(mesh)[0] + + if (peerB == null) { + throw new Error('Could not get peer from mesh') + } + + // should have peerB as a subscriber to the topic + expect(nodeA.pubsub.getSubscribers(topic).map((p) => p.toString())).to.include( + peerB, + "did not know about peerB's subscription to topic" + ) + + // should be able to send them messages + expect((nodeA.pubsub).streamsOutbound.has(peerB)).to.be.true( + 
'nodeA did not have connection open to peerB' + ) + + // set spy. NOTE: Forcing private property to be public + const nodeASpy = sinon.spy(nodeA.pubsub, 'piggybackControl') + // manually add control message to be sent to peerB + const graft = { ihave: [], iwant: [], graft: [{ topicID: topic }], prune: [], idontwant: [] } + ;(nodeA.pubsub).control.set(peerB, graft) + ;(nodeA.pubsub).gossip.set(peerB, []) + + const publishResult = await nodeA.pubsub.publish(topic, uint8ArrayFromString('hey')) + + // should have sent message to peerB + expect(publishResult.recipients.map((p) => p.toString())).to.include(peerB, 'did not send pubsub message to peerB') + + // wait until spy is called + const startTime = Date.now() + while (Date.now() - startTime < 5000) { + if (nodeASpy.callCount > 0) { break } + } + + expect(nodeASpy.callCount).to.be.equal(1) + // expect control message to be sent alongside published message + const call = nodeASpy.getCalls()[0] + expect(call).to.have.deep.nested.property('args[1].control.graft', graft.graft) + + // unset spy + nodeASpy.restore() + }) + + it('should allow configuring stream limits', async () => { + const maxInboundStreams = 7 + const maxOutboundStreams = 5 + + const registrar = stubInterface() + const privateKey = await generateKeyPair('Ed25519') + const peerId = peerIdFromPrivateKey(privateKey) + const pubsub = new GossipSubClass( + { + privateKey, + peerId, + registrar, + peerStore: stubInterface(), + connectionManager: stubInterface(), + logger: defaultLogger() + }, + { + maxInboundStreams, + maxOutboundStreams + } + ) + + await pubsub.start() + + expect(registrar.register.called).to.be.true() + expect(registrar.handle.getCall(0)).to.have.nested.property('args[2].maxInboundStreams', maxInboundStreams) + expect(registrar.handle.getCall(0)).to.have.nested.property('args[2].maxOutboundStreams', maxOutboundStreams) + + await pubsub.stop() + }) +}) diff --git a/packages/gossipsub/test/heartbeat.spec.ts 
b/packages/gossipsub/test/heartbeat.spec.ts new file mode 100644 index 0000000000..ce05cac8ef --- /dev/null +++ b/packages/gossipsub/test/heartbeat.spec.ts @@ -0,0 +1,35 @@ +import { stop } from '@libp2p/interface' +import { expect } from 'aegir/chai' +import { pEvent } from 'p-event' +import { GossipsubHeartbeatInterval } from '../src/constants.js' +import { createComponents } from './utils/create-pubsub.js' +import type { GossipSubAndComponents } from './utils/create-pubsub.js' + +describe('heartbeat', () => { + let node: GossipSubAndComponents + + before(async () => { + node = await createComponents({ + init: { + emitSelf: true + } + }) + }) + + after(async () => { + await stop(node.pubsub, ...Object.entries(node.components)) + }) + + it('should occur with regularity defined by a constant', async function () { + this.timeout(GossipsubHeartbeatInterval * 5) + + await pEvent(node.pubsub, 'gossipsub:heartbeat') + const t1 = Date.now() + + await pEvent(node.pubsub, 'gossipsub:heartbeat') + const t2 = Date.now() + + const safeFactor = 1.5 + expect(t2 - t1).to.be.lt(GossipsubHeartbeatInterval * safeFactor) + }) +}) diff --git a/packages/gossipsub/test/mesh.spec.ts b/packages/gossipsub/test/mesh.spec.ts new file mode 100644 index 0000000000..ce78ceed86 --- /dev/null +++ b/packages/gossipsub/test/mesh.spec.ts @@ -0,0 +1,73 @@ +import { stop } from '@libp2p/interface' +import { expect } from 'aegir/chai' +import delay from 'delay' +import { pEvent } from 'p-event' +import { GossipsubDhi } from '../src/constants.js' +import { connectAllPubSubNodes, createComponentsArray } from './utils/create-pubsub.js' +import type { GossipSubAndComponents } from './utils/create-pubsub.js' + +describe('mesh overlay', () => { + let nodes: GossipSubAndComponents[] + + // Create pubsub nodes + beforeEach(async () => { + nodes = await createComponentsArray({ + number: GossipsubDhi + 2, + connected: false, + init: { + scoreParams: { + IPColocationFactorThreshold: GossipsubDhi + 3 + } + } + }) 
+ }) + + afterEach(async () => { + await stop(...nodes.reduce((acc, curr) => acc.concat(curr.pubsub, ...Object.entries(curr.components)), [])) + }) + + it('should add mesh peers below threshold', async function () { + this.timeout(10e3) + + // test against node0 + const node0 = nodes[0] + const topic = 'Z' + + // add subscriptions to each node + nodes.forEach((node) => { node.pubsub.subscribe(topic) }) + + // connect N (< GossipsubD) nodes to node0 + const N = 4 + await connectAllPubSubNodes(nodes.slice(0, N + 1)) + + await delay(50) + // await mesh rebalancing + await new Promise((resolve) => { + (node0.pubsub).addEventListener('gossipsub:heartbeat', resolve, { + once: true + }) + } + ) + + const mesh = (node0.pubsub).mesh.get(topic) + expect(mesh).to.have.property('size', N) + }) + + it('should remove mesh peers once above threshold', async function () { + this.timeout(10e4) + // test against node0 + const node0 = nodes[0] + const topic = 'Z' + + // add subscriptions to each node + nodes.forEach((node) => { node.pubsub.subscribe(topic) }) + + await connectAllPubSubNodes(nodes) + + // await mesh rebalancing + await pEvent(node0.pubsub, 'gossipsub:heartbeat') + + const mesh = (node0.pubsub).mesh.get(topic) + expect(mesh).to.have.property('size').that.is.lte(GossipsubDhi) + }) +}) diff --git a/packages/gossipsub/test/message-cache.spec.ts b/packages/gossipsub/test/message-cache.spec.ts new file mode 100644 index 0000000000..e3f76d1538 --- /dev/null +++ b/packages/gossipsub/test/message-cache.spec.ts @@ -0,0 +1,162 @@ +import * as utils from '@libp2p/pubsub/utils' +import { expect } from 'aegir/chai' +import { fromString as uint8ArrayFromString } from 'uint8arrays/from-string' +import { MessageCache } from '../src/message-cache.js' +import { messageIdToString } from '../src/utils/messageIdToString.js' +import { getMsgId } from './utils/index.js' +import type { RPC } from '../src/message/rpc.js' +import type { MessageId } from '../src/types.js' + +const toMessageId = 
(msgId: Uint8Array): MessageId => { + return { + msgId, + msgIdStr: messageIdToString(msgId) + } +} + +describe('Testing Message Cache Operations', () => { + const messageCache = new MessageCache(3, 5, messageIdToString) + const testMessages: RPC.Message[] = [] + const topic = 'test' + const getGossipIDs = (mcache: MessageCache, topic: string): Uint8Array[] => { + const gossipIDsByTopic = mcache.getGossipIDs(new Set([topic])) + return gossipIDsByTopic.get(topic) ?? [] + } + + before(async () => { + const makeTestMessage = (n: number): RPC.Message => { + return { + from: new Uint8Array(0), + data: uint8ArrayFromString(n.toString()), + seqno: uint8ArrayFromString(utils.randomSeqno().toString(16).padStart(16, '0'), 'base16'), + topic + } + } + + for (let i = 0; i < 60; i++) { + testMessages.push(makeTestMessage(i)) + } + + for (let i = 0; i < 10; i++) { + messageCache.put(toMessageId(getMsgId(testMessages[i])), testMessages[i], true) + } + }) + + it('Should retrieve correct messages for each test message', () => { + for (let i = 0; i < 10; i++) { + const messageId = getMsgId(testMessages[i]) + const message = messageCache.get(messageId) + expect(message).to.equal(testMessages[i]) + } + }) + + it('Get GossipIDs', () => { + const gossipIDs = getGossipIDs(messageCache, topic) + expect(gossipIDs.length).to.equal(10) + + for (let i = 0; i < 10; i++) { + const messageID = getMsgId(testMessages[i]) + expect(messageID).to.deep.equal(gossipIDs[i]) + } + }) + + it('Shift message cache', async () => { + messageCache.shift() + for (let i = 10; i < 20; i++) { + messageCache.put(toMessageId(getMsgId(testMessages[i])), testMessages[i], true) + } + + for (let i = 0; i < 20; i++) { + const messageID = getMsgId(testMessages[i]) + const message = messageCache.get(messageID) + expect(message).to.equal(testMessages[i]) + } + + let gossipIDs = getGossipIDs(messageCache, topic) + expect(gossipIDs.length).to.equal(20) + + for (let i = 0; i < 10; i++) { + const messageID = 
getMsgId(testMessages[i]) + expect(messageID).to.deep.equal(gossipIDs[10 + i]) + } + + for (let i = 10; i < 20; i++) { + const messageID = getMsgId(testMessages[i]) + expect(messageID).to.deep.equal(gossipIDs[i - 10]) + } + + messageCache.shift() + for (let i = 20; i < 30; i++) { + messageCache.put(toMessageId(getMsgId(testMessages[i])), testMessages[i], true) + } + + messageCache.shift() + for (let i = 30; i < 40; i++) { + messageCache.put(toMessageId(getMsgId(testMessages[i])), testMessages[i], true) + } + + messageCache.shift() + for (let i = 40; i < 50; i++) { + messageCache.put(toMessageId(getMsgId(testMessages[i])), testMessages[i], true) + } + + messageCache.shift() + for (let i = 50; i < 60; i++) { + messageCache.put(toMessageId(getMsgId(testMessages[i])), testMessages[i], true) + } + + expect(messageCache.msgs.size).to.equal(50) + + for (let i = 0; i < 10; i++) { + const messageID = getMsgId(testMessages[i]) + const message = messageCache.get(messageID) + expect(message).to.be.an('undefined') + } + + for (let i = 10; i < 60; i++) { + const messageID = getMsgId(testMessages[i]) + const message = messageCache.get(messageID) + expect(message).to.equal(testMessages[i]) + } + + gossipIDs = getGossipIDs(messageCache, topic) + expect(gossipIDs.length).to.equal(30) + + for (let i = 0; i < 10; i++) { + const messageID = getMsgId(testMessages[50 + i]) + expect(messageID).to.deep.equal(gossipIDs[i]) + } + + for (let i = 10; i < 20; i++) { + const messageID = getMsgId(testMessages[30 + i]) + expect(messageID).to.deep.equal(gossipIDs[i]) + } + + for (let i = 20; i < 30; i++) { + const messageID = getMsgId(testMessages[10 + i]) + expect(messageID).to.deep.equal(gossipIDs[i]) + } + }) + + it('should not gossip not-validated message ids', () => { + let gossipIDs = getGossipIDs(messageCache, topic) + while (gossipIDs.length > 0) { + messageCache.shift() + gossipIDs = getGossipIDs(messageCache, topic) + } + expect(gossipIDs.length).to.be.equal(0) + + for (let i = 10; i < 
20; i++) { + // 5 last messages are not validated + const validated = i < 15 + messageCache.put(toMessageId(getMsgId(testMessages[i])), testMessages[i], validated) + } + + gossipIDs = getGossipIDs(messageCache, topic) + expect(gossipIDs.length).to.be.equal(5) + // only validate the new gossip ids + for (let i = 0; i < 5; i++) { + expect(gossipIDs[i]).to.deep.equal(getMsgId(testMessages[i + 10]), 'incorrect gossip message id ' + String(i)) + } + }) +}) diff --git a/packages/gossipsub/test/peer-score-params.spec.ts b/packages/gossipsub/test/peer-score-params.spec.ts new file mode 100644 index 0000000000..0da8a4b3cf --- /dev/null +++ b/packages/gossipsub/test/peer-score-params.spec.ts @@ -0,0 +1,537 @@ +import { expect } from 'aegir/chai' +import * as constants from '../src/constants.js' +import { + createTopicScoreParams, + validateTopicScoreParams, + createPeerScoreParams, + validatePeerScoreParams +} from '../src/score/index.js' + +describe('TopicScoreParams validation', () => { + it('should not throw on default TopicScoreParams', () => { + expect(() => { validateTopicScoreParams(createTopicScoreParams({})) }).to.not.throw() + }) + it('should throw on invalid TopicScoreParams', () => { + expect( + () => { + validateTopicScoreParams( + createTopicScoreParams({ + topicWeight: -1 + }) + ) + }, + 'topicWeight must be >= 0' + ).to.throw() + expect( + () => { + validateTopicScoreParams( + createTopicScoreParams({ + timeInMeshWeight: -1, + timeInMeshQuantum: 1000 + }) + ) + }, + 'timeInMeshWeight must be positive (or 0 to disable)' + ).to.throw() + expect( + () => { + validateTopicScoreParams( + createTopicScoreParams({ + timeInMeshWeight: 1, + timeInMeshQuantum: -1 + }) + ) + }, + 'timeInMeshQuantum must be positive' + ).to.throw() + expect( + () => { + validateTopicScoreParams( + createTopicScoreParams({ + timeInMeshWeight: 1, + timeInMeshQuantum: 1000, + timeInMeshCap: -1 + }) + ) + }, + 'timeInMeshCap must be positive' + ).to.throw() + expect( + () => { + 
validateTopicScoreParams( + createTopicScoreParams({ + timeInMeshQuantum: 1000, + firstMessageDeliveriesWeight: -1 + }) + ) + }, + 'firstMessageDeliveriesWeight must be positive (or 0 to disable)' + ).to.throw() + expect( + () => { + validateTopicScoreParams( + createTopicScoreParams({ + timeInMeshQuantum: 1000, + firstMessageDeliveriesWeight: 1, + firstMessageDeliveriesDecay: -1 + }) + ) + }, + 'firstMessageDeliveriesDecay must be between 0 and 1' + ).to.throw() + expect( + () => { + validateTopicScoreParams( + createTopicScoreParams({ + timeInMeshQuantum: 1000, + firstMessageDeliveriesWeight: 1, + firstMessageDeliveriesDecay: 2 + }) + ) + }, + 'firstMessageDeliveriesDecay must be between 0 and 1' + ).to.throw() + expect( + () => { + validateTopicScoreParams( + createTopicScoreParams({ + timeInMeshQuantum: 1000, + firstMessageDeliveriesWeight: 1, + firstMessageDeliveriesDecay: 0.5, + firstMessageDeliveriesCap: -1 + }) + ) + }, + 'firstMessageDeliveriesCap must be positive' + ).to.throw() + expect( + () => { + validateTopicScoreParams( + createTopicScoreParams({ + timeInMeshQuantum: 1000, + meshMessageDeliveriesWeight: 1 + }) + ) + }, + 'meshMessageDeliveriesWeight must be negative (or 0 to disable)' + ).to.throw() + expect( + () => { + validateTopicScoreParams( + createTopicScoreParams({ + timeInMeshQuantum: 1000, + meshMessageDeliveriesWeight: -1, + meshMessageDeliveriesDecay: -1 + }) + ) + }, + 'meshMessageDeliveriesDecay must be between 0 and 1' + ).to.throw() + expect( + () => { + validateTopicScoreParams( + createTopicScoreParams({ + timeInMeshQuantum: 1000, + meshMessageDeliveriesWeight: -1, + meshMessageDeliveriesDecay: 2 + }) + ) + }, + 'meshMessageDeliveriesDecay must be between 0 and 1' + ).to.throw() + expect( + () => { + validateTopicScoreParams( + createTopicScoreParams({ + timeInMeshQuantum: 1000, + meshMessageDeliveriesWeight: -1, + meshMessageDeliveriesDecay: 0.5, + meshMessageDeliveriesCap: -1 + }) + ) + }, + 'meshMessageDeliveriesCap must be 
positive' + ).to.throw() + expect( + () => { + validateTopicScoreParams( + createTopicScoreParams({ + timeInMeshQuantum: 1000, + meshMessageDeliveriesWeight: -1, + meshMessageDeliveriesDecay: 5, + meshMessageDeliveriesThreshold: -3 + }) + ) + }, + 'meshMessageDeliveriesDecay must be between 0 and 1' + ).to.throw() + expect( + () => { + validateTopicScoreParams( + createTopicScoreParams({ + timeInMeshQuantum: 1000, + meshMessageDeliveriesWeight: -1, + meshMessageDeliveriesDecay: 0.5, + meshMessageDeliveriesThreshold: -3, + meshMessageDeliveriesWindow: -1 + }) + ) + }, + 'meshMessageDeliveriesThreshold must be positive' + ).to.throw() + expect( + () => { + validateTopicScoreParams( + createTopicScoreParams({ + timeInMeshQuantum: 1000, + meshMessageDeliveriesWeight: -1, + meshMessageDeliveriesDecay: 0.5, + meshMessageDeliveriesThreshold: 3, + meshMessageDeliveriesWindow: -1, + meshMessageDeliveriesActivation: 1 + }) + ) + }, + 'meshMessageDeliveriesWindow must be non-negative' + ).to.throw() + expect( + () => { + validateTopicScoreParams( + createTopicScoreParams({ + timeInMeshQuantum: 1000, + meshFailurePenaltyWeight: 1 + }) + ) + }, + 'meshFailurePenaltyWeight must be negative' + ).to.throw() + expect( + () => { + validateTopicScoreParams( + createTopicScoreParams({ + timeInMeshQuantum: 1000, + meshFailurePenaltyWeight: -1, + meshFailurePenaltyDecay: -1 + }) + ) + }, + 'meshFailurePenaltyDecay must be between 0 and 1' + ).to.throw() + expect( + () => { + validateTopicScoreParams( + createTopicScoreParams({ + timeInMeshQuantum: 1000, + meshFailurePenaltyWeight: -1, + meshFailurePenaltyDecay: 2 + }) + ) + }, + 'meshFailurePenaltyDecay must be between 0 and 1' + ).to.throw() + expect( + () => { + validateTopicScoreParams( + createTopicScoreParams({ + timeInMeshQuantum: 1000, + invalidMessageDeliveriesWeight: 1 + }) + ) + }, + 'invalidMessageDeliveriesWeight must be negative' + ).to.throw() + expect( + () => { + validateTopicScoreParams( + createTopicScoreParams({ + 
timeInMeshQuantum: 1000, + invalidMessageDeliveriesWeight: -1, + invalidMessageDeliveriesDecay: -1 + }) + ) + }, + 'invalidMessageDeliveriesDecay must be between 0 and 1' + ).to.throw() + expect( + () => { + validateTopicScoreParams( + createTopicScoreParams({ + timeInMeshQuantum: 1000, + invalidMessageDeliveriesWeight: -1, + invalidMessageDeliveriesDecay: 2 + }) + ) + }, + 'invalidMessageDeliveriesDecay must be between 0 and 1' + ).to.throw() + }) + it('should not throw on valid TopicScoreParams', () => { + expect(() => { + validateTopicScoreParams( + createTopicScoreParams({ + topicWeight: 2, + timeInMeshWeight: 0.01, + timeInMeshQuantum: 1000, + timeInMeshCap: 10, + firstMessageDeliveriesWeight: 1, + firstMessageDeliveriesDecay: 0.5, + firstMessageDeliveriesCap: 10, + meshMessageDeliveriesWeight: -1, + meshMessageDeliveriesDecay: 0.5, + meshMessageDeliveriesCap: 10, + meshMessageDeliveriesThreshold: 5, + meshMessageDeliveriesWindow: 1, + meshMessageDeliveriesActivation: 1000, + meshFailurePenaltyWeight: -1, + meshFailurePenaltyDecay: 0.5, + invalidMessageDeliveriesWeight: -1, + invalidMessageDeliveriesDecay: 0.5 + }) + ) + } + ).to.not.throw() + }) +}) + +describe('PeerScoreParams validation', () => { + const appScore = (): number => 0 + + it('should throw on invalid PeerScoreParams', () => { + expect( + () => { + validatePeerScoreParams( + createPeerScoreParams({ + topicScoreCap: -1, + appSpecificScore: appScore, + decayInterval: 1000, + decayToZero: 0.01 + }) + ) + }, + 'topicScoreCap must be positive' + ).to.throw() + expect( + () => { + validatePeerScoreParams( + createPeerScoreParams({ + topicScoreCap: 1, + decayInterval: 999, + decayToZero: 0.01 + }) + ) + }, + 'decayInterval must be at least 1s' + ).to.throw() + expect( + () => { + validatePeerScoreParams( + createPeerScoreParams({ + topicScoreCap: 1, + appSpecificScore: appScore, + decayInterval: 1000, + decayToZero: 0.01, + IPColocationFactorWeight: 1 + }) + ) + }, + 'IPColocationFactorWeight should be 
negative' + ).to.throw() + expect( + () => { + validatePeerScoreParams( + createPeerScoreParams({ + topicScoreCap: 1, + appSpecificScore: appScore, + decayInterval: 1000, + decayToZero: 0.01, + IPColocationFactorWeight: -1, + IPColocationFactorThreshold: -1 + }) + ) + }, + 'IPColocationFactorThreshold should be at least 1' + ).to.throw() + /* + TODO: appears to be valid config? + expect(() => + validatePeerScoreParams( + createPeerScoreParams({ + topicScoreCap: 1, + appSpecificScore: appScore, + decayInterval: 1000, + decayToZero: 0.01, + IPColocationFactorWeight: -1, + IPColocationFactorThreshold: 0.99 + }) + ), "IPColocationFactorThreshold should be at least 1" + ).to.throw() + */ + expect( + () => { + validatePeerScoreParams( + createPeerScoreParams({ + topicScoreCap: 1, + appSpecificScore: appScore, + decayInterval: 1000, + decayToZero: -1, + IPColocationFactorWeight: -1, + IPColocationFactorThreshold: 1 + }) + ) + }, + 'decayToZero must be between 0 and 1' + ).to.throw() + expect( + () => { + validatePeerScoreParams( + createPeerScoreParams({ + topicScoreCap: 1, + appSpecificScore: appScore, + decayInterval: 1000, + decayToZero: 2, + IPColocationFactorWeight: -1, + IPColocationFactorThreshold: 1 + }) + ) + }, + 'decayToZero must be between 0 and 1' + ).to.throw() + expect(() => { + validatePeerScoreParams( + createPeerScoreParams({ + appSpecificScore: appScore, + decayInterval: 1000, + decayToZero: 0.01, + behaviourPenaltyWeight: 1 + }) + ) + } + ).to.throw() + /* + TODO: appears to be valid config? 
+ expect(() => + validatePeerScoreParams( + createPeerScoreParams({ + appSpecificScore: appScore, + decayInterval: 1000, + decayToZero: 0.01, + behaviourPenaltyWeight: -1 + }) + ), "behaviourPenaltyWeight MUST be negative (or zero to disable)" + ).to.throw() + */ + expect( + () => { + validatePeerScoreParams( + createPeerScoreParams({ + appSpecificScore: appScore, + decayInterval: 1000, + decayToZero: 0.01, + behaviourPenaltyWeight: -1, + behaviourPenaltyDecay: 2 + }) + ) + }, + 'behaviourPenaltyDecay must be between 0 and 1' + ).to.throw() + expect(() => { + validatePeerScoreParams( + createPeerScoreParams({ + topicScoreCap: 1, + appSpecificScore: appScore, + decayInterval: 1000, + decayToZero: 0.01, + IPColocationFactorWeight: -1, + IPColocationFactorThreshold: 1, + topics: { + test: { + topicWeight: -1, + timeInMeshWeight: 0.01, + timeInMeshQuantum: Number(constants.second), + timeInMeshCap: 10, + firstMessageDeliveriesWeight: 1, + firstMessageDeliveriesDecay: 0.5, + firstMessageDeliveriesCap: 10, + meshMessageDeliveriesWeight: -1, + meshMessageDeliveriesDecay: 0.5, + meshMessageDeliveriesCap: 10, + meshMessageDeliveriesThreshold: 5, + meshMessageDeliveriesWindow: 1, + meshMessageDeliveriesActivation: 1000, + meshFailurePenaltyWeight: -1, + meshFailurePenaltyDecay: 0.5, + invalidMessageDeliveriesWeight: -1, + invalidMessageDeliveriesDecay: 0.5 + } + } + }) + ) + } + ).to.throw() + }) + it('should not throw on valid PeerScoreParams', () => { + expect(() => { + validatePeerScoreParams( + createPeerScoreParams({ + appSpecificScore: appScore, + decayInterval: 1000, + decayToZero: 0.01, + IPColocationFactorWeight: -1, + IPColocationFactorThreshold: 1, + behaviourPenaltyWeight: -1, + behaviourPenaltyDecay: 0.999 + }) + ) + } + ).to.not.throw() + expect(() => { + validatePeerScoreParams( + createPeerScoreParams({ + topicScoreCap: 1, + appSpecificScore: appScore, + decayInterval: 1000, + decayToZero: 0.01, + IPColocationFactorWeight: -1, + IPColocationFactorThreshold: 
1, + behaviourPenaltyWeight: -1, + behaviourPenaltyDecay: 0.999 + }) + ) + } + ).to.not.throw() + expect(() => { + validatePeerScoreParams( + createPeerScoreParams({ + topicScoreCap: 1, + appSpecificScore: appScore, + decayInterval: Number(constants.second), + decayToZero: 0.01, + IPColocationFactorWeight: -1, + IPColocationFactorThreshold: 1, + topics: { + test: { + topicWeight: 1, + timeInMeshWeight: 0.01, + timeInMeshQuantum: 1000, + timeInMeshCap: 10, + firstMessageDeliveriesWeight: 1, + firstMessageDeliveriesDecay: 0.5, + firstMessageDeliveriesCap: 10, + meshMessageDeliveriesWeight: -1, + meshMessageDeliveriesDecay: 0.5, + meshMessageDeliveriesCap: 10, + meshMessageDeliveriesThreshold: 5, + meshMessageDeliveriesWindow: 1, + meshMessageDeliveriesActivation: 1000, + meshFailurePenaltyWeight: -1, + meshFailurePenaltyDecay: 0.5, + invalidMessageDeliveriesWeight: -1, + invalidMessageDeliveriesDecay: 0.5 + } + } + }) + ) + } + ).to.not.throw() + }) +}) diff --git a/packages/gossipsub/test/peer-score-thresholds.spec.ts b/packages/gossipsub/test/peer-score-thresholds.spec.ts new file mode 100644 index 0000000000..4b0d7df771 --- /dev/null +++ b/packages/gossipsub/test/peer-score-thresholds.spec.ts @@ -0,0 +1,93 @@ +import { expect } from 'aegir/chai' +import { createPeerScoreThresholds, validatePeerScoreThresholds } from '../src/score/index.js' + +describe('PeerScoreThresholds validation', () => { + it('should throw on invalid PeerScoreThresholds', () => { + expect( + () => { + validatePeerScoreThresholds( + createPeerScoreThresholds({ + gossipThreshold: 1 + }) + ) + }, + 'gossipThreshold must be <= 0' + ).to.throw() + expect( + () => { + validatePeerScoreThresholds( + createPeerScoreThresholds({ + publishThreshold: 1 + }) + ) + }, + 'publishThreshold must be <= 0 and <= gossip threshold' + ).to.throw() + expect( + () => { + validatePeerScoreThresholds( + createPeerScoreThresholds({ + gossipThreshold: -1, + publishThreshold: 0 + }) + ) + }, + 'publishThreshold must be 
<= 0 and <= gossip threshold' + ).to.throw() + expect( + () => { + validatePeerScoreThresholds( + createPeerScoreThresholds({ + graylistThreshold: 1 + }) + ) + }, + 'graylistThreshold must be <= 0 and <= publish threshold' + ).to.throw() + expect( + () => { + validatePeerScoreThresholds( + createPeerScoreThresholds({ + publishThreshold: -1, + graylistThreshold: -2 + }) + ) + }, + 'graylistThreshold must be <= 0 and <= publish threshold' + ).to.throw() + expect( + () => { + validatePeerScoreThresholds( + createPeerScoreThresholds({ + acceptPXThreshold: -1 + }) + ) + }, + 'acceptPXThreshold must be >= 0' + ).to.throw() + expect( + () => { + validatePeerScoreThresholds( + createPeerScoreThresholds({ + opportunisticGraftThreshold: -1 + }) + ) + }, + 'opportunisticGraftThreshold must be >= 0' + ).to.throw() + }) + it('should not throw on valid PeerScoreThresholds', () => { + expect(() => { + validatePeerScoreThresholds( + createPeerScoreThresholds({ + gossipThreshold: -1, + publishThreshold: -2, + graylistThreshold: -3, + acceptPXThreshold: 1, + opportunisticGraftThreshold: 2 + }) + ) + } + ).to.not.throw() + }) +}) diff --git a/packages/gossipsub/test/peer-score.spec.ts b/packages/gossipsub/test/peer-score.spec.ts new file mode 100644 index 0000000000..75108561f4 --- /dev/null +++ b/packages/gossipsub/test/peer-score.spec.ts @@ -0,0 +1,754 @@ +import { generateKeyPair } from '@libp2p/crypto/keys' +import { defaultLogger } from '@libp2p/logger' +import { peerIdFromPrivateKey } from '@libp2p/peer-id' +import { expect } from 'aegir/chai' +import delay from 'delay' +import sinon from 'sinon' +import { ScorePenalty } from '../src/metrics.js' +import { PeerScore, createPeerScoreParams, createTopicScoreParams } from '../src/score/index.js' +import { RejectReason } from '../src/types.js' +import { getMsgIdStr, makeTestMessage } from './utils/index.js' +import type { PeerScoreParams, TopicScoreParams } from '../src/score/peer-score-params.js' +import type { PeerStats } from 
'../src/score/peer-stats.js' + +/** Placeholder for some ScorePenalty value, only used for metrics */ +const scorePenaltyAny = ScorePenalty.BrokenPromise + +describe('PeerScore', () => { + const logger = defaultLogger() + + it('should score based on time in mesh', async () => { + // Create parameters with reasonable default values + const mytopic = 'mytopic' + const params = createPeerScoreParams({ + topicScoreCap: 1000 + }) + const tparams = (params.topics[mytopic] = createTopicScoreParams({ + topicWeight: 0.5, + timeInMeshWeight: 1, + timeInMeshQuantum: 1, + timeInMeshCap: 3600 + })) + const peerA = peerIdFromPrivateKey(await generateKeyPair('Ed25519')).toString() + // Peer score should start at 0 + const ps = new PeerScore(params, null, logger, { scoreCacheValidityMs: 0 }) + ps.addPeer(peerA) + + let aScore = ps.score(peerA) + expect(aScore, 'expected score to start at zero').to.equal(0) + + // The time in mesh depends on how long the peer has been grafted + ps.graft(peerA, mytopic) + const elapsed = tparams.timeInMeshQuantum * 100 + await delay(elapsed + 10) + + ps.refreshScores() + aScore = ps.score(peerA) + expect(aScore).to.be.gte(((tparams.topicWeight * tparams.timeInMeshWeight) / tparams.timeInMeshQuantum) * elapsed) + }) + + it('should cap time in mesh score', async () => { + // Create parameters with reasonable default values + const mytopic = 'mytopic' + const params = createPeerScoreParams({}) + const tparams = (params.topics[mytopic] = createTopicScoreParams({ + topicWeight: 0.5, + timeInMeshWeight: 1, + timeInMeshQuantum: 1, + timeInMeshCap: 10, + invalidMessageDeliveriesDecay: 0.1 + })) + const peerA = peerIdFromPrivateKey(await generateKeyPair('Ed25519')).toString() + // Peer score should start at 0 + const ps = new PeerScore(params, null, logger, { scoreCacheValidityMs: 0 }) + ps.addPeer(peerA) + + let aScore = ps.score(peerA) + expect(aScore, 'expected score to start at zero').to.equal(0) + + // The time in mesh depends on how long the peer has 
been grafted + ps.graft(peerA, mytopic) + const elapsed = tparams.timeInMeshQuantum * 40 + await delay(elapsed) + + ps.refreshScores() + aScore = ps.score(peerA) + expect(aScore).to.be.gt(tparams.topicWeight * tparams.timeInMeshWeight * tparams.timeInMeshCap * 0.5) + expect(aScore).to.be.lt(tparams.topicWeight * tparams.timeInMeshWeight * tparams.timeInMeshCap * 1.5) + }) + + it('should score first message deliveries', async () => { + // Create parameters with reasonable default values + const mytopic = 'mytopic' + const params = createPeerScoreParams({ + topicScoreCap: 1000 + }) + const tparams = (params.topics[mytopic] = createTopicScoreParams({ + topicWeight: 1, + firstMessageDeliveriesWeight: 1, + firstMessageDeliveriesDecay: 0.9, + firstMessageDeliveriesCap: 50000, + timeInMeshWeight: 0 + })) + const peerA = peerIdFromPrivateKey(await generateKeyPair('Ed25519')).toString() + // Peer score should start at 0 + const ps = new PeerScore(params, null, logger, { scoreCacheValidityMs: 0 }) + ps.addPeer(peerA) + ps.graft(peerA, mytopic) + + // deliver a bunch of messages from peer A + const nMessages = 100 + for (let i = 0; i < nMessages; i++) { + const msg = makeTestMessage(i, mytopic) + ps.validateMessage(getMsgIdStr(msg)) + ps.deliverMessage(peerA, getMsgIdStr(msg), msg.topic) + } + + ps.refreshScores() + const aScore = ps.score(peerA) + expect(aScore).to.be.equal( + tparams.topicWeight * tparams.firstMessageDeliveriesWeight * nMessages * tparams.firstMessageDeliveriesDecay + ) + }) + + it('should cap first message deliveries score', async () => { + // Create parameters with reasonable default values + const mytopic = 'mytopic' + const params = createPeerScoreParams({ + topicScoreCap: 1000 + }) + const tparams = (params.topics[mytopic] = createTopicScoreParams({ + topicWeight: 1, + firstMessageDeliveriesWeight: 1, + firstMessageDeliveriesDecay: 0.9, + invalidMessageDeliveriesDecay: 0.9, + firstMessageDeliveriesCap: 50, + timeInMeshWeight: 0 + })) + const peerA = 
peerIdFromPrivateKey(await generateKeyPair('Ed25519')).toString() + // Peer score should start at 0 + const ps = new PeerScore(params, null, logger, { scoreCacheValidityMs: 0 }) + ps.addPeer(peerA) + + let aScore = ps.score(peerA) + expect(aScore, 'expected score to start at zero').to.equal(0) + + // The time in mesh depends on how long the peer has been grafted + ps.graft(peerA, mytopic) + + // deliver a bunch of messages from peer A + const nMessages = 100 + for (let i = 0; i < nMessages; i++) { + const msg = makeTestMessage(i, mytopic) + ps.validateMessage(getMsgIdStr(msg)) + ps.deliverMessage(peerA, getMsgIdStr(msg), msg.topic) + } + + ps.refreshScores() + aScore = ps.score(peerA) + expect(aScore).to.be.equal( + tparams.topicWeight * + tparams.firstMessageDeliveriesWeight * + tparams.firstMessageDeliveriesCap * + tparams.firstMessageDeliveriesDecay + ) + }) + + it('should decay first message deliveries score', async () => { + // Create parameters with reasonable default values + const mytopic = 'mytopic' + const params = createPeerScoreParams({ + topicScoreCap: 1000 + }) + const tparams = (params.topics[mytopic] = createTopicScoreParams({ + topicWeight: 1, + firstMessageDeliveriesWeight: 1, + firstMessageDeliveriesDecay: 0.9, // decay 10% per decay interval + invalidMessageDeliveriesDecay: 0.9, + firstMessageDeliveriesCap: 50, + timeInMeshWeight: 0 + })) + const peerA = peerIdFromPrivateKey(await generateKeyPair('Ed25519')).toString() + // Peer score should start at 0 + const ps = new PeerScore(params, null, logger, { scoreCacheValidityMs: 0 }) + ps.addPeer(peerA) + + let aScore = ps.score(peerA) + expect(aScore, 'expected score to start at zero').to.equal(0) + + // The time in mesh depends on how long the peer has been grafted + ps.graft(peerA, mytopic) + + // deliver a bunch of messages from peer A + const nMessages = 100 + for (let i = 0; i < nMessages; i++) { + const msg = makeTestMessage(i, mytopic) + ps.validateMessage(getMsgIdStr(msg)) + 
ps.deliverMessage(peerA, getMsgIdStr(msg), msg.topic) + } + + ps.refreshScores() + aScore = ps.score(peerA) + let expected = + tparams.topicWeight * + tparams.firstMessageDeliveriesWeight * + tparams.firstMessageDeliveriesCap * + tparams.firstMessageDeliveriesDecay + expect(aScore).to.be.equal(expected) + + // refreshing the scores applies the decay param + const decayInterals = 10 + for (let i = 0; i < decayInterals; i++) { + ps.refreshScores() + expected *= tparams.firstMessageDeliveriesDecay + } + aScore = ps.score(peerA) + expect(aScore).to.be.equal(expected) + }) + + it('should score mesh message deliveries', async function () { + this.timeout(10000) + // Create parameters with reasonable default values + const mytopic = 'mytopic' + const params = createPeerScoreParams({}) + const tparams = (params.topics[mytopic] = createTopicScoreParams({ + topicWeight: 1, + meshMessageDeliveriesWeight: -1, + meshMessageDeliveriesActivation: 1000, + meshMessageDeliveriesWindow: 10, + meshMessageDeliveriesThreshold: 20, + meshMessageDeliveriesCap: 100, + meshMessageDeliveriesDecay: 0.9, + invalidMessageDeliveriesDecay: 0.9, + firstMessageDeliveriesWeight: 0, + timeInMeshWeight: 0 + })) + // peer A always delivers the message first + // peer B delivers next (within the delivery window) + // peer C delivers outside the delivery window + // we expect peers A and B to have a score of zero, since all other param weights are zero + // peer C should have a negative score + const peerA = peerIdFromPrivateKey(await generateKeyPair('Ed25519')).toString() + const peerB = peerIdFromPrivateKey(await generateKeyPair('Ed25519')).toString() + const peerC = peerIdFromPrivateKey(await generateKeyPair('Ed25519')).toString() + const peers = [peerA, peerB, peerC] + // Peer score should start at 0 + const ps = new PeerScore(params, null, logger, { scoreCacheValidityMs: 0 }) + peers.forEach((p) => { + ps.addPeer(p) + ps.graft(p, mytopic) + }) + + // assert that nobody has been penalized yet for not 
delivering messages before activation time + ps.refreshScores() + peers.forEach((p) => { + const score = ps.score(p) + expect(score, 'expected no mesh delivery penalty before activation time').to.equal(0) + }) + // wait for the activation time to kick in + await delay(tparams.meshMessageDeliveriesActivation) + + // deliver a bunch of messages from peers + const nMessages = 100 + for (let i = 0; i < nMessages; i++) { + const msg = makeTestMessage(i, mytopic) + ps.validateMessage(getMsgIdStr(msg)) + ps.deliverMessage(peerA, getMsgIdStr(msg), msg.topic) + + ps.duplicateMessage(peerB, getMsgIdStr(msg), msg.topic) + + // deliver duplicate from peer C after the window + await delay(tparams.meshMessageDeliveriesWindow + 5) + ps.duplicateMessage(peerC, getMsgIdStr(msg), msg.topic) + } + ps.refreshScores() + const aScore = ps.score(peerA) + const bScore = ps.score(peerB) + const cScore = ps.score(peerC) + expect(aScore).to.be.gte(0) + expect(bScore).to.be.gte(0) + + // the penalty is the difference between the threshold and the actual mesh deliveries, squared. 
+ // since we didn't deliver anything, this is just the value of the threshold + const penalty = tparams.meshMessageDeliveriesThreshold * tparams.meshMessageDeliveriesThreshold + const expected = tparams.topicWeight * tparams.meshMessageDeliveriesWeight * penalty + expect(cScore).to.be.equal(expected) + }) + + it('should decay mesh message deliveries score', async function () { + this.timeout(10000) + // Create parameters with reasonable default values + const mytopic = 'mytopic' + const params = createPeerScoreParams({}) + const tparams = (params.topics[mytopic] = createTopicScoreParams({ + topicWeight: 1, + meshMessageDeliveriesWeight: -1, + meshMessageDeliveriesActivation: 1000, + meshMessageDeliveriesWindow: 10, + meshMessageDeliveriesThreshold: 20, + meshMessageDeliveriesCap: 100, + meshMessageDeliveriesDecay: 0.9, + invalidMessageDeliveriesDecay: 0.9, + firstMessageDeliveriesWeight: 0, + timeInMeshWeight: 0 + })) + const peerA = peerIdFromPrivateKey(await generateKeyPair('Ed25519')).toString() + // Peer score should start at 0 + const ps = new PeerScore(params, null, logger, { scoreCacheValidityMs: 0 }) + ps.addPeer(peerA) + ps.graft(peerA, mytopic) + + // wait for the activation time to kick in + await delay(tparams.meshMessageDeliveriesActivation + 10) + + // deliver a bunch of messages from peer A + const nMessages = 40 + for (let i = 0; i < nMessages; i++) { + const msg = makeTestMessage(i, mytopic) + ps.validateMessage(getMsgIdStr(msg)) + ps.deliverMessage(peerA, getMsgIdStr(msg), msg.topic) + } + ps.refreshScores() + let aScore = ps.score(peerA) + expect(aScore).to.be.gte(0) + + // we need to refresh enough times for the decay to bring us below the threshold + let decayedDeliveryCount = nMessages * tparams.meshMessageDeliveriesDecay + for (let i = 0; i < 20; i++) { + ps.refreshScores() + decayedDeliveryCount *= tparams.meshMessageDeliveriesDecay + } + aScore = ps.score(peerA) + // the penalty is the difference between the threshold and the (decayed) 
mesh deliveries, squared. + const deficit = tparams.meshMessageDeliveriesThreshold - decayedDeliveryCount + const penalty = deficit * deficit + const expected = tparams.topicWeight * tparams.meshMessageDeliveriesWeight * penalty + expect(aScore).to.be.equal(expected) + }) + + it('should score mesh message failures', async function () { + this.timeout(10000) + // Create parameters with reasonable default values + const mytopic = 'mytopic' + const params = createPeerScoreParams({}) + // the mesh failure penalty is applied when a peer is pruned while their + // mesh deliveries are under the threshold. + // for this test, we set the mesh delivery threshold, but set + // meshMessageDeliveriesWeight to zero, so the only affect on the score + // is from the mesh failure penalty + const tparams = (params.topics[mytopic] = createTopicScoreParams({ + topicWeight: 1, + meshFailurePenaltyWeight: -1, + meshFailurePenaltyDecay: 0.9, + + meshMessageDeliveriesWeight: 0, + meshMessageDeliveriesActivation: 1000, + meshMessageDeliveriesWindow: 10, + meshMessageDeliveriesThreshold: 20, + meshMessageDeliveriesCap: 100, + meshMessageDeliveriesDecay: 0.9, + + firstMessageDeliveriesWeight: 0, + timeInMeshWeight: 0 + })) + const peerA = peerIdFromPrivateKey(await generateKeyPair('Ed25519')).toString() + const peerB = peerIdFromPrivateKey(await generateKeyPair('Ed25519')).toString() + const peers = [peerA, peerB] + const ps = new PeerScore(params, null, logger, { scoreCacheValidityMs: 0 }) + peers.forEach((p) => { + ps.addPeer(p) + ps.graft(p, mytopic) + }) + + // wait for the activation time to kick in + await delay(tparams.meshMessageDeliveriesActivation + 10) + + // deliver a bunch of messages from peer A. 
peer B does nothing + const nMessages = 100 + for (let i = 0; i < nMessages; i++) { + const msg = makeTestMessage(i, mytopic) + ps.validateMessage(getMsgIdStr(msg)) + ps.deliverMessage(peerA, getMsgIdStr(msg), msg.topic) + } + // peers A and B should both have zero scores, since the failure penalty hasn't been applied yet + ps.refreshScores() + let aScore = ps.score(peerA) + let bScore = ps.score(peerB) + expect(aScore).to.be.equal(0) + expect(bScore).to.be.equal(0) + + // prune peer B to apply the penalty + ps.prune(peerB, mytopic) + ps.refreshScores() + aScore = ps.score(peerA) + bScore = ps.score(peerB) + expect(aScore).to.be.equal(0) + + // penalty calculation is the same as for meshMessageDeliveries, but multiplied by meshFailurePenaltyWeight + // instead of meshMessageDeliveriesWeight + const penalty = tparams.meshMessageDeliveriesThreshold * tparams.meshMessageDeliveriesThreshold + const expected = tparams.topicWeight * tparams.meshFailurePenaltyWeight * penalty * tparams.meshFailurePenaltyDecay + expect(bScore).to.be.equal(expected) + }) + + it('should score invalid message deliveries', async function () { + // Create parameters with reasonable default values + const mytopic = 'mytopic' + const params = createPeerScoreParams({}) + const tparams = (params.topics[mytopic] = createTopicScoreParams({ + topicWeight: 1, + invalidMessageDeliveriesWeight: -1, + invalidMessageDeliveriesDecay: 0.9, + timeInMeshWeight: 0 + })) + const peerA = peerIdFromPrivateKey(await generateKeyPair('Ed25519')).toString() + const ps = new PeerScore(params, null, logger, { scoreCacheValidityMs: 0 }) + ps.addPeer(peerA) + ps.graft(peerA, mytopic) + + // deliver a bunch of messages from peer A + const nMessages = 100 + for (let i = 0; i < nMessages; i++) { + const msg = makeTestMessage(i, mytopic) + ps.rejectMessage(peerA, getMsgIdStr(msg), msg.topic, RejectReason.Reject) + } + ps.refreshScores() + const aScore = ps.score(peerA) + + const expected = + tparams.topicWeight * + 
tparams.invalidMessageDeliveriesWeight * + (nMessages * tparams.invalidMessageDeliveriesDecay) ** 2 + expect(aScore).to.be.equal(expected) + }) + + it('should decay invalid message deliveries score', async function () { + // Create parameters with reasonable default values + const mytopic = 'mytopic' + const params = createPeerScoreParams({}) + const tparams = (params.topics[mytopic] = createTopicScoreParams({ + topicWeight: 1, + invalidMessageDeliveriesWeight: -1, + invalidMessageDeliveriesDecay: 0.9, + timeInMeshWeight: 0 + })) + const peerA = peerIdFromPrivateKey(await generateKeyPair('Ed25519')).toString() + const ps = new PeerScore(params, null, logger, { scoreCacheValidityMs: 0 }) + ps.addPeer(peerA) + ps.graft(peerA, mytopic) + + // deliver a bunch of messages from peer A + const nMessages = 100 + for (let i = 0; i < nMessages; i++) { + const msg = makeTestMessage(i, mytopic) + ps.rejectMessage(peerA, getMsgIdStr(msg), msg.topic, RejectReason.Reject) + } + ps.refreshScores() + let aScore = ps.score(peerA) + + let expected = + tparams.topicWeight * + tparams.invalidMessageDeliveriesWeight * + (nMessages * tparams.invalidMessageDeliveriesDecay) ** 2 + expect(aScore).to.be.equal(expected) + + // refresh scores a few times to apply decay + for (let i = 0; i < 10; i++) { + ps.refreshScores() + expected *= tparams.invalidMessageDeliveriesDecay ** 2 + } + aScore = ps.score(peerA) + expect(aScore).to.be.equal(expected) + }) + + it('should score invalid/ignored messages', async function () { + // this test adds coverage for the dark corners of message rejection + const mytopic = 'mytopic' + const params = createPeerScoreParams({}) + params.topics[mytopic] = createTopicScoreParams({ + topicWeight: 1, + invalidMessageDeliveriesWeight: -1, + invalidMessageDeliveriesDecay: 0.9, + timeInMeshQuantum: 1000 + }) + const peerA = peerIdFromPrivateKey(await generateKeyPair('Ed25519')).toString() + const peerB = peerIdFromPrivateKey(await generateKeyPair('Ed25519')).toString() + 
const ps = new PeerScore(params, null, logger, { scoreCacheValidityMs: 0 }) + ps.addPeer(peerA) + ps.addPeer(peerB) + + const msg = makeTestMessage(0, mytopic) + + // insert a record + ps.validateMessage(getMsgIdStr(msg)) + + // this should have no effect in the score, and subsequent duplicate messages should have no effect either + ps.rejectMessage(peerA, getMsgIdStr(msg), msg.topic, RejectReason.Ignore) + ps.duplicateMessage(peerB, getMsgIdStr(msg), msg.topic) + + let aScore = ps.score(peerA) + let bScore = ps.score(peerB) + let expected = 0 + expect(aScore).to.equal(expected) + expect(bScore).to.equal(expected) + + // now clear the delivery record + let record = ps.deliveryRecords.queue.peekFront() + + if (record == null) { + throw new Error('No record found') + } + + record.expire = Date.now() + + await delay(5) + ps.deliveryRecords.gc() + + // insert a new record in the message deliveries + ps.validateMessage(getMsgIdStr(msg)) + + // and reject the message to make sure duplicates are also penalized + ps.rejectMessage(peerA, getMsgIdStr(msg), msg.topic, RejectReason.Reject) + ps.duplicateMessage(peerB, getMsgIdStr(msg), msg.topic) + + aScore = ps.score(peerA) + bScore = ps.score(peerB) + expected = -1 + expect(aScore).to.equal(expected) + expect(bScore).to.equal(expected) + + // now clear the delivery record again + record = ps.deliveryRecords.queue.peekFront() + + if (record == null) { + throw new Error('No record found') + } + + record.expire = Date.now() + + await delay(5) + ps.deliveryRecords.gc() + + // insert a new record in the message deliveries + ps.validateMessage(getMsgIdStr(msg)) + + // and reject the message after a duplicate has arrived + ps.duplicateMessage(peerB, getMsgIdStr(msg), msg.topic) + ps.rejectMessage(peerA, getMsgIdStr(msg), msg.topic, RejectReason.Reject) + + aScore = ps.score(peerA) + bScore = ps.score(peerB) + expected = -4 + expect(aScore).to.equal(expected) + expect(bScore).to.equal(expected) + }) + + it('should score w/ 
application score', async function () { + const mytopic = 'mytopic' + let appScoreValue = 0 + const params = createPeerScoreParams({ + appSpecificScore: () => appScoreValue, + appSpecificWeight: 0.5 + }) + const peerA = peerIdFromPrivateKey(await generateKeyPair('Ed25519')).toString() + const ps = new PeerScore(params, null, logger, { scoreCacheValidityMs: 0 }) + ps.addPeer(peerA) + ps.graft(peerA, mytopic) + + for (let i = -100; i < 100; i++) { + appScoreValue = i + ps.refreshScores() + const aScore = ps.score(peerA) + const expected = i * params.appSpecificWeight + expect(aScore).to.equal(expected) + } + }) + + it('should score w/ IP colocation', async function () { + const mytopic = 'mytopic' + const params = createPeerScoreParams({ + IPColocationFactorThreshold: 1, + IPColocationFactorWeight: -1 + }) + const peerA = peerIdFromPrivateKey(await generateKeyPair('Ed25519')).toString() + const peerB = peerIdFromPrivateKey(await generateKeyPair('Ed25519')).toString() + const peerC = peerIdFromPrivateKey(await generateKeyPair('Ed25519')).toString() + const peerD = peerIdFromPrivateKey(await generateKeyPair('Ed25519')).toString() + const peers = [peerA, peerB, peerC, peerD] + + const ps = new PeerScore(params, null, logger, { scoreCacheValidityMs: 0 }) + peers.forEach((p) => { + ps.addPeer(p) + ps.graft(p, mytopic) + }) + + const setIPsForPeer = (p: string, ips: string[]): void => { + for (const ip of ips) { + ps.addIP(p, ip) + } + } + // peerA should have no penalty, but B, C, and D should be penalized for sharing an IP + setIPsForPeer(peerA, ['1.2.3.4']) + setIPsForPeer(peerB, ['2.3.4.5']) + setIPsForPeer(peerC, ['2.3.4.5', '3.4.5.6']) + setIPsForPeer(peerD, ['2.3.4.5']) + + ps.refreshScores() + const aScore = ps.score(peerA) + const bScore = ps.score(peerB) + const cScore = ps.score(peerC) + const dScore = ps.score(peerD) + + expect(aScore).to.equal(0) + + const nShared = 3 + const ipSurplus = nShared - params.IPColocationFactorThreshold + const penalty = ipSurplus 
** 2 + const expected = params.IPColocationFactorWeight * penalty + expect(bScore).to.equal(expected) + expect(cScore).to.equal(expected) + expect(dScore).to.equal(expected) + }) + + it('should score w/ behavior penalty', async function () { + const params = createPeerScoreParams({ + behaviourPenaltyWeight: -1, + behaviourPenaltyDecay: 0.99 + }) + const peerA = peerIdFromPrivateKey(await generateKeyPair('Ed25519')).toString() + + const ps = new PeerScore(params, null, logger, { scoreCacheValidityMs: 0 }) + + // add penalty on a non-existent peer + ps.addPenalty(peerA, 1, ScorePenalty.MessageDeficit) + let aScore = ps.score(peerA) + expect(aScore).to.equal(0) + + // add the peer and test penalties + ps.addPeer(peerA) + + aScore = ps.score(peerA) + expect(aScore).to.equal(0) + + ps.addPenalty(peerA, 1, scorePenaltyAny) + aScore = ps.score(peerA) + expect(aScore).to.equal(-1) + + ps.addPenalty(peerA, 1, scorePenaltyAny) + aScore = ps.score(peerA) + expect(aScore).to.equal(-4) + + ps.refreshScores() + + aScore = ps.score(peerA) + expect(aScore).to.equal(-3.9204) + }) + + it('should handle score retention', async function () { + const mytopic = 'mytopic' + const params = createPeerScoreParams({ + appSpecificScore: () => -1000, + appSpecificWeight: 1, + retainScore: 800 + }) + const peerA = peerIdFromPrivateKey(await generateKeyPair('Ed25519')).toString() + + const ps = new PeerScore(params, null, logger, { scoreCacheValidityMs: 0 }) + ps.addPeer(peerA) + ps.graft(peerA, mytopic) + // score should equal -1000 (app-specific score) + const expected = -1000 + ps.refreshScores() + let aScore = ps.score(peerA) + expect(aScore).to.equal(expected) + + // disconnect & wait half of the retainScoreTime + // should still have negative score + ps.removePeer(peerA) + const _delay = params.retainScore / 2 + await delay(_delay) + ps.refreshScores() + aScore = ps.score(peerA) + expect(aScore).to.equal(expected) + + // wait remaining time (plus a little slop) and the score should reset 
to 0 + await delay(_delay + 5) + ps.refreshScores() + aScore = ps.score(peerA) + expect(aScore).to.equal(0) + }) +}) + +// TODO: https://github.com/ChainSafe/js-libp2p-gossipsub/issues/238 +describe.skip('PeerScore score cache', function () { + const peerA = '16Uiu2HAmMkH6ZLen2tbhiuNCTZLLvrZaDgufNdT5MPjtC9Hr9YNG' + const logger = defaultLogger() + let sandbox: sinon.SinonSandbox + let computeStoreStub: sinon.SinonStub<[string, PeerStats, PeerScoreParams, Map>], number> + const params = createPeerScoreParams({ + appSpecificScore: () => -1000, + appSpecificWeight: 1, + retainScore: 800, + decayInterval: 1000, + + topics: { a: { topicWeight: 10 } as TopicScoreParams } + }) + let ps2: PeerScore + + beforeEach(() => { + sandbox = sinon.createSandbox() + const now = Date.now() + sandbox.useFakeTimers(now) + computeStoreStub = sinon.stub<[string, PeerStats, PeerScoreParams, Map>], number>() + + ps2 = new PeerScore(params, null, logger, { + scoreCacheValidityMs: 10, + computeScore: computeStoreStub + }) + }) + + afterEach(() => { + sandbox.restore() + }) + + it('should compute first time', function () { + computeStoreStub.returns(10) + ps2.addPeer(peerA) + expect(computeStoreStub.calledOnce).to.be.false() + ps2.score(peerA) + expect(computeStoreStub.calledOnce).to.be.true() + // this time peerA score is cached + ps2.score(peerA) + expect(computeStoreStub.calledOnce).to.be.true() + }) + + const testCases = [ + { name: 'decayInterval timeout', fun: () => sandbox.clock.tick(params.decayInterval) }, + { name: 'refreshScores', fun: () => { ps2.refreshScores() } }, + { name: 'addPenalty', fun: () => { ps2.addPenalty(peerA, 10, scorePenaltyAny) } }, + { name: 'graft', fun: () => { ps2.graft(peerA, 'a') } }, + { name: 'prune', fun: () => { ps2.prune(peerA, 'a') } }, + { name: 'markInvalidMessageDelivery', fun: () => { ps2.markInvalidMessageDelivery(peerA, 'a') } }, + { name: 'markFirstMessageDelivery', fun: () => { ps2.markFirstMessageDelivery(peerA, 'a') } }, + { name: 
'markDuplicateMessageDelivery', fun: () => { ps2.markDuplicateMessageDelivery(peerA, 'a') } }, + { name: 'removeIPs', fun: () => { ps2.removeIP(peerA, '127.0.0.1') } } + ] + + for (const { name, fun } of testCases) { + // eslint-disable-next-line no-loop-func + it(`should invalidate the cache after ${name}`, function () { + computeStoreStub.returns(10) + ps2.addPeer(peerA) + ps2.score(peerA) + expect(computeStoreStub.calledOnce).to.be.true() + // the score is cached + ps2.score(peerA) + expect(computeStoreStub.calledOnce).to.be.true() + // invalidate the cache + fun() + // should not use the cache + ps2.score(peerA) + expect(computeStoreStub.calledTwice).to.be.true() + }) + } +}) diff --git a/packages/gossipsub/test/scoreMetrics.spec.ts b/packages/gossipsub/test/scoreMetrics.spec.ts new file mode 100644 index 0000000000..4c9575ec3b --- /dev/null +++ b/packages/gossipsub/test/scoreMetrics.spec.ts @@ -0,0 +1,50 @@ +import { generateKeyPair } from '@libp2p/crypto/keys' +import { defaultLogger } from '@libp2p/logger' +import { peerIdFromPrivateKey } from '@libp2p/peer-id' +import { expect } from 'aegir/chai' +import { ScorePenalty } from '../src/metrics.js' +import { createPeerScoreParams, createTopicScoreParams, PeerScore } from '../src/score/index.js' +import { computeAllPeersScoreWeights } from '../src/score/scoreMetrics.js' + +describe('score / scoreMetrics', () => { + const logger = defaultLogger() + + it('computeScoreWeights', async () => { + // Create parameters with reasonable default values + const topic = 'test_topic' + + const params = createPeerScoreParams({ + topicScoreCap: 1000 + }) + params.topics[topic] = createTopicScoreParams({ + topicWeight: 0.5, + timeInMeshWeight: 1, + timeInMeshQuantum: 1, + timeInMeshCap: 3600 + }) + + // Add Map for metrics + const topicStrToLabel = new Map() + topicStrToLabel.set(topic, topic) + + const peerA = peerIdFromPrivateKey(await generateKeyPair('Ed25519')).toString() + // Peer score should start at 0 + const ps = new 
PeerScore(params, null, logger, { scoreCacheValidityMs: 0 }) + ps.addPeer(peerA) + + // Do some actions that penalize the peer + const msgId = 'aaaaaaaaaaaaaaaa' + ps.addPenalty(peerA, 1, ScorePenalty.BrokenPromise) + ps.validateMessage(msgId) + ps.deliverMessage(peerA, msgId, topic) + + const sw = computeAllPeersScoreWeights([peerA], ps.peerStats, ps.params, ps.peerIPs, topicStrToLabel) + + // Ensure score is the same + expect(sw.score).to.deep.equal([ps.score(peerA)], 'Score from metrics and actual score not equal') + expect(sw.byTopic.get(topic)).to.deep.equal( + { p1w: [0], p2w: [1], p3w: [0], p3bw: [0], p4w: [0] }, + 'Wrong score weights by topic' + ) + }) +}) diff --git a/packages/gossipsub/test/signature-policy.spec.ts b/packages/gossipsub/test/signature-policy.spec.ts new file mode 100644 index 0000000000..0c1c3dcadd --- /dev/null +++ b/packages/gossipsub/test/signature-policy.spec.ts @@ -0,0 +1,209 @@ +import { stop } from '@libp2p/interface' +import { expect } from 'aegir/chai' +import { pEvent } from 'p-event' +import { + connectAllPubSubNodes, + connectPubsubNodes, + createComponents, + createComponentsArray + +} from './utils/create-pubsub.js' +import type { GossipSubAndComponents } from './utils/create-pubsub.js' + +describe('signature policy', () => { + describe('strict-sign', () => { + const numNodes = 3 + let nodes: GossipSubAndComponents[] + + beforeEach(async () => { + nodes = await createComponentsArray({ + number: numNodes, + connected: false, + init: { + scoreParams: { + IPColocationFactorThreshold: 3 + }, + // crucial line + globalSignaturePolicy: 'StrictSign' + } + }) + }) + + afterEach(async () => { + await stop(...nodes.reduce((acc, curr) => acc.concat(curr.pubsub, ...Object.entries(curr.components)), [])) + }) + + it('should publish a message', async () => { + const topic = 'foo' + + // add subscriptions to each node + nodes.forEach((n) => { n.pubsub.subscribe(topic) }) + + // connect all nodes + await connectAllPubSubNodes(nodes) + + // 
wait for subscriptions to be transmitted + await Promise.all(nodes.map(async (n) => pEvent(n.pubsub, 'subscription-change'))) + + // await mesh rebalancing + await Promise.all(nodes.map(async (n) => pEvent(n.pubsub, 'gossipsub:heartbeat'))) + + // publish a message on the topic + const result = await nodes[0].pubsub.publish(topic, new Uint8Array()) + expect(result.recipients).to.length(numNodes - 1) + }) + + it('should forward a valid message', async () => { + const topic = 'foo' + + // add subscriptions to each node + nodes.forEach((n) => { n.pubsub.subscribe(topic) }) + + // connect in a line + await Promise.all(Array.from({ length: numNodes - 1 }, async (_, i) => connectPubsubNodes(nodes[i], nodes[i + 1]))) + + // wait for subscriptions to be transmitted + await Promise.all(nodes.map(async (n) => pEvent(n.pubsub, 'subscription-change'))) + + // await mesh rebalancing + await Promise.all(nodes.map(async (n) => pEvent(n.pubsub, 'gossipsub:heartbeat'))) + + // publish a message on the topic + const result = await nodes[0].pubsub.publish(topic, new Uint8Array()) + expect(result.recipients).to.length(1) + + // the last node should get the message + await pEvent(nodes[nodes.length - 1].pubsub, 'gossipsub:message') + }) + + it('should not forward an strict-no-sign message', async () => { + const topic = 'foo' + + // add a no-sign peer to nodes + nodes.unshift( + await createComponents({ + init: { + globalSignaturePolicy: 'StrictNoSign' + } + }) + ) + + // add subscriptions to each node + nodes.forEach((n) => { n.pubsub.subscribe(topic) }) + + // connect in a line + await Promise.all(Array.from({ length: numNodes - 1 }, async (_, i) => connectPubsubNodes(nodes[i], nodes[i + 1]))) + + // await mesh rebalancing + await Promise.all(nodes.map(async (n) => pEvent(n.pubsub, 'gossipsub:heartbeat'))) + + // publish a message on the topic + const result = await nodes[0].pubsub.publish(topic, new Uint8Array()) + expect(result.recipients).to.length(1) + + // the last node should 
NOT get the message + try { + await pEvent(nodes[nodes.length - 1].pubsub, 'gossipsub:message', { timeout: 200 }) + expect.fail('no-sign message should not be emitted from strict-sign peer') + } catch (e) {} + }) + }) + + describe('strict-no-sign', () => { + const numNodes = 3 + let nodes: GossipSubAndComponents[] + + beforeEach(async () => { + nodes = await createComponentsArray({ + number: numNodes, + connected: false, + init: { + scoreParams: { + IPColocationFactorThreshold: 3 + }, + // crucial line + globalSignaturePolicy: 'StrictNoSign' + } + }) + }) + + afterEach(async () => { + await stop(...nodes.reduce((acc, curr) => acc.concat(curr.pubsub, ...Object.entries(curr.components)), [])) + }) + + it('should publish a message', async () => { + const topic = 'foo' + + // add subscriptions to each node + nodes.forEach((n) => { n.pubsub.subscribe(topic) }) + + // connect all nodes + await connectAllPubSubNodes(nodes) + + // wait for subscriptions to be transmitted + await Promise.all(nodes.map(async (n) => pEvent(n.pubsub, 'subscription-change'))) + + // await mesh rebalancing + await Promise.all(nodes.map(async (n) => pEvent(n.pubsub, 'gossipsub:heartbeat'))) + + // publish a message on the topic + const result = await nodes[0].pubsub.publish(topic, new Uint8Array()) + expect(result.recipients).to.length(numNodes - 1) + }) + + it('should forward a valid message', async () => { + const topic = 'foo' + + // add subscriptions to each node + nodes.forEach((n) => { n.pubsub.subscribe(topic) }) + + // connect in a line + await Promise.all(Array.from({ length: numNodes - 1 }, async (_, i) => connectPubsubNodes(nodes[i], nodes[i + 1]))) + + // wait for subscriptions to be transmitted + await Promise.all(nodes.map(async (n) => pEvent(n.pubsub, 'subscription-change'))) + + // await mesh rebalancing + await Promise.all(nodes.map(async (n) => pEvent(n.pubsub, 'gossipsub:heartbeat'))) + + // publish a message on the topic + const result = await nodes[0].pubsub.publish(topic, 
new Uint8Array()) + expect(result.recipients).to.length(1) + + // the last node should get the message + await pEvent(nodes[nodes.length - 1].pubsub, 'gossipsub:message') + }) + + it('should not forward an strict-sign message', async () => { + const topic = 'foo' + + // add a no-sign peer to nodes + nodes.unshift( + await createComponents({ + init: { + globalSignaturePolicy: 'StrictSign' + } + }) + ) + + // add subscriptions to each node + nodes.forEach((n) => { n.pubsub.subscribe(topic) }) + + // connect in a line + await Promise.all(Array.from({ length: numNodes - 1 }, async (_, i) => connectPubsubNodes(nodes[i], nodes[i + 1]))) + + // await mesh rebalancing + await Promise.all(nodes.map(async (n) => pEvent(n.pubsub, 'gossipsub:heartbeat'))) + + // publish a message on the topic + const result = await nodes[0].pubsub.publish(topic, new Uint8Array()) + expect(result.recipients).to.length(1) + + // the last node should NOT get the message + try { + await pEvent(nodes[nodes.length - 1].pubsub, 'gossipsub:message', { timeout: 200 }) + expect.fail('no-sign message should not be emitted from strict-sign peer') + } catch (e) {} + }) + }) +}) diff --git a/packages/gossipsub/test/time-cache.spec.ts b/packages/gossipsub/test/time-cache.spec.ts new file mode 100644 index 0000000000..632d7d210f --- /dev/null +++ b/packages/gossipsub/test/time-cache.spec.ts @@ -0,0 +1,67 @@ +import { expect } from 'aegir/chai' +import sinon from 'sinon' +import { SimpleTimeCache } from '../src/utils/time-cache.js' + +describe('SimpleTimeCache', () => { + const validityMs = 1000 + const timeCache = new SimpleTimeCache({ validityMs }) + const sandbox = sinon.createSandbox() + + beforeEach(() => { + sandbox.useFakeTimers() + }) + + afterEach(() => { + sandbox.restore() + }) + + it('should delete items after 1sec', () => { + timeCache.put('aFirst') + timeCache.put('bFirst') + timeCache.put('cFirst') + + expect(timeCache.has('aFirst')).to.be.true() + expect(timeCache.has('bFirst')).to.be.true() + 
expect(timeCache.has('cFirst')).to.be.true() + + sandbox.clock.tick(validityMs + 1) + + // https://github.com/ChainSafe/js-libp2p-gossipsub/issues/232#issuecomment-1109589919 + timeCache.prune() + + timeCache.put('aSecond') + timeCache.put('bSecond') + timeCache.put('cSecond') + + expect(timeCache.has('aSecond')).to.be.true() + expect(timeCache.has('bSecond')).to.be.true() + expect(timeCache.has('cSecond')).to.be.true() + expect(timeCache.has('aFirst')).to.be.false() + expect(timeCache.has('bFirst')).to.be.false() + expect(timeCache.has('cFirst')).to.be.false() + }) + + it('Map insertion order', () => { + const key1 = 'key1' + const key2 = 'key2' + const key3 = 'key3' + + const map = new Map() + map.set(key1, Date.now()) + map.set(key2, Date.now()) + map.set(key3, Date.now()) + + expect(Array.from(map.keys())).deep.equals([key1, key2, key3], 'Map iterator order') + + // Does not change key position + map.set(key2, Date.now()) + + expect(Array.from(map.keys())).deep.equals([key1, key2, key3], 'Map iterator order after re-set') + + // Changes key position + map.delete(key2) + map.set(key2, Date.now()) + + expect(Array.from(map.keys())).deep.equals([key1, key3, key2], 'Map iterator order after delete set') + }) +}) diff --git a/packages/gossipsub/test/tracer.spec.ts b/packages/gossipsub/test/tracer.spec.ts new file mode 100644 index 0000000000..a676936548 --- /dev/null +++ b/packages/gossipsub/test/tracer.spec.ts @@ -0,0 +1,65 @@ +import { generateKeyPair } from '@libp2p/crypto/keys' +import { peerIdFromPrivateKey } from '@libp2p/peer-id' +import { expect } from 'aegir/chai' +import delay from 'delay' +import * as constants from '../src/constants.js' +import { IWantTracer } from '../src/tracer.js' +import { messageIdToString } from '../src/utils/messageIdToString.js' +import { makeTestMessage, getMsgId, getMsgIdStr } from './utils/index.js' + +describe('IWantTracer', () => { + it('should track broken promises', async function () { + // tests that unfulfilled promises 
are tracked correctly + this.timeout(6000) + const t = new IWantTracer(constants.GossipsubIWantFollowupTime, messageIdToString, null) + const peerA = peerIdFromPrivateKey(await generateKeyPair('Ed25519')).toString() + const peerB = peerIdFromPrivateKey(await generateKeyPair('Ed25519')).toString() + + const msgIds: Uint8Array[] = [] + for (let i = 0; i < 100; i++) { + const m = makeTestMessage(i, 'test_topic') + msgIds.push(getMsgId(m)) + } + + t.addPromise(peerA, msgIds) + t.addPromise(peerB, msgIds) + + // no broken promises yet + let brokenPromises = t.getBrokenPromises() + expect(brokenPromises.size).to.be.equal(0) + + // make promises break + await delay(constants.GossipsubIWantFollowupTime + 10) + + brokenPromises = t.getBrokenPromises() + expect(brokenPromises.size).to.be.equal(2) + expect(brokenPromises.get(peerA)).to.be.equal(1) + expect(brokenPromises.get(peerB)).to.be.equal(1) + }) + it('should track unbroken promises', async function () { + // like above, but this time we deliver messages to fullfil the promises + this.timeout(6000) + const t = new IWantTracer(constants.GossipsubIWantFollowupTime, messageIdToString, null) + const peerA = peerIdFromPrivateKey(await generateKeyPair('Ed25519')).toString() + const peerB = peerIdFromPrivateKey(await generateKeyPair('Ed25519')).toString() + + const msgs = [] + const msgIds = [] + for (let i = 0; i < 100; i++) { + const m = makeTestMessage(i, 'test_topic') + msgs.push(m) + msgIds.push(getMsgId(m)) + } + + t.addPromise(peerA, msgIds) + t.addPromise(peerB, msgIds) + + msgs.forEach((msg) => { t.deliverMessage(getMsgIdStr(msg)) }) + + await delay(constants.GossipsubIWantFollowupTime + 10) + + // there should be no broken promises + const brokenPromises = t.getBrokenPromises() + expect(brokenPromises.size).to.be.equal(0) + }) +}) diff --git a/packages/gossipsub/test/unit/set.test.ts b/packages/gossipsub/test/unit/set.test.ts new file mode 100644 index 0000000000..d19c9c1572 --- /dev/null +++ 
b/packages/gossipsub/test/unit/set.test.ts @@ -0,0 +1,46 @@ +import { expect } from 'aegir/chai' +import { removeFirstNItemsFromSet, removeItemsFromSet } from '../../src/utils/set.js' + +describe('Set util', function () { + describe('removeItemsFromSet', function () { + let s: Set + this.beforeEach(() => { + s = new Set([1, 2, 3, 4, 5]) + }) + + const testCases: Array<{ id: string, ineed: number, fn(item: number): boolean, result: Set }> = [ + { id: 'remove even numbers - need 0', ineed: 0, fn: (item) => item % 2 === 0, result: new Set([]) }, + { id: 'remove even numbers - need 1', ineed: 1, fn: (item) => item % 2 === 0, result: new Set([2]) }, + { id: 'remove even numbers - need 2', ineed: 2, fn: (item) => item % 2 === 0, result: new Set([2, 4]) }, + { id: 'remove even numbers - need 10', ineed: 2, fn: (item) => item % 2 === 0, result: new Set([2, 4]) } + ] + + for (const { id, ineed, fn, result } of testCases) { + // eslint-disable-next-line no-loop-func + it(id, () => { + expect(removeItemsFromSet(s, ineed, fn)).to.deep.equal(result) + }) + } + }) + + describe('removeFirstNItemsFromSet', function () { + let s: Set + this.beforeEach(() => { + s = new Set([1, 2, 3, 4, 5]) + }) + + const testCases: Array<{ id: string, ineed: number, result: Set }> = [ + { id: 'remove first 0 item', ineed: 0, result: new Set([]) }, + { id: 'remove first 1 item', ineed: 1, result: new Set([1]) }, + { id: 'remove first 2 item', ineed: 2, result: new Set([1, 2]) }, + { id: 'remove first 10 item', ineed: 10, result: new Set([1, 2, 3, 4, 5]) } + ] + + for (const { id, ineed, result } of testCases) { + // eslint-disable-next-line no-loop-func + it(id, () => { + expect(removeFirstNItemsFromSet(s, ineed)).to.deep.equal(result) + }) + } + }) +}) diff --git a/packages/gossipsub/test/utils/create-pubsub.ts b/packages/gossipsub/test/utils/create-pubsub.ts new file mode 100644 index 0000000000..4af041fdaf --- /dev/null +++ b/packages/gossipsub/test/utils/create-pubsub.ts @@ -0,0 +1,197 @@ 
+import { setMaxListeners } from 'events' +import { generateKeyPair } from '@libp2p/crypto/keys' +import { start, TypedEventEmitter } from '@libp2p/interface' +import { defaultLogger, prefixLogger } from '@libp2p/logger' +import { peerIdFromPrivateKey } from '@libp2p/peer-id' +import { persistentPeerStore } from '@libp2p/peer-store' +import { mockMuxer, multiaddrConnectionPair } from '@libp2p/utils' +import { multiaddr } from '@multiformats/multiaddr' +import { MemoryDatastore } from 'datastore-core' +import { stubInterface } from 'sinon-ts' +import { GossipSub as GossipSubClass } from '../../src/gossipsub.ts' +import { gossipsub } from '../../src/index.js' +import type { GossipsubOpts } from '../../src/index.js' +import type { TypedEventTarget, Libp2pEvents, PeerStore, PrivateKey, PeerId, ComponentLogger, Connection } from '@libp2p/interface' +import type { ConnectionManager, Registrar } from '@libp2p/interface-internal' +import type { StubbedInstance } from 'sinon-ts' + +export interface CreateComponentsOpts { + init?: Partial + pubsub?(init?: any): (components: any) => GossipSubClass + logPrefix?: string +} + +export interface GossipSubTestComponents { + privateKey: PrivateKey + peerId: PeerId + peerStore: PeerStore + registrar: StubbedInstance + connectionManager: ConnectionManager + logger: ComponentLogger + events: TypedEventTarget +} + +export interface GossipSubAndComponents { + pubsub: GossipSubClass + components: GossipSubTestComponents +} + +export const createComponents = async (opts: CreateComponentsOpts): Promise => { + const fn = opts.pubsub ?? gossipsub + const privateKey = await generateKeyPair('Ed25519') + const peerId = peerIdFromPrivateKey(privateKey) + + const events = new TypedEventEmitter() + const logger = opts.logPrefix == null ? 
defaultLogger() : prefixLogger(opts.logPrefix) + + const components: GossipSubTestComponents = { + privateKey, + peerId, + registrar: stubInterface(), + connectionManager: stubInterface(), + peerStore: persistentPeerStore({ + peerId, + datastore: new MemoryDatastore(), + events, + logger + }), + events, + logger + } + + const pubsub = fn(opts.init)(components) as GossipSubClass + + await start(...Object.entries(components), pubsub) + + try { + // not available everywhere + setMaxListeners(Infinity, pubsub) + } catch {} + + return { pubsub, components } +} + +export const createComponentsArray = async ( + opts: CreateComponentsOpts & { number: number, connected?: boolean } = { number: 1, connected: true } +): Promise => { + const output = await Promise.all( + Array.from({ length: opts.number }).map(async (_, i) => + createComponents({ ...opts, init: { ...opts.init, debugName: `libp2p:gossipsub:${i}` } }) + ) + ) + + if (opts.connected ?? false) { + await connectAllPubSubNodes(output) + } + + return output +} + +export const connectPubsubNodes = async (a: GossipSubAndComponents, b: GossipSubAndComponents): Promise => { + const [outboundMultiaddrConnection, inboundMultiaddrConnection] = multiaddrConnectionPair() + const localMuxer = mockMuxer().createStreamMuxer(outboundMultiaddrConnection) + const remoteMuxer = mockMuxer().createStreamMuxer(inboundMultiaddrConnection) + + // TODO: need to do multistream select here because gossipsub supports + // multiple protocols and one of a or b could be running floodsub + + localMuxer.addEventListener('stream', (evt) => { + for (const call of a.components.registrar.handle.getCalls()) { + if (call.args[0] === evt.detail.protocol) { + call.args[1](evt.detail, outboundConnection) + } + } + }) + + remoteMuxer.addEventListener('stream', (evt) => { + for (const call of b.components.registrar.handle.getCalls()) { + if (call.args[0] === evt.detail.protocol) { + call.args[1](evt.detail, inboundConnection) + } + } + }) + + const 
outboundConnection = stubInterface({ + newStream: async (protocols, options) => { + return localMuxer.createStream({ + protocol: protocols[0] + }) + }, + status: 'open', + direction: 'outbound', + remotePeer: b.components.peerId, + remoteAddr: multiaddr('/memory/1234') + }) + + const inboundConnection = stubInterface({ + newStream: async (protocols, options) => { + return remoteMuxer.createStream({ + protocol: protocols[0] + }) + }, + status: 'open', + direction: 'inbound', + remotePeer: a.components.peerId, + remoteAddr: multiaddr('/memory/5678') + }) + + for (const multicodec of b.pubsub.protocols) { + for (const call of a.components.registrar.register.getCalls()) { + if (call.args[0] === multicodec) { + call.args[1].onConnect?.(b.components.peerId, outboundConnection) + } + } + } + + for (const multicodec of a.pubsub.protocols) { + for (const call of b.components.registrar.register.getCalls()) { + if (call.args[0] === multicodec) { + call.args[1].onConnect?.(a.components.peerId, inboundConnection) + } + } + } +} + +export const connectAllPubSubNodes = async (components: GossipSubAndComponents[]): Promise => { + for (let i = 0; i < components.length; i++) { + for (let j = i + 1; j < components.length; j++) { + await connectPubsubNodes(components[i], components[j]) + } + } +} + +/** + * Connect some gossipsub nodes to others, ensure each has num peers + * + * @param {GossipSubAndComponents[]} gss + * @param {number} num - number of peers to connect + */ +export async function connectSome (gss: GossipSubAndComponents[], num: number): Promise { + for (let i = 0; i < gss.length; i++) { + let count = 0 + // merely do a Math.random() and check for duplicate may take a lot of time to run a test + // so we make an array of candidate peers + // initially, don't populate i as a candidate to connect: candidatePeers[i] = i + 1 + const candidatePeers = Array.from({ length: gss.length - 1 }, (_, j) => (j >= i ? 
j + 1 : j)) + while (count < num) { + const n = Math.floor(Math.random() * candidatePeers.length) + const peer = candidatePeers[n] + await connectPubsubNodes(gss[i], gss[peer]) + // after connecting to a peer, update candidatePeers so that we don't connect to it again + for (let j = n; j < candidatePeers.length - 1; j++) { + candidatePeers[j] = candidatePeers[j + 1] + } + // remove the last item + candidatePeers.splice(candidatePeers.length - 1, 1) + count++ + } + } +} + +export async function sparseConnect (gss: GossipSubAndComponents[]): Promise { + await connectSome(gss, 3) +} + +export async function denseConnect (gss: GossipSubAndComponents[]): Promise { + await connectSome(gss, Math.min(gss.length - 1, 10)) +} diff --git a/packages/gossipsub/test/utils/events.ts b/packages/gossipsub/test/utils/events.ts new file mode 100644 index 0000000000..c50ad6cb85 --- /dev/null +++ b/packages/gossipsub/test/utils/events.ts @@ -0,0 +1,84 @@ +import { expect } from 'aegir/chai' +import pWaitFor from 'p-wait-for' +import type { GossipSubAndComponents } from './create-pubsub.js' +import type { GossipSubEvents, SubscriptionChangeData } from '../../src/index.js' +import type { TypedEventTarget } from '@libp2p/interface' + +export const checkReceivedSubscription = async ( + node: GossipSubAndComponents, + peerIdStr: string, + topic: string, + peerIdx: number, + timeout = 1000 +): Promise => + new Promise((resolve, reject) => { + const event = 'subscription-change' + const t = setTimeout( + () => { reject(new Error(`Not received subscriptions of psub ${peerIdx}, topic ${topic}`)) }, + timeout + ) + const cb = (evt: CustomEvent): void => { + const { peerId, subscriptions } = evt.detail + + // console.log('@@@ in test received subscriptions from peer id', peerId.toString()) + if (peerId.toString() === peerIdStr && subscriptions[0].topic === topic && subscriptions[0].subscribe) { + clearTimeout(t) + node.pubsub.removeEventListener(event, cb) + if ( + 
Array.from(node.pubsub.getSubscribers(topic)) + .map((p) => p.toString()) + .includes(peerIdStr) + ) { + resolve() + } else { + reject(Error('topics should include the peerId')) + } + } + } + node.pubsub.addEventListener(event, cb) + }) + +export const checkReceivedSubscriptions = async ( + node: GossipSubAndComponents, + peerIdStrs: string[], + topic: string, + timeout = 5000 +): Promise => { + const recvPeerIdStrs = peerIdStrs.filter((peerIdStr) => peerIdStr !== node.components.peerId.toString()) + const promises = recvPeerIdStrs.map( + async (peerIdStr, idx) => checkReceivedSubscription(node, peerIdStr, topic, idx, timeout) + ) + await Promise.all(promises) + for (const str of recvPeerIdStrs) { + expect(Array.from(node.pubsub.getSubscribers(topic)).map((p) => p.toString())).to.include(str) + } + await pWaitFor(() => { + return recvPeerIdStrs.every((peerIdStr) => { + return (node.pubsub).streamsOutbound.has(peerIdStr) + }) + }) +} + +export const awaitEvents = async = GossipSubEvents>( + emitter: TypedEventTarget, + event: keyof Events, + number: number, + timeout = 30000 +): Promise => { + return new Promise((resolve, reject) => { + let counter = 0 + const t = setTimeout(() => { + emitter.removeEventListener(event, cb) + reject(new Error(`${counter} of ${number} '${String(event)}' events received after ${timeout}ms`)) + }, timeout) + const cb = (): void => { + counter++ + if (counter >= number) { + clearTimeout(t) + emitter.removeEventListener(event, cb) + resolve() + } + } + emitter.addEventListener(event, cb) + }) +} diff --git a/packages/gossipsub/test/utils/index.ts b/packages/gossipsub/test/utils/index.ts new file mode 100644 index 0000000000..47f7ab23f1 --- /dev/null +++ b/packages/gossipsub/test/utils/index.ts @@ -0,0 +1,26 @@ +import { generateKeyPair } from '@libp2p/crypto/keys' +import { peerIdFromPrivateKey } from '@libp2p/peer-id' +import { fromString as uint8ArrayFromString } from 'uint8arrays/from-string' +import type { RPC } from 
'../../src/message/rpc.js' +import type { TopicStr } from '../../src/types.js' +import type { PeerId } from '@libp2p/interface' + +export * from './msgId.js' + +export const createPeerId = async (): Promise => { + const peerId = peerIdFromPrivateKey(await generateKeyPair('Ed25519')) + + return peerId +} + +let seq = 0n +const defaultPeer = uint8ArrayFromString('12D3KooWBsYhazxNL7aeisdwttzc6DejNaM48889t5ifiS6tTrBf', 'base58btc') + +export function makeTestMessage (i: number, topic: TopicStr, from?: PeerId): RPC.Message { + return { + seqno: uint8ArrayFromString((seq++).toString(16).padStart(16, '0'), 'base16'), + data: Uint8Array.from([i]), + from: from?.toMultihash().bytes ?? defaultPeer, + topic + } +} diff --git a/packages/gossipsub/test/utils/msgId.ts b/packages/gossipsub/test/utils/msgId.ts new file mode 100644 index 0000000000..1c4ee636f2 --- /dev/null +++ b/packages/gossipsub/test/utils/msgId.ts @@ -0,0 +1,19 @@ +import { digest } from '@chainsafe/as-sha256' +import { fromString as uint8ArrayFromString } from 'uint8arrays/from-string' +import { messageIdToString } from '../../src/utils/messageIdToString.js' +import type { RPC } from '../../src/message/rpc.js' + +export const getMsgId = (msg: RPC.Message): Uint8Array => { + const from = msg.from ?? new Uint8Array(0) + const seqno = msg.seqno instanceof Uint8Array ? msg.seqno : uint8ArrayFromString(msg.seqno ?? '') + const result = new Uint8Array(from.length + seqno.length) + result.set(from, 0) + result.set(seqno, from.length) + return result +} + +export const getMsgIdStr = (msg: RPC.Message): string => messageIdToString(getMsgId(msg)) + +export const fastMsgIdFn = (msg: RPC.Message): string => + + msg.data != null ? 
messageIdToString(digest(msg.data)) : '0' diff --git a/packages/gossipsub/tsconfig.json b/packages/gossipsub/tsconfig.json new file mode 100644 index 0000000000..13e1e3bbd5 --- /dev/null +++ b/packages/gossipsub/tsconfig.json @@ -0,0 +1,13 @@ +{ + "extends": "aegir/src/config/tsconfig.aegir.json", + "compilerOptions": { + "importsNotUsedAsValues": "remove", + "noImplicitReturns": true, + "outDir": "dist", + "useUnknownInCatchVariables": true, + }, + "include": [ + "src", + "test" + ] +} \ No newline at end of file diff --git a/packages/integration-tests/package.json b/packages/integration-tests/package.json index b35da776d5..c38837a52b 100644 --- a/packages/integration-tests/package.json +++ b/packages/integration-tests/package.json @@ -18,7 +18,7 @@ "dep-check": "aegir dep-check" }, "devDependencies": { - "@chainsafe/libp2p-gossipsub": "^14.1.1", + "@libp2p/gossipsub": "^14.1.1", "@libp2p/bootstrap": "^11.0.47", "@libp2p/circuit-relay-v2": "^3.2.24", "@libp2p/crypto": "^5.1.8", diff --git a/packages/integration-tests/test/interop.ts b/packages/integration-tests/test/interop.ts index 93bb8ac1db..37c9311aec 100644 --- a/packages/integration-tests/test/interop.ts +++ b/packages/integration-tests/test/interop.ts @@ -1,10 +1,10 @@ import fs from 'fs' -import { gossipsub } from '@chainsafe/libp2p-gossipsub' import { circuitRelayServer, circuitRelayTransport } from '@libp2p/circuit-relay-v2' import { privateKeyFromProtobuf } from '@libp2p/crypto/keys' import { createClient } from '@libp2p/daemon-client' import { createServer } from '@libp2p/daemon-server' import { floodsub } from '@libp2p/floodsub' +import { gossipsub } from '@libp2p/gossipsub' import { identify } from '@libp2p/identify' import { UnsupportedError, interopTests } from '@libp2p/interop' import { kadDHT, passthroughMapper } from '@libp2p/kad-dht' diff --git a/packages/integration-tests/tsconfig.json b/packages/integration-tests/tsconfig.json index 243cc78e7e..80c70a3a05 100644 --- 
a/packages/integration-tests/tsconfig.json +++ b/packages/integration-tests/tsconfig.json @@ -75,7 +75,7 @@ "path": "../protocol-ping" }, { - "path": "../pubsub-floodsub" + "path": "../floodsub" }, { "path": "../stream-multiplexer-mplex" diff --git a/packages/interface/package.json b/packages/interface/package.json index cd2f3f669f..c09e106afb 100644 --- a/packages/interface/package.json +++ b/packages/interface/package.json @@ -43,7 +43,6 @@ "dependencies": { "@multiformats/dns": "^1.0.6", "@multiformats/multiaddr": "^13.0.1", - "it-pushable": "^3.2.3", "main-event": "^1.0.1", "multiformats": "^13.4.0", "progress-events": "^1.0.1", diff --git a/packages/interface/src/index.ts b/packages/interface/src/index.ts index 3ed4b1eca7..31ef741464 100644 --- a/packages/interface/src/index.ts +++ b/packages/interface/src/index.ts @@ -956,7 +956,6 @@ export * from './peer-id.js' export * from './peer-info.js' export * from './peer-routing.js' export * from './peer-store.js' -export * from './pubsub.js' export * from './record.js' export * from './startable.js' export * from './stream-handler.js' diff --git a/packages/interface/src/message-stream.ts b/packages/interface/src/message-stream.ts index 021727070c..812e2ecfeb 100644 --- a/packages/interface/src/message-stream.ts +++ b/packages/interface/src/message-stream.ts @@ -100,10 +100,25 @@ export interface MessageStream Don't use this module - * - * This module is a naive implementation of pubsub. It broadcasts all messages to all network peers, cannot provide older messages and has no protection against bad actors. - * - * It exists for academic purposes only, you should not use it in production. - * - * Instead please use [gossipsub](https://www.npmjs.com/package/@chainsafe/libp2p-gossipsub) - a more complete implementation which is also compatible with floodsub. 
- * - * @example Configuring libp2p to use floodsub - * - * ```TypeScript - * import { createLibp2p } from 'libp2p' - * import { floodsub } from '@libp2p/floodsub' - * - * const node = await createLibp2p({ - * services: { - * pubsub: floodsub() - * } - * //... other options - * }) - * await node.start() - * - * node.services.pubsub.subscribe('fruit') - * node.services.pubsub.addEventListener('message', (evt) => { - * console.log(evt) - * }) - * - * node.services.pubsub.publish('fruit', new TextEncoder().encode('banana')) - * ``` - */ - -import { pubSubSymbol, serviceCapabilities, serviceDependencies } from '@libp2p/interface' -import { PubSubBaseProtocol } from '@libp2p/pubsub' -import { toString } from 'uint8arrays/to-string' -import { SimpleTimeCache } from './cache.js' -import { multicodec } from './config.js' -import { RPC } from './message/rpc.js' -import type { PeerId, PubSubInit, Message, PubSubRPC, PubSubRPCMessage, PublishResult, PubSub } from '@libp2p/interface' -import type { PubSubComponents } from '@libp2p/pubsub' -import type { Uint8ArrayList } from 'uint8arraylist' - -export { multicodec } - -export interface FloodSubInit extends PubSubInit { - seenTTL?: number -} - -export interface FloodSubComponents extends PubSubComponents { - -} - -/** - * FloodSub (aka dumbsub is an implementation of pubsub focused on - * delivering an API for Publish/Subscribe, but with no CastTree Forming - * (it just floods the network). - */ -class FloodSub extends PubSubBaseProtocol { - public seenCache: SimpleTimeCache - - constructor (components: FloodSubComponents, init?: FloodSubInit) { - super(components, { - ...init, - canRelayMessage: true, - multicodecs: [multicodec] - }) - - this.log = components.logger.forComponent('libp2p:floodsub') - - /** - * Cache of seen messages - * - * @type {TimeCache} - */ - this.seenCache = new SimpleTimeCache({ - validityMs: init?.seenTTL ?? 
30000 - }) - } - - readonly [pubSubSymbol] = true - - readonly [Symbol.toStringTag] = '@libp2p/floodsub' - - readonly [serviceCapabilities]: string[] = [ - '@libp2p/pubsub' - ] - - readonly [serviceDependencies]: string[] = [ - '@libp2p/identify' - ] - - /** - * Decode a Uint8Array into an RPC object - */ - decodeRpc (bytes: Uint8Array | Uint8ArrayList): PubSubRPC { - return RPC.decode(bytes) - } - - /** - * Encode an RPC object into a Uint8Array - */ - encodeRpc (rpc: PubSubRPC): Uint8Array { - return RPC.encode(rpc) - } - - decodeMessage (bytes: Uint8Array | Uint8ArrayList): PubSubRPCMessage { - return RPC.Message.decode(bytes) - } - - encodeMessage (rpc: PubSubRPCMessage): Uint8Array { - return RPC.Message.encode(rpc) - } - - /** - * Process incoming message - * Extends base implementation to check router cache. - */ - async processMessage (from: PeerId, message: Message): Promise { - // Check if I've seen the message, if yes, ignore - const seqno = await super.getMsgId(message) - const msgIdStr = toString(seqno, 'base64') - - if (this.seenCache.has(msgIdStr)) { - return - } - - this.seenCache.put(msgIdStr, true) - - await super.processMessage(from, message) - } - - /** - * Publish message created. Forward it to the peers. 
- */ - async publishMessage (from: PeerId, message: Message): Promise { - const peers = this.getSubscribers(message.topic) - const recipients: PeerId[] = [] - - if (peers == null || peers.length === 0) { - this.log('no peers are subscribed to topic %s', message.topic) - return { recipients } - } - - peers.forEach(id => { - if (this.components.peerId.equals(id)) { - this.log('not sending message on topic %s to myself', message.topic) - return - } - - if (id.equals(from)) { - this.log('not sending message on topic %s to sender %p', message.topic, id) - return - } - - this.log('publish msgs on topics %s %p', message.topic, id) - - recipients.push(id) - this.send(id, { messages: [message] }) - }) - - return { recipients } - } -} - -export function floodsub (init: FloodSubInit = {}): (components: FloodSubComponents) => PubSub { - return (components: FloodSubComponents) => new FloodSub(components, init) -} diff --git a/packages/pubsub/CHANGELOG.md b/packages/pubsub/CHANGELOG.md deleted file mode 100644 index 05bd2010ac..0000000000 --- a/packages/pubsub/CHANGELOG.md +++ /dev/null @@ -1,1790 +0,0 @@ -## [7.0.3](https://github.com/libp2p/js-libp2p-pubsub/compare/v7.0.2...v7.0.3) (2023-06-27) - - -### Dependencies - -* **dev:** bump delay from 5.0.0 to 6.0.0 ([#144](https://github.com/libp2p/js-libp2p-pubsub/issues/144)) ([1364ce4](https://github.com/libp2p/js-libp2p-pubsub/commit/1364ce41815d3392cfca61169e113cc5414ac2d9)) - -### Dependencies - -* The following workspace dependencies were updated - * dependencies - * @libp2p/interface-internal bumped from ^0.1.11 to ^1.0.0 - -### Dependencies - -* The following workspace dependencies were updated - * dependencies - * @libp2p/interface-internal bumped from ^1.0.1 to ^1.0.2 - * @libp2p/peer-collections bumped from ^5.0.0 to ^5.1.0 - -### Dependencies - -* The following workspace dependencies were updated - * dependencies - * @libp2p/crypto bumped from ^3.0.1 to ^3.0.2 - * @libp2p/interface bumped from ^1.0.1 to ^1.0.2 - * 
@libp2p/interface-internal bumped from ^1.0.2 to ^1.0.3 - * @libp2p/peer-collections bumped from ^5.1.0 to ^5.1.1 - * @libp2p/peer-id bumped from ^4.0.1 to ^4.0.2 - * @libp2p/utils bumped from ^5.0.2 to ^5.0.3 - * devDependencies - * @libp2p/logger bumped from ^4.0.1 to ^4.0.2 - * @libp2p/peer-id-factory bumped from ^4.0.0 to ^4.0.1 - -### Dependencies - -* The following workspace dependencies were updated - * dependencies - * @libp2p/utils bumped from ^5.0.3 to ^5.1.0 - -### Dependencies - -* The following workspace dependencies were updated - * dependencies - * @libp2p/crypto bumped from ^3.0.2 to ^3.0.3 - * @libp2p/interface bumped from ^1.0.2 to ^1.1.0 - * @libp2p/interface-internal bumped from ^1.0.3 to ^1.0.4 - * @libp2p/peer-collections bumped from ^5.1.1 to ^5.1.2 - * @libp2p/peer-id bumped from ^4.0.2 to ^4.0.3 - * @libp2p/utils bumped from ^5.1.0 to ^5.1.1 - * devDependencies - * @libp2p/logger bumped from ^4.0.2 to ^4.0.3 - * @libp2p/peer-id-factory bumped from ^4.0.1 to ^4.0.2 - -### Dependencies - -* The following workspace dependencies were updated - * dependencies - * @libp2p/crypto bumped from ^3.0.4 to ^4.0.0 - * @libp2p/interface-internal bumped from ^1.0.5 to ^1.0.6 - * @libp2p/peer-collections bumped from ^5.1.3 to ^5.1.4 - * @libp2p/utils bumped from ^5.2.0 to ^5.2.1 - * devDependencies - * @libp2p/peer-id-factory bumped from ^4.0.3 to ^4.0.4 - -### Dependencies - -* The following workspace dependencies were updated - * dependencies - * @libp2p/utils bumped from ^5.2.2 to ^5.2.3 - -### Dependencies - -* The following workspace dependencies were updated - * dependencies - * @libp2p/utils bumped from ^5.2.4 to ^5.2.5 - -## [10.1.18](https://github.com/libp2p/js-libp2p/compare/pubsub-v10.1.17...pubsub-v10.1.18) (2025-08-19) - - -### Dependencies - -* The following workspace dependencies were updated - * dependencies - * @libp2p/crypto bumped from ^5.1.7 to ^5.1.8 - * @libp2p/interface bumped from ^2.10.5 to ^2.11.0 - * @libp2p/interface-internal 
bumped from ^2.3.18 to ^2.3.19 - * @libp2p/peer-collections bumped from ^6.0.34 to ^6.0.35 - * @libp2p/peer-id bumped from ^5.1.8 to ^5.1.9 - * @libp2p/utils bumped from ^6.7.1 to ^6.7.2 - * devDependencies - * @libp2p/logger bumped from ^5.1.21 to ^5.2.0 - -## [10.1.17](https://github.com/libp2p/js-libp2p/compare/pubsub-v10.1.16...pubsub-v10.1.17) (2025-06-25) - - -### Dependencies - -* The following workspace dependencies were updated - * dependencies - * @libp2p/crypto bumped from ^5.1.6 to ^5.1.7 - * @libp2p/interface bumped from ^2.10.4 to ^2.10.5 - * @libp2p/interface-internal bumped from ^2.3.17 to ^2.3.18 - * @libp2p/peer-collections bumped from ^6.0.33 to ^6.0.34 - * @libp2p/peer-id bumped from ^5.1.7 to ^5.1.8 - * @libp2p/utils bumped from ^6.7.0 to ^6.7.1 - * devDependencies - * @libp2p/logger bumped from ^5.1.20 to ^5.1.21 - -## [10.1.16](https://github.com/libp2p/js-libp2p/compare/pubsub-v10.1.15...pubsub-v10.1.16) (2025-06-17) - - -### Dependencies - -* The following workspace dependencies were updated - * dependencies - * @libp2p/interface-internal bumped from ^2.3.16 to ^2.3.17 - * @libp2p/peer-collections bumped from ^6.0.32 to ^6.0.33 - * @libp2p/utils bumped from ^6.6.7 to ^6.7.0 - -## [10.1.15](https://github.com/libp2p/js-libp2p/compare/pubsub-v10.1.14...pubsub-v10.1.15) (2025-06-16) - - -### Dependencies - -* The following workspace dependencies were updated - * dependencies - * @libp2p/crypto bumped from ^5.1.5 to ^5.1.6 - * @libp2p/interface bumped from ^2.10.3 to ^2.10.4 - * @libp2p/interface-internal bumped from ^2.3.15 to ^2.3.16 - * @libp2p/peer-collections bumped from ^6.0.31 to ^6.0.32 - * @libp2p/peer-id bumped from ^5.1.6 to ^5.1.7 - * @libp2p/utils bumped from ^6.6.6 to ^6.6.7 - * devDependencies - * @libp2p/logger bumped from ^5.1.19 to ^5.1.20 - -## [10.1.14](https://github.com/libp2p/js-libp2p/compare/pubsub-v10.1.13...pubsub-v10.1.14) (2025-06-03) - - -### Bug Fixes - -* deduplicate typed event target 
([#3170](https://github.com/libp2p/js-libp2p/issues/3170)) ([cc7b34c](https://github.com/libp2p/js-libp2p/commit/cc7b34c0fe3ac5745fd082ae0198b8742371a412)) - - -### Documentation - -* update typedoc config ([#3146](https://github.com/libp2p/js-libp2p/issues/3146)) ([14dbebe](https://github.com/libp2p/js-libp2p/commit/14dbebea8bd17addadac730afec0fa3b1cc6334a)) - - -### Dependencies - -* The following workspace dependencies were updated - * dependencies - * @libp2p/crypto bumped from ^5.1.4 to ^5.1.5 - * @libp2p/interface bumped from ^2.10.2 to ^2.10.3 - * @libp2p/interface-internal bumped from ^2.3.14 to ^2.3.15 - * @libp2p/peer-collections bumped from ^6.0.30 to ^6.0.31 - * @libp2p/peer-id bumped from ^5.1.5 to ^5.1.6 - * @libp2p/utils bumped from ^6.6.5 to ^6.6.6 - * devDependencies - * @libp2p/logger bumped from ^5.1.18 to ^5.1.19 - -## [10.1.13](https://github.com/libp2p/js-libp2p/compare/pubsub-v10.1.12...pubsub-v10.1.13) (2025-05-22) - - -### Dependencies - -* The following workspace dependencies were updated - * dependencies - * @libp2p/crypto bumped from ^5.1.3 to ^5.1.4 - * @libp2p/interface bumped from ^2.10.1 to ^2.10.2 - * @libp2p/interface-internal bumped from ^2.3.13 to ^2.3.14 - * @libp2p/peer-collections bumped from ^6.0.29 to ^6.0.30 - * @libp2p/peer-id bumped from ^5.1.4 to ^5.1.5 - * @libp2p/utils bumped from ^6.6.4 to ^6.6.5 - * devDependencies - * @libp2p/logger bumped from ^5.1.17 to ^5.1.18 - -## [10.1.12](https://github.com/libp2p/js-libp2p/compare/pubsub-v10.1.11...pubsub-v10.1.12) (2025-05-20) - - -### Dependencies - -* bump sinon from 19.0.5 to 20.0.0 ([#3112](https://github.com/libp2p/js-libp2p/issues/3112)) ([d1ce677](https://github.com/libp2p/js-libp2p/commit/d1ce6774d8f7c338f15a05f80d09e361d21e7586)) -* update aegir, fix all linting issues ([#3110](https://github.com/libp2p/js-libp2p/issues/3110)) ([510b033](https://github.com/libp2p/js-libp2p/commit/510b033f6b15358c7fae21486c3b09e730aa26cd)) -* The following workspace dependencies 
were updated - * dependencies - * @libp2p/crypto bumped from ^5.1.2 to ^5.1.3 - * @libp2p/interface bumped from ^2.10.0 to ^2.10.1 - * @libp2p/interface-internal bumped from ^2.3.12 to ^2.3.13 - * @libp2p/peer-collections bumped from ^6.0.28 to ^6.0.29 - * @libp2p/peer-id bumped from ^5.1.3 to ^5.1.4 - * @libp2p/utils bumped from ^6.6.3 to ^6.6.4 - * devDependencies - * @libp2p/logger bumped from ^5.1.16 to ^5.1.17 - -## [10.1.11](https://github.com/libp2p/js-libp2p/compare/pubsub-v10.1.10...pubsub-v10.1.11) (2025-05-19) - - -### Documentation - -* update comments in interface module and elsewhere ([#3107](https://github.com/libp2p/js-libp2p/issues/3107)) ([32627c8](https://github.com/libp2p/js-libp2p/commit/32627c8767587f7e8df88a700933ece6d5f5c3c4)), closes [#2112](https://github.com/libp2p/js-libp2p/issues/2112) - - -### Dependencies - -* The following workspace dependencies were updated - * dependencies - * @libp2p/crypto bumped from ^5.1.1 to ^5.1.2 - * @libp2p/interface bumped from ^2.9.0 to ^2.10.0 - * @libp2p/interface-internal bumped from ^2.3.11 to ^2.3.12 - * @libp2p/peer-collections bumped from ^6.0.27 to ^6.0.28 - * @libp2p/peer-id bumped from ^5.1.2 to ^5.1.3 - * @libp2p/utils bumped from ^6.6.2 to ^6.6.3 - * devDependencies - * @libp2p/logger bumped from ^5.1.15 to ^5.1.16 - -## [10.1.10](https://github.com/libp2p/js-libp2p/compare/pubsub-v10.1.9...pubsub-v10.1.10) (2025-04-16) - - -### Dependencies - -* The following workspace dependencies were updated - * dependencies - * @libp2p/crypto bumped from ^5.1.0 to ^5.1.1 - * @libp2p/interface bumped from ^2.8.0 to ^2.9.0 - * @libp2p/interface-internal bumped from ^2.3.10 to ^2.3.11 - * @libp2p/peer-collections bumped from ^6.0.26 to ^6.0.27 - * @libp2p/peer-id bumped from ^5.1.1 to ^5.1.2 - * @libp2p/utils bumped from ^6.6.1 to ^6.6.2 - * devDependencies - * @libp2p/logger bumped from ^5.1.14 to ^5.1.15 - -## [10.1.9](https://github.com/libp2p/js-libp2p/compare/pubsub-v10.1.8...pubsub-v10.1.9) 
(2025-04-09) - - -### Dependencies - -* The following workspace dependencies were updated - * dependencies - * @libp2p/crypto bumped from ^5.0.15 to ^5.1.0 - * @libp2p/interface bumped from ^2.7.0 to ^2.8.0 - * @libp2p/interface-internal bumped from ^2.3.9 to ^2.3.10 - * @libp2p/peer-collections bumped from ^6.0.25 to ^6.0.26 - * @libp2p/peer-id bumped from ^5.1.0 to ^5.1.1 - * @libp2p/utils bumped from ^6.6.0 to ^6.6.1 - * devDependencies - * @libp2p/logger bumped from ^5.1.13 to ^5.1.14 - -## [10.1.8](https://github.com/libp2p/js-libp2p/compare/pubsub-v10.1.7...pubsub-v10.1.8) (2025-03-18) - - -### Dependencies - -* The following workspace dependencies were updated - * dependencies - * @libp2p/interface-internal bumped from ^2.3.8 to ^2.3.9 - * @libp2p/peer-collections bumped from ^6.0.24 to ^6.0.25 - * @libp2p/utils bumped from ^6.5.8 to ^6.6.0 - -## [10.1.7](https://github.com/libp2p/js-libp2p/compare/pubsub-v10.1.6...pubsub-v10.1.7) (2025-03-12) - - -### Dependencies - -* The following workspace dependencies were updated - * dependencies - * @libp2p/interface-internal bumped from ^2.3.7 to ^2.3.8 - * @libp2p/peer-collections bumped from ^6.0.23 to ^6.0.24 - * @libp2p/peer-id bumped from ^5.0.16 to ^5.1.0 - * @libp2p/utils bumped from ^6.5.7 to ^6.5.8 - * devDependencies - * @libp2p/logger bumped from ^5.1.12 to ^5.1.13 - -## [10.1.6](https://github.com/libp2p/js-libp2p/compare/pubsub-v10.1.5...pubsub-v10.1.6) (2025-03-03) - - -### Dependencies - -* The following workspace dependencies were updated - * dependencies - * @libp2p/crypto bumped from ^5.0.14 to ^5.0.15 - * @libp2p/interface bumped from ^2.6.1 to ^2.7.0 - * @libp2p/interface-internal bumped from ^2.3.6 to ^2.3.7 - * @libp2p/peer-collections bumped from ^6.0.22 to ^6.0.23 - * @libp2p/peer-id bumped from ^5.0.15 to ^5.0.16 - * @libp2p/utils bumped from ^6.5.6 to ^6.5.7 - * devDependencies - * @libp2p/logger bumped from ^5.1.11 to ^5.1.12 - -## 
[10.1.5](https://github.com/libp2p/js-libp2p/compare/pubsub-v10.1.4...pubsub-v10.1.5) (2025-03-03) - - -### Dependencies - -* The following workspace dependencies were updated - * dependencies - * @libp2p/crypto bumped from ^5.0.13 to ^5.0.14 - * @libp2p/interface-internal bumped from ^2.3.5 to ^2.3.6 - * @libp2p/peer-collections bumped from ^6.0.21 to ^6.0.22 - * @libp2p/peer-id bumped from ^5.0.14 to ^5.0.15 - * @libp2p/utils bumped from ^6.5.5 to ^6.5.6 - * devDependencies - * @libp2p/logger bumped from ^5.1.10 to ^5.1.11 - -## [10.1.4](https://github.com/libp2p/js-libp2p/compare/pubsub-v10.1.3...pubsub-v10.1.4) (2025-02-25) - - -### Documentation - -* add spellcheck to gh actions ([#2994](https://github.com/libp2p/js-libp2p/issues/2994)) ([5b084e9](https://github.com/libp2p/js-libp2p/commit/5b084e9682a572e82f7907714d7807b3b9856326)) -* update spell check ([#2999](https://github.com/libp2p/js-libp2p/issues/2999)) ([6f8cfea](https://github.com/libp2p/js-libp2p/commit/6f8cfeafb2f6ddc231a85ca369fb33cf759940f7)) - - -### Dependencies - -* The following workspace dependencies were updated - * dependencies - * @libp2p/crypto bumped from ^5.0.12 to ^5.0.13 - * @libp2p/interface bumped from ^2.6.0 to ^2.6.1 - * @libp2p/interface-internal bumped from ^2.3.4 to ^2.3.5 - * @libp2p/peer-collections bumped from ^6.0.20 to ^6.0.21 - * @libp2p/peer-id bumped from ^5.0.13 to ^5.0.14 - * @libp2p/utils bumped from ^6.5.4 to ^6.5.5 - * devDependencies - * @libp2p/logger bumped from ^5.1.9 to ^5.1.10 - -## [10.1.3](https://github.com/libp2p/js-libp2p/compare/pubsub-v10.1.2...pubsub-v10.1.3) (2025-02-21) - - -### Dependencies - -* The following workspace dependencies were updated - * dependencies - * @libp2p/interface-internal bumped from ^2.3.3 to ^2.3.4 - * @libp2p/peer-collections bumped from ^6.0.19 to ^6.0.20 - * @libp2p/utils bumped from ^6.5.3 to ^6.5.4 - -## [10.1.2](https://github.com/libp2p/js-libp2p/compare/pubsub-v10.1.1...pubsub-v10.1.2) (2025-02-21) - - -### 
Dependencies - -* The following workspace dependencies were updated - * dependencies - * @libp2p/interface-internal bumped from ^2.3.2 to ^2.3.3 - * @libp2p/peer-collections bumped from ^6.0.18 to ^6.0.19 - * @libp2p/utils bumped from ^6.5.2 to ^6.5.3 - -## [10.1.1](https://github.com/libp2p/js-libp2p/compare/pubsub-v10.1.0...pubsub-v10.1.1) (2025-02-20) - - -### Dependencies - -* The following workspace dependencies were updated - * dependencies - * @libp2p/crypto bumped from ^5.0.11 to ^5.0.12 - * @libp2p/interface bumped from ^2.5.0 to ^2.6.0 - * @libp2p/interface-internal bumped from ^2.3.1 to ^2.3.2 - * @libp2p/peer-collections bumped from ^6.0.17 to ^6.0.18 - * @libp2p/peer-id bumped from ^5.0.12 to ^5.0.13 - * @libp2p/utils bumped from ^6.5.1 to ^6.5.2 - * devDependencies - * @libp2p/logger bumped from ^5.1.8 to ^5.1.9 - -## [10.1.0](https://github.com/libp2p/js-libp2p/compare/pubsub-v10.0.18...pubsub-v10.1.0) (2025-02-18) - - -### Features - -* expose maxDataLength for outgoing messages in PeerStreams ([#2954](https://github.com/libp2p/js-libp2p/issues/2954)) ([e7e2802](https://github.com/libp2p/js-libp2p/commit/e7e28025777d9ca5315d63cb822bcef6d7b961ed)) - - -### Dependencies - -* bump it-length-prefixed from 9.1.1 to 10.0.1 ([#2962](https://github.com/libp2p/js-libp2p/issues/2962)) ([1fc0e26](https://github.com/libp2p/js-libp2p/commit/1fc0e26620d2fd9d752179ab4f6dcc7b6ed5ee5c)) -* The following workspace dependencies were updated - * dependencies - * @libp2p/interface-internal bumped from ^2.3.0 to ^2.3.1 - -## [10.0.18](https://github.com/libp2p/js-libp2p/compare/pubsub-v10.0.17...pubsub-v10.0.18) (2025-02-10) - - -### Bug Fixes - -* import types from interface module ([#2946](https://github.com/libp2p/js-libp2p/issues/2946)) ([d5b399e](https://github.com/libp2p/js-libp2p/commit/d5b399e3098e8dc20e33138d9b2cd5bcd844f700)) - - -### Dependencies - -* The following workspace dependencies were updated - * dependencies - * @libp2p/crypto bumped from ^5.0.10 to 
^5.0.11 - * @libp2p/interface bumped from ^2.4.1 to ^2.5.0 - * @libp2p/interface-internal bumped from ^2.2.4 to ^2.3.0 - * @libp2p/peer-collections bumped from ^6.0.16 to ^6.0.17 - * @libp2p/peer-id bumped from ^5.0.11 to ^5.0.12 - * @libp2p/utils bumped from ^6.5.0 to ^6.5.1 - * devDependencies - * @libp2p/logger bumped from ^5.1.7 to ^5.1.8 - -## [10.0.17](https://github.com/libp2p/js-libp2p/compare/pubsub-v10.0.16...pubsub-v10.0.17) (2025-02-04) - - -### Dependencies - -* The following workspace dependencies were updated - * dependencies - * @libp2p/interface-internal bumped from ^2.2.3 to ^2.2.4 - * @libp2p/peer-collections bumped from ^6.0.15 to ^6.0.16 - * @libp2p/utils bumped from ^6.4.0 to ^6.5.0 - -## [10.0.16](https://github.com/libp2p/js-libp2p/compare/pubsub-v10.0.15...pubsub-v10.0.16) (2025-02-03) - - -### Dependencies - -* The following workspace dependencies were updated - * dependencies - * @libp2p/crypto bumped from ^5.0.9 to ^5.0.10 - * @libp2p/interface bumped from ^2.4.0 to ^2.4.1 - * @libp2p/interface-internal bumped from ^2.2.2 to ^2.2.3 - * @libp2p/peer-collections bumped from ^6.0.14 to ^6.0.15 - * @libp2p/peer-id bumped from ^5.0.10 to ^5.0.11 - * @libp2p/utils bumped from ^6.3.1 to ^6.4.0 - * devDependencies - * @libp2p/logger bumped from ^5.1.6 to ^5.1.7 - -## [10.0.15](https://github.com/libp2p/js-libp2p/compare/pubsub-v10.0.14...pubsub-v10.0.15) (2025-01-07) - - -### Dependencies - -* The following workspace dependencies were updated - * dependencies - * @libp2p/crypto bumped from ^5.0.8 to ^5.0.9 - * @libp2p/interface bumped from ^2.3.0 to ^2.4.0 - * @libp2p/interface-internal bumped from ^2.2.1 to ^2.2.2 - * @libp2p/peer-collections bumped from ^6.0.13 to ^6.0.14 - * @libp2p/peer-id bumped from ^5.0.9 to ^5.0.10 - * @libp2p/utils bumped from ^6.3.0 to ^6.3.1 - * devDependencies - * @libp2p/logger bumped from ^5.1.5 to ^5.1.6 - -## [10.0.14](https://github.com/libp2p/js-libp2p/compare/pubsub-v10.0.13...pubsub-v10.0.14) (2024-12-10) - - 
-### Dependencies - -* The following workspace dependencies were updated - * dependencies - * @libp2p/interface-internal bumped from ^2.2.0 to ^2.2.1 - -## [10.0.13](https://github.com/libp2p/js-libp2p/compare/pubsub-v10.0.12...pubsub-v10.0.13) (2024-12-09) - - -### Dependencies - -* The following workspace dependencies were updated - * dependencies - * @libp2p/crypto bumped from ^5.0.7 to ^5.0.8 - * @libp2p/interface bumped from ^2.2.1 to ^2.3.0 - * @libp2p/interface-internal bumped from ^2.1.1 to ^2.2.0 - * @libp2p/peer-collections bumped from ^6.0.12 to ^6.0.13 - * @libp2p/peer-id bumped from ^5.0.8 to ^5.0.9 - * @libp2p/utils bumped from ^6.2.1 to ^6.3.0 - * devDependencies - * @libp2p/logger bumped from ^5.1.4 to ^5.1.5 - -## [10.0.12](https://github.com/libp2p/js-libp2p/compare/pubsub-v10.0.11...pubsub-v10.0.12) (2024-11-18) - - -### Dependencies - -* The following workspace dependencies were updated - * dependencies - * @libp2p/crypto bumped from ^5.0.6 to ^5.0.7 - * @libp2p/interface bumped from ^2.2.0 to ^2.2.1 - * @libp2p/interface-internal bumped from ^2.1.0 to ^2.1.1 - * @libp2p/peer-collections bumped from ^6.0.11 to ^6.0.12 - * @libp2p/peer-id bumped from ^5.0.7 to ^5.0.8 - * @libp2p/utils bumped from ^6.2.0 to ^6.2.1 - * devDependencies - * @libp2p/logger bumped from ^5.1.3 to ^5.1.4 - -## [10.0.11](https://github.com/libp2p/js-libp2p/compare/pubsub-v10.0.10...pubsub-v10.0.11) (2024-11-16) - - -### Dependencies - -* The following workspace dependencies were updated - * dependencies - * @libp2p/interface-internal bumped from ^2.0.10 to ^2.1.0 - * @libp2p/peer-collections bumped from ^6.0.10 to ^6.0.11 - * @libp2p/utils bumped from ^6.1.3 to ^6.2.0 - -## [10.0.10](https://github.com/libp2p/js-libp2p/compare/pubsub-v10.0.9...pubsub-v10.0.10) (2024-10-28) - - -### Dependencies - -* The following workspace dependencies were updated - * dependencies - * @libp2p/crypto bumped from ^5.0.5 to ^5.0.6 - * @libp2p/interface bumped from ^2.1.3 to ^2.2.0 - * 
@libp2p/interface-internal bumped from ^2.0.9 to ^2.0.10 - * @libp2p/peer-collections bumped from ^6.0.9 to ^6.0.10 - * @libp2p/peer-id bumped from ^5.0.6 to ^5.0.7 - * @libp2p/utils bumped from ^6.1.2 to ^6.1.3 - * devDependencies - * @libp2p/logger bumped from ^5.1.2 to ^5.1.3 - -## [10.0.9](https://github.com/libp2p/js-libp2p/compare/pubsub-v10.0.8...pubsub-v10.0.9) (2024-10-23) - - -### Dependencies - -* The following workspace dependencies were updated - * dependencies - * @libp2p/interface-internal bumped from ^2.0.8 to ^2.0.9 - * @libp2p/peer-collections bumped from ^6.0.8 to ^6.0.9 - * @libp2p/peer-id bumped from ^5.0.5 to ^5.0.6 - * @libp2p/utils bumped from ^6.1.1 to ^6.1.2 - * devDependencies - * @libp2p/logger bumped from ^5.1.1 to ^5.1.2 - -## [10.0.8](https://github.com/libp2p/js-libp2p/compare/pubsub-v10.0.7...pubsub-v10.0.8) (2024-10-09) - - -### Dependencies - -* The following workspace dependencies were updated - * dependencies - * @libp2p/crypto bumped from ^5.0.4 to ^5.0.5 - * @libp2p/interface bumped from ^2.1.2 to ^2.1.3 - * @libp2p/interface-internal bumped from ^2.0.7 to ^2.0.8 - * @libp2p/peer-collections bumped from ^6.0.7 to ^6.0.8 - * @libp2p/peer-id bumped from ^5.0.4 to ^5.0.5 - * @libp2p/utils bumped from ^6.1.0 to ^6.1.1 - * devDependencies - * @libp2p/logger bumped from ^5.1.0 to ^5.1.1 - -## [10.0.7](https://github.com/libp2p/js-libp2p/compare/pubsub-v10.0.6...pubsub-v10.0.7) (2024-10-05) - - -### Dependencies - -* The following workspace dependencies were updated - * dependencies - * @libp2p/interface-internal bumped from ^2.0.6 to ^2.0.7 - * @libp2p/peer-collections bumped from ^6.0.6 to ^6.0.7 - * @libp2p/utils bumped from ^6.0.6 to ^6.1.0 - -## [10.0.6](https://github.com/libp2p/js-libp2p/compare/pubsub-v10.0.5...pubsub-v10.0.6) (2024-09-27) - - -### Dependencies - -* The following workspace dependencies were updated - * dependencies - * @libp2p/interface-internal bumped from ^2.0.5 to ^2.0.6 - * @libp2p/peer-collections bumped 
from ^6.0.5 to ^6.0.6 - * @libp2p/utils bumped from ^6.0.5 to ^6.0.6 - -## [10.0.5](https://github.com/libp2p/js-libp2p/compare/pubsub-v10.0.4...pubsub-v10.0.5) (2024-09-25) - - -### Dependencies - -* The following workspace dependencies were updated - * dependencies - * @libp2p/interface-internal bumped from ^2.0.4 to ^2.0.5 - * @libp2p/peer-collections bumped from ^6.0.4 to ^6.0.5 - * @libp2p/utils bumped from ^6.0.4 to ^6.0.5 - * devDependencies - * @libp2p/logger bumped from ^5.0.4 to ^5.1.0 - -## [10.0.4](https://github.com/libp2p/js-libp2p/compare/pubsub-v10.0.3...pubsub-v10.0.4) (2024-09-24) - - -### Dependencies - -* The following workspace dependencies were updated - * dependencies - * @libp2p/crypto bumped from ^5.0.3 to ^5.0.4 - * @libp2p/interface bumped from ^2.1.1 to ^2.1.2 - * @libp2p/interface-internal bumped from ^2.0.3 to ^2.0.4 - * @libp2p/peer-collections bumped from ^6.0.3 to ^6.0.4 - * @libp2p/peer-id bumped from ^5.0.3 to ^5.0.4 - * @libp2p/utils bumped from ^6.0.3 to ^6.0.4 - * devDependencies - * @libp2p/logger bumped from ^5.0.3 to ^5.0.4 - -## [10.0.3](https://github.com/libp2p/js-libp2p/compare/pubsub-v10.0.2...pubsub-v10.0.3) (2024-09-24) - - -### Bug Fixes - -* export transiently referenced types ([#2717](https://github.com/libp2p/js-libp2p/issues/2717)) ([7f7ec82](https://github.com/libp2p/js-libp2p/commit/7f7ec82ae4ee7761360bdfdd294de271feaf1841)) - - -### Dependencies - -* The following workspace dependencies were updated - * dependencies - * @libp2p/crypto bumped from ^5.0.2 to ^5.0.3 - * @libp2p/interface bumped from ^2.1.0 to ^2.1.1 - * @libp2p/interface-internal bumped from ^2.0.2 to ^2.0.3 - * @libp2p/peer-collections bumped from ^6.0.2 to ^6.0.3 - * @libp2p/peer-id bumped from ^5.0.2 to ^5.0.3 - * @libp2p/utils bumped from ^6.0.2 to ^6.0.3 - * devDependencies - * @libp2p/logger bumped from ^5.0.2 to ^5.0.3 - -## [10.0.2](https://github.com/libp2p/js-libp2p/compare/pubsub-v10.0.1...pubsub-v10.0.2) (2024-09-23) - - -### 
Dependencies - -* The following workspace dependencies were updated - * dependencies - * @libp2p/crypto bumped from ^5.0.1 to ^5.0.2 - * @libp2p/interface bumped from ^2.0.1 to ^2.1.0 - * @libp2p/interface-internal bumped from ^2.0.1 to ^2.0.2 - * @libp2p/peer-collections bumped from ^6.0.1 to ^6.0.2 - * @libp2p/peer-id bumped from ^5.0.1 to ^5.0.2 - * @libp2p/utils bumped from ^6.0.1 to ^6.0.2 - * devDependencies - * @libp2p/logger bumped from ^5.0.1 to ^5.0.2 - -## [10.0.1](https://github.com/libp2p/js-libp2p/compare/pubsub-v10.0.0...pubsub-v10.0.1) (2024-09-12) - - -### Dependencies - -* The following workspace dependencies were updated - * dependencies - * @libp2p/crypto bumped from ^5.0.0 to ^5.0.1 - * @libp2p/interface bumped from ^2.0.0 to ^2.0.1 - * @libp2p/interface-internal bumped from ^2.0.0 to ^2.0.1 - * @libp2p/peer-collections bumped from ^6.0.0 to ^6.0.1 - * @libp2p/peer-id bumped from ^5.0.0 to ^5.0.1 - * @libp2p/utils bumped from ^6.0.0 to ^6.0.1 - * devDependencies - * @libp2p/logger bumped from ^5.0.0 to ^5.0.1 - -## [10.0.0](https://github.com/libp2p/js-libp2p/compare/pubsub-v9.0.26...pubsub-v10.0.0) (2024-09-11) - - -### ⚠ BREAKING CHANGES - -* - `@libp2p/peer-id-factory` has been removed, use `generateKeyPair` and `peerIdFromPrivateKey` instead -* The `.code` property has been removed from most errors, use `.name` instead -* `@libp2p/interface` no longer exports a `CustomEvent` polyfill - -### Features - -* use `.name` property instead of `.code` for errors ([#2655](https://github.com/libp2p/js-libp2p/issues/2655)) ([0d20426](https://github.com/libp2p/js-libp2p/commit/0d20426fd5ea19b03345c70289bbd692e4348e1f)) - - -### Bug Fixes - -* remove CustomEvent export from `@libp2p/interface` ([#2656](https://github.com/libp2p/js-libp2p/issues/2656)) ([fab6fc9](https://github.com/libp2p/js-libp2p/commit/fab6fc960b6bc03a6bc00ae5a4b3551d7d080c73)) -* remove private key field from peer id ([#2660](https://github.com/libp2p/js-libp2p/issues/2660)) 
([3eeb0c7](https://github.com/libp2p/js-libp2p/commit/3eeb0c705bd58285a6e1ec9fcbb6987c5959d504)), closes [#2659](https://github.com/libp2p/js-libp2p/issues/2659) - - -### Dependencies - -* The following workspace dependencies were updated - * dependencies - * @libp2p/crypto bumped from ^4.1.9 to ^5.0.0 - * @libp2p/interface bumped from ^1.7.0 to ^2.0.0 - * @libp2p/interface-internal bumped from ^1.3.4 to ^2.0.0 - * @libp2p/peer-collections bumped from ^5.2.9 to ^6.0.0 - * @libp2p/peer-id bumped from ^4.2.4 to ^5.0.0 - * @libp2p/utils bumped from ^5.4.9 to ^6.0.0 - * devDependencies - * @libp2p/logger bumped from ^4.0.20 to ^5.0.0 - -## [9.0.26](https://github.com/libp2p/js-libp2p/compare/pubsub-v9.0.25...pubsub-v9.0.26) (2024-08-15) - - -### Dependencies - -* The following workspace dependencies were updated - * dependencies - * @libp2p/crypto bumped from ^4.1.8 to ^4.1.9 - * @libp2p/interface bumped from ^1.6.3 to ^1.7.0 - * @libp2p/interface-internal bumped from ^1.3.3 to ^1.3.4 - * @libp2p/peer-collections bumped from ^5.2.8 to ^5.2.9 - * @libp2p/peer-id bumped from ^4.2.3 to ^4.2.4 - * @libp2p/utils bumped from ^5.4.8 to ^5.4.9 - * devDependencies - * @libp2p/logger bumped from ^4.0.19 to ^4.0.20 - * @libp2p/peer-id-factory bumped from ^4.2.3 to ^4.2.4 - -## [9.0.25](https://github.com/libp2p/js-libp2p/compare/pubsub-v9.0.24...pubsub-v9.0.25) (2024-08-02) - - -### Dependencies - -* bump aegir from 43.0.3 to 44.0.1 ([#2603](https://github.com/libp2p/js-libp2p/issues/2603)) ([944935f](https://github.com/libp2p/js-libp2p/commit/944935f8dbcc1083e4cb4a02b49a0aab3083d3d9)) -* The following workspace dependencies were updated - * dependencies - * @libp2p/crypto bumped from ^4.1.7 to ^4.1.8 - * @libp2p/interface bumped from ^1.6.2 to ^1.6.3 - * @libp2p/interface-internal bumped from ^1.3.2 to ^1.3.3 - * @libp2p/peer-collections bumped from ^5.2.7 to ^5.2.8 - * @libp2p/peer-id bumped from ^4.2.2 to ^4.2.3 - * @libp2p/utils bumped from ^5.4.7 to ^5.4.8 - * 
devDependencies - * @libp2p/logger bumped from ^4.0.18 to ^4.0.19 - * @libp2p/peer-id-factory bumped from ^4.2.2 to ^4.2.3 - -## [9.0.24](https://github.com/libp2p/js-libp2p/compare/pubsub-v9.0.23...pubsub-v9.0.24) (2024-07-29) - - -### Dependencies - -* The following workspace dependencies were updated - * dependencies - * @libp2p/crypto bumped from ^4.1.6 to ^4.1.7 - * @libp2p/interface bumped from ^1.6.1 to ^1.6.2 - * @libp2p/interface-internal bumped from ^1.3.1 to ^1.3.2 - * @libp2p/peer-collections bumped from ^5.2.6 to ^5.2.7 - * @libp2p/peer-id bumped from ^4.2.1 to ^4.2.2 - * @libp2p/utils bumped from ^5.4.6 to ^5.4.7 - * devDependencies - * @libp2p/logger bumped from ^4.0.17 to ^4.0.18 - * @libp2p/peer-id-factory bumped from ^4.2.1 to ^4.2.2 - -## [9.0.23](https://github.com/libp2p/js-libp2p/compare/pubsub-v9.0.22...pubsub-v9.0.23) (2024-07-13) - - -### Dependencies - -* The following workspace dependencies were updated - * dependencies - * @libp2p/crypto bumped from ^4.1.5 to ^4.1.6 - * @libp2p/interface bumped from ^1.6.0 to ^1.6.1 - * @libp2p/interface-internal bumped from ^1.3.0 to ^1.3.1 - * @libp2p/peer-collections bumped from ^5.2.5 to ^5.2.6 - * @libp2p/peer-id bumped from ^4.2.0 to ^4.2.1 - * @libp2p/utils bumped from ^5.4.5 to ^5.4.6 - * devDependencies - * @libp2p/logger bumped from ^4.0.16 to ^4.0.17 - * @libp2p/peer-id-factory bumped from ^4.2.0 to ^4.2.1 - -## [9.0.22](https://github.com/libp2p/js-libp2p/compare/pubsub-v9.0.21...pubsub-v9.0.22) (2024-07-03) - - -### Dependencies - -* The following workspace dependencies were updated - * dependencies - * @libp2p/crypto bumped from ^4.1.4 to ^4.1.5 - * @libp2p/interface bumped from ^1.5.0 to ^1.6.0 - * @libp2p/interface-internal bumped from ^1.2.4 to ^1.3.0 - * @libp2p/peer-collections bumped from ^5.2.4 to ^5.2.5 - * @libp2p/peer-id bumped from ^4.1.4 to ^4.2.0 - * @libp2p/utils bumped from ^5.4.4 to ^5.4.5 - * devDependencies - * @libp2p/logger bumped from ^4.0.15 to ^4.0.16 - * 
@libp2p/peer-id-factory bumped from ^4.1.4 to ^4.2.0 - -## [9.0.21](https://github.com/libp2p/js-libp2p/compare/pubsub-v9.0.20...pubsub-v9.0.21) (2024-06-18) - - -### Dependencies - -* The following workspace dependencies were updated - * dependencies - * @libp2p/crypto bumped from ^4.1.3 to ^4.1.4 - * @libp2p/interface bumped from ^1.4.1 to ^1.5.0 - * @libp2p/interface-internal bumped from ^1.2.3 to ^1.2.4 - * @libp2p/peer-collections bumped from ^5.2.3 to ^5.2.4 - * @libp2p/peer-id bumped from ^4.1.3 to ^4.1.4 - * @libp2p/utils bumped from ^5.4.3 to ^5.4.4 - * devDependencies - * @libp2p/logger bumped from ^4.0.14 to ^4.0.15 - * @libp2p/peer-id-factory bumped from ^4.1.3 to ^4.1.4 - -## [9.0.20](https://github.com/libp2p/js-libp2p/compare/pubsub-v9.0.19...pubsub-v9.0.20) (2024-06-07) - - -### Dependencies - -* bump aegir from 42.2.11 to 43.0.1 ([#2571](https://github.com/libp2p/js-libp2p/issues/2571)) ([757fb26](https://github.com/libp2p/js-libp2p/commit/757fb2674f0a3e06fd46d3ff63f7f461c32d47d2)) -* The following workspace dependencies were updated - * dependencies - * @libp2p/crypto bumped from ^4.1.2 to ^4.1.3 - * @libp2p/interface bumped from ^1.4.0 to ^1.4.1 - * @libp2p/interface-internal bumped from ^1.2.2 to ^1.2.3 - * @libp2p/peer-collections bumped from ^5.2.2 to ^5.2.3 - * @libp2p/peer-id bumped from ^4.1.2 to ^4.1.3 - * @libp2p/utils bumped from ^5.4.2 to ^5.4.3 - * devDependencies - * @libp2p/logger bumped from ^4.0.13 to ^4.0.14 - * @libp2p/peer-id-factory bumped from ^4.1.2 to ^4.1.3 - -## [9.0.19](https://github.com/libp2p/js-libp2p/compare/pubsub-v9.0.18...pubsub-v9.0.19) (2024-05-17) - - -### Bug Fixes - -* update project config ([48444f7](https://github.com/libp2p/js-libp2p/commit/48444f750ebe3f03290bf70e84d7590edc030ea4)) - - -### Dependencies - -* bump sinon from 17.0.2 to 18.0.0 ([#2548](https://github.com/libp2p/js-libp2p/issues/2548)) ([1eb5b27](https://github.com/libp2p/js-libp2p/commit/1eb5b2713585e0d4dde927ecd307ada0b774d824)) -* The 
following workspace dependencies were updated - * dependencies - * @libp2p/crypto bumped from ^4.1.1 to ^4.1.2 - * @libp2p/interface bumped from ^1.3.1 to ^1.4.0 - * @libp2p/interface-internal bumped from ^1.2.1 to ^1.2.2 - * @libp2p/peer-collections bumped from ^5.2.1 to ^5.2.2 - * @libp2p/peer-id bumped from ^4.1.1 to ^4.1.2 - * @libp2p/utils bumped from ^5.4.1 to ^5.4.2 - * devDependencies - * @libp2p/logger bumped from ^4.0.12 to ^4.0.13 - * @libp2p/peer-id-factory bumped from ^4.1.1 to ^4.1.2 - -## [9.0.18](https://github.com/libp2p/js-libp2p/compare/pubsub-v9.0.17...pubsub-v9.0.18) (2024-05-14) - - -### Dependencies - -* The following workspace dependencies were updated - * dependencies - * @libp2p/interface-internal bumped from ^1.2.0 to ^1.2.1 - * @libp2p/peer-collections bumped from ^5.2.0 to ^5.2.1 - * @libp2p/utils bumped from ^5.4.0 to ^5.4.1 - -## [9.0.17](https://github.com/libp2p/js-libp2p/compare/pubsub-v9.0.16...pubsub-v9.0.17) (2024-05-01) - - -### Dependencies - -* The following workspace dependencies were updated - * dependencies - * @libp2p/crypto bumped from ^4.1.0 to ^4.1.1 - * @libp2p/interface bumped from ^1.3.0 to ^1.3.1 - * @libp2p/interface-internal bumped from ^1.1.1 to ^1.2.0 - * @libp2p/peer-collections bumped from ^5.1.11 to ^5.2.0 - * @libp2p/peer-id bumped from ^4.1.0 to ^4.1.1 - * @libp2p/utils bumped from ^5.3.2 to ^5.4.0 - * devDependencies - * @libp2p/logger bumped from ^4.0.11 to ^4.0.12 - * @libp2p/peer-id-factory bumped from ^4.1.0 to ^4.1.1 - -## [9.0.16](https://github.com/libp2p/js-libp2p/compare/pubsub-v9.0.15...pubsub-v9.0.16) (2024-04-24) - - -### Documentation - -* fix broken links in docs site ([#2497](https://github.com/libp2p/js-libp2p/issues/2497)) ([fd1f834](https://github.com/libp2p/js-libp2p/commit/fd1f8343db030d74cd08bca6a0cffda93532765f)), closes [#2423](https://github.com/libp2p/js-libp2p/issues/2423) - - -### Dependencies - -* The following workspace dependencies were updated - * dependencies - * 
@libp2p/crypto bumped from ^4.0.6 to ^4.1.0 - * @libp2p/interface bumped from ^1.2.0 to ^1.3.0 - * @libp2p/interface-internal bumped from ^1.1.0 to ^1.1.1 - * @libp2p/peer-collections bumped from ^5.1.10 to ^5.1.11 - * @libp2p/peer-id bumped from ^4.0.10 to ^4.1.0 - * @libp2p/utils bumped from ^5.3.1 to ^5.3.2 - * devDependencies - * @libp2p/logger bumped from ^4.0.10 to ^4.0.11 - * @libp2p/peer-id-factory bumped from ^4.0.10 to ^4.1.0 - -## [9.0.15](https://github.com/libp2p/js-libp2p/compare/pubsub-v9.0.14...pubsub-v9.0.15) (2024-04-15) - - -### Dependencies - -* The following workspace dependencies were updated - * dependencies - * @libp2p/utils bumped from ^5.3.0 to ^5.3.1 - -## [9.0.14](https://github.com/libp2p/js-libp2p/compare/pubsub-v9.0.13...pubsub-v9.0.14) (2024-04-12) - - -### Dependencies - -* The following workspace dependencies were updated - * dependencies - * @libp2p/crypto bumped from ^4.0.5 to ^4.0.6 - * @libp2p/interface bumped from ^1.1.6 to ^1.2.0 - * @libp2p/interface-internal bumped from ^1.0.11 to ^1.1.0 - * @libp2p/peer-collections bumped from ^5.1.9 to ^5.1.10 - * @libp2p/peer-id bumped from ^4.0.9 to ^4.0.10 - * @libp2p/utils bumped from ^5.2.8 to ^5.3.0 - * devDependencies - * @libp2p/logger bumped from ^4.0.9 to ^4.0.10 - * @libp2p/peer-id-factory bumped from ^4.0.9 to ^4.0.10 - -## [9.0.13](https://github.com/libp2p/js-libp2p/compare/pubsub-v9.0.12...pubsub-v9.0.13) (2024-04-05) - - -### Bug Fixes - -* add @libp2p/record module to monorepo ([#2466](https://github.com/libp2p/js-libp2p/issues/2466)) ([3ffecc5](https://github.com/libp2p/js-libp2p/commit/3ffecc5bfe806a678c1b0228ff830f1811630718)) - - -### Dependencies - -* The following workspace dependencies were updated - * dependencies - * @libp2p/crypto bumped from ^4.0.4 to ^4.0.5 - * @libp2p/interface bumped from ^1.1.5 to ^1.1.6 - * @libp2p/interface-internal bumped from ^1.0.10 to ^1.0.11 - * @libp2p/peer-collections bumped from ^5.1.8 to ^5.1.9 - * @libp2p/peer-id bumped from 
^4.0.8 to ^4.0.9 - * @libp2p/utils bumped from ^5.2.7 to ^5.2.8 - * devDependencies - * @libp2p/logger bumped from ^4.0.8 to ^4.0.9 - * @libp2p/peer-id-factory bumped from ^4.0.8 to ^4.0.9 - -## [9.0.12](https://github.com/libp2p/js-libp2p/compare/pubsub-v9.0.11...pubsub-v9.0.12) (2024-03-28) - - -### Dependencies - -* The following workspace dependencies were updated - * dependencies - * @libp2p/crypto bumped from ^4.0.3 to ^4.0.4 - * @libp2p/interface bumped from ^1.1.4 to ^1.1.5 - * @libp2p/interface-internal bumped from ^1.0.9 to ^1.0.10 - * @libp2p/peer-collections bumped from ^5.1.7 to ^5.1.8 - * @libp2p/peer-id bumped from ^4.0.7 to ^4.0.8 - * @libp2p/utils bumped from ^5.2.6 to ^5.2.7 - * devDependencies - * @libp2p/logger bumped from ^4.0.7 to ^4.0.8 - * @libp2p/peer-id-factory bumped from ^4.0.7 to ^4.0.8 - -## [9.0.11](https://github.com/libp2p/js-libp2p/compare/pubsub-v9.0.10...pubsub-v9.0.11) (2024-02-27) - - -### Documentation - -* add doc-check to all modules ([#2419](https://github.com/libp2p/js-libp2p/issues/2419)) ([6cdb243](https://github.com/libp2p/js-libp2p/commit/6cdb24362de9991e749f76b16fcd4c130e8106a0)) - - -### Dependencies - -* The following workspace dependencies were updated - * dependencies - * @libp2p/crypto bumped from ^4.0.2 to ^4.0.3 - * @libp2p/interface bumped from ^1.1.3 to ^1.1.4 - * @libp2p/interface-internal bumped from ^1.0.8 to ^1.0.9 - * @libp2p/peer-collections bumped from ^5.1.6 to ^5.1.7 - * @libp2p/peer-id bumped from ^4.0.6 to ^4.0.7 - * @libp2p/utils bumped from ^5.2.5 to ^5.2.6 - * devDependencies - * @libp2p/logger bumped from ^4.0.6 to ^4.0.7 - * @libp2p/peer-id-factory bumped from ^4.0.6 to ^4.0.7 - -## [9.0.9](https://github.com/libp2p/js-libp2p/compare/pubsub-v9.0.8...pubsub-v9.0.9) (2024-02-07) - - -### Bug Fixes - -* update patch versions of deps ([#2397](https://github.com/libp2p/js-libp2p/issues/2397)) ([0321812](https://github.com/libp2p/js-libp2p/commit/0321812e731515558f35ae2d53242035a343a21a)) - - -### 
Dependencies - -* The following workspace dependencies were updated - * dependencies - * @libp2p/crypto bumped from ^4.0.1 to ^4.0.2 - * @libp2p/interface bumped from ^1.1.2 to ^1.1.3 - * @libp2p/interface-internal bumped from ^1.0.7 to ^1.0.8 - * @libp2p/peer-collections bumped from ^5.1.5 to ^5.1.6 - * @libp2p/peer-id bumped from ^4.0.5 to ^4.0.6 - * @libp2p/utils bumped from ^5.2.3 to ^5.2.4 - * devDependencies - * @libp2p/logger bumped from ^4.0.5 to ^4.0.6 - * @libp2p/peer-id-factory bumped from ^4.0.5 to ^4.0.6 - -## [9.0.7](https://github.com/libp2p/js-libp2p/compare/pubsub-v9.0.6...pubsub-v9.0.7) (2024-01-16) - - -### Bug Fixes - -* align dependency versions and update project config ([#2357](https://github.com/libp2p/js-libp2p/issues/2357)) ([8bbd436](https://github.com/libp2p/js-libp2p/commit/8bbd43628343f995804eea3102d0571ddcebc5c4)) -* mark all packages side-effect free ([#2360](https://github.com/libp2p/js-libp2p/issues/2360)) ([3c96210](https://github.com/libp2p/js-libp2p/commit/3c96210cf6343b21199996918bae3a0f60220046)) - - -### Dependencies - -* The following workspace dependencies were updated - * dependencies - * @libp2p/crypto bumped from ^4.0.0 to ^4.0.1 - * @libp2p/interface bumped from ^1.1.1 to ^1.1.2 - * @libp2p/interface-internal bumped from ^1.0.6 to ^1.0.7 - * @libp2p/peer-collections bumped from ^5.1.4 to ^5.1.5 - * @libp2p/peer-id bumped from ^4.0.4 to ^4.0.5 - * @libp2p/utils bumped from ^5.2.1 to ^5.2.2 - * devDependencies - * @libp2p/logger bumped from ^4.0.4 to ^4.0.5 - * @libp2p/peer-id-factory bumped from ^4.0.4 to ^4.0.5 - -## [9.0.5](https://github.com/libp2p/js-libp2p/compare/pubsub-v9.0.4...pubsub-v9.0.5) (2024-01-06) - - -### Bug Fixes - -* remove extra deps ([#2340](https://github.com/libp2p/js-libp2p/issues/2340)) ([53e83ee](https://github.com/libp2p/js-libp2p/commit/53e83eea50410391ec9cff4cd8097210b93894ff)) - - -### Dependencies - -* The following workspace dependencies were updated - * dependencies - * @libp2p/crypto 
bumped from ^3.0.3 to ^3.0.4 - * @libp2p/interface bumped from ^1.1.0 to ^1.1.1 - * @libp2p/interface-internal bumped from ^1.0.4 to ^1.0.5 - * @libp2p/peer-collections bumped from ^5.1.2 to ^5.1.3 - * @libp2p/peer-id bumped from ^4.0.3 to ^4.0.4 - * @libp2p/utils bumped from ^5.1.1 to ^5.2.0 - * devDependencies - * @libp2p/logger bumped from ^4.0.3 to ^4.0.4 - * @libp2p/peer-id-factory bumped from ^4.0.2 to ^4.0.3 - -## [9.0.0](https://github.com/libp2p/js-libp2p/compare/pubsub-v8.0.13...pubsub-v9.0.0) (2023-12-01) - - -### ⚠ BREAKING CHANGES - -* requires libp2p v1 - -### Bug Fixes - -* release majors of modules that had patches during v1.0 ([#2286](https://github.com/libp2p/js-libp2p/issues/2286)) ([738dd40](https://github.com/libp2p/js-libp2p/commit/738dd40f1e1b8ed1b83693763cc91c218ec2b41b)) - - -### Dependencies - -* The following workspace dependencies were updated - * dependencies - * @libp2p/interface-internal bumped from ^1.0.0 to ^1.0.1 - * @libp2p/peer-collections bumped from ^4.0.10 to ^5.0.0 - * @libp2p/utils bumped from ^5.0.1 to ^5.0.2 - * devDependencies - * @libp2p/peer-id-factory bumped from ^3.0.10 to ^4.0.0 - -## [8.0.12](https://github.com/libp2p/js-libp2p/compare/pubsub-v8.0.11...pubsub-v8.0.12) (2023-11-30) - - -### Bug Fixes - -* restore lost commits ([#2268](https://github.com/libp2p/js-libp2p/issues/2268)) ([5775f1d](https://github.com/libp2p/js-libp2p/commit/5775f1df4f5561500e622dc0788fdacbc74e2755)) - - -### Dependencies - -* The following workspace dependencies were updated - * dependencies - * @libp2p/crypto bumped from ^3.0.0 to ^3.0.1 - * @libp2p/interface bumped from ^1.0.0 to ^1.0.1 - * @libp2p/interface-internal bumped from ^0.1.10 to ^0.1.11 - * @libp2p/peer-collections bumped from ^4.0.9 to ^4.0.10 - * @libp2p/peer-id bumped from ^4.0.0 to ^4.0.1 - * @libp2p/utils bumped from ^5.0.0 to ^5.0.1 - * devDependencies - * @libp2p/logger bumped from ^4.0.0 to ^4.0.1 - * @libp2p/peer-id-factory bumped from ^3.0.9 to ^3.0.10 - -### 
[8.0.11](https://www.github.com/libp2p/js-libp2p/compare/pubsub-v8.0.10...pubsub-v8.0.11) (2023-11-28) - - -### Bug Fixes - -* use logging component everywhere ([#2228](https://www.github.com/libp2p/js-libp2p/issues/2228)) ([e5dfde0](https://www.github.com/libp2p/js-libp2p/commit/e5dfde0883191c93903ca552433f177d48adf0b3)) - - -### Dependencies - -* The following workspace dependencies were updated - * dependencies - * @libp2p/crypto bumped from ^2.0.8 to ^3.0.0 - * @libp2p/interface bumped from ^0.1.6 to ^1.0.0 - * @libp2p/interface-internal bumped from ^0.1.9 to ^0.1.10 - * @libp2p/peer-collections bumped from ^4.0.8 to ^4.0.9 - * @libp2p/peer-id bumped from ^3.0.6 to ^4.0.0 - * @libp2p/utils bumped from ^4.0.7 to ^5.0.0 - * devDependencies - * @libp2p/logger bumped from ^3.1.0 to ^4.0.0 - * @libp2p/peer-id-factory bumped from ^3.0.8 to ^3.0.9 - -### [8.0.10](https://www.github.com/libp2p/js-libp2p/compare/pubsub-v8.0.9...pubsub-v8.0.10) (2023-11-07) - - -### Dependencies - -* The following workspace dependencies were updated - * dependencies - * @libp2p/crypto bumped from ^2.0.7 to ^2.0.8 - * @libp2p/interface bumped from ^0.1.5 to ^0.1.6 - * @libp2p/interface-internal bumped from ^0.1.8 to ^0.1.9 - * @libp2p/logger bumped from ^3.0.5 to ^3.1.0 - * @libp2p/peer-collections bumped from ^4.0.7 to ^4.0.8 - * @libp2p/peer-id bumped from ^3.0.5 to ^3.0.6 - * devDependencies - * @libp2p/peer-id-factory bumped from ^3.0.7 to ^3.0.8 - -### [8.0.9](https://www.github.com/libp2p/js-libp2p/compare/pubsub-v8.0.8...pubsub-v8.0.9) (2023-11-03) - - -### Bug Fixes - -* opt-in to toplogy notifications on transient connections ([#2049](https://www.github.com/libp2p/js-libp2p/issues/2049)) ([346ff5a](https://www.github.com/libp2p/js-libp2p/commit/346ff5a2b81bded9f9b26051501ab9d25246961c)) - - -### Dependencies - -* The following workspace dependencies were updated - * dependencies - * @libp2p/crypto bumped from ^2.0.6 to ^2.0.7 - * @libp2p/interface bumped from ^0.1.4 to ^0.1.5 - * 
@libp2p/interface-internal bumped from ^0.1.7 to ^0.1.8 - * @libp2p/logger bumped from ^3.0.4 to ^3.0.5 - * @libp2p/peer-collections bumped from ^4.0.6 to ^4.0.7 - * @libp2p/peer-id bumped from ^3.0.4 to ^3.0.5 - * devDependencies - * @libp2p/peer-id-factory bumped from ^3.0.6 to ^3.0.7 - -### [8.0.8](https://www.github.com/libp2p/js-libp2p/compare/pubsub-v8.0.7...pubsub-v8.0.8) (2023-10-25) - - -### Bug Fixes - -* rename event emitter class ([#2173](https://www.github.com/libp2p/js-libp2p/issues/2173)) ([50f912c](https://www.github.com/libp2p/js-libp2p/commit/50f912c2608caecc09acbcb0f46b4df4af073080)) -* revert "refactor: rename event emitter class" ([#2172](https://www.github.com/libp2p/js-libp2p/issues/2172)) ([0ef5f7f](https://www.github.com/libp2p/js-libp2p/commit/0ef5f7f62d9c6d822e0a4b99cc203a1516b11f2f)) - - -### Dependencies - -* The following workspace dependencies were updated - * dependencies - * @libp2p/crypto bumped from ^2.0.5 to ^2.0.6 - * @libp2p/interface bumped from ^0.1.3 to ^0.1.4 - * @libp2p/interface-internal bumped from ^0.1.6 to ^0.1.7 - * @libp2p/logger bumped from ^3.0.3 to ^3.0.4 - * @libp2p/peer-collections bumped from ^4.0.5 to ^4.0.6 - * @libp2p/peer-id bumped from ^3.0.3 to ^3.0.4 - * devDependencies - * @libp2p/peer-id-factory bumped from ^3.0.5 to ^3.0.6 - -### [8.0.7](https://www.github.com/libp2p/js-libp2p/compare/pubsub-v8.0.6...pubsub-v8.0.7) (2023-10-06) - - -### Dependencies - -* The following workspace dependencies were updated - * dependencies - * @libp2p/crypto bumped from ^2.0.4 to ^2.0.5 - * @libp2p/interface bumped from ^0.1.2 to ^0.1.3 - * @libp2p/interface-internal bumped from ^0.1.5 to ^0.1.6 - * @libp2p/logger bumped from ^3.0.2 to ^3.0.3 - * @libp2p/peer-collections bumped from ^4.0.4 to ^4.0.5 - * @libp2p/peer-id bumped from ^3.0.2 to ^3.0.3 - * devDependencies - * @libp2p/peer-id-factory bumped from ^3.0.4 to ^3.0.5 - -### [8.0.6](https://www.github.com/libp2p/js-libp2p/compare/pubsub-v8.0.5...pubsub-v8.0.6) 
(2023-09-15) - - -### Dependencies - -* The following workspace dependencies were updated - * dependencies - * @libp2p/crypto bumped from ^2.0.3 to ^2.0.4 - * @libp2p/interface-internal bumped from ^0.1.4 to ^0.1.5 - * @libp2p/peer-collections bumped from ^4.0.3 to ^4.0.4 - * devDependencies - * @libp2p/peer-id-factory bumped from ^3.0.3 to ^3.0.4 - -### [8.0.5](https://www.github.com/libp2p/js-libp2p/compare/pubsub-v8.0.4...pubsub-v8.0.5) (2023-08-16) - - -### Dependencies - -* The following workspace dependencies were updated - * dependencies - * @libp2p/interface-internal bumped from ^0.1.3 to ^0.1.4 - -### [8.0.4](https://www.github.com/libp2p/js-libp2p/compare/pubsub-v8.0.3...pubsub-v8.0.4) (2023-08-14) - - -### Bug Fixes - -* update project config ([9c0353c](https://www.github.com/libp2p/js-libp2p/commit/9c0353cf5a1e13196ca0e7764f87e36478518f69)) - - -### Dependencies - -* The following workspace dependencies were updated - * dependencies - * @libp2p/crypto bumped from ^2.0.2 to ^2.0.3 - * @libp2p/interface bumped from ^0.1.1 to ^0.1.2 - * @libp2p/interface-internal bumped from ^0.1.2 to ^0.1.3 - * @libp2p/logger bumped from ^3.0.1 to ^3.0.2 - * @libp2p/peer-collections bumped from ^4.0.2 to ^4.0.3 - * @libp2p/peer-id bumped from ^3.0.1 to ^3.0.2 - * devDependencies - * @libp2p/peer-id-factory bumped from ^3.0.2 to ^3.0.3 - -### [8.0.3](https://www.github.com/libp2p/js-libp2p/compare/pubsub-v8.0.2...pubsub-v8.0.3) (2023-08-05) - - -### Dependencies - -* The following workspace dependencies were updated - * dependencies - * @libp2p/crypto bumped from ^2.0.1 to ^2.0.2 - * @libp2p/interface bumped from ^0.1.0 to ^0.1.1 - * @libp2p/interface-internal bumped from ^0.1.1 to ^0.1.2 - * @libp2p/logger bumped from ^3.0.0 to ^3.0.1 - * @libp2p/peer-collections bumped from ^4.0.1 to ^4.0.2 - * @libp2p/peer-id bumped from ^3.0.0 to ^3.0.1 - * devDependencies - * @libp2p/peer-id-factory bumped from ^3.0.1 to ^3.0.2 - -### 
[8.0.2](https://www.github.com/libp2p/js-libp2p/compare/pubsub-v8.0.1...pubsub-v8.0.2) (2023-08-04) - - -### Dependencies - -* The following workspace dependencies were updated - * dependencies - * @libp2p/crypto bumped from ^2.0.0 to ^2.0.1 - * @libp2p/interface-internal bumped from ^0.1.0 to ^0.1.1 - * @libp2p/peer-collections bumped from ^4.0.0 to ^4.0.1 - * devDependencies - * @libp2p/peer-id-factory bumped from ^3.0.0 to ^3.0.1 - -### [8.0.1](https://www.github.com/libp2p/js-libp2p/compare/pubsub-v8.0.0...pubsub-v8.0.1) (2023-08-01) - - -### Bug Fixes - -* update package config ([#1919](https://www.github.com/libp2p/js-libp2p/issues/1919)) ([8d49602](https://www.github.com/libp2p/js-libp2p/commit/8d49602fb6f0c906f1920d397ff28705bb0bc845)) - -## [7.0.2](https://github.com/libp2p/js-libp2p-pubsub/compare/v7.0.1...v7.0.2) (2023-06-27) - - -### Trivial Changes - -* Update .github/workflows/semantic-pull-request.yml [skip ci] ([ab88716](https://github.com/libp2p/js-libp2p-pubsub/commit/ab8871630d551841696cbcfaa94a2d2943601d74)) -* Update .github/workflows/stale.yml [skip ci] ([f032696](https://github.com/libp2p/js-libp2p-pubsub/commit/f032696bbd33329985845341d2d1c6b7f9e8d23b)) - - -### Dependencies - -* **dev:** bump aegir from 38.1.8 to 39.0.10 ([#146](https://github.com/libp2p/js-libp2p-pubsub/issues/146)) ([074e78b](https://github.com/libp2p/js-libp2p-pubsub/commit/074e78b1708190bc8f607bf0895fcfca77375d34)) - -## [7.0.1](https://github.com/libp2p/js-libp2p-pubsub/compare/v7.0.0...v7.0.1) (2023-04-19) - - -### Dependencies - -* bump abortable-iterator from 4.0.3 to 5.0.1 ([#137](https://github.com/libp2p/js-libp2p-pubsub/issues/137)) ([695ad25](https://github.com/libp2p/js-libp2p-pubsub/commit/695ad25b3b9b53ddbe646b507e7e7e3051b834cf)) - -## [7.0.0](https://github.com/libp2p/js-libp2p-pubsub/compare/v6.0.6...v7.0.0) (2023-04-18) - - -### ⚠ BREAKING CHANGES - -* update stream deps (#136) - -### Dependencies - -* update stream deps 
([#136](https://github.com/libp2p/js-libp2p-pubsub/issues/136)) ([8d6af79](https://github.com/libp2p/js-libp2p-pubsub/commit/8d6af79820700e2b7ffb4f11e939211cf2191f6e)) - -## [6.0.6](https://github.com/libp2p/js-libp2p-pubsub/compare/v6.0.5...v6.0.6) (2023-04-12) - - -### Dependencies - -* bump @libp2p/interface-connection from 3.1.1 to 4.0.0 ([#135](https://github.com/libp2p/js-libp2p-pubsub/issues/135)) ([96b0c41](https://github.com/libp2p/js-libp2p-pubsub/commit/96b0c41ea3ecca6cf28ce8417ab0a5d782531658)) - -## [6.0.5](https://github.com/libp2p/js-libp2p-pubsub/compare/v6.0.4...v6.0.5) (2023-04-03) - - -### Bug Fixes - -* update project config ([5c7e7f9](https://github.com/libp2p/js-libp2p-pubsub/commit/5c7e7f9f0393c0b231108bd51a0b8c805712ca09)) - - -### Trivial Changes - -* Update .github/workflows/semantic-pull-request.yml [skip ci] ([b1c5590](https://github.com/libp2p/js-libp2p-pubsub/commit/b1c5590a9090adadc69d9f5b0ea508035e1c02f8)) -* Update .github/workflows/semantic-pull-request.yml [skip ci] ([f83a2f7](https://github.com/libp2p/js-libp2p-pubsub/commit/f83a2f76235639aed4bd9ecd6114365330acd5ff)) - - -### Dependencies - -* bump it-length-prefixed from 8.x to 9.x ([#134](https://github.com/libp2p/js-libp2p-pubsub/issues/134)) ([ae3e688](https://github.com/libp2p/js-libp2p-pubsub/commit/ae3e6881aab3d79bf61ac1d3dab952f4a36cbd25)) -* bump it-pipe from 2.0.5 to 3.0.0 ([#133](https://github.com/libp2p/js-libp2p-pubsub/issues/133)) ([cafd733](https://github.com/libp2p/js-libp2p-pubsub/commit/cafd7330727644b1074f57920099888385dbd81f)) - -## [6.0.4](https://github.com/libp2p/js-libp2p-pubsub/compare/v6.0.3...v6.0.4) (2023-02-22) - - -### Dependencies - -* **dev:** bump aegir from 37.12.1 to 38.1.6 ([#128](https://github.com/libp2p/js-libp2p-pubsub/issues/128)) ([7609545](https://github.com/libp2p/js-libp2p-pubsub/commit/7609545f732f94a1e52586b99f62f6c49d2b6c76)) - -## [6.0.3](https://github.com/libp2p/js-libp2p-pubsub/compare/v6.0.2...v6.0.3) (2023-02-22) - - -### 
Dependencies - -* **dev:** bump protons from 6.1.3 to 7.0.2 ([#124](https://github.com/libp2p/js-libp2p-pubsub/issues/124)) ([302763e](https://github.com/libp2p/js-libp2p-pubsub/commit/302763ecbc95b03051c071f821464d97825cb693)) - -## [6.0.2](https://github.com/libp2p/js-libp2p-pubsub/compare/v6.0.1...v6.0.2) (2023-02-22) - - -### Bug Fixes - -* get key from peer id if not specified in the message ([#129](https://github.com/libp2p/js-libp2p-pubsub/issues/129)) ([c183c70](https://github.com/libp2p/js-libp2p-pubsub/commit/c183c70f4d57af486e74fd2920eb1d0a878e6ab3)) - - -### Trivial Changes - -* Update .github/workflows/semantic-pull-request.yml [skip ci] ([9839b71](https://github.com/libp2p/js-libp2p-pubsub/commit/9839b71811a2467a9317700c9874b14a6da8759e)) - -## [6.0.1](https://github.com/libp2p/js-libp2p-pubsub/compare/v6.0.0...v6.0.1) (2023-01-31) - - -### Bug Fixes - -* allow `key` field to be unset ([#118](https://github.com/libp2p/js-libp2p-pubsub/issues/118)) ([2567a45](https://github.com/libp2p/js-libp2p-pubsub/commit/2567a454c9f4c91ab7d55e6a90c79e816d527a30)) - - -### Trivial Changes - -* replace err-code with CodeError ([#116](https://github.com/libp2p/js-libp2p-pubsub/issues/116)) ([e121e4b](https://github.com/libp2p/js-libp2p-pubsub/commit/e121e4b18ab9bca90ee4b596928a1de84fb412f7)), closes [js-libp2p#1269](https://github.com/libp2p/js-libp2p/issues/1269) - -## [6.0.0](https://github.com/libp2p/js-libp2p-pubsub/compare/v5.0.1...v6.0.0) (2023-01-06) - - -### ⚠ BREAKING CHANGES - -* update multiformats to v11 (#115) - -### Bug Fixes - -* update multiformats to v11 ([#115](https://github.com/libp2p/js-libp2p-pubsub/issues/115)) ([148f554](https://github.com/libp2p/js-libp2p-pubsub/commit/148f5548001896869caca90c3cad9d8d363638f0)) - -## [5.0.1](https://github.com/libp2p/js-libp2p-pubsub/compare/v5.0.0...v5.0.1) (2022-12-16) - - -### Documentation - -* publish api docs ([#113](https://github.com/libp2p/js-libp2p-pubsub/issues/113)) 
([bc20def](https://github.com/libp2p/js-libp2p-pubsub/commit/bc20defefafbe97defb64e18a7ae10527bff4ae6)) - -## [5.0.0](https://github.com/libp2p/js-libp2p-pubsub/compare/v4.0.1...v5.0.0) (2022-10-12) - - -### ⚠ BREAKING CHANGES - -* modules no longer implement `Initializable` instead switching to constructor injection - -### Bug Fixes - -* remove @libp2p/components ([#106](https://github.com/libp2p/js-libp2p-pubsub/issues/106)) ([01707d7](https://github.com/libp2p/js-libp2p-pubsub/commit/01707d7dde5ff7d2f87115f9215d7a8a35d3d3f4)) - -## [4.0.1](https://github.com/libp2p/js-libp2p-pubsub/compare/v4.0.0...v4.0.1) (2022-10-11) - - -### Bug Fixes - -* update interface-pubsub and adjust topicValidator implementation ([#102](https://github.com/libp2p/js-libp2p-pubsub/issues/102)) ([f84d365](https://github.com/libp2p/js-libp2p-pubsub/commit/f84d36588a1096f349d68752db43247346163e82)) - - -### Documentation - -* update readme ([7a6f91d](https://github.com/libp2p/js-libp2p-pubsub/commit/7a6f91da1dd05c0039cebfe2a17742736be08c08)) - -## [4.0.0](https://github.com/libp2p/js-libp2p-pubsub/compare/v3.1.3...v4.0.0) (2022-10-07) - - -### ⚠ BREAKING CHANGES - -* bump @libp2p/components from 2.1.1 to 3.0.0 (#103) - -### Dependencies - -* bump @libp2p/components from 2.1.1 to 3.0.0 ([#103](https://github.com/libp2p/js-libp2p-pubsub/issues/103)) ([fe407fe](https://github.com/libp2p/js-libp2p-pubsub/commit/fe407fe981c57aaef0b022b536a696c09faa72fd)) - -## [3.1.3](https://github.com/libp2p/js-libp2p-pubsub/compare/v3.1.2...v3.1.3) (2022-09-21) - - -### Trivial Changes - -* Update .github/workflows/stale.yml [skip ci] ([fcf5da9](https://github.com/libp2p/js-libp2p-pubsub/commit/fcf5da9f5038f3a544724adff2aa5559d31a82fe)) - - -### Dependencies - -* update @multiformats/multiaddr to 11.0.0 ([#101](https://github.com/libp2p/js-libp2p-pubsub/issues/101)) ([9524fa4](https://github.com/libp2p/js-libp2p-pubsub/commit/9524fa4e2d1935d7603bf2d2bfb09ac4f13675c2)) - -## 
[3.1.2](https://github.com/libp2p/js-libp2p-pubsub/compare/v3.1.1...v3.1.2) (2022-08-11) - - -### Dependencies - -* **dev:** update protons to 5.1.0 ([#98](https://github.com/libp2p/js-libp2p-pubsub/issues/98)) ([aa6dc45](https://github.com/libp2p/js-libp2p-pubsub/commit/aa6dc453dd2d5cdafa58b5c75571f9ec9f69d197)) - -## [3.1.1](https://github.com/libp2p/js-libp2p-pubsub/compare/v3.1.0...v3.1.1) (2022-08-10) - - -### Dependencies - -* update all deps ([#94](https://github.com/libp2p/js-libp2p-pubsub/issues/94)) ([5d5d788](https://github.com/libp2p/js-libp2p-pubsub/commit/5d5d78820c5feaca070ef504c83b730fd3b8b2d4)) - -## [3.1.0](https://github.com/libp2p/js-libp2p-pubsub/compare/v3.0.4...v3.1.0) (2022-08-03) - - -### Features - -* remove unnecessary direct dependency ([#92](https://github.com/libp2p/js-libp2p-pubsub/issues/92)) ([6d51017](https://github.com/libp2p/js-libp2p-pubsub/commit/6d510173d3708e32eb635aac6c3cf7c616d5be4c)) - -## [3.0.4](https://github.com/libp2p/js-libp2p-pubsub/compare/v3.0.3...v3.0.4) (2022-08-01) - - -### Trivial Changes - -* update project config ([#86](https://github.com/libp2p/js-libp2p-pubsub/issues/86)) ([3251829](https://github.com/libp2p/js-libp2p-pubsub/commit/3251829d4bb433fd26dc5cc9c8366c9a49d23e76)) - - -### Dependencies - -* update it-length-prefixed and uint8arraylists deps ([#91](https://github.com/libp2p/js-libp2p-pubsub/issues/91)) ([f295fce](https://github.com/libp2p/js-libp2p-pubsub/commit/f295fce10a32edb73789a6b08cd9ce9420bbb6a3)) - -## [3.0.3](https://github.com/libp2p/js-libp2p-pubsub/compare/v3.0.2...v3.0.3) (2022-06-30) - - -### Trivial Changes - -* **deps:** bump @libp2p/peer-collections from 1.0.3 to 2.0.0 ([#79](https://github.com/libp2p/js-libp2p-pubsub/issues/79)) ([c066676](https://github.com/libp2p/js-libp2p-pubsub/commit/c06667694053e4d6df1607cce7cffdbe9a3c25c0)) - -## [3.0.2](https://github.com/libp2p/js-libp2p-pubsub/compare/v3.0.1...v3.0.2) (2022-06-23) - - -### Bug Fixes - -* do not unsubscribe after publish 
([#78](https://github.com/libp2p/js-libp2p-pubsub/issues/78)) ([760594e](https://github.com/libp2p/js-libp2p-pubsub/commit/760594e57224e38139a560c37747e52f9dd3e593)) - -## [3.0.1](https://github.com/libp2p/js-libp2p-pubsub/compare/v3.0.0...v3.0.1) (2022-06-17) - - -### Bug Fixes - -* limit stream concurrency ([#77](https://github.com/libp2p/js-libp2p-pubsub/issues/77)) ([d4f1779](https://github.com/libp2p/js-libp2p-pubsub/commit/d4f1779b68e658211e7a50ba446ec479bb413d2b)) - -## [3.0.0](https://github.com/libp2p/js-libp2p-pubsub/compare/v2.0.0...v3.0.0) (2022-06-16) - - -### ⚠ BREAKING CHANGES - -* update to simpler connection api - -### Trivial Changes - -* update deps ([#76](https://github.com/libp2p/js-libp2p-pubsub/issues/76)) ([50d1a5f](https://github.com/libp2p/js-libp2p-pubsub/commit/50d1a5fdb487f264f1f9da1facf96f4da6836649)) - -## [2.0.0](https://github.com/libp2p/js-libp2p-pubsub/compare/v1.3.0...v2.0.0) (2022-06-15) - - -### ⚠ BREAKING CHANGES - -* uses new single-issue libp2p interface modules - -Co-authored-by: achingbrain - -### Features - -* update to latest libp2p interfaces ([#74](https://github.com/libp2p/js-libp2p-pubsub/issues/74)) ([fe38340](https://github.com/libp2p/js-libp2p-pubsub/commit/fe38340715f37f6e976c526bf45e10d649b118dc)) - -## [@libp2p/pubsub-v1.3.0](https://github.com/libp2p/js-libp2p-interfaces/compare/@libp2p/pubsub-v1.2.24...@libp2p/pubsub-v1.3.0) (2022-05-23) - - -### Features - -* expose utility methods to convert bigint to bytes and back ([#213](https://github.com/libp2p/js-libp2p-interfaces/issues/213)) ([3d2e59c](https://github.com/libp2p/js-libp2p-interfaces/commit/3d2e59c8fd8af5d618df904ae9d40518a13de547)) - -## [@libp2p/pubsub-v1.2.24](https://github.com/libp2p/js-libp2p-interfaces/compare/@libp2p/pubsub-v1.2.23...@libp2p/pubsub-v1.2.24) (2022-05-20) - - -### Bug Fixes - -* update interfaces ([#215](https://github.com/libp2p/js-libp2p-interfaces/issues/215)) 
([72e6890](https://github.com/libp2p/js-libp2p-interfaces/commit/72e6890826dadbd6e7cbba5536bde350ca4286e6)) - -## [@libp2p/pubsub-v1.2.23](https://github.com/libp2p/js-libp2p-interfaces/compare/@libp2p/pubsub-v1.2.22...@libp2p/pubsub-v1.2.23) (2022-05-10) - - -### Trivial Changes - -* **deps:** bump sinon from 13.0.2 to 14.0.0 ([#211](https://github.com/libp2p/js-libp2p-interfaces/issues/211)) ([8859f70](https://github.com/libp2p/js-libp2p-interfaces/commit/8859f70943c0bcdb210f54a338ae901739e5e6f2)) - -## [@libp2p/pubsub-v1.2.22](https://github.com/libp2p/js-libp2p-interfaces/compare/@libp2p/pubsub-v1.2.21...@libp2p/pubsub-v1.2.22) (2022-05-10) - - -### Bug Fixes - -* regenerate protobuf code ([#212](https://github.com/libp2p/js-libp2p-interfaces/issues/212)) ([3cf210e](https://github.com/libp2p/js-libp2p-interfaces/commit/3cf210e230863f8049ac6c3ed2e73abb180fb8b2)) - -## [@libp2p/pubsub-v1.2.21](https://github.com/libp2p/js-libp2p-interfaces/compare/@libp2p/pubsub-v1.2.20...@libp2p/pubsub-v1.2.21) (2022-05-04) - - -### Bug Fixes - -* move startable and events interfaces ([#209](https://github.com/libp2p/js-libp2p-interfaces/issues/209)) ([8ce8a08](https://github.com/libp2p/js-libp2p-interfaces/commit/8ce8a08c94b0738aa32da516558977b195ddd8ed)) - -## [@libp2p/pubsub-v1.2.20](https://github.com/libp2p/js-libp2p-interfaces/compare/@libp2p/pubsub-v1.2.19...@libp2p/pubsub-v1.2.20) (2022-04-22) - - -### Bug Fixes - -* update pubsub interface in line with gossipsub ([#199](https://github.com/libp2p/js-libp2p-interfaces/issues/199)) ([3f55596](https://github.com/libp2p/js-libp2p-interfaces/commit/3f555965cddea3ef03e7217b755c82aa4107e093)) - -## [@libp2p/pubsub-v1.2.19](https://github.com/libp2p/js-libp2p-interfaces/compare/@libp2p/pubsub-v1.2.18...@libp2p/pubsub-v1.2.19) (2022-04-21) - - -### Bug Fixes - -* test PubSub interface and not PubSubBaseProtocol ([#198](https://github.com/libp2p/js-libp2p-interfaces/issues/198)) 
([96c15c9](https://github.com/libp2p/js-libp2p-interfaces/commit/96c15c9780821a3cb763e48854d64377bf562692)) - -## [@libp2p/pubsub-v1.2.18](https://github.com/libp2p/js-libp2p-interfaces/compare/@libp2p/pubsub-v1.2.17...@libp2p/pubsub-v1.2.18) (2022-04-20) - - -### Bug Fixes - -* emit pubsub messages using 'message' event ([#197](https://github.com/libp2p/js-libp2p-interfaces/issues/197)) ([df9b685](https://github.com/libp2p/js-libp2p-interfaces/commit/df9b685cea30653109f2fa2cb5583a3bca7b09bb)) - -## [@libp2p/pubsub-v1.2.17](https://github.com/libp2p/js-libp2p-interfaces/compare/@libp2p/pubsub-v1.2.16...@libp2p/pubsub-v1.2.17) (2022-04-19) - - -### Trivial Changes - -* remove extraneous readme ([#196](https://github.com/libp2p/js-libp2p-interfaces/issues/196)) ([ee1d00c](https://github.com/libp2p/js-libp2p-interfaces/commit/ee1d00cc209909836f12f17d62f1165f11689488)) - -## [@libp2p/pubsub-v1.2.16](https://github.com/libp2p/js-libp2p-interfaces/compare/@libp2p/pubsub-v1.2.15...@libp2p/pubsub-v1.2.16) (2022-04-19) - - -### Bug Fixes - -* move dev deps to prod ([#195](https://github.com/libp2p/js-libp2p-interfaces/issues/195)) ([3e1ffc7](https://github.com/libp2p/js-libp2p-interfaces/commit/3e1ffc7b174e74be483943ad4e5fcab823ae3f6d)) - -## [@libp2p/pubsub-v1.2.15](https://github.com/libp2p/js-libp2p-interfaces/compare/@libp2p/pubsub-v1.2.14...@libp2p/pubsub-v1.2.15) (2022-04-13) - - -### Bug Fixes - -* add keychain types, fix bigint types ([#193](https://github.com/libp2p/js-libp2p-interfaces/issues/193)) ([9ceadf9](https://github.com/libp2p/js-libp2p-interfaces/commit/9ceadf9d5c42a12d88d74ddd9140e34f7fa63537)) - -## [@libp2p/pubsub-v1.2.14](https://github.com/libp2p/js-libp2p-interfaces/compare/@libp2p/pubsub-v1.2.13...@libp2p/pubsub-v1.2.14) (2022-04-08) - - -### Bug Fixes - -* swap protobufjs for protons ([#191](https://github.com/libp2p/js-libp2p-interfaces/issues/191)) 
([d72b30c](https://github.com/libp2p/js-libp2p-interfaces/commit/d72b30cfca4b9145e0b31db28e8fa3329a180e83)) - - -### Trivial Changes - -* update aegir ([#192](https://github.com/libp2p/js-libp2p-interfaces/issues/192)) ([41c1494](https://github.com/libp2p/js-libp2p-interfaces/commit/41c14941e8b67d6601a90b4d48a2776573d55e60)) - -## [@libp2p/pubsub-v1.2.13](https://github.com/libp2p/js-libp2p-interfaces/compare/@libp2p/pubsub-v1.2.12...@libp2p/pubsub-v1.2.13) (2022-03-24) - - -### Bug Fixes - -* rename peer data to peer info ([#187](https://github.com/libp2p/js-libp2p-interfaces/issues/187)) ([dfea342](https://github.com/libp2p/js-libp2p-interfaces/commit/dfea3429bad57abde040397e4e7a58539829e9c2)) - -## [@libp2p/pubsub-v1.2.12](https://github.com/libp2p/js-libp2p-interfaces/compare/@libp2p/pubsub-v1.2.11...@libp2p/pubsub-v1.2.12) (2022-03-21) - - -### Bug Fixes - -* handle empty pubsub messages ([#185](https://github.com/libp2p/js-libp2p-interfaces/issues/185)) ([0db8d84](https://github.com/libp2p/js-libp2p-interfaces/commit/0db8d84dd98ff6e99776c01a6b5bab404033bffa)) - -## [@libp2p/pubsub-v1.2.11](https://github.com/libp2p/js-libp2p-interfaces/compare/@libp2p/pubsub-v1.2.10...@libp2p/pubsub-v1.2.11) (2022-03-20) - - -### Bug Fixes - -* update pubsub types ([#183](https://github.com/libp2p/js-libp2p-interfaces/issues/183)) ([7ef4baa](https://github.com/libp2p/js-libp2p-interfaces/commit/7ef4baad0fe30f783f3eecd5199ef92af08b7f57)) - -## [@libp2p/pubsub-v1.2.10](https://github.com/libp2p/js-libp2p-interfaces/compare/@libp2p/pubsub-v1.2.9...@libp2p/pubsub-v1.2.10) (2022-03-15) - - -### Bug Fixes - -* simplify transport interface, update interfaces for use with libp2p ([#180](https://github.com/libp2p/js-libp2p-interfaces/issues/180)) ([ec81622](https://github.com/libp2p/js-libp2p-interfaces/commit/ec81622e5b7c6d256e0f8aed6d3695642473293b)) - -## [@libp2p/pubsub-v1.2.9](https://github.com/libp2p/js-libp2p-interfaces/compare/@libp2p/pubsub-v1.2.8...@libp2p/pubsub-v1.2.9) 
(2022-02-27) - - -### Bug Fixes - -* rename crypto to connection-encrypter ([#179](https://github.com/libp2p/js-libp2p-interfaces/issues/179)) ([d197f55](https://github.com/libp2p/js-libp2p-interfaces/commit/d197f554d7cdadb3b05ed2d6c69fda2c4362b1eb)) - -## [@libp2p/pubsub-v1.2.8](https://github.com/libp2p/js-libp2p-interfaces/compare/@libp2p/pubsub-v1.2.7...@libp2p/pubsub-v1.2.8) (2022-02-27) - - -### Bug Fixes - -* update package config and add connection gater interface ([#178](https://github.com/libp2p/js-libp2p-interfaces/issues/178)) ([c6079a6](https://github.com/libp2p/js-libp2p-interfaces/commit/c6079a6367f004788062df3e30ad2e26330d947b)) - -## [@libp2p/pubsub-v1.2.7](https://github.com/libp2p/js-libp2p-interfaces/compare/@libp2p/pubsub-v1.2.6...@libp2p/pubsub-v1.2.7) (2022-02-18) - - -### Bug Fixes - -* simpler pubsub ([#172](https://github.com/libp2p/js-libp2p-interfaces/issues/172)) ([98715ed](https://github.com/libp2p/js-libp2p-interfaces/commit/98715ed73183b32e4fda3d878a462389548358d9)) - -## [@libp2p/pubsub-v1.2.6](https://github.com/libp2p/js-libp2p-interfaces/compare/@libp2p/pubsub-v1.2.5...@libp2p/pubsub-v1.2.6) (2022-02-17) - - -### Bug Fixes - -* update deps ([#171](https://github.com/libp2p/js-libp2p-interfaces/issues/171)) ([d0d2564](https://github.com/libp2p/js-libp2p-interfaces/commit/d0d2564a84a0722ab587a3aa6ec01e222442b100)) - -## [@libp2p/pubsub-v1.2.5](https://github.com/libp2p/js-libp2p-interfaces/compare/@libp2p/pubsub-v1.2.4...@libp2p/pubsub-v1.2.5) (2022-02-17) - - -### Bug Fixes - -* add multistream-select and update pubsub types ([#170](https://github.com/libp2p/js-libp2p-interfaces/issues/170)) ([b9ecb2b](https://github.com/libp2p/js-libp2p-interfaces/commit/b9ecb2bee8f2abc0c41bfcf7bf2025894e37ddc2)) - -## [@libp2p/pubsub-v1.2.4](https://github.com/libp2p/js-libp2p-interfaces/compare/@libp2p/pubsub-v1.2.3...@libp2p/pubsub-v1.2.4) (2022-02-12) - - -### Bug Fixes - -* hide implementations behind factory methods 
([#167](https://github.com/libp2p/js-libp2p-interfaces/issues/167)) ([2fba080](https://github.com/libp2p/js-libp2p-interfaces/commit/2fba0800c9896af6dcc49da4fa904bb4a3e3e40d)) - -## [@libp2p/pubsub-v1.2.3](https://github.com/libp2p/js-libp2p-interfaces/compare/@libp2p/pubsub-v1.2.2...@libp2p/pubsub-v1.2.3) (2022-02-11) - - -### Bug Fixes - -* simpler topologies ([#164](https://github.com/libp2p/js-libp2p-interfaces/issues/164)) ([45fcaa1](https://github.com/libp2p/js-libp2p-interfaces/commit/45fcaa10a6a3215089340ff2eff117d7fd1100e7)) - -## [@libp2p/pubsub-v1.2.2](https://github.com/libp2p/js-libp2p-interfaces/compare/@libp2p/pubsub-v1.2.1...@libp2p/pubsub-v1.2.2) (2022-02-10) - - -### Bug Fixes - -* make registrar simpler ([#163](https://github.com/libp2p/js-libp2p-interfaces/issues/163)) ([d122f3d](https://github.com/libp2p/js-libp2p-interfaces/commit/d122f3daaccc04039d90814960da92b513265644)) - -## [@libp2p/pubsub-v1.2.1](https://github.com/libp2p/js-libp2p-interfaces/compare/@libp2p/pubsub-v1.2.0...@libp2p/pubsub-v1.2.1) (2022-02-10) - - -### Bug Fixes - -* remove node event emitters ([#161](https://github.com/libp2p/js-libp2p-interfaces/issues/161)) ([221fb6a](https://github.com/libp2p/js-libp2p-interfaces/commit/221fb6a024430dc56288d73d8b8ce1aa88427701)) - -## [@libp2p/pubsub-v1.2.0](https://github.com/libp2p/js-libp2p-interfaces/compare/@libp2p/pubsub-v1.1.0...@libp2p/pubsub-v1.2.0) (2022-02-09) - - -### Features - -* add peer store/records, and streams are just streams ([#160](https://github.com/libp2p/js-libp2p-interfaces/issues/160)) ([8860a0c](https://github.com/libp2p/js-libp2p-interfaces/commit/8860a0cd46b359a5648402d83870f7ff957222fe)) - -## [@libp2p/pubsub-v1.1.0](https://github.com/libp2p/js-libp2p-interfaces/compare/@libp2p/pubsub-v1.0.6...@libp2p/pubsub-v1.1.0) (2022-02-07) - - -### Features - -* add logger package ([#158](https://github.com/libp2p/js-libp2p-interfaces/issues/158)) 
([f327cd2](https://github.com/libp2p/js-libp2p-interfaces/commit/f327cd24825d9ce2f45a02fdb9b47c9735c847e0)) - -## [@libp2p/pubsub-v1.0.6](https://github.com/libp2p/js-libp2p-interfaces/compare/@libp2p/pubsub-v1.0.5...@libp2p/pubsub-v1.0.6) (2022-02-05) - - -### Bug Fixes - -* fix muxer tests ([#157](https://github.com/libp2p/js-libp2p-interfaces/issues/157)) ([7233c44](https://github.com/libp2p/js-libp2p-interfaces/commit/7233c4438479dff56a682f45209ef7a938d63857)) - -## [@libp2p/pubsub-v1.0.5](https://github.com/libp2p/js-libp2p-interfaces/compare/@libp2p/pubsub-v1.0.4...@libp2p/pubsub-v1.0.5) (2022-01-15) - - -### Bug Fixes - -* remove abort controller dep ([#151](https://github.com/libp2p/js-libp2p-interfaces/issues/151)) ([518bce1](https://github.com/libp2p/js-libp2p-interfaces/commit/518bce1f9bd1f8b2922338e0c65c9934af7da3af)) - -## [@libp2p/pubsub-v1.0.4](https://github.com/libp2p/js-libp2p-interfaces/compare/@libp2p/pubsub-v1.0.3...@libp2p/pubsub-v1.0.4) (2022-01-15) - - -### Trivial Changes - -* update project config ([#149](https://github.com/libp2p/js-libp2p-interfaces/issues/149)) ([6eb8556](https://github.com/libp2p/js-libp2p-interfaces/commit/6eb85562c0da167d222808da10a7914daf12970b)) - -## [@libp2p/pubsub-v1.0.3](https://github.com/libp2p/js-libp2p-interfaces/compare/@libp2p/pubsub-v1.0.2...@libp2p/pubsub-v1.0.3) (2022-01-14) - - -### Bug Fixes - -* update it-* deps to ts versions ([#148](https://github.com/libp2p/js-libp2p-interfaces/issues/148)) ([7a6fdd7](https://github.com/libp2p/js-libp2p-interfaces/commit/7a6fdd7622ce2870b89dbb849ab421d0dd714b43)) - -## [@libp2p/pubsub-v1.0.2](https://github.com/libp2p/js-libp2p-interfaces/compare/@libp2p/pubsub-v1.0.1...@libp2p/pubsub-v1.0.2) (2022-01-08) - - -### Trivial Changes - -* add semantic release config ([#141](https://github.com/libp2p/js-libp2p-interfaces/issues/141)) ([5f0de59](https://github.com/libp2p/js-libp2p-interfaces/commit/5f0de59136b6343d2411abb2d6a4dd2cd0b7efe4)) -* update package versions 
([#140](https://github.com/libp2p/js-libp2p-interfaces/issues/140)) ([cd844f6](https://github.com/libp2p/js-libp2p-interfaces/commit/cd844f6e39f4ee50d006e86eac8dadf696900eb5)) - -# Change Log - -All notable changes to this project will be documented in this file. -See [Conventional Commits](https://conventionalcommits.org) for commit guidelines. - -# 0.2.0 (2022-01-04) - - -### chore - -* update libp2p-crypto and peer-id ([c711e8b](https://github.com/libp2p/js-libp2p-interfaces/commit/c711e8bd4d606f6974b13fad2eeb723f93cebb87)) - - -### Features - -* add auto-publish ([7aede5d](https://github.com/libp2p/js-libp2p-interfaces/commit/7aede5df39ea6b5f243348ec9a212b3e33c16a81)) -* simpler peer id ([#117](https://github.com/libp2p/js-libp2p-interfaces/issues/117)) ([fa2c4f5](https://github.com/libp2p/js-libp2p-interfaces/commit/fa2c4f5be74a5cfc11489771881e57b4e53bf174)) -* split out code, convert to typescript ([#111](https://github.com/libp2p/js-libp2p-interfaces/issues/111)) ([e174bba](https://github.com/libp2p/js-libp2p-interfaces/commit/e174bba889388269b806643c79a6b53c8d6a0f8c)), closes [#110](https://github.com/libp2p/js-libp2p-interfaces/issues/110) [#101](https://github.com/libp2p/js-libp2p-interfaces/issues/101) -* update package names ([#133](https://github.com/libp2p/js-libp2p-interfaces/issues/133)) ([337adc9](https://github.com/libp2p/js-libp2p-interfaces/commit/337adc9a9bc0278bdae8cbce9c57d07a83c8b5c2)) - - -### BREAKING CHANGES - -* requires node 15+ -* not all fields from concrete classes have been added to the interfaces, some adjustment may be necessary as this gets rolled out - - - - - -## [0.9.1](https://github.com/libp2p/js-libp2p-interfaces/compare/libp2p-pubsub@0.9.0...libp2p-pubsub@0.9.1) (2022-01-02) - -**Note:** Version bump only for package libp2p-pubsub - - - - - -# [0.9.0](https://github.com/libp2p/js-libp2p-interfaces/compare/libp2p-pubsub@0.8.0...libp2p-pubsub@0.9.0) (2022-01-02) - - -### Features - -* simpler peer id 
([#117](https://github.com/libp2p/js-libp2p-interfaces/issues/117)) ([fa2c4f5](https://github.com/libp2p/js-libp2p-interfaces/commit/fa2c4f5be74a5cfc11489771881e57b4e53bf174)) - - - - - -# [0.8.0](https://github.com/libp2p/js-libp2p-interfaces/compare/libp2p-pubsub@0.7.0...libp2p-pubsub@0.8.0) (2021-12-02) - - -### chore - -* update libp2p-crypto and peer-id ([c711e8b](https://github.com/libp2p/js-libp2p-interfaces/commit/c711e8bd4d606f6974b13fad2eeb723f93cebb87)) - - -### BREAKING CHANGES - -* requires node 15+ - - - - - -# 0.7.0 (2021-11-22) - - -### Features - -* split out code, convert to typescript ([#111](https://github.com/libp2p/js-libp2p-interfaces/issues/111)) ([e174bba](https://github.com/libp2p/js-libp2p-interfaces/commit/e174bba889388269b806643c79a6b53c8d6a0f8c)), closes [#110](https://github.com/libp2p/js-libp2p-interfaces/issues/110) [#101](https://github.com/libp2p/js-libp2p-interfaces/issues/101) - - -### BREAKING CHANGES - -* not all fields from concrete classes have been added to the interfaces, some adjustment may be necessary as this gets rolled out diff --git a/packages/pubsub/CODE_OF_CONDUCT.md b/packages/pubsub/CODE_OF_CONDUCT.md deleted file mode 100644 index 6b0fa54c54..0000000000 --- a/packages/pubsub/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,3 +0,0 @@ -# Contributor Code of Conduct - -This project follows the [`IPFS Community Code of Conduct`](https://github.com/ipfs/community/blob/master/code-of-conduct.md) diff --git a/packages/pubsub/LICENSE-APACHE b/packages/pubsub/LICENSE-APACHE deleted file mode 100644 index b09cd7856d..0000000000 --- a/packages/pubsub/LICENSE-APACHE +++ /dev/null @@ -1,201 +0,0 @@ -Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. 
- - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of 
the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/packages/pubsub/LICENSE-MIT b/packages/pubsub/LICENSE-MIT deleted file mode 100644 index 72dc60d84b..0000000000 --- a/packages/pubsub/LICENSE-MIT +++ /dev/null @@ -1,19 +0,0 @@ -The MIT License (MIT) - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/packages/pubsub/README.md b/packages/pubsub/README.md deleted file mode 100644 index e25085fe3d..0000000000 --- a/packages/pubsub/README.md +++ /dev/null @@ -1,82 +0,0 @@ -# @libp2p/pubsub - -[![libp2p.io](https://img.shields.io/badge/project-libp2p-yellow.svg?style=flat-square)](http://libp2p.io/) -[![Discuss](https://img.shields.io/discourse/https/discuss.libp2p.io/posts.svg?style=flat-square)](https://discuss.libp2p.io) -[![codecov](https://img.shields.io/codecov/c/github/libp2p/js-libp2p.svg?style=flat-square)](https://codecov.io/gh/libp2p/js-libp2p) -[![CI](https://img.shields.io/github/actions/workflow/status/libp2p/js-libp2p/main.yml?branch=main\&style=flat-square)](https://github.com/libp2p/js-libp2p/actions/workflows/main.yml?query=branch%3Amain) - -> libp2p pubsub base class - -# About - - - -A set of components to be extended in order to create a pubsub implementation. 
- -## Example - -```TypeScript -import { PubSubBaseProtocol } from '@libp2p/pubsub' -import type { PubSubRPC, PublishResult, PubSubRPCMessage, PeerId, Message } from '@libp2p/interface' -import type { Uint8ArrayList } from 'uint8arraylist' - -class MyPubsubImplementation extends PubSubBaseProtocol { - decodeRpc (bytes: Uint8Array | Uint8ArrayList): PubSubRPC { - throw new Error('Not implemented') - } - - encodeRpc (rpc: PubSubRPC): Uint8Array { - throw new Error('Not implemented') - } - - encodeMessage (rpc: PubSubRPCMessage): Uint8Array { - throw new Error('Not implemented') - } - - async publishMessage (sender: PeerId, message: Message): Promise { - throw new Error('Not implemented') - } -} -``` - -# Install - -```console -$ npm i @libp2p/pubsub -``` - -## Browser ` -``` - -# API Docs - -- - -# License - -Licensed under either of - -- Apache 2.0, ([LICENSE-APACHE](https://github.com/libp2p/js-libp2p/blob/main/packages/pubsub/LICENSE-APACHE) / ) -- MIT ([LICENSE-MIT](https://github.com/libp2p/js-libp2p/blob/main/packages/pubsub/LICENSE-MIT) / ) - -# Contribution - -Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any additional terms or conditions. 
diff --git a/packages/pubsub/package.json b/packages/pubsub/package.json deleted file mode 100644 index c83da020aa..0000000000 --- a/packages/pubsub/package.json +++ /dev/null @@ -1,106 +0,0 @@ -{ - "name": "@libp2p/pubsub", - "version": "10.1.18", - "description": "libp2p pubsub base class", - "license": "Apache-2.0 OR MIT", - "homepage": "https://github.com/libp2p/js-libp2p/tree/main/packages/pubsub#readme", - "repository": { - "type": "git", - "url": "git+https://github.com/libp2p/js-libp2p.git" - }, - "bugs": { - "url": "https://github.com/libp2p/js-libp2p/issues" - }, - "publishConfig": { - "access": "public", - "provenance": true - }, - "keywords": [ - "interface", - "libp2p" - ], - "type": "module", - "types": "./dist/src/index.d.ts", - "typesVersions": { - "*": { - "*": [ - "*", - "dist/*", - "dist/src/*", - "dist/src/*/index" - ], - "src/*": [ - "*", - "dist/*", - "dist/src/*", - "dist/src/*/index" - ] - } - }, - "files": [ - "src", - "dist", - "!dist/test", - "!**/*.tsbuildinfo" - ], - "exports": { - ".": { - "types": "./dist/src/index.d.ts", - "import": "./dist/src/index.js" - }, - "./peer-streams": { - "types": "./dist/src/peer-streams.d.ts", - "import": "./dist/src/peer-streams.js" - }, - "./utils": { - "types": "./dist/src/utils.d.ts", - "import": "./dist/src/utils.js" - } - }, - "scripts": { - "clean": "aegir clean", - "lint": "aegir lint", - "dep-check": "aegir dep-check", - "doc-check": "aegir doc-check", - "build": "aegir build", - "generate": "protons test/message/rpc.proto", - "test": "aegir test", - "test:chrome": "aegir test -t browser --cov", - "test:chrome-webworker": "aegir test -t webworker", - "test:firefox": "aegir test -t browser -- --browser firefox", - "test:firefox-webworker": "aegir test -t webworker -- --browser firefox", - "test:node": "aegir test -t node --cov", - "test:electron-main": "aegir test -t electron-main" - }, - "dependencies": { - "@libp2p/crypto": "^5.1.8", - "@libp2p/interface": "^2.11.0", - 
"@libp2p/interface-internal": "^2.3.19", - "@libp2p/peer-collections": "^6.0.35", - "@libp2p/peer-id": "^5.1.9", - "@libp2p/utils": "^6.7.2", - "it-length-prefixed": "^10.0.1", - "it-pipe": "^3.0.1", - "it-pushable": "^3.2.3", - "main-event": "^1.0.1", - "multiformats": "^13.4.0", - "p-event": "^6.0.1", - "p-queue": "^8.1.0", - "uint8arraylist": "^2.4.8", - "uint8arrays": "^5.1.0" - }, - "devDependencies": { - "@libp2p/logger": "^5.2.0", - "@types/sinon": "^17.0.4", - "aegir": "^47.0.22", - "delay": "^6.0.0", - "it-all": "^3.0.9", - "p-defer": "^4.0.1", - "p-wait-for": "^5.0.2", - "protons": "^7.7.0", - "protons-runtime": "^5.6.0", - "sinon": "^21.0.0", - "sinon-ts": "^2.0.0" - }, - "sideEffects": false -} diff --git a/packages/pubsub/test/instance.spec.ts b/packages/pubsub/test/instance.spec.ts deleted file mode 100644 index dfe4c38d6f..0000000000 --- a/packages/pubsub/test/instance.spec.ts +++ /dev/null @@ -1,55 +0,0 @@ -import { generateKeyPair } from '@libp2p/crypto/keys' -import { defaultLogger } from '@libp2p/logger' -import { peerIdFromPrivateKey } from '@libp2p/peer-id' -import { expect } from 'aegir/chai' -import { PubSubBaseProtocol } from '../src/index.js' -import { MockRegistrar } from './utils/index.js' -import type { PublishResult, PubSubRPC, PubSubRPCMessage } from '@libp2p/interface' -import type { Uint8ArrayList } from 'uint8arraylist' - -class PubsubProtocol extends PubSubBaseProtocol { - decodeRpc (bytes: Uint8Array): PubSubRPC { - throw new Error('Method not implemented.') - } - - encodeRpc (rpc: PubSubRPC): Uint8Array { - throw new Error('Method not implemented.') - } - - decodeMessage (bytes: Uint8Array | Uint8ArrayList): PubSubRPCMessage { - throw new Error('Method not implemented.') - } - - encodeMessage (rpc: PubSubRPCMessage): Uint8Array { - throw new Error('Method not implemented.') - } - - async publishMessage (): Promise { - throw new Error('Method not implemented.') - } -} - -describe('pubsub instance', () => { - it('should throw if no 
init is provided', () => { - expect(() => { - // @ts-expect-error incorrect constructor args - new PubsubProtocol() // eslint-disable-line no-new - }).to.throw() - }) - - it('should accept valid parameters', async () => { - const privateKey = await generateKeyPair('Ed25519') - const peerId = peerIdFromPrivateKey(privateKey) - - expect(() => { - return new PubsubProtocol({ - peerId, - privateKey, - registrar: new MockRegistrar(), - logger: defaultLogger() - }, { - multicodecs: ['/pubsub/1.0.0'] - }) - }).not.to.throw() - }) -}) diff --git a/packages/pubsub/test/lifecycle.spec.ts b/packages/pubsub/test/lifecycle.spec.ts deleted file mode 100644 index 80fb23bf4b..0000000000 --- a/packages/pubsub/test/lifecycle.spec.ts +++ /dev/null @@ -1,270 +0,0 @@ -import { generateKeyPair } from '@libp2p/crypto/keys' -import { defaultLogger } from '@libp2p/logger' -import { peerIdFromPrivateKey } from '@libp2p/peer-id' -import { expect } from 'aegir/chai' -import sinon from 'sinon' -import { PubSubBaseProtocol } from '../src/index.js' -import { - PubsubImplementation, - connectionPair, - MockRegistrar -} from './utils/index.js' -import type { PeerId, PublishResult, PubSubRPC, PubSubRPCMessage } from '@libp2p/interface' -import type { Registrar } from '@libp2p/interface-internal' -import type { Uint8ArrayList } from 'uint8arraylist' - -class PubsubProtocol extends PubSubBaseProtocol { - decodeRpc (bytes: Uint8Array): PubSubRPC { - throw new Error('Method not implemented.') - } - - encodeRpc (rpc: PubSubRPC): Uint8Array { - throw new Error('Method not implemented.') - } - - decodeMessage (bytes: Uint8Array | Uint8ArrayList): PubSubRPCMessage { - throw new Error('Method not implemented.') - } - - encodeMessage (rpc: PubSubRPCMessage): Uint8Array { - throw new Error('Method not implemented.') - } - - async publishMessage (): Promise { - throw new Error('Method not implemented.') - } -} - -describe('pubsub base life cycle', () => { - describe('should start and stop properly', () => { - 
let pubsub: PubsubProtocol - let sinonMockRegistrar: Registrar - - beforeEach(async () => { - const privateKey = await generateKeyPair('Ed25519') - const peerId = peerIdFromPrivateKey(privateKey) - - // @ts-expect-error incomplete implementation - sinonMockRegistrar = { - handle: sinon.stub(), - unhandle: sinon.stub(), - register: sinon.stub().returns(`id-${Math.random()}`), - unregister: sinon.stub() - } - - pubsub = new PubsubProtocol({ - peerId, - privateKey, - registrar: sinonMockRegistrar, - logger: defaultLogger() - }, { - multicodecs: ['/pubsub/1.0.0'] - }) - - expect(pubsub.peers.size).to.be.eql(0) - }) - - afterEach(() => { - sinon.restore() - }) - - it('should be able to start and stop', async () => { - await pubsub.start() - expect(sinonMockRegistrar.handle).to.have.property('calledOnce', true) - expect(sinonMockRegistrar.register).to.have.property('calledOnce', true) - - await pubsub.stop() - expect(sinonMockRegistrar.unhandle).to.have.property('calledOnce', true) - expect(sinonMockRegistrar.unregister).to.have.property('calledOnce', true) - }) - - it('starting should not throw if already started', async () => { - await pubsub.start() - await pubsub.start() - expect(sinonMockRegistrar.handle).to.have.property('calledOnce', true) - expect(sinonMockRegistrar.register).to.have.property('calledOnce', true) - - await pubsub.stop() - expect(sinonMockRegistrar.unhandle).to.have.property('calledOnce', true) - expect(sinonMockRegistrar.unregister).to.have.property('calledOnce', true) - }) - - it('stopping should not throw if not started', async () => { - await pubsub.stop() - expect(sinonMockRegistrar.handle).to.have.property('calledOnce', false) - expect(sinonMockRegistrar.unhandle).to.have.property('calledOnce', false) - expect(sinonMockRegistrar.register).to.have.property('calledOnce', false) - expect(sinonMockRegistrar.unregister).to.have.property('calledOnce', false) - }) - }) - - describe('should be able to register two nodes', () => { - const protocol = 
'/pubsub/1.0.0' - let pubsubA: PubsubImplementation, pubsubB: PubsubImplementation - let peerIdA: PeerId, peerIdB: PeerId - let registrarA: MockRegistrar - let registrarB: MockRegistrar - - // mount pubsub - beforeEach(async () => { - const privateKeyA = await generateKeyPair('Ed25519') - peerIdA = peerIdFromPrivateKey(privateKeyA) - - const privateKeyB = await generateKeyPair('Ed25519') - peerIdB = peerIdFromPrivateKey(privateKeyB) - - registrarA = new MockRegistrar() - registrarB = new MockRegistrar() - - pubsubA = new PubsubImplementation({ - peerId: peerIdA, - privateKey: privateKeyA, - registrar: registrarA, - logger: defaultLogger() - }, { - multicodecs: [protocol] - }) - pubsubB = new PubsubImplementation({ - peerId: peerIdB, - privateKey: privateKeyB, - registrar: registrarB, - logger: defaultLogger() - }, { - multicodecs: [protocol] - }) - }) - - // start pubsub - beforeEach(async () => { - await Promise.all([ - pubsubA.start(), - pubsubB.start() - ]) - - expect(registrarA.getHandler(protocol)).to.be.ok() - expect(registrarB.getHandler(protocol)).to.be.ok() - }) - - afterEach(async () => { - sinon.restore() - - await Promise.all([ - pubsubA.stop(), - pubsubB.stop() - ]) - }) - - it('should handle onConnect as expected', async () => { - const topologyA = registrarA.getTopologies(protocol)[0] - const handlerB = registrarB.getHandler(protocol) - - if (topologyA == null || handlerB == null) { - throw new Error(`No handler registered for ${protocol}`) - } - - const [c0, c1] = await connectionPair(peerIdA, peerIdB) - - // Notify peers of connection - topologyA.onConnect?.(peerIdB, c0) - await handlerB.handler(await c1.newStream([protocol]), c1) - - expect(pubsubA.peers.size).to.equal(1) - expect(pubsubB.peers.size).to.equal(1) - }) - - it('should use the latest connection if onConnect is called more than once', async () => { - const topologyA = registrarA.getTopologies(protocol)[0] - const handlerB = registrarB.getHandler(protocol) - - if (topologyA == null || 
handlerB == null) { - throw new Error(`No handler registered for ${protocol}`) - } - - // Notify peers of connection - const [c0, c1] = await connectionPair(peerIdA, peerIdB) - const [c2] = await connectionPair(peerIdA, peerIdB) - - sinon.spy(c0, 'newStream') - - topologyA.onConnect?.(peerIdB, c0) - handlerB.handler(await c1.newStream(protocol), c1) - expect(c0.newStream).to.have.property('callCount', 1) - - // @ts-expect-error _removePeer is a protected method - sinon.spy(pubsubA, '_removePeer') - - sinon.spy(c2, 'newStream') - - await topologyA?.onConnect?.(peerIdB, c2) - // newStream invocation takes place in a resolved promise - expect(c2.newStream).to.have.property('callCount', 1) - - // @ts-expect-error _removePeer is a protected method - expect(pubsubA._removePeer).to.have.property('callCount', 0) - - // Verify the first stream was closed - // @ts-expect-error .returnValues is a sinon property - const { stream: firstStream } = await c0.newStream.returnValues[0] - try { - await firstStream.sink(['test']) - } catch (err: any) { - expect(err).to.exist() - return - } - expect.fail('original stream should have ended') - }) - - it('should handle newStream errors in onConnect', async () => { - const topologyA = registrarA.getTopologies(protocol)[0] - const handlerB = registrarB.getHandler(protocol) - - if (topologyA == null || handlerB == null) { - throw new Error(`No handler registered for ${protocol}`) - } - - // Notify peers of connection - const [c0, c1] = await connectionPair(peerIdA, peerIdB) - const error = new Error('new stream error') - sinon.stub(c0, 'newStream').throws(error) - - topologyA.onConnect?.(peerIdB, c0) - handlerB.handler(await c1.newStream(protocol), c1) - - expect(c0.newStream).to.have.property('callCount', 1) - }) - - it('should handle onDisconnect as expected', async () => { - const topologyA = registrarA.getTopologies(protocol)[0] - const topologyB = registrarB.getTopologies(protocol)[0] - const handlerB = registrarB.getHandler(protocol) 
- - if (topologyA == null || handlerB == null) { - throw new Error(`No handler registered for ${protocol}`) - } - - // Notify peers of connection - const [c0, c1] = await connectionPair(peerIdA, peerIdB) - - topologyA.onConnect?.(peerIdB, c0) - await handlerB.handler(await c1.newStream(protocol), c1) - - // Notify peers of disconnect - topologyA?.onDisconnect?.(peerIdB) - topologyB?.onDisconnect?.(peerIdA) - - expect(pubsubA.peers.size).to.equal(0) - expect(pubsubB.peers.size).to.equal(0) - }) - - it('should handle onDisconnect for unknown peers', () => { - const topologyA = registrarA.getTopologies(protocol)[0] - - expect(pubsubA.peers.size).to.be.eql(0) - - // Notice peers of disconnect - topologyA?.onDisconnect?.(peerIdB) - - expect(pubsubA.peers.size).to.equal(0) - }) - }) -}) diff --git a/packages/pubsub/test/message/rpc.proto b/packages/pubsub/test/message/rpc.proto deleted file mode 100644 index 5e32c35a5c..0000000000 --- a/packages/pubsub/test/message/rpc.proto +++ /dev/null @@ -1,52 +0,0 @@ -syntax = "proto3"; - -message RPC { - repeated SubOpts subscriptions = 1; - repeated Message messages = 2; - optional ControlMessage control = 3; - - message SubOpts { - optional bool subscribe = 1; // subscribe or unsubcribe - optional string topic = 2; - } - - message Message { - optional bytes from = 1; - optional bytes data = 2; - optional bytes seqno = 3; - optional string topic = 4; - optional bytes signature = 5; - optional bytes key = 6; - } -} - -message ControlMessage { - repeated ControlIHave ihave = 1; - repeated ControlIWant iwant = 2; - repeated ControlGraft graft = 3; - repeated ControlPrune prune = 4; -} - -message ControlIHave { - optional string topic = 1; - repeated bytes messageIDs = 2; -} - -message ControlIWant { - repeated bytes messageIDs = 1; -} - -message ControlGraft { - optional string topic = 1; -} - -message ControlPrune { - optional string topic = 1; - repeated PeerInfo peers = 2; - optional uint64 backoff = 3; -} - -message PeerInfo { - 
optional bytes peerID = 1; - optional bytes signedPeerRecord = 2; -} diff --git a/packages/pubsub/test/message/rpc.ts b/packages/pubsub/test/message/rpc.ts deleted file mode 100644 index 8de6cab185..0000000000 --- a/packages/pubsub/test/message/rpc.ts +++ /dev/null @@ -1,761 +0,0 @@ -/* eslint-disable complexity */ - -import { decodeMessage, encodeMessage, MaxLengthError, message } from 'protons-runtime' -import type { Codec, DecodeOptions } from 'protons-runtime' -import type { Uint8ArrayList } from 'uint8arraylist' - -export interface RPC { - subscriptions: RPC.SubOpts[] - messages: RPC.Message[] - control?: ControlMessage -} - -export namespace RPC { - export interface SubOpts { - subscribe?: boolean - topic?: string - } - - export namespace SubOpts { - let _codec: Codec - - export const codec = (): Codec => { - if (_codec == null) { - _codec = message((obj, w, opts = {}) => { - if (opts.lengthDelimited !== false) { - w.fork() - } - - if (obj.subscribe != null) { - w.uint32(8) - w.bool(obj.subscribe) - } - - if (obj.topic != null) { - w.uint32(18) - w.string(obj.topic) - } - - if (opts.lengthDelimited !== false) { - w.ldelim() - } - }, (reader, length, opts = {}) => { - const obj: any = {} - - const end = length == null ? 
reader.len : reader.pos + length - - while (reader.pos < end) { - const tag = reader.uint32() - - switch (tag >>> 3) { - case 1: { - obj.subscribe = reader.bool() - break - } - case 2: { - obj.topic = reader.string() - break - } - default: { - reader.skipType(tag & 7) - break - } - } - } - - return obj - }) - } - - return _codec - } - - export const encode = (obj: Partial): Uint8Array => { - return encodeMessage(obj, SubOpts.codec()) - } - - export const decode = (buf: Uint8Array | Uint8ArrayList, opts?: DecodeOptions): SubOpts => { - return decodeMessage(buf, SubOpts.codec(), opts) - } - } - - export interface Message { - from?: Uint8Array - data?: Uint8Array - seqno?: Uint8Array - topic?: string - signature?: Uint8Array - key?: Uint8Array - } - - export namespace Message { - let _codec: Codec - - export const codec = (): Codec => { - if (_codec == null) { - _codec = message((obj, w, opts = {}) => { - if (opts.lengthDelimited !== false) { - w.fork() - } - - if (obj.from != null) { - w.uint32(10) - w.bytes(obj.from) - } - - if (obj.data != null) { - w.uint32(18) - w.bytes(obj.data) - } - - if (obj.seqno != null) { - w.uint32(26) - w.bytes(obj.seqno) - } - - if (obj.topic != null) { - w.uint32(34) - w.string(obj.topic) - } - - if (obj.signature != null) { - w.uint32(42) - w.bytes(obj.signature) - } - - if (obj.key != null) { - w.uint32(50) - w.bytes(obj.key) - } - - if (opts.lengthDelimited !== false) { - w.ldelim() - } - }, (reader, length, opts = {}) => { - const obj: any = {} - - const end = length == null ? 
reader.len : reader.pos + length - - while (reader.pos < end) { - const tag = reader.uint32() - - switch (tag >>> 3) { - case 1: { - obj.from = reader.bytes() - break - } - case 2: { - obj.data = reader.bytes() - break - } - case 3: { - obj.seqno = reader.bytes() - break - } - case 4: { - obj.topic = reader.string() - break - } - case 5: { - obj.signature = reader.bytes() - break - } - case 6: { - obj.key = reader.bytes() - break - } - default: { - reader.skipType(tag & 7) - break - } - } - } - - return obj - }) - } - - return _codec - } - - export const encode = (obj: Partial): Uint8Array => { - return encodeMessage(obj, Message.codec()) - } - - export const decode = (buf: Uint8Array | Uint8ArrayList, opts?: DecodeOptions): Message => { - return decodeMessage(buf, Message.codec(), opts) - } - } - - let _codec: Codec - - export const codec = (): Codec => { - if (_codec == null) { - _codec = message((obj, w, opts = {}) => { - if (opts.lengthDelimited !== false) { - w.fork() - } - - if (obj.subscriptions != null) { - for (const value of obj.subscriptions) { - w.uint32(10) - RPC.SubOpts.codec().encode(value, w) - } - } - - if (obj.messages != null) { - for (const value of obj.messages) { - w.uint32(18) - RPC.Message.codec().encode(value, w) - } - } - - if (obj.control != null) { - w.uint32(26) - ControlMessage.codec().encode(obj.control, w) - } - - if (opts.lengthDelimited !== false) { - w.ldelim() - } - }, (reader, length, opts = {}) => { - const obj: any = { - subscriptions: [], - messages: [] - } - - const end = length == null ? 
reader.len : reader.pos + length - - while (reader.pos < end) { - const tag = reader.uint32() - - switch (tag >>> 3) { - case 1: { - if (opts.limits?.subscriptions != null && obj.subscriptions.length === opts.limits.subscriptions) { - throw new MaxLengthError('Decode error - map field "subscriptions" had too many elements') - } - - obj.subscriptions.push(RPC.SubOpts.codec().decode(reader, reader.uint32(), { - limits: opts.limits?.subscriptions$ - })) - break - } - case 2: { - if (opts.limits?.messages != null && obj.messages.length === opts.limits.messages) { - throw new MaxLengthError('Decode error - map field "messages" had too many elements') - } - - obj.messages.push(RPC.Message.codec().decode(reader, reader.uint32(), { - limits: opts.limits?.messages$ - })) - break - } - case 3: { - obj.control = ControlMessage.codec().decode(reader, reader.uint32(), { - limits: opts.limits?.control - }) - break - } - default: { - reader.skipType(tag & 7) - break - } - } - } - - return obj - }) - } - - return _codec - } - - export const encode = (obj: Partial): Uint8Array => { - return encodeMessage(obj, RPC.codec()) - } - - export const decode = (buf: Uint8Array | Uint8ArrayList, opts?: DecodeOptions): RPC => { - return decodeMessage(buf, RPC.codec(), opts) - } -} - -export interface ControlMessage { - ihave: ControlIHave[] - iwant: ControlIWant[] - graft: ControlGraft[] - prune: ControlPrune[] -} - -export namespace ControlMessage { - let _codec: Codec - - export const codec = (): Codec => { - if (_codec == null) { - _codec = message((obj, w, opts = {}) => { - if (opts.lengthDelimited !== false) { - w.fork() - } - - if (obj.ihave != null) { - for (const value of obj.ihave) { - w.uint32(10) - ControlIHave.codec().encode(value, w) - } - } - - if (obj.iwant != null) { - for (const value of obj.iwant) { - w.uint32(18) - ControlIWant.codec().encode(value, w) - } - } - - if (obj.graft != null) { - for (const value of obj.graft) { - w.uint32(26) - ControlGraft.codec().encode(value, 
w) - } - } - - if (obj.prune != null) { - for (const value of obj.prune) { - w.uint32(34) - ControlPrune.codec().encode(value, w) - } - } - - if (opts.lengthDelimited !== false) { - w.ldelim() - } - }, (reader, length, opts = {}) => { - const obj: any = { - ihave: [], - iwant: [], - graft: [], - prune: [] - } - - const end = length == null ? reader.len : reader.pos + length - - while (reader.pos < end) { - const tag = reader.uint32() - - switch (tag >>> 3) { - case 1: { - if (opts.limits?.ihave != null && obj.ihave.length === opts.limits.ihave) { - throw new MaxLengthError('Decode error - map field "ihave" had too many elements') - } - - obj.ihave.push(ControlIHave.codec().decode(reader, reader.uint32(), { - limits: opts.limits?.ihave$ - })) - break - } - case 2: { - if (opts.limits?.iwant != null && obj.iwant.length === opts.limits.iwant) { - throw new MaxLengthError('Decode error - map field "iwant" had too many elements') - } - - obj.iwant.push(ControlIWant.codec().decode(reader, reader.uint32(), { - limits: opts.limits?.iwant$ - })) - break - } - case 3: { - if (opts.limits?.graft != null && obj.graft.length === opts.limits.graft) { - throw new MaxLengthError('Decode error - map field "graft" had too many elements') - } - - obj.graft.push(ControlGraft.codec().decode(reader, reader.uint32(), { - limits: opts.limits?.graft$ - })) - break - } - case 4: { - if (opts.limits?.prune != null && obj.prune.length === opts.limits.prune) { - throw new MaxLengthError('Decode error - map field "prune" had too many elements') - } - - obj.prune.push(ControlPrune.codec().decode(reader, reader.uint32(), { - limits: opts.limits?.prune$ - })) - break - } - default: { - reader.skipType(tag & 7) - break - } - } - } - - return obj - }) - } - - return _codec - } - - export const encode = (obj: Partial): Uint8Array => { - return encodeMessage(obj, ControlMessage.codec()) - } - - export const decode = (buf: Uint8Array | Uint8ArrayList, opts?: DecodeOptions): ControlMessage => { - return 
decodeMessage(buf, ControlMessage.codec(), opts) - } -} - -export interface ControlIHave { - topic?: string - messageIDs: Uint8Array[] -} - -export namespace ControlIHave { - let _codec: Codec - - export const codec = (): Codec => { - if (_codec == null) { - _codec = message((obj, w, opts = {}) => { - if (opts.lengthDelimited !== false) { - w.fork() - } - - if (obj.topic != null) { - w.uint32(10) - w.string(obj.topic) - } - - if (obj.messageIDs != null) { - for (const value of obj.messageIDs) { - w.uint32(18) - w.bytes(value) - } - } - - if (opts.lengthDelimited !== false) { - w.ldelim() - } - }, (reader, length, opts = {}) => { - const obj: any = { - messageIDs: [] - } - - const end = length == null ? reader.len : reader.pos + length - - while (reader.pos < end) { - const tag = reader.uint32() - - switch (tag >>> 3) { - case 1: { - obj.topic = reader.string() - break - } - case 2: { - if (opts.limits?.messageIDs != null && obj.messageIDs.length === opts.limits.messageIDs) { - throw new MaxLengthError('Decode error - map field "messageIDs" had too many elements') - } - - obj.messageIDs.push(reader.bytes()) - break - } - default: { - reader.skipType(tag & 7) - break - } - } - } - - return obj - }) - } - - return _codec - } - - export const encode = (obj: Partial): Uint8Array => { - return encodeMessage(obj, ControlIHave.codec()) - } - - export const decode = (buf: Uint8Array | Uint8ArrayList, opts?: DecodeOptions): ControlIHave => { - return decodeMessage(buf, ControlIHave.codec(), opts) - } -} - -export interface ControlIWant { - messageIDs: Uint8Array[] -} - -export namespace ControlIWant { - let _codec: Codec - - export const codec = (): Codec => { - if (_codec == null) { - _codec = message((obj, w, opts = {}) => { - if (opts.lengthDelimited !== false) { - w.fork() - } - - if (obj.messageIDs != null) { - for (const value of obj.messageIDs) { - w.uint32(10) - w.bytes(value) - } - } - - if (opts.lengthDelimited !== false) { - w.ldelim() - } - }, (reader, length, 
opts = {}) => { - const obj: any = { - messageIDs: [] - } - - const end = length == null ? reader.len : reader.pos + length - - while (reader.pos < end) { - const tag = reader.uint32() - - switch (tag >>> 3) { - case 1: { - if (opts.limits?.messageIDs != null && obj.messageIDs.length === opts.limits.messageIDs) { - throw new MaxLengthError('Decode error - map field "messageIDs" had too many elements') - } - - obj.messageIDs.push(reader.bytes()) - break - } - default: { - reader.skipType(tag & 7) - break - } - } - } - - return obj - }) - } - - return _codec - } - - export const encode = (obj: Partial): Uint8Array => { - return encodeMessage(obj, ControlIWant.codec()) - } - - export const decode = (buf: Uint8Array | Uint8ArrayList, opts?: DecodeOptions): ControlIWant => { - return decodeMessage(buf, ControlIWant.codec(), opts) - } -} - -export interface ControlGraft { - topic?: string -} - -export namespace ControlGraft { - let _codec: Codec - - export const codec = (): Codec => { - if (_codec == null) { - _codec = message((obj, w, opts = {}) => { - if (opts.lengthDelimited !== false) { - w.fork() - } - - if (obj.topic != null) { - w.uint32(10) - w.string(obj.topic) - } - - if (opts.lengthDelimited !== false) { - w.ldelim() - } - }, (reader, length, opts = {}) => { - const obj: any = {} - - const end = length == null ? 
reader.len : reader.pos + length - - while (reader.pos < end) { - const tag = reader.uint32() - - switch (tag >>> 3) { - case 1: { - obj.topic = reader.string() - break - } - default: { - reader.skipType(tag & 7) - break - } - } - } - - return obj - }) - } - - return _codec - } - - export const encode = (obj: Partial): Uint8Array => { - return encodeMessage(obj, ControlGraft.codec()) - } - - export const decode = (buf: Uint8Array | Uint8ArrayList, opts?: DecodeOptions): ControlGraft => { - return decodeMessage(buf, ControlGraft.codec(), opts) - } -} - -export interface ControlPrune { - topic?: string - peers: PeerInfo[] - backoff?: bigint -} - -export namespace ControlPrune { - let _codec: Codec - - export const codec = (): Codec => { - if (_codec == null) { - _codec = message((obj, w, opts = {}) => { - if (opts.lengthDelimited !== false) { - w.fork() - } - - if (obj.topic != null) { - w.uint32(10) - w.string(obj.topic) - } - - if (obj.peers != null) { - for (const value of obj.peers) { - w.uint32(18) - PeerInfo.codec().encode(value, w) - } - } - - if (obj.backoff != null) { - w.uint32(24) - w.uint64(obj.backoff) - } - - if (opts.lengthDelimited !== false) { - w.ldelim() - } - }, (reader, length, opts = {}) => { - const obj: any = { - peers: [] - } - - const end = length == null ? 
reader.len : reader.pos + length - - while (reader.pos < end) { - const tag = reader.uint32() - - switch (tag >>> 3) { - case 1: { - obj.topic = reader.string() - break - } - case 2: { - if (opts.limits?.peers != null && obj.peers.length === opts.limits.peers) { - throw new MaxLengthError('Decode error - map field "peers" had too many elements') - } - - obj.peers.push(PeerInfo.codec().decode(reader, reader.uint32(), { - limits: opts.limits?.peers$ - })) - break - } - case 3: { - obj.backoff = reader.uint64() - break - } - default: { - reader.skipType(tag & 7) - break - } - } - } - - return obj - }) - } - - return _codec - } - - export const encode = (obj: Partial): Uint8Array => { - return encodeMessage(obj, ControlPrune.codec()) - } - - export const decode = (buf: Uint8Array | Uint8ArrayList, opts?: DecodeOptions): ControlPrune => { - return decodeMessage(buf, ControlPrune.codec(), opts) - } -} - -export interface PeerInfo { - peerID?: Uint8Array - signedPeerRecord?: Uint8Array -} - -export namespace PeerInfo { - let _codec: Codec - - export const codec = (): Codec => { - if (_codec == null) { - _codec = message((obj, w, opts = {}) => { - if (opts.lengthDelimited !== false) { - w.fork() - } - - if (obj.peerID != null) { - w.uint32(10) - w.bytes(obj.peerID) - } - - if (obj.signedPeerRecord != null) { - w.uint32(18) - w.bytes(obj.signedPeerRecord) - } - - if (opts.lengthDelimited !== false) { - w.ldelim() - } - }, (reader, length, opts = {}) => { - const obj: any = {} - - const end = length == null ? 
reader.len : reader.pos + length - - while (reader.pos < end) { - const tag = reader.uint32() - - switch (tag >>> 3) { - case 1: { - obj.peerID = reader.bytes() - break - } - case 2: { - obj.signedPeerRecord = reader.bytes() - break - } - default: { - reader.skipType(tag & 7) - break - } - } - } - - return obj - }) - } - - return _codec - } - - export const encode = (obj: Partial): Uint8Array => { - return encodeMessage(obj, PeerInfo.codec()) - } - - export const decode = (buf: Uint8Array | Uint8ArrayList, opts?: DecodeOptions): PeerInfo => { - return decodeMessage(buf, PeerInfo.codec(), opts) - } -} diff --git a/packages/pubsub/test/utils/index.ts b/packages/pubsub/test/utils/index.ts deleted file mode 100644 index 9419556f73..0000000000 --- a/packages/pubsub/test/utils/index.ts +++ /dev/null @@ -1,154 +0,0 @@ -import { streamPair } from '@libp2p/utils' -import { stubInterface } from 'sinon-ts' -import { PubSubBaseProtocol } from '../../src/index.js' -import { RPC } from '../message/rpc.js' -import type { Connection, PublishResult, PubSubRPC, PubSubRPCMessage, Topology, StreamHandler, StreamHandlerRecord, PeerId, StreamMiddleware } from '@libp2p/interface' -import type { Registrar } from '@libp2p/interface-internal' - -export class PubsubImplementation extends PubSubBaseProtocol { - async publishMessage (): Promise { - return { - recipients: [] - } - } - - decodeRpc (bytes: Uint8Array): PubSubRPC { - return RPC.decode(bytes) - } - - encodeRpc (rpc: PubSubRPC): Uint8Array { - return RPC.encode(rpc) - } - - decodeMessage (bytes: Uint8Array): PubSubRPCMessage { - return RPC.Message.decode(bytes) - } - - encodeMessage (rpc: PubSubRPCMessage): Uint8Array { - return RPC.Message.encode(rpc) - } -} - -export class MockRegistrar implements Registrar { - private readonly topologies = new Map() - private readonly handlers = new Map() - private readonly middleware = new Map() - - getProtocols (): string[] { - const protocols = new Set() - - for (const topology of 
this.topologies.values()) { - topology.protocols.forEach(protocol => protocols.add(protocol)) - } - - for (const protocol of this.handlers.keys()) { - protocols.add(protocol) - } - - return Array.from(protocols).sort() - } - - async handle (protocols: string | string[], handler: StreamHandler): Promise { - const protocolList = Array.isArray(protocols) ? protocols : [protocols] - - for (const protocol of protocolList) { - if (this.handlers.has(protocol)) { - throw new Error(`Handler already registered for protocol ${protocol}`) - } - - this.handlers.set(protocol, handler) - } - } - - async unhandle (protocols: string | string[]): Promise { - const protocolList = Array.isArray(protocols) ? protocols : [protocols] - - protocolList.forEach(protocol => { - this.handlers.delete(protocol) - }) - } - - getHandler (protocol: string): StreamHandlerRecord { - const handler = this.handlers.get(protocol) - - if (handler == null) { - throw new Error(`No handler registered for protocol ${protocol}`) - } - - return { handler, options: {} } - } - - async register (protocols: string | string[], topology: Topology): Promise { - if (!Array.isArray(protocols)) { - protocols = [protocols] - } - - const id = `topology-id-${Math.random()}` - - this.topologies.set(id, { - topology, - protocols - }) - - return id - } - - unregister (id: string | string[]): void { - if (!Array.isArray(id)) { - id = [id] - } - - id.forEach(id => this.topologies.delete(id)) - } - - getTopologies (protocol: string): Topology[] { - const output: Topology[] = [] - - for (const { topology, protocols } of this.topologies.values()) { - if (protocols.includes(protocol)) { - output.push(topology) - } - } - - if (output.length > 0) { - return output - } - - throw new Error(`No topologies registered for protocol ${protocol}`) - } - - use (protocol: string, middleware: StreamMiddleware[]): void { - this.middleware.set(protocol, middleware) - } - - unuse (protocol: string): void { - this.middleware.delete(protocol) - } - 
- getMiddleware (protocol: string): StreamMiddleware[] { - return this.middleware.get(protocol) ?? [] - } -} - -/** - * Returns two connections: - * - * 1. peerA -> peerB - * 2. peerB -> peerA - */ -export const connectionPair = async (peerA: PeerId, peerB: PeerId): Promise<[Connection, Connection]> => { - const [d0, d1] = await streamPair() - - return [ - stubInterface({ - newStream: async () => d0, - streams: [], - remotePeer: peerB - }), - stubInterface({ - newStream: async () => d1, - streams: [], - remotePeer: peerA - }) - ] -} diff --git a/packages/pubsub/tsconfig.json b/packages/pubsub/tsconfig.json deleted file mode 100644 index 81a288f81b..0000000000 --- a/packages/pubsub/tsconfig.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "extends": "aegir/src/config/tsconfig.aegir.json", - "compilerOptions": { - "outDir": "dist" - }, - "include": [ - "src", - "test" - ], - "exclude": [ - "test/message/rpc.js" - ], - "references": [ - { - "path": "../crypto" - }, - { - "path": "../interface" - }, - { - "path": "../interface-internal" - }, - { - "path": "../logger" - }, - { - "path": "../peer-collections" - }, - { - "path": "../peer-id" - }, - { - "path": "../utils" - } - ] -} diff --git a/packages/pubsub/typedoc.json b/packages/pubsub/typedoc.json deleted file mode 100644 index 2455039d81..0000000000 --- a/packages/pubsub/typedoc.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "readme": "none", - "entryPoints": [ - "./src/index.ts", - "./src/peer-streams.ts", - "./src/utils.ts" - ] -}