diff --git a/core/CHANGELOG.md b/core/CHANGELOG.md index 68b1f99cc2a..dbd46a38f07 100644 --- a/core/CHANGELOG.md +++ b/core/CHANGELOG.md @@ -7,7 +7,7 @@ - Update `Transport::dial` function signature with a `DialOpts` param and remove `Transport::dial_as_listener`: - `DialOpts` struct contains `PortUse` and `Endpoint`, - - `PortUse` allows controlling port allocation of new connections (defaults to `PortUse::Reuse`) - + - `PortUse` allows controlling port allocation of new connections (defaults to `PortUse::Reuse`) - - Add `port_use` field to `ConnectedPoint` - Set `endpoint` field in `DialOpts` to `Endpoint::Listener` to dial as a listener - Remove `Transport::address_translation` and relocate functionality to `libp2p_swarm` diff --git a/core/src/connection.rs b/core/src/connection.rs index bb6639842c9..dcd75b6eef0 100644 --- a/core/src/connection.rs +++ b/core/src/connection.rs @@ -83,12 +83,14 @@ pub enum ConnectedPoint { /// connection as a dialer and one peer dial the other and upgrade the /// connection _as a listener_ overriding its role. role_override: Endpoint, - /// Whether the port for the outgoing connection was reused from a listener - /// or a new port was allocated. This is useful for address translation. + /// Whether the port for the outgoing connection was reused from a + /// listener or a new port was allocated. This is useful for + /// address translation. /// - /// The port use is implemented on a best-effort basis. It is not guaranteed - /// that [`PortUse::Reuse`] actually reused a port. A good example is the case - /// where there is no listener available to reuse a port from. + /// The port use is implemented on a best-effort basis. It is not + /// guaranteed that [`PortUse::Reuse`] actually reused a port. A + /// good example is the case where there is no listener + /// available to reuse a port from. port_use: PortUse, }, /// We received the node. 
@@ -153,10 +155,11 @@ impl ConnectedPoint { /// Returns the address of the remote stored in this struct. /// - /// For `Dialer`, this returns `address`. For `Listener`, this returns `send_back_addr`. + /// For `Dialer`, this returns `address`. For `Listener`, this returns + /// `send_back_addr`. /// - /// Note that the remote node might not be listening on this address and hence the address might - /// not be usable to establish new connections. + /// Note that the remote node might not be listening on this address and + /// hence the address might not be usable to establish new connections. pub fn get_remote_address(&self) -> &Multiaddr { match self { ConnectedPoint::Dialer { address, .. } => address, @@ -166,7 +169,8 @@ impl ConnectedPoint { /// Modifies the address of the remote stored in this struct. /// - /// For `Dialer`, this modifies `address`. For `Listener`, this modifies `send_back_addr`. + /// For `Dialer`, this modifies `address`. For `Listener`, this modifies + /// `send_back_addr`. pub fn set_remote_address(&mut self, new_address: Multiaddr) { match self { ConnectedPoint::Dialer { address, .. } => *address = new_address, diff --git a/core/src/either.rs b/core/src/either.rs index 2593174290c..72199847487 100644 --- a/core/src/either.rs +++ b/core/src/either.rs @@ -18,17 +18,20 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::muxing::StreamMuxerEvent; -use crate::transport::DialOpts; -use crate::{ - muxing::StreamMuxer, - transport::{ListenerId, Transport, TransportError, TransportEvent}, - Multiaddr, +use std::{ + pin::Pin, + task::{Context, Poll}, }; + use either::Either; use futures::prelude::*; use pin_project::pin_project; -use std::{pin::Pin, task::Context, task::Poll}; + +use crate::{ + muxing::{StreamMuxer, StreamMuxerEvent}, + transport::{DialOpts, ListenerId, Transport, TransportError, TransportEvent}, + Multiaddr, +}; impl StreamMuxer for future::Either where @@ -88,7 +91,8 @@ where } } -/// Implements `Future` and dispatches all method calls to either `First` or `Second`. +/// Implements `Future` and dispatches all method calls to either `First` or +/// `Second`. #[pin_project(project = EitherFutureProj)] #[derive(Debug, Copy, Clone)] #[must_use = "futures do nothing unless polled"] diff --git a/core/src/lib.rs b/core/src/lib.rs index ab5afbedae4..911b3776cc9 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -28,8 +28,8 @@ //! to a remote and can subdivide this connection into multiple substreams. //! See the [`muxing`] module. //! - The [`UpgradeInfo`], [`InboundUpgrade`] and [`OutboundUpgrade`] traits -//! define how to upgrade each individual substream to use a protocol. -//! See the `upgrade` module. +//! define how to upgrade each individual substream to use a protocol. See the +//! `upgrade` module. #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] @@ -37,7 +37,8 @@ mod proto { #![allow(unreachable_pub)] include!("generated/mod.rs"); pub use self::{ - envelope_proto::*, peer_record_proto::mod_PeerRecord::*, peer_record_proto::PeerRecord, + envelope_proto::*, + peer_record_proto::{mod_PeerRecord::*, PeerRecord}, }; } diff --git a/core/src/muxing.rs b/core/src/muxing.rs index 477e1608073..6a711e72e22 100644 --- a/core/src/muxing.rs +++ b/core/src/muxing.rs @@ -20,53 +20,65 @@ //! Muxing is the process of splitting a connection into multiple substreams. 
//! -//! The main item of this module is the `StreamMuxer` trait. An implementation of `StreamMuxer` -//! has ownership of a connection, lets you open and close substreams. +//! The main item of this module is the `StreamMuxer` trait. An implementation +//! of `StreamMuxer` has ownership of a connection, lets you open and close +//! substreams. //! -//! > **Note**: You normally don't need to use the methods of the `StreamMuxer` directly, as this -//! > is managed by the library's internals. +//! > **Note**: You normally don't need to use the methods of the `StreamMuxer` +//! > directly, as this +//! > is managed by the library's internals. //! -//! Each substream of a connection is an isolated stream of data. All the substreams are muxed -//! together so that the data read from or written to each substream doesn't influence the other -//! substreams. +//! Each substream of a connection is an isolated stream of data. All the +//! substreams are muxed together so that the data read from or written to each +//! substream doesn't influence the other substreams. //! -//! In the context of libp2p, each substream can use a different protocol. Contrary to opening a -//! connection, opening a substream is almost free in terms of resources. This means that you -//! shouldn't hesitate to rapidly open and close substreams, and to design protocols that don't -//! require maintaining long-lived channels of communication. +//! In the context of libp2p, each substream can use a different protocol. +//! Contrary to opening a connection, opening a substream is almost free in +//! terms of resources. This means that you shouldn't hesitate to rapidly open +//! and close substreams, and to design protocols that don't require maintaining +//! long-lived channels of communication. //! -//! > **Example**: The Kademlia protocol opens a new substream for each request it wants to -//! > perform. Multiple requests can be performed simultaneously by opening multiple -//! 
> substreams, without having to worry about associating responses with the -//! > right request. +//! > **Example**: The Kademlia protocol opens a new substream for each request +//! > it wants to +//! > perform. Multiple requests can be performed simultaneously by opening +//! > multiple +//! > substreams, without having to worry about associating responses with the +//! > right request. //! //! # Implementing a muxing protocol //! -//! In order to implement a muxing protocol, create an object that implements the `UpgradeInfo`, -//! `InboundUpgrade` and `OutboundUpgrade` traits. See the `upgrade` module for more information. -//! The `Output` associated type of the `InboundUpgrade` and `OutboundUpgrade` traits should be -//! identical, and should be an object that implements the `StreamMuxer` trait. +//! In order to implement a muxing protocol, create an object that implements +//! the `UpgradeInfo`, `InboundUpgrade` and `OutboundUpgrade` traits. See the +//! `upgrade` module for more information. The `Output` associated type of the +//! `InboundUpgrade` and `OutboundUpgrade` traits should be identical, and +//! should be an object that implements the `StreamMuxer` trait. //! -//! The upgrade process will take ownership of the connection, which makes it possible for the -//! implementation of `StreamMuxer` to control everything that happens on the wire. +//! The upgrade process will take ownership of the connection, which makes it +//! possible for the implementation of `StreamMuxer` to control everything that +//! happens on the wire. 
+ +use std::{future::Future, pin::Pin}; -use futures::{task::Context, task::Poll, AsyncRead, AsyncWrite}; +use futures::{ + task::{Context, Poll}, + AsyncRead, + AsyncWrite, +}; use multiaddr::Multiaddr; -use std::future::Future; -use std::pin::Pin; -pub use self::boxed::StreamMuxerBox; -pub use self::boxed::SubstreamBox; +pub use self::boxed::{StreamMuxerBox, SubstreamBox}; mod boxed; /// Provides multiplexing for a connection by allowing users to open substreams. /// -/// A substream created by a [`StreamMuxer`] is a type that implements [`AsyncRead`] and [`AsyncWrite`]. -/// The [`StreamMuxer`] itself is modelled closely after [`AsyncWrite`]. It features `poll`-style -/// functions that allow the implementation to make progress on various tasks. +/// A substream created by a [`StreamMuxer`] is a type that implements +/// [`AsyncRead`] and [`AsyncWrite`]. The [`StreamMuxer`] itself is modelled +/// closely after [`AsyncWrite`]. It features `poll`-style functions that allow +/// the implementation to make progress on various tasks. pub trait StreamMuxer { - /// Type of the object that represents the raw substream where data can be read and written. + /// Type of the object that represents the raw substream where data can be + /// read and written. type Substream: AsyncRead + AsyncWrite; /// Error type of the muxer @@ -74,9 +86,10 @@ pub trait StreamMuxer { /// Poll for new inbound substreams. /// - /// This function should be called whenever callers are ready to accept more inbound streams. In - /// other words, callers may exercise back-pressure on incoming streams by not calling this - /// function if a certain limit is hit. + /// This function should be called whenever callers are ready to accept more + /// inbound streams. In other words, callers may exercise back-pressure + /// on incoming streams by not calling this function if a certain limit + /// is hit. 
fn poll_inbound( self: Pin<&mut Self>, cx: &mut Context<'_>, @@ -90,20 +103,23 @@ pub trait StreamMuxer { /// Poll to close this [`StreamMuxer`]. /// - /// After this has returned `Poll::Ready(Ok(()))`, the muxer has become useless and may be safely - /// dropped. + /// After this has returned `Poll::Ready(Ok(()))`, the muxer has become + /// useless and may be safely dropped. /// - /// > **Note**: You are encouraged to call this method and wait for it to return `Ready`, so - /// > that the remote is properly informed of the shutdown. However, apart from - /// > properly informing the remote, there is no difference between this and - /// > immediately dropping the muxer. + /// > **Note**: You are encouraged to call this method and wait for it to + /// > return `Ready`, so + /// > that the remote is properly informed of the shutdown. However, apart + /// > from + /// > properly informing the remote, there is no difference between this and + /// > immediately dropping the muxer. fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll>; /// Poll to allow the underlying connection to make progress. /// - /// In contrast to all other `poll`-functions on [`StreamMuxer`], this function MUST be called - /// unconditionally. Because it will be called regardless, this function can be used by - /// implementations to return events about the underlying connection that the caller MUST deal + /// In contrast to all other `poll`-functions on [`StreamMuxer`], this + /// function MUST be called unconditionally. Because it will be called + /// regardless, this function can be used by implementations to return + /// events about the underlying connection that the caller MUST deal /// with. fn poll( self: Pin<&mut Self>, @@ -120,7 +136,8 @@ pub enum StreamMuxerEvent { /// Extension trait for [`StreamMuxer`]. pub trait StreamMuxerExt: StreamMuxer + Sized { - /// Convenience function for calling [`StreamMuxer::poll_inbound`] for [`StreamMuxer`]s that are `Unpin`. 
+ /// Convenience function for calling [`StreamMuxer::poll_inbound`] for + /// [`StreamMuxer`]s that are `Unpin`. fn poll_inbound_unpin( &mut self, cx: &mut Context<'_>, @@ -131,7 +148,8 @@ pub trait StreamMuxerExt: StreamMuxer + Sized { Pin::new(self).poll_inbound(cx) } - /// Convenience function for calling [`StreamMuxer::poll_outbound`] for [`StreamMuxer`]s that are `Unpin`. + /// Convenience function for calling [`StreamMuxer::poll_outbound`] for + /// [`StreamMuxer`]s that are `Unpin`. fn poll_outbound_unpin( &mut self, cx: &mut Context<'_>, @@ -142,7 +160,8 @@ pub trait StreamMuxerExt: StreamMuxer + Sized { Pin::new(self).poll_outbound(cx) } - /// Convenience function for calling [`StreamMuxer::poll`] for [`StreamMuxer`]s that are `Unpin`. + /// Convenience function for calling [`StreamMuxer::poll`] for + /// [`StreamMuxer`]s that are `Unpin`. fn poll_unpin(&mut self, cx: &mut Context<'_>) -> Poll> where Self: Unpin, @@ -150,7 +169,8 @@ pub trait StreamMuxerExt: StreamMuxer + Sized { Pin::new(self).poll(cx) } - /// Convenience function for calling [`StreamMuxer::poll_close`] for [`StreamMuxer`]s that are `Unpin`. + /// Convenience function for calling [`StreamMuxer::poll_close`] for + /// [`StreamMuxer`]s that are `Unpin`. fn poll_close_unpin(&mut self, cx: &mut Context<'_>) -> Poll> where Self: Unpin, diff --git a/core/src/muxing/boxed.rs b/core/src/muxing/boxed.rs index e909fb9fbf1..f2a6c2b2cfa 100644 --- a/core/src/muxing/boxed.rs +++ b/core/src/muxing/boxed.rs @@ -1,12 +1,16 @@ -use crate::muxing::{StreamMuxer, StreamMuxerEvent}; +use std::{ + error::Error, + fmt, + io, + io::{IoSlice, IoSliceMut}, + pin::Pin, + task::{Context, Poll}, +}; + use futures::{AsyncRead, AsyncWrite}; use pin_project::pin_project; -use std::error::Error; -use std::fmt; -use std::io; -use std::io::{IoSlice, IoSliceMut}; -use std::pin::Pin; -use std::task::{Context, Poll}; + +use crate::muxing::{StreamMuxer, StreamMuxerEvent}; /// Abstract `StreamMuxer`. 
pub struct StreamMuxerBox { @@ -21,8 +25,8 @@ impl fmt::Debug for StreamMuxerBox { /// Abstract type for asynchronous reading and writing. /// -/// A [`SubstreamBox`] erases the concrete type it is given and only retains its `AsyncRead` -/// and `AsyncWrite` capabilities. +/// A [`SubstreamBox`] erases the concrete type it is given and only retains its +/// `AsyncRead` and `AsyncWrite` capabilities. pub struct SubstreamBox(Pin>); #[pin_project] @@ -139,7 +143,8 @@ impl StreamMuxer for StreamMuxerBox { } impl SubstreamBox { - /// Construct a new [`SubstreamBox`] from something that implements [`AsyncRead`] and [`AsyncWrite`]. + /// Construct a new [`SubstreamBox`] from something that implements + /// [`AsyncRead`] and [`AsyncWrite`]. pub fn new(stream: S) -> Self { Self(Box::pin(stream)) } @@ -155,7 +160,8 @@ impl fmt::Debug for SubstreamBox { trait AsyncReadWrite: AsyncRead + AsyncWrite { /// Helper function to capture the erased inner type. /// - /// Used to make the [`Debug`] implementation of [`SubstreamBox`] more useful. + /// Used to make the [`Debug`] implementation of [`SubstreamBox`] more + /// useful. fn type_name(&self) -> &'static str; } diff --git a/core/src/peer_record.rs b/core/src/peer_record.rs index ac488338cc6..1902ed2e8ca 100644 --- a/core/src/peer_record.rs +++ b/core/src/peer_record.rs @@ -1,18 +1,16 @@ -use crate::signed_envelope::SignedEnvelope; -use crate::{proto, signed_envelope, DecodeError, Multiaddr}; -use libp2p_identity::Keypair; -use libp2p_identity::PeerId; -use libp2p_identity::SigningError; +use libp2p_identity::{Keypair, PeerId, SigningError}; use quick_protobuf::{BytesReader, Writer}; use web_time::SystemTime; +use crate::{proto, signed_envelope, signed_envelope::SignedEnvelope, DecodeError, Multiaddr}; + const PAYLOAD_TYPE: &str = "/libp2p/routing-state-record"; const DOMAIN_SEP: &str = "libp2p-routing-state"; /// Represents a peer routing record. 
/// -/// Peer records are designed to be distributable and carry a signature by being wrapped in a signed envelope. -/// For more information see RFC0003 of the libp2p specifications: +/// Peer records are designed to be distributable and carry a signature by being +/// wrapped in a signed envelope. For more information see RFC0003 of the libp2p specifications: #[derive(Debug, PartialEq, Eq, Clone)] pub struct PeerRecord { peer_id: PeerId, @@ -21,14 +19,16 @@ pub struct PeerRecord { /// A signed envelope representing this [`PeerRecord`]. /// - /// If this [`PeerRecord`] was constructed from a [`SignedEnvelope`], this is the original instance. + /// If this [`PeerRecord`] was constructed from a [`SignedEnvelope`], this + /// is the original instance. envelope: SignedEnvelope, } impl PeerRecord { /// Attempt to re-construct a [`PeerRecord`] from a [`SignedEnvelope`]. /// - /// If this function succeeds, the [`SignedEnvelope`] contained a peer record with a valid signature and can hence be considered authenticated. + /// If this function succeeds, the [`SignedEnvelope`] contained a peer + /// record with a valid signature and can hence be considered authenticated. pub fn from_signed_envelope(envelope: SignedEnvelope) -> Result { use quick_protobuf::MessageRead; @@ -58,9 +58,12 @@ impl PeerRecord { }) } - /// Construct a new [`PeerRecord`] by authenticating the provided addresses with the given key. + /// Construct a new [`PeerRecord`] by authenticating the provided addresses + /// with the given key. /// - /// This is the same key that is used for authenticating every libp2p connection of your application, i.e. what you use when setting up your [`crate::transport::Transport`]. + /// This is the same key that is used for authenticating every libp2p + /// connection of your application, i.e. what you use when setting up your + /// [`crate::transport::Transport`]. 
pub fn new(key: &Keypair, addresses: Vec) -> Result { use quick_protobuf::MessageWrite; diff --git a/core/src/signed_envelope.rs b/core/src/signed_envelope.rs index 19a0cac4f82..4d1ea21e25a 100644 --- a/core/src/signed_envelope.rs +++ b/core/src/signed_envelope.rs @@ -1,11 +1,13 @@ -use crate::{proto, DecodeError}; -use libp2p_identity::SigningError; -use libp2p_identity::{Keypair, PublicKey}; -use quick_protobuf::{BytesReader, Writer}; use std::fmt; + +use libp2p_identity::{Keypair, PublicKey, SigningError}; +use quick_protobuf::{BytesReader, Writer}; use unsigned_varint::encode::usize_buffer; -/// A signed envelope contains an arbitrary byte string payload, a signature of the payload, and the public key that can be used to verify the signature. +use crate::{proto, DecodeError}; + +/// A signed envelope contains an arbitrary byte string payload, a signature of +/// the payload, and the public key that can be used to verify the signature. /// /// For more details see libp2p RFC0002: #[derive(Debug, Clone, PartialEq, Eq)] @@ -36,7 +38,8 @@ impl SignedEnvelope { }) } - /// Verify this [`SignedEnvelope`] against the provided domain-separation string. + /// Verify this [`SignedEnvelope`] against the provided domain-separation + /// string. #[must_use] pub fn verify(&self, domain_separation: String) -> bool { let buffer = signature_payload(domain_separation, &self.payload_type, &self.payload); @@ -46,8 +49,10 @@ impl SignedEnvelope { /// Extract the payload and signing key of this [`SignedEnvelope`]. /// - /// You must provide the correct domain-separation string and expected payload type in order to get the payload. - /// This guards against accidental mis-use of the payload where the signature was created for a different purpose or payload type. + /// You must provide the correct domain-separation string and expected + /// payload type in order to get the payload. 
This guards against + /// accidental mis-use of the payload where the signature was created for a + /// different purpose or payload type. /// /// It is the caller's responsibility to check that the signing key is what /// is expected. For example, checking that the signing key is from a @@ -71,7 +76,8 @@ impl SignedEnvelope { Ok((&self.payload, &self.key)) } - /// Encode this [`SignedEnvelope`] using the protobuf encoding specified in the RFC. + /// Encode this [`SignedEnvelope`] using the protobuf encoding specified in + /// the RFC. pub fn into_protobuf_encoding(self) -> Vec { use quick_protobuf::MessageWrite; @@ -92,7 +98,8 @@ impl SignedEnvelope { buf } - /// Decode a [`SignedEnvelope`] using the protobuf encoding specified in the RFC. + /// Decode a [`SignedEnvelope`] using the protobuf encoding specified in the + /// RFC. pub fn from_protobuf_encoding(bytes: &[u8]) -> Result { use quick_protobuf::MessageRead; @@ -139,16 +146,19 @@ fn signature_payload(domain_separation: String, payload_type: &[u8], payload: &[ buffer } -/// Errors that occur whilst decoding a [`SignedEnvelope`] from its byte representation. +/// Errors that occur whilst decoding a [`SignedEnvelope`] from its byte +/// representation. #[derive(thiserror::Error, Debug)] pub enum DecodingError { /// Decoding the provided bytes as a signed envelope failed. #[error("Failed to decode envelope")] InvalidEnvelope(#[from] DecodeError), - /// The public key in the envelope could not be converted to our internal public key type. + /// The public key in the envelope could not be converted to our internal + /// public key type. #[error("Failed to convert public key")] InvalidPublicKey(#[from] libp2p_identity::DecodingError), - /// The public key in the envelope could not be converted to our internal public key type. + /// The public key in the envelope could not be converted to our internal + /// public key type. 
#[error("Public key is missing from protobuf struct")] MissingPublicKey, } @@ -156,7 +166,8 @@ pub enum DecodingError { /// Errors that occur whilst extracting the payload of a [`SignedEnvelope`]. #[derive(Debug)] pub enum ReadPayloadError { - /// The signature on the signed envelope does not verify with the provided domain separation string. + /// The signature on the signed envelope does not verify with the provided + /// domain separation string. InvalidSignature, /// The payload contained in the envelope is not of the expected type. UnexpectedPayloadType { expected: Vec, got: Vec }, diff --git a/core/src/transport.rs b/core/src/transport.rs index 28ce2dbf650..1da9961d2a0 100644 --- a/core/src/transport.rs +++ b/core/src/transport.rs @@ -23,10 +23,9 @@ //! The main entity of this module is the [`Transport`] trait, which provides an //! interface for establishing connections with other nodes, thereby negotiating //! any desired protocols. The rest of the module defines combinators for -//! modifying a transport through composition with other transports or protocol upgrades. +//! modifying a transport through composition with other transports or protocol +//! upgrades. -use futures::prelude::*; -use multiaddr::Multiaddr; use std::{ error::Error, fmt, @@ -35,6 +34,9 @@ use std::{ task::{Context, Poll}, }; +use futures::prelude::*; +use multiaddr::Multiaddr; + pub mod and_then; pub mod choice; pub mod dummy; @@ -48,14 +50,15 @@ pub mod upgrade; mod boxed; mod optional; +pub use self::{ + boxed::Boxed, + choice::OrTransport, + memory::MemoryTransport, + optional::OptionalTransport, + upgrade::Upgrade, +}; use crate::{ConnectedPoint, Endpoint}; -pub use self::boxed::Boxed; -pub use self::choice::OrTransport; -pub use self::memory::MemoryTransport; -pub use self::optional::OptionalTransport; -pub use self::upgrade::Upgrade; - static NEXT_LISTENER_ID: AtomicUsize = AtomicUsize::new(1); /// The port use policy for a new connection. 
@@ -65,7 +68,8 @@ pub enum PortUse { New, /// Best effor reusing of an existing port. /// - /// If there is no listener present that can be used to dial, a new port is allocated. + /// If there is no listener present that can be used to dial, a new port is + /// allocated. #[default] Reuse, } @@ -75,7 +79,8 @@ pub enum PortUse { pub struct DialOpts { /// The endpoint establishing a new connection. /// - /// When attempting a hole-punch, both parties simultaneously "dial" each other but one party has to be the "listener" on the final connection. + /// When attempting a hole-punch, both parties simultaneously "dial" each + /// other but one party has to be the "listener" on the final connection. /// This option specifies the role of this node in the final connection. pub role: Endpoint, /// The port use policy for a new connection. @@ -87,18 +92,18 @@ pub struct DialOpts { /// /// Connections are established either by [listening](Transport::listen_on) /// or [dialing](Transport::dial) on a [`Transport`]. A peer that -/// obtains a connection by listening is often referred to as the *listener* and the -/// peer that initiated the connection through dialing as the *dialer*, in +/// obtains a connection by listening is often referred to as the *listener* and +/// the peer that initiated the connection through dialing as the *dialer*, in /// contrast to the traditional roles of *server* and *client*. /// /// Most transports also provide a form of reliable delivery on the established /// connections but the precise semantics of these guarantees depend on the /// specific transport. /// -/// This trait is implemented for concrete connection-oriented transport protocols -/// like TCP or Unix Domain Sockets, but also on wrappers that add additional -/// functionality to the dialing or listening process (e.g. name resolution via -/// the DNS). 
+/// This trait is implemented for concrete connection-oriented transport +/// protocols like TCP or Unix Domain Sockets, but also on wrappers that add +/// additional functionality to the dialing or listening process (e.g. name +/// resolution via the DNS). /// /// Additional protocols can be layered on top of the connections established /// by a [`Transport`] through an upgrade mechanism that is initiated via @@ -124,19 +129,21 @@ pub trait Transport { /// A pending [`Output`](Transport::Output) for an inbound connection, /// obtained from the [`Transport`] stream. /// - /// After a connection has been accepted by the transport, it may need to go through - /// asynchronous post-processing (i.e. protocol upgrade negotiations). Such - /// post-processing should not block the `Listener` from producing the next - /// connection, hence further connection setup proceeds asynchronously. - /// Once a `ListenerUpgrade` future resolves it yields the [`Output`](Transport::Output) - /// of the connection setup process. + /// After a connection has been accepted by the transport, it may need to go + /// through asynchronous post-processing (i.e. protocol upgrade + /// negotiations). Such post-processing should not block the `Listener` + /// from producing the next connection, hence further connection setup + /// proceeds asynchronously. Once a `ListenerUpgrade` future resolves it + /// yields the [`Output`](Transport::Output) of the connection setup + /// process. type ListenerUpgrade: Future>; /// A pending [`Output`](Transport::Output) for an outbound connection, /// obtained from [dialing](Transport::dial). type Dial: Future>; - /// Listens on the given [`Multiaddr`] for inbound connections with a provided [`ListenerId`]. + /// Listens on the given [`Multiaddr`] for inbound connections with a + /// provided [`ListenerId`]. fn listen_on( &mut self, id: ListenerId, @@ -149,10 +156,11 @@ pub trait Transport { /// otherwise. 
fn remove_listener(&mut self, id: ListenerId) -> bool; - /// Dials the given [`Multiaddr`], returning a future for a pending outbound connection. + /// Dials the given [`Multiaddr`], returning a future for a pending outbound + /// connection. /// - /// If [`TransportError::MultiaddrNotSupported`] is returned, it may be desirable to - /// try an alternative [`Transport`], if available. + /// If [`TransportError::MultiaddrNotSupported`] is returned, it may be + /// desirable to try an alternative [`Transport`], if available. fn dial( &mut self, addr: Multiaddr, @@ -161,15 +169,16 @@ pub trait Transport { /// Poll for [`TransportEvent`]s. /// - /// A [`TransportEvent::Incoming`] should be produced whenever a connection is received at the lowest - /// level of the transport stack. The item must be a [`ListenerUpgrade`](Transport::ListenerUpgrade) - /// future that resolves to an [`Output`](Transport::Output) value once all protocol upgrades have - /// been applied. + /// A [`TransportEvent::Incoming`] should be produced whenever a connection + /// is received at the lowest level of the transport stack. The item + /// must be a [`ListenerUpgrade`](Transport::ListenerUpgrade) + /// future that resolves to an [`Output`](Transport::Output) value once all + /// protocol upgrades have been applied. /// - /// Transports are expected to produce [`TransportEvent::Incoming`] events only for - /// listen addresses which have previously been announced via - /// a [`TransportEvent::NewAddress`] event and which have not been invalidated by - /// an [`TransportEvent::AddressExpired`] event yet. + /// Transports are expected to produce [`TransportEvent::Incoming`] events + /// only for listen addresses which have previously been announced via + /// a [`TransportEvent::NewAddress`] event and which have not been + /// invalidated by an [`TransportEvent::AddressExpired`] event yet. 
fn poll( self: Pin<&mut Self>, cx: &mut Context<'_>, @@ -195,7 +204,8 @@ pub trait Transport { map::Map::new(self, f) } - /// Applies a function on the errors generated by the futures of the transport. + /// Applies a function on the errors generated by the futures of the + /// transport. fn map_err(self, f: F) -> map_err::MapErr where Self: Sized, @@ -207,8 +217,8 @@ pub trait Transport { /// Adds a fallback transport that is used when encountering errors /// while establishing inbound or outbound connections. /// - /// The returned transport will act like `self`, except that if `listen_on` or `dial` - /// return an error then `other` will be tried. + /// The returned transport will act like `self`, except that if `listen_on` + /// or `dial` return an error then `other` will be tried. fn or_transport(self, other: U) -> OrTransport where Self: Sized, @@ -224,7 +234,8 @@ pub trait Transport { /// This function can be used for ad-hoc protocol upgrades or /// for processing or adapting the output for following configurations. /// - /// For the high-level transport upgrade procedure, see [`Transport::upgrade`]. + /// For the high-level transport upgrade procedure, see + /// [`Transport::upgrade`]. fn and_then(self, f: C) -> and_then::AndThen where Self: Sized, @@ -293,8 +304,8 @@ pub enum TransportEvent { ListenerClosed { /// The ID of the listener that closed. listener_id: ListenerId, - /// Reason for the closure. Contains `Ok(())` if the stream produced `None`, or `Err` - /// if the stream produced an error. + /// Reason for the closure. Contains `Ok(())` if the stream produced + /// `None`, or `Err` if the stream produced an error. reason: Result<(), TErr>, }, /// A listener errored. @@ -311,7 +322,8 @@ pub enum TransportEvent { impl TransportEvent { /// In case this [`TransportEvent`] is an upgrade, apply the given function - /// to the upgrade and produce another transport event based the function's result. 
+    /// to the upgrade and produce another transport event based on the
+    /// function's result.
     pub fn map_upgrade(self, map: impl FnOnce(TUpgr) -> U) -> TransportEvent {
         match self {
             TransportEvent::Incoming {
@@ -352,9 +364,11 @@ impl TransportEvent {
         }
     }
 
-    /// In case this [`TransportEvent`] is an [`ListenerError`](TransportEvent::ListenerError),
-    /// or [`ListenerClosed`](TransportEvent::ListenerClosed) apply the given function to the
-    /// error and produce another transport event based on the function's result.
+    /// In case this [`TransportEvent`] is an
+    /// [`ListenerError`](TransportEvent::ListenerError),
+    /// or [`ListenerClosed`](TransportEvent::ListenerClosed) apply the given
+    /// function to the error and produce another transport event based on
+    /// the function's result.
     pub fn map_err(self, map_err: impl FnOnce(TErr) -> E) -> TransportEvent {
         match self {
             TransportEvent::Incoming {
@@ -396,7 +410,8 @@ impl TransportEvent {
         }
     }
 
-    /// Returns `true` if this is an [`Incoming`](TransportEvent::Incoming) transport event.
+    /// Returns `true` if this is an [`Incoming`](TransportEvent::Incoming)
+    /// transport event.
     pub fn is_upgrade(&self) -> bool {
         matches!(self, TransportEvent::Incoming { .. })
     }
@@ -426,8 +441,8 @@ impl TransportEvent {
     /// Try to turn this transport event into the new `Multiaddr`.
     ///
-    /// Returns `None` if the event is not actually a [`TransportEvent::NewAddress`],
-    /// otherwise the address.
+    /// Returns `None` if the event is not actually a
+    /// [`TransportEvent::NewAddress`], otherwise the address.
     pub fn into_new_address(self) -> Option {
         if let TransportEvent::NewAddress { listen_addr, .. } = self {
             Some(listen_addr)
@@ -443,8 +458,8 @@ impl TransportEvent {
         }
     }
 
     /// Try to turn this transport event into the expire `Multiaddr`.
     ///
-    /// Returns `None` if the event is not actually a [`TransportEvent::AddressExpired`],
-    /// otherwise the address.
+    /// Returns `None` if the event is not actually a
+    /// [`TransportEvent::AddressExpired`], otherwise the address.
     pub fn into_address_expired(self) -> Option {
         if let TransportEvent::AddressExpired { listen_addr, .. } = self {
             Some(listen_addr)
@@ -453,15 +468,16 @@ impl TransportEvent {
         }
     }
 
-    /// Returns `true` if this is an [`TransportEvent::ListenerError`] transport event.
+    /// Returns `true` if this is an [`TransportEvent::ListenerError`] transport
+    /// event.
     pub fn is_listener_error(&self) -> bool {
         matches!(self, TransportEvent::ListenerError { .. })
     }
 
     /// Try to turn this transport event into the listener error.
     ///
-    /// Returns `None` if the event is not actually a [`TransportEvent::ListenerError`]`,
-    /// otherwise the error.
+    /// Returns `None` if the event is not actually a
+    /// [`TransportEvent::ListenerError`], otherwise the error.
     pub fn into_listener_error(self) -> Option {
         if let TransportEvent::ListenerError { error, .. } = self {
             Some(error)
@@ -516,8 +532,8 @@ impl fmt::Debug for TransportEvent {
     }
 }
 
-/// An error during [dialing][Transport::dial] or [listening][Transport::listen_on]
-/// on a [`Transport`].
+/// An error during [dialing][Transport::dial] or
+/// [listening][Transport::listen_on] on a [`Transport`].
 #[derive(Debug, Clone)]
 pub enum TransportError {
     /// The [`Multiaddr`] passed as parameter is not supported.
diff --git a/core/src/transport/and_then.rs b/core/src/transport/and_then.rs
index e85703f77fb..5d2b7d91553 100644
--- a/core/src/transport/and_then.rs
+++ b/core/src/transport/and_then.rs
@@ -18,14 +18,21 @@
 // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 // DEALINGS IN THE SOFTWARE.
-use crate::{ - connection::ConnectedPoint, - transport::{DialOpts, ListenerId, Transport, TransportError, TransportEvent}, +use std::{ + error, + marker::PhantomPinned, + pin::Pin, + task::{Context, Poll}, }; + use either::Either; use futures::prelude::*; use multiaddr::Multiaddr; -use std::{error, marker::PhantomPinned, pin::Pin, task::Context, task::Poll}; + +use crate::{ + connection::ConnectedPoint, + transport::{DialOpts, ListenerId, Transport, TransportError, TransportEvent}, +}; /// See the [`Transport::and_then`] method. #[pin_project::pin_project] diff --git a/core/src/transport/boxed.rs b/core/src/transport/boxed.rs index 596ab262221..7fa57026dca 100644 --- a/core/src/transport/boxed.rs +++ b/core/src/transport/boxed.rs @@ -18,16 +18,19 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::transport::{DialOpts, ListenerId, Transport, TransportError, TransportEvent}; -use futures::{prelude::*, stream::FusedStream}; -use multiaddr::Multiaddr; use std::{ error::Error, - fmt, io, + fmt, + io, pin::Pin, task::{Context, Poll}, }; +use futures::{prelude::*, stream::FusedStream}; +use multiaddr::Multiaddr; + +use crate::transport::{DialOpts, ListenerId, Transport, TransportError, TransportEvent}; + /// Creates a new [`Boxed`] transport from the given transport. pub(crate) fn boxed(transport: T) -> Boxed where diff --git a/core/src/transport/choice.rs b/core/src/transport/choice.rs index 4339f6bba71..251091f2008 100644 --- a/core/src/transport/choice.rs +++ b/core/src/transport/choice.rs @@ -18,12 +18,19 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::either::EitherFuture; -use crate::transport::{DialOpts, ListenerId, Transport, TransportError, TransportEvent}; +use std::{ + pin::Pin, + task::{Context, Poll}, +}; + use either::Either; use futures::future; use multiaddr::Multiaddr; -use std::{pin::Pin, task::Context, task::Poll}; + +use crate::{ + either::EitherFuture, + transport::{DialOpts, ListenerId, Transport, TransportError, TransportEvent}, +}; /// Struct returned by `or_transport()`. #[derive(Debug, Copy, Clone)] diff --git a/core/src/transport/dummy.rs b/core/src/transport/dummy.rs index 72558d34a79..0940328b554 100644 --- a/core/src/transport/dummy.rs +++ b/core/src/transport/dummy.rs @@ -18,14 +18,22 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::transport::{DialOpts, ListenerId, Transport, TransportError, TransportEvent}; -use crate::Multiaddr; -use futures::{prelude::*, task::Context, task::Poll}; use std::{fmt, io, marker::PhantomData, pin::Pin}; +use futures::{ + prelude::*, + task::{Context, Poll}, +}; + +use crate::{ + transport::{DialOpts, ListenerId, Transport, TransportError, TransportEvent}, + Multiaddr, +}; + /// Implementation of `Transport` that doesn't support any multiaddr. /// -/// Useful for testing purposes, or as a fallback implementation when no protocol is available. +/// Useful for testing purposes, or as a fallback implementation when no +/// protocol is available. pub struct DummyTransport(PhantomData); impl DummyTransport { @@ -87,7 +95,8 @@ impl Transport for DummyTransport { } } -/// Implementation of `AsyncRead` and `AsyncWrite`. Not meant to be instantiated. +/// Implementation of `AsyncRead` and `AsyncWrite`. Not meant to be +/// instantiated. 
pub struct DummyStream(()); impl fmt::Debug for DummyStream { diff --git a/core/src/transport/global_only.rs b/core/src/transport/global_only.rs index 83774f37004..a5d0784b40f 100644 --- a/core/src/transport/global_only.rs +++ b/core/src/transport/global_only.rs @@ -18,22 +18,24 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::{ - multiaddr::{Multiaddr, Protocol}, - transport::{DialOpts, ListenerId, TransportError, TransportEvent}, -}; use std::{ pin::Pin, task::{Context, Poll}, }; +use crate::{ + multiaddr::{Multiaddr, Protocol}, + transport::{DialOpts, ListenerId, TransportError, TransportEvent}, +}; + /// Dropping all dial requests to non-global IP addresses. #[derive(Debug, Clone, Default)] pub struct Transport { inner: T, } -/// This module contains an implementation of the `is_global` IPv4 address space. +/// This module contains an implementation of the `is_global` IPv4 address +/// space. /// /// Credit for this implementation goes to the Rust standard library team. /// @@ -41,9 +43,10 @@ pub struct Transport { mod ipv4_global { use std::net::Ipv4Addr; - /// Returns [`true`] if this address is reserved by IANA for future use. [IETF RFC 1112] - /// defines the block of reserved addresses as `240.0.0.0/4`. This range normally includes the - /// broadcast address `255.255.255.255`, but this implementation explicitly excludes it, since + /// Returns [`true`] if this address is reserved by IANA for future use. + /// [IETF RFC 1112] defines the block of reserved addresses as + /// `240.0.0.0/4`. This range normally includes the broadcast address + /// `255.255.255.255`, but this implementation explicitly excludes it, since /// it is obviously not reserved for future use. 
     ///
     /// [IETF RFC 1112]: https://tools.ietf.org/html/rfc1112
@@ -60,9 +63,10 @@ mod ipv4_global {
         a.octets()[0] & 240 == 240 && !a.is_broadcast()
     }
 
-    /// Returns [`true`] if this address part of the `198.18.0.0/15` range, which is reserved for
-    /// network devices benchmarking. This range is defined in [IETF RFC 2544] as `192.18.0.0`
-    /// through `198.19.255.255` but [errata 423] corrects it to `198.18.0.0/15`.
+    /// Returns [`true`] if this address is part of the `198.18.0.0/15` range,
+    /// which is reserved for network devices benchmarking. This range is
+    /// defined in [IETF RFC 2544] as `192.18.0.0` through `198.19.255.255`
+    /// but [errata 423] corrects it to `198.18.0.0/15`.
     ///
     /// [IETF RFC 2544]: https://tools.ietf.org/html/rfc2544
     /// [errata 423]: https://www.rfc-editor.org/errata/eid423
@@ -72,8 +76,8 @@ mod ipv4_global {
         a.octets()[0] == 198 && (a.octets()[1] & 0xfe) == 18
     }
 
-    /// Returns [`true`] if this address is part of the Shared Address Space defined in
-    /// [IETF RFC 6598] (`100.64.0.0/10`).
+    /// Returns [`true`] if this address is part of the Shared Address Space
+    /// defined in [IETF RFC 6598] (`100.64.0.0/10`).
     ///
     /// [IETF RFC 6598]: https://tools.ietf.org/html/rfc6598
     #[must_use]
@@ -104,24 +108,32 @@ mod ipv4_global {
 
     /// Returns [`true`] if the address appears to be globally reachable
     /// as specified by the [IANA IPv4 Special-Purpose Address Registry].
-    /// Whether or not an address is practically reachable will depend on your network configuration.
+    /// Whether or not an address is practically reachable will depend on your
+    /// network configuration.
     ///
     /// Most IPv4 addresses are globally reachable;
     /// unless they are specifically defined as *not* globally reachable.
/// - /// Non-exhaustive list of notable addresses that are not globally reachable: + /// Non-exhaustive list of notable addresses that are not globally + /// reachable: /// - /// - The [unspecified address] ([`is_unspecified`](Ipv4Addr::is_unspecified)) - /// - Addresses reserved for private use ([`is_private`](Ipv4Addr::is_private)) - /// - Addresses in the shared address space ([`is_shared`](Ipv4Addr::is_shared)) + /// - The [unspecified address] + /// ([`is_unspecified`](Ipv4Addr::is_unspecified)) + /// - Addresses reserved for private use + /// ([`is_private`](Ipv4Addr::is_private)) + /// - Addresses in the shared address space + /// ([`is_shared`](Ipv4Addr::is_shared)) /// - Loopback addresses ([`is_loopback`](Ipv4Addr::is_loopback)) /// - Link-local addresses ([`is_link_local`](Ipv4Addr::is_link_local)) - /// - Addresses reserved for documentation ([`is_documentation`](Ipv4Addr::is_documentation)) - /// - Addresses reserved for benchmarking ([`is_benchmarking`](Ipv4Addr::is_benchmarking)) + /// - Addresses reserved for documentation + /// ([`is_documentation`](Ipv4Addr::is_documentation)) + /// - Addresses reserved for benchmarking + /// ([`is_benchmarking`](Ipv4Addr::is_benchmarking)) /// - Reserved addresses ([`is_reserved`](Ipv4Addr::is_reserved)) /// - The [broadcast address] ([`is_broadcast`](Ipv4Addr::is_broadcast)) /// - /// For the complete overview of which addresses are globally reachable, see the table at the [IANA IPv4 Special-Purpose Address Registry]. + /// For the complete overview of which addresses are globally reachable, see + /// the table at the [IANA IPv4 Special-Purpose Address Registry]. /// /// [IANA IPv4 Special-Purpose Address Registry]: https://www.iana.org/assignments/iana-ipv4-special-registry/iana-ipv4-special-registry.xhtml /// [unspecified address]: Ipv4Addr::UNSPECIFIED @@ -143,7 +155,8 @@ mod ipv4_global { } } -/// This module contains an implementation of the `is_global` IPv6 address space. 
+/// This module contains an implementation of the `is_global` IPv6 address +/// space. /// /// Credit for this implementation goes to the Rust standard library team. /// @@ -151,12 +164,14 @@ mod ipv4_global { mod ipv6_global { use std::net::Ipv6Addr; - /// Returns `true` if the address is a unicast address with link-local scope, - /// as defined in [RFC 4291]. + /// Returns `true` if the address is a unicast address with link-local + /// scope, as defined in [RFC 4291]. /// - /// A unicast address has link-local scope if it has the prefix `fe80::/10`, as per [RFC 4291 section 2.4]. - /// Note that this encompasses more addresses than those defined in [RFC 4291 section 2.5.6], - /// which describes "Link-Local IPv6 Unicast Addresses" as having the following stricter format: + /// A unicast address has link-local scope if it has the prefix `fe80::/10`, + /// as per [RFC 4291 section 2.4]. Note that this encompasses more + /// addresses than those defined in [RFC 4291 section 2.5.6], + /// which describes "Link-Local IPv6 Unicast Addresses" as having the + /// following stricter format: /// /// ```text /// | 10 bits | 54 bits | 64 bits | @@ -164,12 +179,16 @@ mod ipv6_global { /// |1111111010| 0 | interface ID | /// +----------+-------------------------+----------------------------+ /// ``` - /// So while currently the only addresses with link-local scope an application will encounter are all in `fe80::/64`, - /// this might change in the future with the publication of new standards. More addresses in `fe80::/10` could be allocated, - /// and those addresses will have link-local scope. + /// So while currently the only addresses with link-local scope an + /// application will encounter are all in `fe80::/64`, this might change + /// in the future with the publication of new standards. More addresses in + /// `fe80::/10` could be allocated, and those addresses will have + /// link-local scope. 
/// - /// Also note that while [RFC 4291 section 2.5.3] mentions about the [loopback address] (`::1`) that "it is treated as having Link-Local scope", - /// this does not mean that the loopback address actually has link-local scope and this method will return `false` on it. + /// Also note that while [RFC 4291 section 2.5.3] mentions about the + /// [loopback address] (`::1`) that "it is treated as having Link-Local + /// scope", this does not mean that the loopback address actually has + /// link-local scope and this method will return `false` on it. /// /// [RFC 4291]: https://tools.ietf.org/html/rfc4291 /// [RFC 4291 section 2.4]: https://tools.ietf.org/html/rfc4291#section-2.4 @@ -207,25 +226,33 @@ mod ipv6_global { /// Returns [`true`] if the address appears to be globally reachable /// as specified by the [IANA IPv6 Special-Purpose Address Registry]. - /// Whether or not an address is practically reachable will depend on your network configuration. + /// Whether or not an address is practically reachable will depend on your + /// network configuration. /// /// Most IPv6 addresses are globally reachable; /// unless they are specifically defined as *not* globally reachable. 
/// - /// Non-exhaustive list of notable addresses that are not globally reachable: - /// - The [unspecified address] ([`is_unspecified`](Ipv6Addr::is_unspecified)) + /// Non-exhaustive list of notable addresses that are not globally + /// reachable: + /// - The [unspecified address] + /// ([`is_unspecified`](Ipv6Addr::is_unspecified)) /// - The [loopback address] ([`is_loopback`](Ipv6Addr::is_loopback)) /// - IPv4-mapped addresses /// - Addresses reserved for benchmarking - /// - Addresses reserved for documentation ([`is_documentation`](Ipv6Addr::is_documentation)) - /// - Unique local addresses ([`is_unique_local`](Ipv6Addr::is_unique_local)) - /// - Unicast addresses with link-local scope ([`is_unicast_link_local`](Ipv6Addr::is_unicast_link_local)) + /// - Addresses reserved for documentation + /// ([`is_documentation`](Ipv6Addr::is_documentation)) + /// - Unique local addresses + /// ([`is_unique_local`](Ipv6Addr::is_unique_local)) + /// - Unicast addresses with link-local scope + /// ([`is_unicast_link_local`](Ipv6Addr::is_unicast_link_local)) /// - /// For the complete overview of which addresses are globally reachable, see the table at the [IANA IPv6 Special-Purpose Address Registry]. + /// For the complete overview of which addresses are globally reachable, see + /// the table at the [IANA IPv6 Special-Purpose Address Registry]. 
/// - /// Note that an address having global scope is not the same as being globally reachable, - /// and there is no direct relation between the two concepts: There exist addresses with global scope - /// that are not globally reachable (for example unique local addresses), + /// Note that an address having global scope is not the same as being + /// globally reachable, and there is no direct relation between the two + /// concepts: There exist addresses with global scope that are not + /// globally reachable (for example unique local addresses), /// and addresses that are globally reachable without having global scope /// (multicast addresses with non-global scope). /// diff --git a/core/src/transport/map.rs b/core/src/transport/map.rs index 9aab84ba8b1..4f6910b141f 100644 --- a/core/src/transport/map.rs +++ b/core/src/transport/map.rs @@ -18,16 +18,19 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::transport::DialOpts; -use crate::{ - connection::ConnectedPoint, - transport::{Transport, TransportError, TransportEvent}, +use std::{ + pin::Pin, + task::{Context, Poll}, }; + use futures::prelude::*; use multiaddr::Multiaddr; -use std::{pin::Pin, task::Context, task::Poll}; use super::ListenerId; +use crate::{ + connection::ConnectedPoint, + transport::{DialOpts, Transport, TransportError, TransportEvent}, +}; /// See `Transport::map`. #[derive(Debug, Copy, Clone)] diff --git a/core/src/transport/map_err.rs b/core/src/transport/map_err.rs index 5d44af9af2e..f47f5713225 100644 --- a/core/src/transport/map_err.rs +++ b/core/src/transport/map_err.rs @@ -18,10 +18,16 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::transport::{DialOpts, ListenerId, Transport, TransportError, TransportEvent}; +use std::{ + error, + pin::Pin, + task::{Context, Poll}, +}; + use futures::prelude::*; use multiaddr::Multiaddr; -use std::{error, pin::Pin, task::Context, task::Poll}; + +use crate::transport::{DialOpts, ListenerId, Transport, TransportError, TransportEvent}; /// See `Transport::map_err`. #[derive(Debug, Copy, Clone)] diff --git a/core/src/transport/memory.rs b/core/src/transport/memory.rs index 85680265e8b..bd663940f12 100644 --- a/core/src/transport/memory.rs +++ b/core/src/transport/memory.rs @@ -18,20 +18,29 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::transport::{DialOpts, ListenerId, Transport, TransportError, TransportEvent}; -use fnv::FnvHashMap; -use futures::{channel::mpsc, future::Ready, prelude::*, task::Context, task::Poll}; -use multiaddr::{Multiaddr, Protocol}; -use once_cell::sync::Lazy; -use parking_lot::Mutex; -use rw_stream_sink::RwStreamSink; use std::{ collections::{hash_map::Entry, VecDeque}, - error, fmt, io, + error, + fmt, + io, num::NonZeroU64, pin::Pin, }; +use fnv::FnvHashMap; +use futures::{ + channel::mpsc, + future::Ready, + prelude::*, + task::{Context, Poll}, +}; +use multiaddr::{Multiaddr, Protocol}; +use once_cell::sync::Lazy; +use parking_lot::Mutex; +use rw_stream_sink::RwStreamSink; + +use crate::transport::{DialOpts, ListenerId, Transport, TransportError, TransportEvent}; + static HUB: Lazy = Lazy::new(|| Hub(Mutex::new(FnvHashMap::default()))); struct Hub(Mutex>); @@ -306,7 +315,8 @@ pub struct Listener { addr: Multiaddr, /// Receives incoming connections. receiver: ChannelReceiver, - /// Generate [`TransportEvent::NewAddress`] to inform about our listen address. + /// Generate [`TransportEvent::NewAddress`] to inform about our listen + /// address. 
tell_listen_addr: bool, } @@ -322,12 +332,14 @@ fn parse_memory_addr(a: &Multiaddr) -> Result { } } -/// A channel represents an established, in-memory, logical connection between two endpoints. +/// A channel represents an established, in-memory, logical connection between +/// two endpoints. /// /// Implements `AsyncRead` and `AsyncWrite`. pub type Channel = RwStreamSink>; -/// A channel represents an established, in-memory, logical connection between two endpoints. +/// A channel represents an established, in-memory, logical connection between +/// two endpoints. /// /// Implements `Sink` and `Stream`. pub struct Chan> { @@ -398,9 +410,8 @@ impl Drop for Chan { #[cfg(test)] mod tests { - use crate::{transport::PortUse, Endpoint}; - use super::*; + use crate::{transport::PortUse, Endpoint}; #[test] fn parse_memory_addr_works() { @@ -429,7 +440,8 @@ mod tests { ); assert_eq!( parse_memory_addr( - &"/memory/5/p2p/12D3KooWETLZBFBfkzvH3BQEtA1TJZPmjb4a18ss5TpwNU7DHDX6/p2p-circuit/p2p/12D3KooWLiQ7i8sY6LkPvHmEymncicEgzrdpXegbxEr3xgN8oxMU" + &"/memory/5/p2p/12D3KooWETLZBFBfkzvH3BQEtA1TJZPmjb4a18ss5TpwNU7DHDX6/p2p-circuit/\ + p2p/12D3KooWLiQ7i8sY6LkPvHmEymncicEgzrdpXegbxEr3xgN8oxMU" .parse() .unwrap() ), diff --git a/core/src/transport/optional.rs b/core/src/transport/optional.rs index f18bfa441b0..49968071762 100644 --- a/core/src/transport/optional.rs +++ b/core/src/transport/optional.rs @@ -18,16 +18,21 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::transport::{DialOpts, ListenerId, Transport, TransportError, TransportEvent}; +use std::{ + pin::Pin, + task::{Context, Poll}, +}; + use multiaddr::Multiaddr; -use std::{pin::Pin, task::Context, task::Poll}; + +use crate::transport::{DialOpts, ListenerId, Transport, TransportError, TransportEvent}; /// Transport that is possibly disabled. /// -/// An `OptionalTransport` is a wrapper around an `Option`. 
If it is disabled (read: contains -/// `None`), then any attempt to dial or listen will return `MultiaddrNotSupported`. If it is -/// enabled (read: contains `Some`), then dialing and listening will be handled by the inner -/// transport. +/// An `OptionalTransport` is a wrapper around an `Option`. If it is +/// disabled (read: contains `None`), then any attempt to dial or listen will +/// return `MultiaddrNotSupported`. If it is enabled (read: contains `Some`), +/// then dialing and listening will be handled by the inner transport. #[derive(Debug, Copy, Clone)] #[pin_project::pin_project] pub struct OptionalTransport(#[pin] Option); diff --git a/core/src/transport/timeout.rs b/core/src/transport/timeout.rs index 830ed099629..c59e9bb03ac 100644 --- a/core/src/transport/timeout.rs +++ b/core/src/transport/timeout.rs @@ -24,17 +24,26 @@ //! underlying `Transport`. // TODO: add example -use crate::transport::DialOpts; -use crate::{ - transport::{ListenerId, TransportError, TransportEvent}, - Multiaddr, Transport, +use std::{ + error, + fmt, + io, + pin::Pin, + task::{Context, Poll}, + time::Duration, }; + use futures::prelude::*; use futures_timer::Delay; -use std::{error, fmt, io, pin::Pin, task::Context, task::Poll, time::Duration}; -/// A `TransportTimeout` is a `Transport` that wraps another `Transport` and adds -/// timeouts to all inbound and outbound connection attempts. +use crate::{ + transport::{DialOpts, ListenerId, TransportError, TransportEvent}, + Multiaddr, + Transport, +}; + +/// A `TransportTimeout` is a `Transport` that wraps another `Transport` and +/// adds timeouts to all inbound and outbound connection attempts. /// /// **Note**: `listen_on` is never subject to a timeout, only the setup of each /// individual accepted connection. @@ -48,7 +57,8 @@ pub struct TransportTimeout { } impl TransportTimeout { - /// Wraps around a `Transport` to add timeouts to all the sockets created by it. 
+ /// Wraps around a `Transport` to add timeouts to all the sockets created by + /// it. pub fn new(trans: InnerTrans, timeout: Duration) -> Self { TransportTimeout { inner: trans, @@ -151,10 +161,11 @@ where type Output = Result>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - // It is debatable whether we should poll the inner future first or the timer first. - // For example, if you start dialing with a timeout of 10 seconds, then after 15 seconds - // the dialing succeeds on the wire, then after 20 seconds you poll, then depending on - // which gets polled first, the outcome will be success or failure. + // It is debatable whether we should poll the inner future first or the timer + // first. For example, if you start dialing with a timeout of 10 + // seconds, then after 15 seconds the dialing succeeds on the wire, then + // after 20 seconds you poll, then depending on which gets polled first, + // the outcome will be success or failure. let mut this = self.project(); diff --git a/core/src/transport/upgrade.rs b/core/src/transport/upgrade.rs index 66b9e7509af..0e623cb2594 100644 --- a/core/src/transport/upgrade.rs +++ b/core/src/transport/upgrade.rs @@ -20,32 +20,44 @@ //! Configuration of transport protocol upgrades. 
-pub use crate::upgrade::Version; +use std::{ + error::Error, + fmt, + pin::Pin, + task::{Context, Poll}, + time::Duration, +}; + +use futures::{prelude::*, ready}; +use libp2p_identity::PeerId; +use multiaddr::Multiaddr; -use crate::transport::DialOpts; +pub use crate::upgrade::Version; use crate::{ connection::ConnectedPoint, muxing::{StreamMuxer, StreamMuxerBox}, transport::{ - and_then::AndThen, boxed::boxed, timeout::TransportTimeout, ListenerId, Transport, - TransportError, TransportEvent, + and_then::AndThen, + boxed::boxed, + timeout::TransportTimeout, + DialOpts, + ListenerId, + Transport, + TransportError, + TransportEvent, }, upgrade::{ - self, apply_inbound, apply_outbound, InboundConnectionUpgrade, InboundUpgradeApply, - OutboundConnectionUpgrade, OutboundUpgradeApply, UpgradeError, + self, + apply_inbound, + apply_outbound, + InboundConnectionUpgrade, + InboundUpgradeApply, + OutboundConnectionUpgrade, + OutboundUpgradeApply, + UpgradeError, }, Negotiated, }; -use futures::{prelude::*, ready}; -use libp2p_identity::PeerId; -use multiaddr::Multiaddr; -use std::{ - error::Error, - fmt, - pin::Pin, - task::{Context, Poll}, - time::Duration, -}; /// A `Builder` facilitates upgrading of a [`Transport`] for use with /// a `Swarm`. @@ -59,8 +71,8 @@ use std::{ /// It thus enforces the following invariants on every transport /// obtained from [`multiplex`](Authenticated::multiplex): /// -/// 1. The transport must be [authenticated](Builder::authenticate) -/// and [multiplexed](Authenticated::multiplex). +/// 1. The transport must be [authenticated](Builder::authenticate) and +/// [multiplexed](Authenticated::multiplex). /// 2. Authentication must precede the negotiation of a multiplexer. /// 3. Applying a multiplexer is the last step in the upgrade process. /// 4. The [`Transport::Output`] conforms to the requirements of a `Swarm`, @@ -185,7 +197,8 @@ where } } -/// An transport with peer authentication, obtained from [`Builder::authenticate`]. 
+/// A transport with peer authentication, obtained from
+/// [`Builder::authenticate`].
 #[derive(Clone)]
 pub struct Authenticated(Builder);
 
@@ -222,8 +235,8 @@ where
     /// Upgrades the transport with a (sub)stream multiplexer.
     ///
     /// The supplied upgrade receives the I/O resource `C` and must
-    /// produce a [`StreamMuxer`] `M`. The transport must already be authenticated.
-    /// This ends the (regular) transport upgrade process.
+    /// produce a [`StreamMuxer`] `M`. The transport must already be
+    /// authenticated. This ends the (regular) transport upgrade process.
     ///
     /// ## Transitions
     ///
@@ -251,12 +264,13 @@ where
         }))
     }
 
-    /// Like [`Authenticated::multiplex`] but accepts a function which returns the upgrade.
+    /// Like [`Authenticated::multiplex`] but accepts a function which returns
+    /// the upgrade.
     ///
     /// The supplied function is applied to [`PeerId`] and [`ConnectedPoint`]
     /// and returns an upgrade which receives the I/O resource `C` and must
-    /// produce a [`StreamMuxer`] `M`. The transport must already be authenticated.
-    /// This ends the (regular) transport upgrade process.
+    /// produce a [`StreamMuxer`] `M`. The transport must already be
+    /// authenticated. This ends the (regular) transport upgrade process.
     ///
     /// ## Transitions
     ///
@@ -499,8 +513,8 @@ where
     type Output = Result<(PeerId, D), TransportUpgradeError>;
 
     fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll {
-        // We use a `this` variable because the compiler can't mutably borrow multiple times
-        // across a `Deref`.
+        // We use a `this` variable because the compiler can't mutably borrow multiple
+        // times across a `Deref`.
         let this = &mut *self;
 
         loop {
@@ -558,8 +572,8 @@ where
     type Output = Result<(PeerId, D), TransportUpgradeError>;
 
     fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll {
-        // We use a `this` variable because the compiler can't mutably borrow multiple times
-        // across a `Deref`.
+ // We use a `this` variable because the compiler can't mutably borrow multiple + // times across a `Deref`. let this = &mut *self; loop { diff --git a/core/src/upgrade.rs b/core/src/upgrade.rs index 7a1fd3724d0..7916f9f6cb8 100644 --- a/core/src/upgrade.rs +++ b/core/src/upgrade.rs @@ -18,44 +18,51 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -//! Contains everything related to upgrading a connection or a substream to use a protocol. +//! Contains everything related to upgrading a connection or a substream to use +//! a protocol. //! -//! After a connection with a remote has been successfully established or a substream successfully -//! opened, the next step is to *upgrade* this connection or substream to use a protocol. +//! After a connection with a remote has been successfully established or a +//! substream successfully opened, the next step is to *upgrade* this connection +//! or substream to use a protocol. //! -//! This is where the `UpgradeInfo`, `InboundUpgrade` and `OutboundUpgrade` traits come into play. -//! The `InboundUpgrade` and `OutboundUpgrade` traits are implemented on types that represent a -//! collection of one or more possible protocols for respectively an ingoing or outgoing -//! connection or substream. +//! This is where the `UpgradeInfo`, `InboundUpgrade` and `OutboundUpgrade` +//! traits come into play. The `InboundUpgrade` and `OutboundUpgrade` traits are +//! implemented on types that represent a collection of one or more possible +//! protocols for respectively an ingoing or outgoing connection or substream. //! -//! > **Note**: Multiple versions of the same protocol are treated as different protocols. -//! > For example, `/foo/1.0.0` and `/foo/1.1.0` are totally unrelated as far as -//! > upgrading is concerned. +//! > **Note**: Multiple versions of the same protocol are treated as different +//! > protocols. +//! 
> For example, `/foo/1.0.0` and `/foo/1.1.0` are totally unrelated as far as +//! > upgrading is concerned. //! //! # Upgrade process //! //! An upgrade is performed in two steps: //! -//! - A protocol negotiation step. The `UpgradeInfo::protocol_info` method is called to determine -//! which protocols are supported by the trait implementation. The `multistream-select` protocol -//! is used in order to agree on which protocol to use amongst the ones supported. +//! - A protocol negotiation step. The `UpgradeInfo::protocol_info` method is +//! called to determine which protocols are supported by the trait +//! implementation. The `multistream-select` protocol is used in order to +//! agree on which protocol to use amongst the ones supported. //! -//! - A handshake. After a successful negotiation, the `InboundUpgrade::upgrade_inbound` or -//! `OutboundUpgrade::upgrade_outbound` method is called. This method will return a `Future` that -//! performs a handshake. This handshake is considered mandatory, however in practice it is -//! possible for the trait implementation to return a dummy `Future` that doesn't perform any -//! action and immediately succeeds. +//! - A handshake. After a successful negotiation, the +//! `InboundUpgrade::upgrade_inbound` or `OutboundUpgrade::upgrade_outbound` +//! method is called. This method will return a `Future` that performs a +//! handshake. This handshake is considered mandatory, however in practice it +//! is possible for the trait implementation to return a dummy `Future` that +//! doesn't perform any action and immediately succeeds. //! -//! After an upgrade is successful, an object of type `InboundUpgrade::Output` or -//! `OutboundUpgrade::Output` is returned. The actual object depends on the implementation and -//! there is no constraint on the traits that it should implement, however it is expected that it -//! can be used by the user to control the behaviour of the protocol. +//! 
After an upgrade is successful, an object of type `InboundUpgrade::Output` +//! or `OutboundUpgrade::Output` is returned. The actual object depends on the +//! implementation and there is no constraint on the traits that it should +//! implement, however it is expected that it can be used by the user to control +//! the behaviour of the protocol. //! -//! > **Note**: You can use the `apply_inbound` or `apply_outbound` methods to try upgrade a +//! > **Note**: You can use the `apply_inbound` or `apply_outbound` methods to +//! > try upgrade a //! > connection or substream. However if you use the recommended `Swarm` or -//! > `ConnectionHandler` APIs, the upgrade is automatically handled for you and you don't +//! > `ConnectionHandler` APIs, the upgrade is automatically handled for you and +//! > you don't //! > need to use these methods. -//! mod apply; mod denied; @@ -66,89 +73,105 @@ mod ready; mod select; pub(crate) use apply::{ - apply, apply_inbound, apply_outbound, InboundUpgradeApply, OutboundUpgradeApply, + apply, + apply_inbound, + apply_outbound, + InboundUpgradeApply, + OutboundUpgradeApply, }; pub(crate) use error::UpgradeError; use futures::future::Future; +pub use multistream_select::{NegotiatedComplete, NegotiationError, ProtocolError, Version}; pub use self::{ - denied::DeniedUpgrade, pending::PendingUpgrade, ready::ReadyUpgrade, select::SelectUpgrade, + denied::DeniedUpgrade, + pending::PendingUpgrade, + ready::ReadyUpgrade, + select::SelectUpgrade, }; pub use crate::Negotiated; -pub use multistream_select::{NegotiatedComplete, NegotiationError, ProtocolError, Version}; -/// Common trait for upgrades that can be applied on inbound substreams, outbound substreams, -/// or both. +/// Common trait for upgrades that can be applied on inbound substreams, +/// outbound substreams, or both. pub trait UpgradeInfo { /// Opaque type representing a negotiable protocol. type Info: AsRef + Clone; /// Iterator returned by `protocol_info`. 
type InfoIter: IntoIterator; - /// Returns the list of protocols that are supported. Used during the negotiation process. + /// Returns the list of protocols that are supported. Used during the + /// negotiation process. fn protocol_info(&self) -> Self::InfoIter; } /// Possible upgrade on an inbound connection or substream. pub trait InboundUpgrade: UpgradeInfo { - /// Output after the upgrade has been successfully negotiated and the handshake performed. + /// Output after the upgrade has been successfully negotiated and the + /// handshake performed. type Output; /// Possible error during the handshake. type Error; /// Future that performs the handshake with the remote. type Future: Future>; - /// After we have determined that the remote supports one of the protocols we support, this - /// method is called to start the handshake. + /// After we have determined that the remote supports one of the protocols + /// we support, this method is called to start the handshake. /// - /// The `info` is the identifier of the protocol, as produced by `protocol_info`. + /// The `info` is the identifier of the protocol, as produced by + /// `protocol_info`. fn upgrade_inbound(self, socket: C, info: Self::Info) -> Self::Future; } /// Possible upgrade on an outbound connection or substream. pub trait OutboundUpgrade: UpgradeInfo { - /// Output after the upgrade has been successfully negotiated and the handshake performed. + /// Output after the upgrade has been successfully negotiated and the + /// handshake performed. type Output; /// Possible error during the handshake. type Error; /// Future that performs the handshake with the remote. type Future: Future>; - /// After we have determined that the remote supports one of the protocols we support, this - /// method is called to start the handshake. + /// After we have determined that the remote supports one of the protocols + /// we support, this method is called to start the handshake. 
/// - /// The `info` is the identifier of the protocol, as produced by `protocol_info`. + /// The `info` is the identifier of the protocol, as produced by + /// `protocol_info`. fn upgrade_outbound(self, socket: C, info: Self::Info) -> Self::Future; } /// Possible upgrade on an inbound connection pub trait InboundConnectionUpgrade: UpgradeInfo { - /// Output after the upgrade has been successfully negotiated and the handshake performed. + /// Output after the upgrade has been successfully negotiated and the + /// handshake performed. type Output; /// Possible error during the handshake. type Error; /// Future that performs the handshake with the remote. type Future: Future>; - /// After we have determined that the remote supports one of the protocols we support, this - /// method is called to start the handshake. + /// After we have determined that the remote supports one of the protocols + /// we support, this method is called to start the handshake. /// - /// The `info` is the identifier of the protocol, as produced by `protocol_info`. + /// The `info` is the identifier of the protocol, as produced by + /// `protocol_info`. fn upgrade_inbound(self, socket: T, info: Self::Info) -> Self::Future; } /// Possible upgrade on an outbound connection pub trait OutboundConnectionUpgrade: UpgradeInfo { - /// Output after the upgrade has been successfully negotiated and the handshake performed. + /// Output after the upgrade has been successfully negotiated and the + /// handshake performed. type Output; /// Possible error during the handshake. type Error; /// Future that performs the handshake with the remote. type Future: Future>; - /// After we have determined that the remote supports one of the protocols we support, this - /// method is called to start the handshake. + /// After we have determined that the remote supports one of the protocols + /// we support, this method is called to start the handshake. 
/// - /// The `info` is the identifier of the protocol, as produced by `protocol_info`. + /// The `info` is the identifier of the protocol, as produced by + /// `protocol_info`. fn upgrade_outbound(self, socket: T, info: Self::Info) -> Self::Future; } diff --git a/core/src/upgrade/apply.rs b/core/src/upgrade/apply.rs index f84aaaac9fa..315c0e74a1b 100644 --- a/core/src/upgrade/apply.rs +++ b/core/src/upgrade/apply.rs @@ -18,16 +18,25 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade, UpgradeError}; -use crate::{connection::ConnectedPoint, Negotiated}; +use std::{ + mem, + pin::Pin, + task::{Context, Poll}, +}; + use futures::{future::Either, prelude::*}; +pub(crate) use multistream_select::Version; use multistream_select::{DialerSelectFuture, ListenerSelectFuture}; -use std::{mem, pin::Pin, task::Context, task::Poll}; -pub(crate) use multistream_select::Version; +use crate::{ + connection::ConnectedPoint, + upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade, UpgradeError}, + Negotiated, +}; // TODO: Still needed? -/// Applies an upgrade to the inbound and outbound direction of a connection or substream. +/// Applies an upgrade to the inbound and outbound direction of a connection or +/// substream. pub(crate) fn apply( conn: C, up: U, diff --git a/core/src/upgrade/denied.rs b/core/src/upgrade/denied.rs index 568bbfb056d..57b90046363 100644 --- a/core/src/upgrade/denied.rs +++ b/core/src/upgrade/denied.rs @@ -18,13 +18,14 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}; +use std::{convert::Infallible, iter}; + use futures::future; -use std::convert::Infallible; -use std::iter; -/// Dummy implementation of `UpgradeInfo`/`InboundUpgrade`/`OutboundUpgrade` that doesn't support -/// any protocol. 
+use crate::upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}; + +/// Dummy implementation of `UpgradeInfo`/`InboundUpgrade`/`OutboundUpgrade` +/// that doesn't support any protocol. #[derive(Debug, Copy, Clone)] pub struct DeniedUpgrade; diff --git a/core/src/upgrade/either.rs b/core/src/upgrade/either.rs index db62f8d6558..9970dcb0b1d 100644 --- a/core/src/upgrade/either.rs +++ b/core/src/upgrade/either.rs @@ -18,13 +18,15 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. +use std::iter::Map; + +use either::Either; +use futures::future; + use crate::{ either::EitherFuture, upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}, }; -use either::Either; -use futures::future; -use std::iter::Map; impl UpgradeInfo for Either where diff --git a/core/src/upgrade/error.rs b/core/src/upgrade/error.rs index 3d349587c2c..f76bbb4ceeb 100644 --- a/core/src/upgrade/error.rs +++ b/core/src/upgrade/error.rs @@ -18,10 +18,12 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use multistream_select::NegotiationError; use std::fmt; -/// Error that can happen when upgrading a connection or substream to use a protocol. +use multistream_select::NegotiationError; + +/// Error that can happen when upgrading a connection or substream to use a +/// protocol. #[derive(Debug)] pub enum UpgradeError { /// Error during the negotiation process. diff --git a/core/src/upgrade/pending.rs b/core/src/upgrade/pending.rs index 5e3c65422f1..c4841d8d5d7 100644 --- a/core/src/upgrade/pending.rs +++ b/core/src/upgrade/pending.rs @@ -19,13 +19,14 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}; +use std::{convert::Infallible, iter}; + use futures::future; -use std::convert::Infallible; -use std::iter; -/// Implementation of [`UpgradeInfo`], [`InboundUpgrade`] and [`OutboundUpgrade`] that always -/// returns a pending upgrade. +use crate::upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}; + +/// Implementation of [`UpgradeInfo`], [`InboundUpgrade`] and +/// [`OutboundUpgrade`] that always returns a pending upgrade. #[derive(Debug, Copy, Clone)] pub struct PendingUpgrade

{ protocol_name: P, diff --git a/core/src/upgrade/ready.rs b/core/src/upgrade/ready.rs index 13270aa8b6d..f77b8c2ef96 100644 --- a/core/src/upgrade/ready.rs +++ b/core/src/upgrade/ready.rs @@ -19,12 +19,14 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}; +use std::{convert::Infallible, iter}; + use futures::future; -use std::convert::Infallible; -use std::iter; -/// Implementation of [`UpgradeInfo`], [`InboundUpgrade`] and [`OutboundUpgrade`] that directly yields the substream. +use crate::upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}; + +/// Implementation of [`UpgradeInfo`], [`InboundUpgrade`] and +/// [`OutboundUpgrade`] that directly yields the substream. #[derive(Debug, Copy, Clone)] pub struct ReadyUpgrade

{ protocol_name: P, diff --git a/core/src/upgrade/select.rs b/core/src/upgrade/select.rs index 037045a2f29..d3beea96b50 100644 --- a/core/src/upgrade/select.rs +++ b/core/src/upgrade/select.rs @@ -18,17 +18,24 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::either::EitherFuture; -use crate::upgrade::{ - InboundConnectionUpgrade, InboundUpgrade, OutboundConnectionUpgrade, OutboundUpgrade, - UpgradeInfo, -}; +use std::iter::{Chain, Map}; + use either::Either; use futures::future; -use std::iter::{Chain, Map}; -/// Upgrade that combines two upgrades into one. Supports all the protocols supported by either -/// sub-upgrade. +use crate::{ + either::EitherFuture, + upgrade::{ + InboundConnectionUpgrade, + InboundUpgrade, + OutboundConnectionUpgrade, + OutboundUpgrade, + UpgradeInfo, + }, +}; + +/// Upgrade that combines two upgrades into one. Supports all the protocols +/// supported by either sub-upgrade. /// /// The protocols supported by the first element have a higher priority. #[derive(Debug, Clone)] diff --git a/core/tests/transport_upgrade.rs b/core/tests/transport_upgrade.rs index d8bec6f2b59..b9733e38322 100644 --- a/core/tests/transport_upgrade.rs +++ b/core/tests/transport_upgrade.rs @@ -18,18 +18,19 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
+use std::{io, pin::Pin}; + use futures::prelude::*; -use libp2p_core::transport::{DialOpts, ListenerId, MemoryTransport, PortUse, Transport}; -use libp2p_core::upgrade::{ - self, InboundConnectionUpgrade, OutboundConnectionUpgrade, UpgradeInfo, +use libp2p_core::{ + transport::{DialOpts, ListenerId, MemoryTransport, PortUse, Transport}, + upgrade::{self, InboundConnectionUpgrade, OutboundConnectionUpgrade, UpgradeInfo}, + Endpoint, }; -use libp2p_core::Endpoint; use libp2p_identity as identity; use libp2p_mplex::MplexConfig; use libp2p_noise as noise; use multiaddr::{Multiaddr, Protocol}; use rand::random; -use std::{io, pin::Pin}; #[derive(Clone)] struct HelloUpgrade {} diff --git a/examples/autonat/src/bin/autonat_client.rs b/examples/autonat/src/bin/autonat_client.rs index def66c4823b..ed4e81c7e63 100644 --- a/examples/autonat/src/bin/autonat_client.rs +++ b/examples/autonat/src/bin/autonat_client.rs @@ -20,15 +20,21 @@ #![doc = include_str!("../../README.md")] +use std::{error::Error, net::Ipv4Addr, time::Duration}; + use clap::Parser; use futures::StreamExt; -use libp2p::core::multiaddr::Protocol; -use libp2p::core::Multiaddr; -use libp2p::swarm::{NetworkBehaviour, SwarmEvent}; -use libp2p::{autonat, identify, identity, noise, tcp, yamux, PeerId}; -use std::error::Error; -use std::net::Ipv4Addr; -use std::time::Duration; +use libp2p::{ + autonat, + core::{multiaddr::Protocol, Multiaddr}, + identify, + identity, + noise, + swarm::{NetworkBehaviour, SwarmEvent}, + tcp, + yamux, + PeerId, +}; use tracing_subscriber::EnvFilter; #[derive(Debug, Parser)] diff --git a/examples/autonat/src/bin/autonat_server.rs b/examples/autonat/src/bin/autonat_server.rs index 389cc0fa26f..23873fbba97 100644 --- a/examples/autonat/src/bin/autonat_server.rs +++ b/examples/autonat/src/bin/autonat_server.rs @@ -20,14 +20,20 @@ #![doc = include_str!("../../README.md")] +use std::{error::Error, net::Ipv4Addr, time::Duration}; + use clap::Parser; use futures::StreamExt; -use 
libp2p::core::{multiaddr::Protocol, Multiaddr}; -use libp2p::swarm::{NetworkBehaviour, SwarmEvent}; -use libp2p::{autonat, identify, identity, noise, tcp, yamux}; -use std::error::Error; -use std::net::Ipv4Addr; -use std::time::Duration; +use libp2p::{ + autonat, + core::{multiaddr::Protocol, Multiaddr}, + identify, + identity, + noise, + swarm::{NetworkBehaviour, SwarmEvent}, + tcp, + yamux, +}; use tracing_subscriber::EnvFilter; #[derive(Debug, Parser)] diff --git a/examples/autonatv2/src/bin/autonatv2_client.rs b/examples/autonatv2/src/bin/autonatv2_client.rs index de902514dd8..e901074a2e7 100644 --- a/examples/autonatv2/src/bin/autonatv2_client.rs +++ b/examples/autonatv2/src/bin/autonatv2_client.rs @@ -4,11 +4,15 @@ use clap::Parser; use libp2p::{ autonat, futures::StreamExt, - identify, identity, + identify, + identity, multiaddr::Protocol, noise, swarm::{dial_opts::DialOpts, NetworkBehaviour, SwarmEvent}, - tcp, yamux, Multiaddr, SwarmBuilder, + tcp, + yamux, + Multiaddr, + SwarmBuilder, }; use rand::rngs::OsRng; use tracing_subscriber::EnvFilter; @@ -73,7 +77,10 @@ async fn main() -> Result<(), Box> { bytes_sent, result: Ok(()), })) => { - println!("Tested {tested_addr} with {server}. Sent {bytes_sent} bytes for verification. Everything Ok and verified."); + println!( + "Tested {tested_addr} with {server}. Sent {bytes_sent} bytes for \ + verification. Everything Ok and verified." + ); } SwarmEvent::Behaviour(BehaviourEvent::Autonat(autonat::v2::client::Event { server, @@ -81,7 +88,10 @@ async fn main() -> Result<(), Box> { bytes_sent, result: Err(e), })) => { - println!("Tested {tested_addr} with {server}. Sent {bytes_sent} bytes for verification. Failed with {e:?}."); + println!( + "Tested {tested_addr} with {server}. Sent {bytes_sent} bytes for \ + verification. Failed with {e:?}." 
+ ); } SwarmEvent::ExternalAddrConfirmed { address } => { println!("External address confirmed: {address}"); diff --git a/examples/autonatv2/src/bin/autonatv2_server.rs b/examples/autonatv2/src/bin/autonatv2_server.rs index 849ed3b3b0a..555be51ea43 100644 --- a/examples/autonatv2/src/bin/autonatv2_server.rs +++ b/examples/autonatv2/src/bin/autonatv2_server.rs @@ -5,11 +5,15 @@ use clap::Parser; use libp2p::{ autonat, futures::StreamExt, - identify, identity, + identify, + identity, multiaddr::Protocol, noise, swarm::{NetworkBehaviour, SwarmEvent}, - tcp, yamux, Multiaddr, SwarmBuilder, + tcp, + yamux, + Multiaddr, + SwarmBuilder, }; use rand::rngs::OsRng; diff --git a/examples/browser-webrtc/src/lib.rs b/examples/browser-webrtc/src/lib.rs index 9499ccbd158..e2d884cb445 100644 --- a/examples/browser-webrtc/src/lib.rs +++ b/examples/browser-webrtc/src/lib.rs @@ -1,13 +1,11 @@ #![cfg(target_arch = "wasm32")] +use std::{io, time::Duration}; + use futures::StreamExt; use js_sys::Date; -use libp2p::core::Multiaddr; -use libp2p::ping; -use libp2p::swarm::SwarmEvent; +use libp2p::{core::Multiaddr, ping, swarm::SwarmEvent}; use libp2p_webrtc_websys as webrtc_websys; -use std::io; -use std::time::Duration; use wasm_bindgen::prelude::*; use web_sys::{Document, HtmlElement}; diff --git a/examples/browser-webrtc/src/main.rs b/examples/browser-webrtc/src/main.rs index 7f06b0d0d99..ec6be0c066d 100644 --- a/examples/browser-webrtc/src/main.rs +++ b/examples/browser-webrtc/src/main.rs @@ -1,23 +1,27 @@ #![allow(non_upper_case_globals)] +use std::{ + net::{Ipv4Addr, SocketAddr}, + time::Duration, +}; + use anyhow::Result; -use axum::extract::{Path, State}; -use axum::http::header::CONTENT_TYPE; -use axum::http::StatusCode; -use axum::response::{Html, IntoResponse}; -use axum::{http::Method, routing::get, Router}; +use axum::{ + extract::{Path, State}, + http::{header::CONTENT_TYPE, Method, StatusCode}, + response::{Html, IntoResponse}, + routing::get, + Router, +}; use 
futures::StreamExt; use libp2p::{ - core::muxing::StreamMuxerBox, - core::Transport, + core::{muxing::StreamMuxerBox, Transport}, multiaddr::{Multiaddr, Protocol}, ping, swarm::SwarmEvent, }; use libp2p_webrtc as webrtc; use rand::thread_rng; -use std::net::{Ipv4Addr, SocketAddr}; -use std::time::Duration; use tokio::net::TcpListener; use tower_http::cors::{Any, CorsLayer}; @@ -127,7 +131,8 @@ struct Libp2pEndpoint(Multiaddr); /// Serves the index.html file for our client. /// /// Our server listens on a random UDP port for the WebRTC transport. -/// To allow the client to connect, we replace the `__LIBP2P_ENDPOINT__` placeholder with the actual address. +/// To allow the client to connect, we replace the `__LIBP2P_ENDPOINT__` +/// placeholder with the actual address. async fn get_index( State(Libp2pEndpoint(libp2p_endpoint)): State, ) -> Result, StatusCode> { diff --git a/examples/chat/src/main.rs b/examples/chat/src/main.rs index c785d301c2f..83cf649dd5f 100644 --- a/examples/chat/src/main.rs +++ b/examples/chat/src/main.rs @@ -20,12 +20,22 @@ #![doc = include_str!("../README.md")] +use std::{ + collections::hash_map::DefaultHasher, + error::Error, + hash::{Hash, Hasher}, + time::Duration, +}; + use futures::stream::StreamExt; -use libp2p::{gossipsub, mdns, noise, swarm::NetworkBehaviour, swarm::SwarmEvent, tcp, yamux}; -use std::collections::hash_map::DefaultHasher; -use std::error::Error; -use std::hash::{Hash, Hasher}; -use std::time::Duration; +use libp2p::{ + gossipsub, + mdns, + noise, + swarm::{NetworkBehaviour, SwarmEvent}, + tcp, + yamux, +}; use tokio::{io, io::AsyncBufReadExt, select}; use tracing_subscriber::EnvFilter; @@ -51,7 +61,8 @@ async fn main() -> Result<(), Box> { )? .with_quic() .with_behaviour(|key| { - // To content-address message, we can take the hash of message and use it as an ID. + // To content-address message, we can take the hash of message and use it as an + // ID. 
let message_id_fn = |message: &gossipsub::Message| { let mut s = DefaultHasher::new(); message.data.hash(&mut s); @@ -61,7 +72,8 @@ async fn main() -> Result<(), Box> { // Set a custom gossipsub configuration let gossipsub_config = gossipsub::ConfigBuilder::default() .heartbeat_interval(Duration::from_secs(10)) // This is set to aid debugging by not cluttering the log space - .validation_mode(gossipsub::ValidationMode::Strict) // This sets the kind of message validation. The default is Strict (enforce message signing) + .validation_mode(gossipsub::ValidationMode::Strict) // This sets the kind of message validation. The default is Strict (enforce message + // signing) .message_id_fn(message_id_fn) // content-address messages. No two messages of the same content will be propagated. .build() .map_err(|msg| io::Error::new(io::ErrorKind::Other, msg))?; // Temporary hack because `build` does not return a proper `std::error::Error`. diff --git a/examples/dcutr/src/main.rs b/examples/dcutr/src/main.rs index 630d4b2b1f3..c3990b05a2f 100644 --- a/examples/dcutr/src/main.rs +++ b/examples/dcutr/src/main.rs @@ -20,16 +20,23 @@ #![doc = include_str!("../README.md")] +use std::{error::Error, str::FromStr, time::Duration}; + use clap::Parser; use futures::{executor::block_on, future::FutureExt, stream::StreamExt}; use libp2p::{ core::multiaddr::{Multiaddr, Protocol}, - dcutr, identify, identity, noise, ping, relay, + dcutr, + identify, + identity, + noise, + ping, + relay, swarm::{NetworkBehaviour, SwarmEvent}, - tcp, yamux, PeerId, + tcp, + yamux, + PeerId, }; -use std::str::FromStr; -use std::{error::Error, time::Duration}; use tracing_subscriber::EnvFilter; #[derive(Debug, Parser)] @@ -136,8 +143,9 @@ async fn main() -> Result<(), Box> { } }); - // Connect to the relay server. Not for the reservation or relayed connection, but to (a) learn - // our local public address and (b) enable a freshly started relay to learn its public address. + // Connect to the relay server. 
Not for the reservation or relayed connection, + // but to (a) learn our local public address and (b) enable a freshly + // started relay to learn its public address. swarm.dial(opts.relay_address.clone()).unwrap(); block_on(async { let mut learned_observed_addr = false; diff --git a/examples/distributed-key-value-store/src/main.rs b/examples/distributed-key-value-store/src/main.rs index 6b7947b7eb3..10934601c93 100644 --- a/examples/distributed-key-value-store/src/main.rs +++ b/examples/distributed-key-value-store/src/main.rs @@ -20,17 +20,18 @@ #![doc = include_str!("../README.md")] +use std::{error::Error, time::Duration}; + use futures::stream::StreamExt; -use libp2p::kad; -use libp2p::kad::store::MemoryStore; -use libp2p::kad::Mode; use libp2p::{ - mdns, noise, + kad, + kad::{store::MemoryStore, Mode}, + mdns, + noise, swarm::{NetworkBehaviour, SwarmEvent}, - tcp, yamux, + tcp, + yamux, }; -use std::error::Error; -use std::time::Duration; use tokio::{ io::{self, AsyncBufReadExt}, select, diff --git a/examples/file-sharing/src/main.rs b/examples/file-sharing/src/main.rs index 5f6be83dc11..1e3b80a330c 100644 --- a/examples/file-sharing/src/main.rs +++ b/examples/file-sharing/src/main.rs @@ -22,15 +22,12 @@ mod network; -use clap::Parser; -use tokio::task::spawn; +use std::{error::Error, io::Write, path::PathBuf}; -use futures::prelude::*; -use futures::StreamExt; +use clap::Parser; +use futures::{prelude::*, StreamExt}; use libp2p::{core::Multiaddr, multiaddr::Protocol}; -use std::error::Error; -use std::io::Write; -use std::path::PathBuf; +use tokio::task::spawn; use tracing_subscriber::EnvFilter; #[tokio::main] diff --git a/examples/file-sharing/src/network.rs b/examples/file-sharing/src/network.rs index a74afd1c0da..103e1cbe9e2 100644 --- a/examples/file-sharing/src/network.rs +++ b/examples/file-sharing/src/network.rs @@ -1,27 +1,33 @@ -use futures::channel::{mpsc, oneshot}; -use futures::prelude::*; -use futures::StreamExt; +use std::{ + 
collections::{hash_map, HashMap, HashSet}, + error::Error, + time::Duration, +}; +use futures::{ + channel::{mpsc, oneshot}, + prelude::*, + StreamExt, +}; use libp2p::{ core::Multiaddr, - identity, kad, + identity, + kad, multiaddr::Protocol, noise, request_response::{self, OutboundRequestId, ProtocolSupport, ResponseChannel}, swarm::{NetworkBehaviour, Swarm, SwarmEvent}, - tcp, yamux, PeerId, + tcp, + yamux, + PeerId, + StreamProtocol, }; - -use libp2p::StreamProtocol; use serde::{Deserialize, Serialize}; -use std::collections::{hash_map, HashMap, HashSet}; -use std::error::Error; -use std::time::Duration; /// Creates the network components, namely: /// -/// - The network client to interact with the network layer from anywhere -/// within your application. +/// - The network client to interact with the network layer from anywhere within +/// your application. /// /// - The network event stream, e.g. for incoming requests. /// diff --git a/examples/identify/src/main.rs b/examples/identify/src/main.rs index 22474061da6..55d093c0399 100644 --- a/examples/identify/src/main.rs +++ b/examples/identify/src/main.rs @@ -20,9 +20,10 @@ #![doc = include_str!("../README.md")] +use std::{error::Error, time::Duration}; + use futures::StreamExt; use libp2p::{core::multiaddr::Multiaddr, identify, noise, swarm::SwarmEvent, tcp, yamux}; -use std::{error::Error, time::Duration}; use tracing_subscriber::EnvFilter; #[tokio::main] diff --git a/examples/ipfs-kad/src/main.rs b/examples/ipfs-kad/src/main.rs index 95921d6fa35..9acec089a56 100644 --- a/examples/ipfs-kad/src/main.rs +++ b/examples/ipfs-kad/src/main.rs @@ -20,15 +20,25 @@ #![doc = include_str!("../README.md")] -use std::num::NonZeroUsize; -use std::ops::Add; -use std::time::{Duration, Instant}; +use std::{ + num::NonZeroUsize, + ops::Add, + time::{Duration, Instant}, +}; use anyhow::{bail, Result}; use clap::Parser; use futures::StreamExt; -use libp2p::swarm::{StreamProtocol, SwarmEvent}; -use libp2p::{bytes::BufMut, 
identity, kad, noise, tcp, yamux, PeerId}; +use libp2p::{ + bytes::BufMut, + identity, + kad, + noise, + swarm::{StreamProtocol, SwarmEvent}, + tcp, + yamux, + PeerId, +}; use tracing_subscriber::EnvFilter; const BOOTNODES: [&str; 4] = [ diff --git a/examples/ipfs-private/src/main.rs b/examples/ipfs-private/src/main.rs index a57bfd465e0..24727ec80bf 100644 --- a/examples/ipfs-private/src/main.rs +++ b/examples/ipfs-private/src/main.rs @@ -20,23 +20,29 @@ #![doc = include_str!("../README.md")] +use std::{env, error::Error, fs, path::Path, str::FromStr, time::Duration}; + use either::Either; use futures::prelude::*; use libp2p::{ core::transport::upgrade::Version, - gossipsub, identify, + gossipsub, + identify, multiaddr::Protocol, - noise, ping, + noise, + ping, pnet::{PnetConfig, PreSharedKey}, swarm::{NetworkBehaviour, SwarmEvent}, - tcp, yamux, Multiaddr, Transport, + tcp, + yamux, + Multiaddr, + Transport, }; -use std::{env, error::Error, fs, path::Path, str::FromStr, time::Duration}; use tokio::{io, io::AsyncBufReadExt, select}; use tracing_subscriber::EnvFilter; -/// Get the current ipfs repo path, either from the IPFS_PATH environment variable or -/// from the default $HOME/.ipfs +/// Get the current ipfs repo path, either from the IPFS_PATH environment +/// variable or from the default $HOME/.ipfs fn get_ipfs_path() -> Box { env::var("IPFS_PATH") .map(|ipfs_path| Path::new(&ipfs_path).into()) @@ -58,8 +64,9 @@ fn get_psk(path: &Path) -> std::io::Result> { } } -/// for a multiaddr that ends with a peer id, this strips this suffix. Rust-libp2p -/// only supports dialing to an address without providing the peer id. +/// for a multiaddr that ends with a peer id, this strips this suffix. +/// Rust-libp2p only supports dialing to an address without providing the peer +/// id. 
fn strip_peer_id(addr: &mut Multiaddr) { let last = addr.pop(); match last { @@ -105,7 +112,8 @@ async fn main() -> Result<(), Box> { // Create a Gosspipsub topic let gossipsub_topic = gossipsub::IdentTopic::new("chat"); - // We create a custom network behaviour that combines gossipsub, ping and identify. + // We create a custom network behaviour that combines gossipsub, ping and + // identify. #[derive(NetworkBehaviour)] struct MyBehaviour { gossipsub: gossipsub::Behaviour, diff --git a/examples/metrics/src/http_service.rs b/examples/metrics/src/http_service.rs index 4a9c9785bb3..f1485832d86 100644 --- a/examples/metrics/src/http_service.rs +++ b/examples/metrics/src/http_service.rs @@ -18,15 +18,13 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use axum::extract::State; -use axum::http::StatusCode; -use axum::response::IntoResponse; -use axum::routing::get; -use axum::Router; -use prometheus_client::encoding::text::encode; -use prometheus_client::registry::Registry; -use std::net::SocketAddr; -use std::sync::{Arc, Mutex}; +use std::{ + net::SocketAddr, + sync::{Arc, Mutex}, +}; + +use axum::{extract::State, http::StatusCode, response::IntoResponse, routing::get, Router}; +use prometheus_client::{encoding::text::encode, registry::Registry}; use tokio::net::TcpListener; const METRICS_CONTENT_TYPE: &str = "application/openmetrics-text;charset=utf-8;version=1.0.0"; diff --git a/examples/metrics/src/main.rs b/examples/metrics/src/main.rs index 1755c769053..bf18654640d 100644 --- a/examples/metrics/src/main.rs +++ b/examples/metrics/src/main.rs @@ -20,18 +20,23 @@ #![doc = include_str!("../README.md")] +use std::{error::Error, time::Duration}; + use futures::StreamExt; -use libp2p::core::Multiaddr; -use libp2p::metrics::{Metrics, Recorder}; -use libp2p::swarm::{NetworkBehaviour, SwarmEvent}; -use libp2p::{identify, identity, noise, ping, tcp, yamux}; +use libp2p::{ + core::Multiaddr, + identify, + identity, + 
metrics::{Metrics, Recorder}, + noise, + ping, + swarm::{NetworkBehaviour, SwarmEvent}, + tcp, + yamux, +}; use opentelemetry::{trace::TracerProvider, KeyValue}; use prometheus_client::registry::Registry; -use std::error::Error; -use std::time::Duration; -use tracing_subscriber::layer::SubscriberExt; -use tracing_subscriber::util::SubscriberInitExt; -use tracing_subscriber::{EnvFilter, Layer}; +use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt, EnvFilter, Layer}; mod http_service; diff --git a/examples/ping/src/main.rs b/examples/ping/src/main.rs index 911b0384f89..565ef057c0d 100644 --- a/examples/ping/src/main.rs +++ b/examples/ping/src/main.rs @@ -20,9 +20,10 @@ #![doc = include_str!("../README.md")] +use std::{error::Error, time::Duration}; + use futures::prelude::*; use libp2p::{noise, ping, swarm::SwarmEvent, tcp, yamux, Multiaddr}; -use std::{error::Error, time::Duration}; use tracing_subscriber::EnvFilter; #[tokio::main] diff --git a/examples/relay-server/src/main.rs b/examples/relay-server/src/main.rs index 46a122d0717..6385c58abfc 100644 --- a/examples/relay-server/src/main.rs +++ b/examples/relay-server/src/main.rs @@ -21,17 +21,24 @@ #![doc = include_str!("../README.md")] +use std::{ + error::Error, + net::{Ipv4Addr, Ipv6Addr}, +}; + use clap::Parser; use futures::StreamExt; use libp2p::{ - core::multiaddr::Protocol, - core::Multiaddr, - identify, identity, noise, ping, relay, + core::{multiaddr::Protocol, Multiaddr}, + identify, + identity, + noise, + ping, + relay, swarm::{NetworkBehaviour, SwarmEvent}, - tcp, yamux, + tcp, + yamux, }; -use std::error::Error; -use std::net::{Ipv4Addr, Ipv6Addr}; use tracing_subscriber::EnvFilter; #[tokio::main] @@ -119,7 +126,8 @@ fn generate_ed25519(secret_key_seed: u8) -> identity::Keypair { #[derive(Debug, Parser)] #[clap(name = "libp2p relay")] struct Opt { - /// Determine if the relay listen on ipv6 or ipv4 loopback address. 
the default is ipv4 + /// Determine if the relay listen on ipv6 or ipv4 loopback address. the + /// default is ipv4 #[clap(long)] use_ipv6: Option, diff --git a/examples/rendezvous/src/bin/rzv-discover.rs b/examples/rendezvous/src/bin/rzv-discover.rs index edd3d10a0ce..170d901652c 100644 --- a/examples/rendezvous/src/bin/rzv-discover.rs +++ b/examples/rendezvous/src/bin/rzv-discover.rs @@ -18,15 +18,19 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. +use std::{error::Error, time::Duration}; + use futures::StreamExt; use libp2p::{ multiaddr::Protocol, - noise, ping, rendezvous, + noise, + ping, + rendezvous, swarm::{NetworkBehaviour, SwarmEvent}, - tcp, yamux, Multiaddr, + tcp, + yamux, + Multiaddr, }; -use std::error::Error; -use std::time::Duration; use tracing_subscriber::EnvFilter; const NAMESPACE: &str = "rendezvous"; diff --git a/examples/rendezvous/src/bin/rzv-identify.rs b/examples/rendezvous/src/bin/rzv-identify.rs index ff637aa6f49..1d0ffac3616 100644 --- a/examples/rendezvous/src/bin/rzv-identify.rs +++ b/examples/rendezvous/src/bin/rzv-identify.rs @@ -18,13 +18,19 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. +use std::time::Duration; + use futures::StreamExt; use libp2p::{ - identify, noise, ping, rendezvous, + identify, + noise, + ping, + rendezvous, swarm::{NetworkBehaviour, SwarmEvent}, - tcp, yamux, Multiaddr, + tcp, + yamux, + Multiaddr, }; -use std::time::Duration; use tracing_subscriber::EnvFilter; #[tokio::main] diff --git a/examples/rendezvous/src/bin/rzv-register.rs b/examples/rendezvous/src/bin/rzv-register.rs index bd848238d4a..4eaa1a23ccb 100644 --- a/examples/rendezvous/src/bin/rzv-register.rs +++ b/examples/rendezvous/src/bin/rzv-register.rs @@ -18,13 +18,18 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
+use std::time::Duration; + use futures::StreamExt; use libp2p::{ - noise, ping, rendezvous, + noise, + ping, + rendezvous, swarm::{NetworkBehaviour, SwarmEvent}, - tcp, yamux, Multiaddr, + tcp, + yamux, + Multiaddr, }; -use std::time::Duration; use tracing_subscriber::EnvFilter; #[tokio::main] @@ -54,8 +59,9 @@ async fn main() { .with_swarm_config(|cfg| cfg.with_idle_connection_timeout(Duration::from_secs(5))) .build(); - // In production the external address should be the publicly facing IP address of the rendezvous point. - // This address is recorded in the registration entry by the rendezvous point. + // In production the external address should be the publicly facing IP address + // of the rendezvous point. This address is recorded in the registration + // entry by the rendezvous point. let external_address = "/ip4/127.0.0.1/tcp/0".parse::().unwrap(); swarm.add_external_address(external_address); diff --git a/examples/rendezvous/src/main.rs b/examples/rendezvous/src/main.rs index a15bc1ca2d3..6f6bbe0654b 100644 --- a/examples/rendezvous/src/main.rs +++ b/examples/rendezvous/src/main.rs @@ -20,14 +20,18 @@ #![doc = include_str!("../README.md")] +use std::{error::Error, time::Duration}; + use futures::StreamExt; use libp2p::{ - identify, noise, ping, rendezvous, + identify, + noise, + ping, + rendezvous, swarm::{NetworkBehaviour, SwarmEvent}, - tcp, yamux, + tcp, + yamux, }; -use std::error::Error; -use std::time::Duration; use tracing_subscriber::EnvFilter; #[tokio::main] @@ -36,8 +40,8 @@ async fn main() -> Result<(), Box> { .with_env_filter(EnvFilter::from_default_env()) .try_init(); - // Results in PeerID 12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN which is - // used as the rendezvous point by the other peer examples. + // Results in PeerID 12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN which + // is used as the rendezvous point by the other peer examples. 
let keypair = libp2p::identity::Keypair::ed25519_from_bytes([0; 32]).unwrap(); let mut swarm = libp2p::SwarmBuilder::with_existing_identity(keypair) diff --git a/examples/stream/src/main.rs b/examples/stream/src/main.rs index 872ab8c3b98..924a3fe8728 100644 --- a/examples/stream/src/main.rs +++ b/examples/stream/src/main.rs @@ -43,13 +43,15 @@ async fn main() -> Result<()> { // Deal with incoming streams. // Spawning a dedicated task is just one way of doing this. - // libp2p doesn't care how you handle incoming streams but you _must_ handle them somehow. - // To mitigate DoS attacks, libp2p will internally drop incoming streams if your application cannot keep up processing them. + // libp2p doesn't care how you handle incoming streams but you _must_ handle + // them somehow. To mitigate DoS attacks, libp2p will internally drop + // incoming streams if your application cannot keep up processing them. tokio::spawn(async move { - // This loop handles incoming streams _sequentially_ but that doesn't have to be the case. - // You can also spawn a dedicated task per stream if you want to. - // Be aware that this breaks backpressure though as spawning new tasks is equivalent to an unbounded buffer. - // Each task needs memory meaning an aggressive remote peer may force you OOM this way. + // This loop handles incoming streams _sequentially_ but that doesn't have to be + // the case. You can also spawn a dedicated task per stream if you want + // to. Be aware that this breaks backpressure though as spawning new + // tasks is equivalent to an unbounded buffer. Each task needs memory + // meaning an aggressive remote peer may force you OOM this way. while let Some((peer, stream)) = incoming_streams.next().await { match echo(stream).await { @@ -89,7 +91,8 @@ async fn main() -> Result<()> { } } -/// A very simple, `async fn`-based connection handler for our custom echo protocol. +/// A very simple, `async fn`-based connection handler for our custom echo +/// protocol. 
async fn connection_handler(peer: PeerId, mut control: stream::Control) { loop { tokio::time::sleep(Duration::from_secs(1)).await; // Wait a second between echos. @@ -102,7 +105,8 @@ async fn connection_handler(peer: PeerId, mut control: stream::Control) { } Err(error) => { // Other errors may be temporary. - // In production, something like an exponential backoff / circuit-breaker may be more appropriate. + // In production, something like an exponential backoff / circuit-breaker may be + // more appropriate. tracing::debug!(%peer, %error); continue; } diff --git a/examples/upnp/src/main.rs b/examples/upnp/src/main.rs index fd0764990d1..73f1a9460e4 100644 --- a/examples/upnp/src/main.rs +++ b/examples/upnp/src/main.rs @@ -20,9 +20,10 @@ #![doc = include_str!("../README.md")] +use std::error::Error; + use futures::prelude::*; use libp2p::{noise, swarm::SwarmEvent, upnp, yamux, Multiaddr}; -use std::error::Error; use tracing_subscriber::EnvFilter; #[tokio::main] @@ -64,7 +65,10 @@ async fn main() -> Result<(), Box> { break; } SwarmEvent::Behaviour(upnp::Event::NonRoutableGateway) => { - println!("Gateway is not exposed directly to the public Internet, i.e. it itself has a private IP address."); + println!( + "Gateway is not exposed directly to the public Internet, i.e. it itself has a \ + private IP address." + ); break; } _ => {} diff --git a/hole-punching-tests/src/main.rs b/hole-punching-tests/src/main.rs index 02229e16262..08478a3d98c 100644 --- a/hole-punching-tests/src/main.rs +++ b/hole-punching-tests/src/main.rs @@ -18,24 +18,34 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
+use std::{ + collections::HashMap, + fmt, + io, + net::{IpAddr, Ipv4Addr}, + str::FromStr, + time::Duration, +}; + use anyhow::{Context, Result}; use either::Either; use futures::stream::StreamExt; -use libp2p::core::transport::ListenerId; -use libp2p::swarm::dial_opts::DialOpts; -use libp2p::swarm::ConnectionId; use libp2p::{ - core::multiaddr::{Multiaddr, Protocol}, - dcutr, identify, noise, ping, relay, - swarm::{NetworkBehaviour, SwarmEvent}, - tcp, yamux, Swarm, + core::{ + multiaddr::{Multiaddr, Protocol}, + transport::ListenerId, + }, + dcutr, + identify, + noise, + ping, + relay, + swarm::{dial_opts::DialOpts, ConnectionId, NetworkBehaviour, SwarmEvent}, + tcp, + yamux, + Swarm, }; use redis::AsyncCommands; -use std::collections::HashMap; -use std::net::{IpAddr, Ipv4Addr}; -use std::str::FromStr; -use std::time::Duration; -use std::{fmt, io}; /// The redis key we push the relay's TCP listen address to. const RELAY_TCP_ADDRESS: &str = "RELAY_TCP_ADDRESS"; @@ -47,7 +57,10 @@ const LISTEN_CLIENT_PEER_ID: &str = "LISTEN_CLIENT_PEER_ID"; #[tokio::main] async fn main() -> Result<()> { env_logger::builder() - .parse_filters("debug,netlink_proto=warn,rustls=warn,multistream_select=warn,libp2p_core::transport::choice=off,libp2p_swarm::connection=warn,libp2p_quic=trace") + .parse_filters( + "debug,netlink_proto=warn,rustls=warn,multistream_select=warn,\ + libp2p_core::transport::choice=off,libp2p_swarm::connection=warn,libp2p_quic=trace", + ) .parse_default_env() .init(); @@ -214,7 +227,8 @@ async fn client_listen_on_transport( let mut listen_addresses = 0; - // We should have at least two listen addresses, one for localhost and the actual interface. + // We should have at least two listen addresses, one for localhost and the + // actual interface. 
while listen_addresses < 2 { if let SwarmEvent::NewListenAddr { listener_id, diff --git a/identity/CHANGELOG.md b/identity/CHANGELOG.md index 98f3e5c5636..8ee12c8124a 100644 --- a/identity/CHANGELOG.md +++ b/identity/CHANGELOG.md @@ -10,7 +10,7 @@ ## 0.2.8 -- Bump `ring` to `0.17.5`. +- Bump `ring` to `0.17.5`. See [PR 4779](https://github.com/libp2p/rust-libp2p/pull/4779). ## 0.2.7 diff --git a/identity/src/ecdsa.rs b/identity/src/ecdsa.rs index 922675097df..5ba170ec7fe 100644 --- a/identity/src/ecdsa.rs +++ b/identity/src/ecdsa.rs @@ -20,21 +20,23 @@ //! ECDSA keys with secp256r1 curve support. -use super::error::DecodingError; -use core::cmp; -use core::fmt; -use core::hash; +use core::{cmp, fmt, hash}; +use std::convert::Infallible; + use p256::{ ecdsa::{ signature::{Signer, Verifier}, - Signature, SigningKey, VerifyingKey, + Signature, + SigningKey, + VerifyingKey, }, EncodedPoint, }; use sec1::{DecodeEcPrivateKey, EncodeEcPrivateKey}; -use std::convert::Infallible; use zeroize::Zeroize; +use super::error::DecodingError; + /// An ECDSA keypair generated using `secp256r1` curve. #[derive(Clone)] pub struct Keypair { @@ -99,19 +101,22 @@ impl SecretKey { SecretKey(SigningKey::random(&mut rand::thread_rng())) } - /// Sign a message with this secret key, producing a DER-encoded ECDSA signature. + /// Sign a message with this secret key, producing a DER-encoded ECDSA + /// signature. pub fn sign(&self, msg: &[u8]) -> Vec { let signature: p256::ecdsa::DerSignature = self.0.sign(msg); signature.as_bytes().to_owned() } - /// Convert a secret key into a byte buffer containing raw scalar of the key. + /// Convert a secret key into a byte buffer containing raw scalar of the + /// key. pub fn to_bytes(&self) -> Vec { self.0.to_bytes().to_vec() } - /// Try to parse a secret key from a byte buffer containing raw scalar of the key. + /// Try to parse a secret key from a byte buffer containing raw scalar of + /// the key. 
pub fn try_from_bytes(buf: impl AsRef<[u8]>) -> Result { SigningKey::from_bytes(buf.as_ref().into()) .map_err(|err| DecodingError::failed_to_parse("ecdsa p256 secret key", err)) @@ -127,7 +132,8 @@ impl SecretKey { .to_vec() } - /// Try to decode a secret key from a DER-encoded byte buffer, zeroize the buffer on success. + /// Try to decode a secret key from a DER-encoded byte buffer, zeroize the + /// buffer on success. pub(crate) fn try_decode_der(buf: &mut [u8]) -> Result { match SigningKey::from_sec1_der(buf) { Ok(key) => { @@ -158,7 +164,8 @@ impl PublicKey { self.0.verify(msg, &sig).is_ok() } - /// Try to parse a public key from a byte buffer containing raw components of a key with or without compression. + /// Try to parse a public key from a byte buffer containing raw components + /// of a key with or without compression. pub fn try_from_bytes(k: &[u8]) -> Result { let enc_pt = EncodedPoint::from_bytes(k) .map_err(|e| DecodingError::failed_to_parse("ecdsa p256 encoded point", e))?; @@ -168,18 +175,21 @@ impl PublicKey { .map(PublicKey) } - /// Convert a public key into a byte buffer containing raw components of the key without compression. + /// Convert a public key into a byte buffer containing raw components of the + /// key without compression. pub fn to_bytes(&self) -> Vec { self.0.to_encoded_point(false).as_bytes().to_owned() } - /// Encode a public key into a DER encoded byte buffer as defined by SEC1 standard. + /// Encode a public key into a DER encoded byte buffer as defined by SEC1 + /// standard. pub fn encode_der(&self) -> Vec { let buf = self.to_bytes(); Self::add_asn1_header(&buf) } - /// Try to decode a public key from a DER encoded byte buffer as defined by SEC1 standard. + /// Try to decode a public key from a DER encoded byte buffer as defined by + /// SEC1 standard. 
pub fn try_decode_der(k: &[u8]) -> Result { let buf = Self::del_asn1_header(k).ok_or_else(|| { DecodingError::failed_to_parse::( diff --git a/identity/src/ed25519.rs b/identity/src/ed25519.rs index d77c44547d6..e4037a3d5f4 100644 --- a/identity/src/ed25519.rs +++ b/identity/src/ed25519.rs @@ -20,13 +20,13 @@ //! Ed25519 keys. -use super::error::DecodingError; -use core::cmp; -use core::fmt; -use core::hash; +use core::{cmp, fmt, hash}; + use ed25519_dalek::{self as ed25519, Signer as _, Verifier as _}; use zeroize::Zeroize; +use super::error::DecodingError; + /// An Ed25519 keypair. #[derive(Clone)] pub struct Keypair(ed25519::SigningKey); @@ -48,7 +48,8 @@ impl Keypair { /// Try to parse a keypair from the [binary format](https://datatracker.ietf.org/doc/html/rfc8032#section-5.1.5) /// produced by [`Keypair::to_bytes`], zeroing the input on success. /// - /// Note that this binary format is the same as `ed25519_dalek`'s and `ed25519_zebra`'s. + /// Note that this binary format is the same as `ed25519_dalek`'s and + /// `ed25519_zebra`'s. pub fn try_from_bytes(kp: &mut [u8]) -> Result { let bytes = <[u8; 64]>::try_from(&*kp) .map_err(|e| DecodingError::failed_to_parse("Ed25519 keypair", e))?; @@ -152,7 +153,8 @@ impl PublicKey { self.0.to_bytes() } - /// Try to parse a public key from a byte array containing the actual key as produced by `to_bytes`. + /// Try to parse a public key from a byte array containing the actual key as + /// produced by `to_bytes`. 
pub fn try_from_bytes(k: &[u8]) -> Result { let k = <[u8; 32]>::try_from(k) .map_err(|e| DecodingError::failed_to_parse("Ed25519 public key", e))?; @@ -206,9 +208,10 @@ impl SecretKey { #[cfg(test)] mod tests { - use super::*; use quickcheck::*; + use super::*; + fn eq_keypairs(kp1: &Keypair, kp2: &Keypair) -> bool { kp1.public() == kp2.public() && kp1.0.to_bytes() == kp2.0.to_bytes() } diff --git a/identity/src/error.rs b/identity/src/error.rs index 71cd78fe1ea..4a21ec74ebe 100644 --- a/identity/src/error.rs +++ b/identity/src/error.rs @@ -20,8 +20,7 @@ //! Errors during identity key operations. -use std::error::Error; -use std::fmt; +use std::{error::Error, fmt}; use crate::KeyType; @@ -136,7 +135,8 @@ impl Error for SigningError { } } -/// Error produced when failing to convert [`Keypair`](crate::Keypair) to a more concrete keypair. +/// Error produced when failing to convert [`Keypair`](crate::Keypair) to a more +/// concrete keypair. #[derive(Debug)] pub struct OtherVariantError { actual: KeyType, diff --git a/identity/src/keypair.rs b/identity/src/keypair.rs index f1e8a7c2142..d22d6d3ec87 100644 --- a/identity/src/keypair.rs +++ b/identity/src/keypair.rs @@ -24,40 +24,40 @@ feature = "ed25519", feature = "rsa" ))] -#[cfg(feature = "ed25519")] -use crate::ed25519; +use quick_protobuf::{BytesReader, Writer}; + +#[cfg(feature = "ecdsa")] +use crate::ecdsa; #[cfg(any( feature = "ecdsa", feature = "secp256k1", feature = "ed25519", feature = "rsa" ))] -use crate::error::OtherVariantError; -use crate::error::{DecodingError, SigningError}; +#[cfg(feature = "ed25519")] +use crate::ed25519; #[cfg(any( feature = "ecdsa", feature = "secp256k1", feature = "ed25519", feature = "rsa" ))] -use crate::proto; +use crate::error::OtherVariantError; #[cfg(any( feature = "ecdsa", feature = "secp256k1", feature = "ed25519", feature = "rsa" ))] -use quick_protobuf::{BytesReader, Writer}; - +use crate::proto; #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] use crate::rsa; - 
#[cfg(feature = "secp256k1")] use crate::secp256k1; - -#[cfg(feature = "ecdsa")] -use crate::ecdsa; -use crate::KeyType; +use crate::{ + error::{DecodingError, SigningError}, + KeyType, +}; /// Identity keypair of a node. /// @@ -75,7 +75,6 @@ use crate::KeyType; /// let mut bytes = std::fs::read("private.pk8").unwrap(); /// let keypair = Keypair::rsa_from_pkcs8(&mut bytes); /// ``` -/// #[derive(Debug, Clone)] pub struct Keypair { keypair: KeyPairInner, @@ -154,8 +153,8 @@ impl Keypair { }) } - /// Decode a keypair from a DER-encoded Secp256k1 secret key in an ECPrivateKey - /// structure as defined in [RFC5915]. + /// Decode a keypair from a DER-encoded Secp256k1 secret key in an + /// ECPrivateKey structure as defined in [RFC5915]. /// /// [RFC5915]: https://tools.ietf.org/html/rfc5915 #[cfg(feature = "secp256k1")] @@ -258,7 +257,8 @@ impl Keypair { unreachable!() } - /// Decode a private key from a protobuf structure and parse it as a [`Keypair`]. + /// Decode a private key from a protobuf structure and parse it as a + /// [`Keypair`]. #[allow(unused_variables)] pub fn from_protobuf_encoding(bytes: &[u8]) -> Result { #[cfg(any( @@ -341,7 +341,8 @@ impl Keypair { } } - /// Deterministically derive a new secret from this [`Keypair`], taking into account the provided domain. + /// Deterministically derive a new secret from this [`Keypair`], taking into + /// account the provided domain. /// /// This works for all key types except RSA where it returns `None`. 
/// @@ -352,10 +353,11 @@ impl Keypair { /// # use libp2p_identity as identity; /// let key = identity::Keypair::generate_ed25519(); /// - /// let new_key = key.derive_secret(b"my encryption key").expect("can derive secret for ed25519"); + /// let new_key = key + /// .derive_secret(b"my encryption key") + /// .expect("can derive secret for ed25519"); /// # } /// ``` - /// #[cfg(any( feature = "ecdsa", feature = "secp256k1", @@ -904,9 +906,10 @@ mod tests { #[test] fn public_key_implements_hash() { - use crate::PublicKey; use std::hash::Hash; + use crate::PublicKey; + fn assert_implements_hash() {} assert_implements_hash::(); @@ -914,9 +917,10 @@ mod tests { #[test] fn public_key_implements_ord() { - use crate::PublicKey; use std::cmp::Ord; + use crate::PublicKey; + fn assert_implements_ord() {} assert_implements_ord::(); diff --git a/identity/src/lib.rs b/identity/src/lib.rs index 4f4313e8f17..68518a7547d 100644 --- a/identity/src/lib.rs +++ b/identity/src/lib.rs @@ -31,7 +31,8 @@ //! Instead, loading fixed keys must use the standard, thus more portable //! binary representation of the specific key type //! (e.g. [ed25519 binary format](https://datatracker.ietf.org/doc/html/rfc8032#section-5.1.5)). -//! All key types have functions to enable conversion to/from their binary representations. +//! All key types have functions to enable conversion to/from their binary +//! representations. #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] #![allow(unreachable_pub)] diff --git a/identity/src/peer_id.rs b/identity/src/peer_id.rs index 8ae6d99ae32..293dbfa1f86 100644 --- a/identity/src/peer_id.rs +++ b/identity/src/peer_id.rs @@ -18,17 +18,19 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. +use std::{fmt, str::FromStr}; + #[cfg(feature = "rand")] use rand::Rng; use sha2::Digest as _; -use std::{fmt, str::FromStr}; use thiserror::Error; /// Local type-alias for multihash. 
/// /// Must be big enough to accommodate for `MAX_INLINE_KEY_LENGTH`. -/// 64 satisfies that and can hold 512 bit hashes which is what the ecosystem typically uses. -/// Given that this appears in our type-signature, using a "common" number here makes us more compatible. +/// 64 satisfies that and can hold 512 bit hashes which is what the ecosystem +/// typically uses. Given that this appears in our type-signature, using a +/// "common" number here makes us more compatible. type Multihash = multihash::Multihash<64>; #[cfg(feature = "serde")] @@ -43,8 +45,8 @@ const MULTIHASH_SHA256_CODE: u64 = 0x12; /// Identifier of a peer of the network. /// -/// The data is a CIDv0 compatible multihash of the protobuf encoded public key of the peer -/// as specified in [specs/peer-ids](https://github.com/libp2p/specs/blob/master/peer-ids/peer-ids.md). +/// The data is a CIDv0 compatible multihash of the protobuf encoded public key +/// of the peer as specified in [specs/peer-ids](https://github.com/libp2p/specs/blob/master/peer-ids/peer-ids.md). #[derive(Clone, Copy, Eq, Hash, Ord, PartialEq, PartialOrd)] pub struct PeerId { multihash: Multihash, diff --git a/identity/src/rsa.rs b/identity/src/rsa.rs index 5eb78a4af75..bf2954f0bb1 100644 --- a/identity/src/rsa.rs +++ b/identity/src/rsa.rs @@ -20,15 +20,24 @@ //! RSA keys. 
-use super::error::*; -use asn1_der::typed::{DerDecodable, DerEncodable, DerTypeView, Sequence}; -use asn1_der::{Asn1DerError, Asn1DerErrorVariant, DerObject, Sink, VecBacking}; -use ring::rand::SystemRandom; -use ring::signature::KeyPair; -use ring::signature::{self, RsaKeyPair, RSA_PKCS1_2048_8192_SHA256, RSA_PKCS1_SHA256}; use std::{fmt, sync::Arc}; + +use asn1_der::{ + typed::{DerDecodable, DerEncodable, DerTypeView, Sequence}, + Asn1DerError, + Asn1DerErrorVariant, + DerObject, + Sink, + VecBacking, +}; +use ring::{ + rand::SystemRandom, + signature::{self, KeyPair, RsaKeyPair, RSA_PKCS1_2048_8192_SHA256, RSA_PKCS1_SHA256}, +}; use zeroize::Zeroize; +use super::error::*; + /// An RSA keypair. #[derive(Clone)] pub struct Keypair(Arc); @@ -42,8 +51,8 @@ impl std::fmt::Debug for Keypair { } impl Keypair { - /// Decode an RSA keypair from a DER-encoded private key in PKCS#1 RSAPrivateKey - /// format (i.e. unencrypted) as defined in [RFC3447]. + /// Decode an RSA keypair from a DER-encoded private key in PKCS#1 + /// RSAPrivateKey format (i.e. unencrypted) as defined in [RFC3447]. /// /// [RFC3447]: https://tools.ietf.org/html/rfc3447#appendix-A.1.2 pub fn try_decode_pkcs1(der: &mut [u8]) -> Result { @@ -53,8 +62,8 @@ impl Keypair { Ok(Keypair(Arc::new(kp))) } - /// Decode an RSA keypair from a DER-encoded private key in PKCS#8 PrivateKeyInfo - /// format (i.e. unencrypted) as defined in [RFC5208]. + /// Decode an RSA keypair from a DER-encoded private key in PKCS#8 + /// PrivateKeyInfo format (i.e. unencrypted) as defined in [RFC5208]. /// /// [RFC5208]: https://tools.ietf.org/html/rfc5208#section-5 pub fn try_decode_pkcs8(der: &mut [u8]) -> Result { @@ -100,8 +109,8 @@ impl PublicKey { self.0.clone() } - /// Encode the RSA public key in DER as a X.509 SubjectPublicKeyInfo structure, - /// as defined in [RFC5280]. + /// Encode the RSA public key in DER as a X.509 SubjectPublicKeyInfo + /// structure, as defined in [RFC5280]. 
/// /// [RFC5280]: https://tools.ietf.org/html/rfc5280#section-4.1 pub fn encode_x509(&self) -> Vec { @@ -315,9 +324,10 @@ impl DerDecodable<'_> for Asn1SubjectPublicKeyInfo { #[cfg(test)] mod tests { - use super::*; use quickcheck::*; + use super::*; + const KEY1: &[u8] = include_bytes!("test/rsa-2048.pk8"); const KEY2: &[u8] = include_bytes!("test/rsa-3072.pk8"); const KEY3: &[u8] = include_bytes!("test/rsa-4096.pk8"); diff --git a/identity/src/secp256k1.rs b/identity/src/secp256k1.rs index a6e9e923268..d8e2a8f795c 100644 --- a/identity/src/secp256k1.rs +++ b/identity/src/secp256k1.rs @@ -20,15 +20,15 @@ //! Secp256k1 keys. -use super::error::DecodingError; +use core::{cmp, fmt, hash}; + use asn1_der::typed::{DerDecodable, Sequence}; -use core::cmp; -use core::fmt; -use core::hash; use libsecp256k1::{Message, Signature}; use sha2::{Digest as ShaDigestTrait, Sha256}; use zeroize::Zeroize; +use super::error::DecodingError; + /// A Secp256k1 keypair. #[derive(Clone)] pub struct Keypair { @@ -133,7 +133,8 @@ impl SecretKey { pub fn sign(&self, msg: &[u8]) -> Vec { let generic_array = Sha256::digest(msg); - // FIXME: Once `generic-array` hits 1.0, we should be able to just use `Into` here. + // FIXME: Once `generic-array` hits 1.0, we should be able to just use `Into` + // here. let mut array = [0u8; 32]; array.copy_from_slice(generic_array.as_slice()); @@ -196,15 +197,16 @@ impl PublicKey { self.verify_hash(Sha256::digest(msg).as_ref(), sig) } - /// Verify the Secp256k1 DER-encoded signature on a raw 256-bit message using the public key. + /// Verify the Secp256k1 DER-encoded signature on a raw 256-bit message + /// using the public key. pub fn verify_hash(&self, msg: &[u8], sig: &[u8]) -> bool { Message::parse_slice(msg) .and_then(|m| Signature::parse_der(sig).map(|s| libsecp256k1::verify(&m, &s, &self.0))) .unwrap_or(false) } - /// Convert the public key to a byte buffer in compressed form, i.e. with one coordinate - /// represented by a single bit. 
+ /// Convert the public key to a byte buffer in compressed form, i.e. with + /// one coordinate represented by a single bit. pub fn to_bytes(&self) -> [u8; 33] { self.0.serialize_compressed() } diff --git a/interop-tests/src/arch.rs b/interop-tests/src/arch.rs index df36f8e5baf..bfd54119eab 100644 --- a/interop-tests/src/arch.rs +++ b/interop-tests/src/arch.rs @@ -1,7 +1,6 @@ // Native re-exports #[cfg(not(target_arch = "wasm32"))] pub(crate) use native::{build_swarm, init_logger, sleep, Instant, RedisClient}; - // Wasm re-exports #[cfg(target_arch = "wasm32")] pub(crate) use wasm::{build_swarm, init_logger, sleep, Instant, RedisClient}; @@ -11,11 +10,15 @@ pub(crate) mod native { use std::time::Duration; use anyhow::{bail, Context, Result}; - use futures::future::BoxFuture; - use futures::FutureExt; - use libp2p::identity::Keypair; - use libp2p::swarm::{NetworkBehaviour, Swarm}; - use libp2p::{noise, tcp, tls, yamux}; + use futures::{future::BoxFuture, FutureExt}; + use libp2p::{ + identity::Keypair, + noise, + swarm::{NetworkBehaviour, Swarm}, + tcp, + tls, + yamux, + }; use libp2p_mplex as mplex; use libp2p_webrtc as webrtc; use redis::AsyncCommands; @@ -186,15 +189,22 @@ pub(crate) mod native { #[cfg(target_arch = "wasm32")] pub(crate) mod wasm { + use std::time::Duration; + use anyhow::{bail, Context, Result}; use futures::future::{BoxFuture, FutureExt}; - use libp2p::core::upgrade::Version; - use libp2p::identity::Keypair; - use libp2p::swarm::{NetworkBehaviour, Swarm}; - use libp2p::{noise, websocket_websys, webtransport_websys, yamux, Transport as _}; + use libp2p::{ + core::upgrade::Version, + identity::Keypair, + noise, + swarm::{NetworkBehaviour, Swarm}, + websocket_websys, + webtransport_websys, + yamux, + Transport as _, + }; use libp2p_mplex as mplex; use libp2p_webrtc_websys as webrtc_websys; - use std::time::Duration; use crate::{BlpopRequest, Muxer, SecProtocol, Transport}; diff --git a/interop-tests/src/bin/wasm_ping.rs 
b/interop-tests/src/bin/wasm_ping.rs index 0d697a0e2a3..a2176bc04f0 100644 --- a/interop-tests/src/bin/wasm_ping.rs +++ b/interop-tests/src/bin/wasm_ping.rs @@ -1,26 +1,28 @@ #![allow(non_upper_case_globals)] -use std::future::IntoFuture; -use std::process::Stdio; -use std::time::Duration; +use std::{future::IntoFuture, process::Stdio, time::Duration}; use anyhow::{bail, Context, Result}; -use axum::http::{header, Uri}; -use axum::response::{Html, IntoResponse, Response}; -use axum::routing::get; -use axum::{extract::State, http::StatusCode, routing::post, Json, Router}; +use axum::{ + extract::State, + http::{header, StatusCode, Uri}, + response::{Html, IntoResponse, Response}, + routing::{get, post}, + Json, + Router, +}; +use interop_tests::{BlpopRequest, Report}; use redis::{AsyncCommands, Client}; use thirtyfour::prelude::*; -use tokio::io::{AsyncBufReadExt, BufReader}; -use tokio::net::TcpListener; -use tokio::process::Child; -use tokio::sync::mpsc; -use tower_http::cors::CorsLayer; -use tower_http::trace::TraceLayer; +use tokio::{ + io::{AsyncBufReadExt, BufReader}, + net::TcpListener, + process::Child, + sync::mpsc, +}; +use tower_http::{cors::CorsLayer, trace::TraceLayer}; use tracing_subscriber::{fmt, prelude::*, EnvFilter}; -use interop_tests::{BlpopRequest, Report}; - mod config; const BIND_ADDR: &str = "127.0.0.1:8080"; diff --git a/interop-tests/src/lib.rs b/interop-tests/src/lib.rs index 0154bec51a4..14b3ef8b622 100644 --- a/interop-tests/src/lib.rs +++ b/interop-tests/src/lib.rs @@ -1,11 +1,14 @@ -use std::str::FromStr; -use std::time::Duration; +use std::{str::FromStr, time::Duration}; use anyhow::{bail, Context, Result}; use futures::{FutureExt, StreamExt}; -use libp2p::identity::Keypair; -use libp2p::swarm::SwarmEvent; -use libp2p::{identify, ping, swarm::NetworkBehaviour, Multiaddr}; +use libp2p::{ + identify, + identity::Keypair, + ping, + swarm::{NetworkBehaviour, SwarmEvent}, + Multiaddr, +}; #[cfg(target_arch = "wasm32")] use 
wasm_bindgen::prelude::*; @@ -60,8 +63,8 @@ pub async fn run_test( let maybe_id = None; // Run a ping interop test. Based on `is_dialer`, either dial the address - // retrieved via `listenAddr` key over the redis connection. Or wait to be pinged and have - // `dialerDone` key ready on the redis connection. + // retrieved via `listenAddr` key over the redis connection. Or wait to be + // pinged and have `dialerDone` key ready on the redis connection. match is_dialer { true => { let result: Vec = redis_client diff --git a/libp2p/src/bandwidth.rs b/libp2p/src/bandwidth.rs index 8931c5c4166..f51c393e04c 100644 --- a/libp2p/src/bandwidth.rs +++ b/libp2p/src/bandwidth.rs @@ -20,13 +20,6 @@ #![allow(deprecated)] -use crate::core::muxing::{StreamMuxer, StreamMuxerEvent}; - -use futures::{ - io::{IoSlice, IoSliceMut}, - prelude::*, - ready, -}; use std::{ convert::TryFrom as _, io, @@ -38,8 +31,16 @@ use std::{ task::{Context, Poll}, }; -/// Wraps around a [`StreamMuxer`] and counts the number of bytes that go through all the opened -/// streams. +use futures::{ + io::{IoSlice, IoSliceMut}, + prelude::*, + ready, +}; + +use crate::core::muxing::{StreamMuxer, StreamMuxerEvent}; + +/// Wraps around a [`StreamMuxer`] and counts the number of bytes that go +/// through all the opened streams. #[derive(Clone)] #[pin_project::pin_project] pub(crate) struct BandwidthLogging { @@ -103,9 +104,8 @@ where } /// Allows obtaining the average bandwidth of the streams. -#[deprecated( - note = "Use `libp2p::SwarmBuilder::with_bandwidth_metrics` or `libp2p_metrics::BandwidthTransport` instead." -)] +#[deprecated(note = "Use `libp2p::SwarmBuilder::with_bandwidth_metrics` or \ + `libp2p_metrics::BandwidthTransport` instead.")] pub struct BandwidthSinks { inbound: AtomicU64, outbound: AtomicU64, @@ -120,24 +120,29 @@ impl BandwidthSinks { }) } - /// Returns the total number of bytes that have been downloaded on all the streams. 
+ /// Returns the total number of bytes that have been downloaded on all the + /// streams. /// - /// > **Note**: This method is by design subject to race conditions. The returned value should - /// > only ever be used for statistics purposes. + /// > **Note**: This method is by design subject to race conditions. The + /// > returned value should + /// > only ever be used for statistics purposes. pub fn total_inbound(&self) -> u64 { self.inbound.load(Ordering::Relaxed) } - /// Returns the total number of bytes that have been uploaded on all the streams. + /// Returns the total number of bytes that have been uploaded on all the + /// streams. /// - /// > **Note**: This method is by design subject to race conditions. The returned value should - /// > only ever be used for statistics purposes. + /// > **Note**: This method is by design subject to race conditions. The + /// > returned value should + /// > only ever be used for statistics purposes. pub fn total_outbound(&self) -> u64 { self.outbound.load(Ordering::Relaxed) } } -/// Wraps around an [`AsyncRead`] + [`AsyncWrite`] and logs the bandwidth that goes through it. +/// Wraps around an [`AsyncRead`] + [`AsyncWrite`] and logs the bandwidth that +/// goes through it. #[pin_project::pin_project] pub(crate) struct InstrumentedStream { #[pin] diff --git a/libp2p/src/builder.rs b/libp2p/src/builder.rs index de003314cca..74aa6b558d2 100644 --- a/libp2p/src/builder.rs +++ b/libp2p/src/builder.rs @@ -33,31 +33,31 @@ mod select_security; /// # relay: libp2p_relay::client::Behaviour, /// # } /// -/// let swarm = SwarmBuilder::with_new_identity() -/// .with_tokio() -/// .with_tcp( -/// Default::default(), -/// (libp2p_tls::Config::new, libp2p_noise::Config::new), -/// libp2p_yamux::Config::default, -/// )? -/// .with_quic() -/// .with_other_transport(|_key| DummyTransport::<(PeerId, StreamMuxerBox)>::new())? -/// .with_dns()? 
-/// .with_websocket( -/// (libp2p_tls::Config::new, libp2p_noise::Config::new), -/// libp2p_yamux::Config::default, -/// ) -/// .await? -/// .with_relay_client( -/// (libp2p_tls::Config::new, libp2p_noise::Config::new), -/// libp2p_yamux::Config::default, -/// )? -/// .with_behaviour(|_key, relay| MyBehaviour { relay })? -/// .with_swarm_config(|cfg| { -/// // Edit cfg here. -/// cfg -/// }) -/// .build(); +/// let swarm = SwarmBuilder::with_new_identity() +/// .with_tokio() +/// .with_tcp( +/// Default::default(), +/// (libp2p_tls::Config::new, libp2p_noise::Config::new), +/// libp2p_yamux::Config::default, +/// )? +/// .with_quic() +/// .with_other_transport(|_key| DummyTransport::<(PeerId, StreamMuxerBox)>::new())? +/// .with_dns()? +/// .with_websocket( +/// (libp2p_tls::Config::new, libp2p_noise::Config::new), +/// libp2p_yamux::Config::default, +/// ) +/// .await? +/// .with_relay_client( +/// (libp2p_tls::Config::new, libp2p_noise::Config::new), +/// libp2p_yamux::Config::default, +/// )? +/// .with_behaviour(|_key, relay| MyBehaviour { relay })? +/// .with_swarm_config(|cfg| { +/// // Edit cfg here. +/// cfg +/// }) +/// .build(); /// # /// # Ok(()) /// # } @@ -70,11 +70,12 @@ pub struct SwarmBuilder { #[cfg(test)] mod tests { - use crate::SwarmBuilder; use libp2p_core::{muxing::StreamMuxerBox, transport::dummy::DummyTransport}; use libp2p_identity::PeerId; use libp2p_swarm::NetworkBehaviour; + use crate::SwarmBuilder; + #[test] #[cfg(all( feature = "tokio", @@ -460,7 +461,8 @@ mod tests { .build(); } - /// Showcases how to provide custom transports unknown to the libp2p crate, e.g. WebRTC. + /// Showcases how to provide custom transports unknown to the libp2p crate, + /// e.g. WebRTC. 
#[test] #[cfg(feature = "tokio")] fn other_transport() -> Result<(), Box> { diff --git a/libp2p/src/builder/phase.rs b/libp2p/src/builder/phase.rs index c9679a46767..681615d73dd 100644 --- a/libp2p/src/builder/phase.rs +++ b/libp2p/src/builder/phase.rs @@ -19,6 +19,8 @@ use bandwidth_metrics::*; use behaviour::*; use build::*; use dns::*; +use libp2p_core::{muxing::StreamMuxerBox, Transport}; +use libp2p_identity::Keypair; use other_transport::*; use provider::*; use quic::*; @@ -27,12 +29,11 @@ use swarm::*; use tcp::*; use websocket::*; -use super::select_muxer::SelectMuxerUpgrade; -use super::select_security::SelectSecurityUpgrade; -use super::SwarmBuilder; - -use libp2p_core::{muxing::StreamMuxerBox, Transport}; -use libp2p_identity::Keypair; +use super::{ + select_muxer::SelectMuxerUpgrade, + select_security::SelectSecurityUpgrade, + SwarmBuilder, +}; #[allow(unreachable_pub)] pub trait IntoSecurityUpgrade { diff --git a/libp2p/src/builder/phase/bandwidth_logging.rs b/libp2p/src/builder/phase/bandwidth_logging.rs index cee9498fcaa..f24df5f3df5 100644 --- a/libp2p/src/builder/phase/bandwidth_logging.rs +++ b/libp2p/src/builder/phase/bandwidth_logging.rs @@ -1,10 +1,9 @@ +use std::{marker::PhantomData, sync::Arc}; + use super::*; #[allow(deprecated)] use crate::bandwidth::BandwidthSinks; -use crate::transport_ext::TransportExt; -use crate::SwarmBuilder; -use std::marker::PhantomData; -use std::sync::Arc; +use crate::{transport_ext::TransportExt, SwarmBuilder}; pub struct BandwidthLoggingPhase { pub(crate) relay_behaviour: R, diff --git a/libp2p/src/builder/phase/bandwidth_metrics.rs b/libp2p/src/builder/phase/bandwidth_metrics.rs index 52daa731ddd..ddd292c140e 100644 --- a/libp2p/src/builder/phase/bandwidth_metrics.rs +++ b/libp2p/src/builder/phase/bandwidth_metrics.rs @@ -1,10 +1,9 @@ +use std::{marker::PhantomData, sync::Arc}; + use super::*; #[allow(deprecated)] use crate::bandwidth::BandwidthSinks; -use crate::transport_ext::TransportExt; -use 
crate::SwarmBuilder; -use std::marker::PhantomData; -use std::sync::Arc; +use crate::{transport_ext::TransportExt, SwarmBuilder}; pub struct BandwidthMetricsPhase { pub(crate) relay_behaviour: R, diff --git a/libp2p/src/builder/phase/behaviour.rs b/libp2p/src/builder/phase/behaviour.rs index 939db935c80..22f8c617051 100644 --- a/libp2p/src/builder/phase/behaviour.rs +++ b/libp2p/src/builder/phase/behaviour.rs @@ -1,8 +1,9 @@ +use std::{convert::Infallible, marker::PhantomData}; + +use libp2p_swarm::NetworkBehaviour; + use super::*; use crate::SwarmBuilder; -use libp2p_swarm::NetworkBehaviour; -use std::convert::Infallible; -use std::marker::PhantomData; pub struct BehaviourPhase { pub(crate) relay_behaviour: R, diff --git a/libp2p/src/builder/phase/build.rs b/libp2p/src/builder/phase/build.rs index 80a83994eeb..f9621da756b 100644 --- a/libp2p/src/builder/phase/build.rs +++ b/libp2p/src/builder/phase/build.rs @@ -1,9 +1,9 @@ +use libp2p_core::Transport; +use libp2p_swarm::Swarm; + #[allow(unused_imports)] use super::*; - use crate::SwarmBuilder; -use libp2p_core::Transport; -use libp2p_swarm::Swarm; pub struct BuildPhase { pub(crate) behaviour: B, diff --git a/libp2p/src/builder/phase/dns.rs b/libp2p/src/builder/phase/dns.rs index 638064d58bb..83653836a34 100644 --- a/libp2p/src/builder/phase/dns.rs +++ b/libp2p/src/builder/phase/dns.rs @@ -1,6 +1,7 @@ +use std::marker::PhantomData; + use super::*; use crate::SwarmBuilder; -use std::marker::PhantomData; pub struct DnsPhase { pub(crate) transport: T, diff --git a/libp2p/src/builder/phase/identity.rs b/libp2p/src/builder/phase/identity.rs index ceb86819dc7..e2511267cd3 100644 --- a/libp2p/src/builder/phase/identity.rs +++ b/libp2p/src/builder/phase/identity.rs @@ -1,6 +1,7 @@ +use std::marker::PhantomData; + use super::*; use crate::SwarmBuilder; -use std::marker::PhantomData; pub struct IdentityPhase {} diff --git a/libp2p/src/builder/phase/other_transport.rs b/libp2p/src/builder/phase/other_transport.rs index 
e04621b2e3f..c3b951c8c75 100644 --- a/libp2p/src/builder/phase/other_transport.rs +++ b/libp2p/src/builder/phase/other_transport.rs @@ -1,20 +1,19 @@ -use std::convert::Infallible; -use std::marker::PhantomData; -use std::sync::Arc; +use std::{convert::Infallible, marker::PhantomData, sync::Arc}; -use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}; -use libp2p_core::Transport; +use libp2p_core::{ + upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}, + Transport, +}; #[cfg(feature = "relay")] use libp2p_core::{Negotiated, UpgradeInfo}; #[cfg(feature = "relay")] use libp2p_identity::PeerId; +use super::*; #[allow(deprecated)] use crate::bandwidth::BandwidthSinks; use crate::SwarmBuilder; -use super::*; - pub struct OtherTransportPhase { pub(crate) transport: T, } diff --git a/libp2p/src/builder/phase/provider.rs b/libp2p/src/builder/phase/provider.rs index 2a9154cda74..e416cac05f8 100644 --- a/libp2p/src/builder/phase/provider.rs +++ b/libp2p/src/builder/phase/provider.rs @@ -1,13 +1,16 @@ +use std::marker::PhantomData; + #[allow(unused_imports)] use super::*; use crate::SwarmBuilder; -use std::marker::PhantomData; /// Represents the phase where a provider is not yet specified. -/// This is a marker type used in the type-state pattern to ensure compile-time checks of the builder's state. +/// This is a marker type used in the type-state pattern to ensure compile-time +/// checks of the builder's state. pub enum NoProviderSpecified {} -// Define enums for each of the possible runtime environments. These are used as markers in the type-state pattern, -// allowing compile-time checks for the appropriate environment configuration. +// Define enums for each of the possible runtime environments. These are used as +// markers in the type-state pattern, allowing compile-time checks for the +// appropriate environment configuration. 
#[cfg(all(not(target_arch = "wasm32"), feature = "async-std"))] /// Represents the AsyncStd runtime environment. @@ -21,12 +24,14 @@ pub enum Tokio {} /// Represents the WasmBindgen environment for WebAssembly. pub enum WasmBindgen {} -/// Represents a phase in the SwarmBuilder where a provider has been chosen but not yet specified. +/// Represents a phase in the SwarmBuilder where a provider has been chosen but +/// not yet specified. pub struct ProviderPhase {} impl SwarmBuilder { /// Configures the SwarmBuilder to use the AsyncStd runtime. - /// This method is only available when compiling for non-Wasm targets with the `async-std` feature enabled. + /// This method is only available when compiling for non-Wasm targets with + /// the `async-std` feature enabled. #[cfg(all(not(target_arch = "wasm32"), feature = "async-std"))] pub fn with_async_std(self) -> SwarmBuilder { SwarmBuilder { @@ -37,7 +42,8 @@ impl SwarmBuilder { } /// Configures the SwarmBuilder to use the Tokio runtime. - /// This method is only available when compiling for non-Wasm targets with the `tokio` feature enabled + /// This method is only available when compiling for non-Wasm targets with + /// the `tokio` feature enabled #[cfg(all(not(target_arch = "wasm32"), feature = "tokio"))] pub fn with_tokio(self) -> SwarmBuilder { SwarmBuilder { diff --git a/libp2p/src/builder/phase/quic.rs b/libp2p/src/builder/phase/quic.rs index e030e9493bb..1b6329c1095 100644 --- a/libp2p/src/builder/phase/quic.rs +++ b/libp2p/src/builder/phase/quic.rs @@ -1,5 +1,5 @@ -use super::*; -use crate::SwarmBuilder; +use std::{marker::PhantomData, sync::Arc}; + #[cfg(all(not(target_arch = "wasm32"), feature = "websocket"))] use libp2p_core::muxing::StreamMuxer; use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}; @@ -8,7 +8,9 @@ use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}; all(not(target_arch = "wasm32"), feature = "websocket") ))] use 
libp2p_core::{InboundUpgrade, Negotiated, OutboundUpgrade, UpgradeInfo}; -use std::{marker::PhantomData, sync::Arc}; + +use super::*; +use crate::SwarmBuilder; pub struct QuicPhase { pub(crate) transport: T, diff --git a/libp2p/src/builder/phase/relay.rs b/libp2p/src/builder/phase/relay.rs index f8305f9d246..d8a6047f625 100644 --- a/libp2p/src/builder/phase/relay.rs +++ b/libp2p/src/builder/phase/relay.rs @@ -10,9 +10,8 @@ use libp2p_core::{InboundUpgrade, Negotiated, OutboundUpgrade, StreamMuxer, Upgr #[cfg(feature = "relay")] use libp2p_identity::PeerId; -use crate::SwarmBuilder; - use super::*; +use crate::SwarmBuilder; pub struct RelayPhase { pub(crate) transport: T, @@ -22,9 +21,10 @@ pub struct RelayPhase { impl SwarmBuilder> { /// Adds a relay client transport. /// - /// Note that both `security_upgrade` and `multiplexer_upgrade` take function pointers, - /// i.e. they take the function themselves (without the invocation via `()`), not the - /// result of the function invocation. See example below. + /// Note that both `security_upgrade` and `multiplexer_upgrade` take + /// function pointers, i.e. they take the function themselves (without + /// the invocation via `()`), not the result of the function invocation. + /// See example below. 
/// /// ``` rust /// # use libp2p::SwarmBuilder; diff --git a/libp2p/src/builder/phase/tcp.rs b/libp2p/src/builder/phase/tcp.rs index 4b7cf29b3d2..ec2d068f21e 100644 --- a/libp2p/src/builder/phase/tcp.rs +++ b/libp2p/src/builder/phase/tcp.rs @@ -1,5 +1,5 @@ -use super::*; -use crate::SwarmBuilder; +use std::marker::PhantomData; + #[cfg(all( not(target_arch = "wasm32"), any(feature = "tcp", feature = "websocket") @@ -12,9 +12,14 @@ use libp2p_core::Transport; any(feature = "tcp", feature = "websocket") ))] use libp2p_core::{ - upgrade::InboundConnectionUpgrade, upgrade::OutboundConnectionUpgrade, Negotiated, UpgradeInfo, + upgrade::InboundConnectionUpgrade, + upgrade::OutboundConnectionUpgrade, + Negotiated, + UpgradeInfo, }; -use std::marker::PhantomData; + +use super::*; +use crate::SwarmBuilder; pub struct TcpPhase {} diff --git a/libp2p/src/builder/phase/websocket.rs b/libp2p/src/builder/phase/websocket.rs index 68a85bb77b7..a23c6eca854 100644 --- a/libp2p/src/builder/phase/websocket.rs +++ b/libp2p/src/builder/phase/websocket.rs @@ -1,5 +1,5 @@ -use super::*; -use crate::SwarmBuilder; +use std::marker::PhantomData; + #[cfg(all(not(target_arch = "wasm32"), feature = "websocket"))] use libp2p_core::muxing::{StreamMuxer, StreamMuxerBox}; use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}; @@ -15,7 +15,9 @@ use libp2p_core::{InboundUpgrade, Negotiated, OutboundUpgrade, UpgradeInfo}; feature = "relay" ))] use libp2p_identity::PeerId; -use std::marker::PhantomData; + +use super::*; +use crate::SwarmBuilder; pub struct WebsocketPhase { pub(crate) transport: T, @@ -126,8 +128,8 @@ impl_websocket_builder!( impl_websocket_builder!( "tokio", super::provider::Tokio, - // Note this is an unnecessary await for Tokio Websocket (i.e. tokio dns) in order to be consistent - // with above AsyncStd construction. + // Note this is an unnecessary await for Tokio Websocket (i.e. tokio dns) in order to be + // consistent with above AsyncStd construction. 
futures::future::ready(libp2p_dns::tokio::Transport::system( libp2p_tcp::tokio::Transport::new(libp2p_tcp::Config::default()) )), diff --git a/libp2p/src/builder/select_muxer.rs b/libp2p/src/builder/select_muxer.rs index c93ba9d9991..93ae0547269 100644 --- a/libp2p/src/builder/select_muxer.rs +++ b/libp2p/src/builder/select_muxer.rs @@ -20,12 +20,15 @@ #![allow(unreachable_pub)] +use std::iter::{Chain, Map}; + use either::Either; use futures::future; -use libp2p_core::either::EitherFuture; -use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}; -use libp2p_core::UpgradeInfo; -use std::iter::{Chain, Map}; +use libp2p_core::{ + either::EitherFuture, + upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}, + UpgradeInfo, +}; #[derive(Debug, Clone)] pub struct SelectMuxerUpgrade(A, B); diff --git a/libp2p/src/builder/select_security.rs b/libp2p/src/builder/select_security.rs index d6c7f8c172f..32c19c310bc 100644 --- a/libp2p/src/builder/select_security.rs +++ b/libp2p/src/builder/select_security.rs @@ -21,16 +21,18 @@ #![allow(unreachable_pub)] +use std::iter::{Chain, Map}; + use either::Either; -use futures::future::MapOk; -use futures::{future, TryFutureExt}; -use libp2p_core::either::EitherFuture; -use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade, UpgradeInfo}; +use futures::{future, future::MapOk, TryFutureExt}; +use libp2p_core::{ + either::EitherFuture, + upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade, UpgradeInfo}, +}; use libp2p_identity::PeerId; -use std::iter::{Chain, Map}; -/// Upgrade that combines two upgrades into one. Supports all the protocols supported by either -/// sub-upgrade. +/// Upgrade that combines two upgrades into one. Supports all the protocols +/// supported by either sub-upgrade. /// /// The protocols supported by the first element have a higher priority. 
#[derive(Debug, Clone)] diff --git a/libp2p/src/lib.rs b/libp2p/src/lib.rs index 58f911e9445..1ec1cc530fc 100644 --- a/libp2p/src/lib.rs +++ b/libp2p/src/lib.rs @@ -34,11 +34,6 @@ pub use bytes; pub use futures; -#[doc(inline)] -pub use libp2p_core::multihash; -#[doc(inline)] -pub use multiaddr; - #[doc(inline)] pub use libp2p_allow_block_list as allow_block_list; #[cfg(feature = "autonat")] @@ -48,6 +43,8 @@ pub use libp2p_autonat as autonat; pub use libp2p_connection_limits as connection_limits; #[doc(inline)] pub use libp2p_core as core; +#[doc(inline)] +pub use libp2p_core::multihash; #[cfg(feature = "dcutr")] #[doc(inline)] pub use libp2p_dcutr as dcutr; @@ -140,6 +137,8 @@ pub use libp2p_webtransport_websys as webtransport_websys; #[cfg(feature = "yamux")] #[doc(inline)] pub use libp2p_yamux as yamux; +#[doc(inline)] +pub use multiaddr; mod builder; mod transport_ext; @@ -149,15 +148,18 @@ pub mod bandwidth; #[cfg(doc)] pub mod tutorials; -pub use self::builder::SwarmBuilder; -pub use self::core::{ - transport::TransportError, - upgrade::{InboundUpgrade, OutboundUpgrade}, - Transport, -}; -pub use self::multiaddr::{multiaddr as build_multiaddr, Multiaddr}; -pub use self::swarm::Swarm; -pub use self::transport_ext::TransportExt; pub use libp2p_identity as identity; pub use libp2p_identity::PeerId; pub use libp2p_swarm::{Stream, StreamProtocol}; + +pub use self::{ + builder::SwarmBuilder, + core::{ + transport::TransportError, + upgrade::{InboundUpgrade, OutboundUpgrade}, + Transport, + }, + multiaddr::{multiaddr as build_multiaddr, Multiaddr}, + swarm::Swarm, + transport_ext::TransportExt, +}; diff --git a/libp2p/src/transport_ext.rs b/libp2p/src/transport_ext.rs index 4f07484fc1f..667b006cd14 100644 --- a/libp2p/src/transport_ext.rs +++ b/libp2p/src/transport_ext.rs @@ -20,45 +20,43 @@ //! Provides the `TransportExt` trait. 
+use std::sync::Arc; + +use libp2p_identity::PeerId; + #[allow(deprecated)] use crate::bandwidth::{BandwidthLogging, BandwidthSinks}; -use crate::core::{ - muxing::{StreamMuxer, StreamMuxerBox}, - transport::Boxed, +use crate::{ + core::{ + muxing::{StreamMuxer, StreamMuxerBox}, + transport::Boxed, + }, + Transport, }; -use crate::Transport; -use libp2p_identity::PeerId; -use std::sync::Arc; -/// Trait automatically implemented on all objects that implement `Transport`. Provides some -/// additional utilities. +/// Trait automatically implemented on all objects that implement `Transport`. +/// Provides some additional utilities. pub trait TransportExt: Transport { - /// Adds a layer on the `Transport` that logs all traffic that passes through the streams - /// created by it. + /// Adds a layer on the `Transport` that logs all traffic that passes + /// through the streams created by it. /// - /// This method returns an `Arc` that can be used to retrieve the total number - /// of bytes transferred through the streams. + /// This method returns an `Arc` that can be used to + /// retrieve the total number of bytes transferred through the streams. 
/// /// # Example /// /// ``` - /// use libp2p_yamux as yamux; + /// use libp2p::{core::upgrade, identity, Transport, TransportExt}; /// use libp2p_noise as noise; /// use libp2p_tcp as tcp; - /// use libp2p::{ - /// core::upgrade, - /// identity, - /// TransportExt, - /// Transport, - /// }; + /// use libp2p_yamux as yamux; /// /// let id_keys = identity::Keypair::generate_ed25519(); /// /// let transport = tcp::tokio::Transport::new(tcp::Config::default().nodelay(true)) /// .upgrade(upgrade::Version::V1) /// .authenticate( - /// noise::Config::new(&id_keys) - /// .expect("Signing libp2p-noise static DH keypair failed."), + /// noise::Config::new(&id_keys).expect("Signing libp2p-noise static DH keypair failed."), /// ) /// .multiplex(yamux::Config::default()) /// .boxed(); @@ -66,9 +64,8 @@ pub trait TransportExt: Transport { /// let (transport, sinks) = transport.with_bandwidth_logging(); /// ``` #[allow(deprecated)] - #[deprecated( - note = "Use `libp2p::SwarmBuilder::with_bandwidth_metrics` or `libp2p_metrics::BandwidthTransport` instead." - )] + #[deprecated(note = "Use `libp2p::SwarmBuilder::with_bandwidth_metrics` or \ + `libp2p_metrics::BandwidthTransport` instead.")] fn with_bandwidth_logging(self) -> (Boxed<(PeerId, StreamMuxerBox)>, Arc) where Self: Sized + Send + Unpin + 'static, diff --git a/libp2p/src/tutorials/hole_punching.rs b/libp2p/src/tutorials/hole_punching.rs index 0963c0ca59e..e7617cbbc41 100644 --- a/libp2p/src/tutorials/hole_punching.rs +++ b/libp2p/src/tutorials/hole_punching.rs @@ -20,45 +20,49 @@ //! # Hole Punching Tutorial //! -//! This tutorial shows hands-on how to overcome firewalls and NATs with libp2p's hole punching -//! mechanism. Before we get started, please read the [blog -//! post](https://blog.ipfs.io/2022-01-20-libp2p-hole-punching/) to familiarize yourself with libp2p's hole +//! This tutorial shows hands-on how to overcome firewalls and NATs with +//! libp2p's hole punching mechanism. 
Before we get started, please read the +//! [blog post](https://blog.ipfs.io/2022-01-20-libp2p-hole-punching/) to familiarize yourself with libp2p's hole //! punching mechanism on a conceptual level. //! -//! We will be using the [Circuit Relay](crate::relay) and the [Direct Connection -//! Upgrade through Relay (DCUtR)](crate::dcutr) protocol. +//! We will be using the [Circuit Relay](crate::relay) and the [Direct +//! Connection Upgrade through Relay (DCUtR)](crate::dcutr) protocol. //! //! You will need 3 machines for this tutorial: //! //! - A relay server: //! - Any public server will do, e.g. a cloud provider VM. //! - A listening client: -//! - Any computer connected to the internet, but not reachable from outside its own network, -//! works. -//! - This can e.g. be your friends laptop behind their router (firewall + NAT). -//! - This can e.g. be some cloud provider VM, shielded from incoming connections e.g. via -//! Linux's UFW on the same machine. -//! - Don't use a machine that is in the same network as the dialing client. (This would require -//! NAT hairpinning.) +//! - Any computer connected to the internet, but not reachable from outside +//! its own network, works. +//! - This can e.g. be your friends laptop behind their router (firewall + +//! NAT). +//! - This can e.g. be some cloud provider VM, shielded from incoming +//! connections e.g. via Linux's UFW on the same machine. +//! - Don't use a machine that is in the same network as the dialing client. +//! (This would require NAT hairpinning.) //! - A dialing client: -//! - Like the above, any computer connected to the internet, but not reachable from the outside. +//! - Like the above, any computer connected to the internet, but not +//! reachable from the outside. //! - Your local machine will likely fulfill these requirements. //! //! ## Setting up the relay server //! -//! Hole punching requires a public relay node for the two private nodes to coordinate their hole -//! punch via. 
For that we need a public server somewhere in the Internet. In case you don't have -//! one already, any cloud provider VM will do. +//! Hole punching requires a public relay node for the two private nodes to +//! coordinate their hole punch via. For that we need a public server somewhere +//! in the Internet. In case you don't have one already, any cloud provider VM +//! will do. //! -//! Either on the server directly, or on your local machine, compile the example relay server: +//! Either on the server directly, or on your local machine, compile the example +//! relay server: //! //! ``` bash //! ## Inside the rust-libp2p repository. //! cargo build --bin relay-server-example //! ``` //! -//! You can find the binary at `target/debug/relay-server-example`. In case you built it locally, copy -//! it to your server. +//! You can find the binary at `target/debug/relay-server-example`. In case you +//! built it locally, copy it to your server. //! //! On your server, start the relay server binary: //! @@ -66,9 +70,10 @@ //! ./relay-server-example --port 4001 --secret-key-seed 0 //! ``` //! -//! Now let's make sure that the server is public, in other words let's make sure one can reach it -//! through the Internet. First, either manually replace `$RELAY_SERVER_IP` in the following -//! commands or `export RELAY_SERVER_IP=ipaddr` with the appropriate relay server `ipaddr` in +//! Now let's make sure that the server is public, in other words let's make +//! sure one can reach it through the Internet. First, either manually replace +//! `$RELAY_SERVER_IP` in the following commands or `export +//! RELAY_SERVER_IP=ipaddr` with the appropriate relay server `ipaddr` in //! the dialing client and listening client. //! //! Now, from the dialing client: @@ -98,7 +103,8 @@ //! //! ``` bash //! $ libp2p-lookup direct --address /ip4/111.11.111.111/tcp/4001 -//! Lookup for peer with id PeerId("12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN") succeeded. +//! 
Lookup for peer with id +//! PeerId("12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN") succeeded. //! //! Protocol version: "/TODO/0.0.1" //! Agent version: "rust-libp2p/0.36.0" @@ -117,16 +123,16 @@ //! //! ## Setting up the listening client //! -//! Either on the listening client machine directly, or on your local machine, compile the example -//! DCUtR client: +//! Either on the listening client machine directly, or on your local machine, +//! compile the example DCUtR client: //! //! ``` bash //! ## Inside the rust-libp2p repository. //! cargo build --bin dcutr-example //! ``` //! -//! You can find the binary at `target/debug/dcutr-example`. In case you built it locally, copy -//! it to your listening client machine. +//! You can find the binary at `target/debug/dcutr-example`. In case you built +//! it locally, copy it to your listening client machine. //! //! On the listening client machine: //! @@ -142,9 +148,9 @@ //! [2022-05-11T10:38:54Z INFO client] Listening on "/ip4/$RELAY_SERVER_IP/tcp/4001/p2p/12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN/p2p-circuit/p2p/XXX" //! ``` //! -//! Now let's make sure that the listening client is not public, in other words let's make sure one -//! can not reach it directly through the Internet. From the dialing client test that you can not -//! connect on Layer 4 (TCP): +//! Now let's make sure that the listening client is not public, in other words +//! let's make sure one can not reach it directly through the Internet. From the +//! dialing client test that you can not connect on Layer 4 (TCP): //! //! ``` bash //! telnet $LISTENING_CLIENT_IP_OBSERVED_BY_RELAY 53160 @@ -158,17 +164,26 @@ //! //! You should see the following logs appear: //! -//! 1. The dialing client establishing a relayed connection to the listening client via the relay -//! server. Note the [`/p2p-circuit` protocol](crate::multiaddr::Protocol::P2pCircuit) in the +//! 1. 
The dialing client establishing a relayed connection to the listening +//! client via the relay server. Note the [`/p2p-circuit` +//! protocol](crate::multiaddr::Protocol::P2pCircuit) in the //! [`Multiaddr`](crate::Multiaddr). //! //! ``` ignore -//! [2022-01-30T12:54:10Z INFO client] Established connection to PeerId("12D3KooWPjceQrSwdWXPyLLeABRXmuqt69Rg3sBYbU1Nft9HyQ6X") via Dialer { address: "/ip4/$RELAY_PEER_ID/tcp/4001/p2p/12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN/p2p-circuit/p2p/12D3KooWPjceQrSwdWXPyLLeABRXmuqt69Rg3sBYbU1Nft9HyQ6X", role_override: Dialer } -//! ``` +//! [2022-01-30T12:54:10Z INFO client] Established connection to +//! PeerId("12D3KooWPjceQrSwdWXPyLLeABRXmuqt69Rg3sBYbU1Nft9HyQ6X") via Dialer { +//! address: "/ip4/$RELAY_PEER_ID/tcp/4001/p2p/ +//! 12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN/p2p-circuit/p2p/ +//! 12D3KooWPjceQrSwdWXPyLLeABRXmuqt69Rg3sBYbU1Nft9HyQ6X", role_override: Dialer +//! } ``` //! //! 2. The direct connection upgrade, also known as hole punch, succeeding. -//! Reported by [`dcutr`](crate::dcutr) through [`Event`](crate::dcutr::Event) containing [`Result::Ok`] with the [`ConnectionId`](libp2p_swarm::ConnectionId) of the new direct connection. +//! Reported by [`dcutr`](crate::dcutr) through +//! [`Event`](crate::dcutr::Event) containing [`Result::Ok`] with the +//! [`ConnectionId`](libp2p_swarm::ConnectionId) of the new direct +//! connection. //! //! ``` ignore -//! [2022-01-30T12:54:11Z INFO client] Event { remote_peer_id: PeerId("12D3KooWPjceQrSwdWXPyLLeABRXmuqt69Rg3sBYbU1Nft9HyQ6X"), result: Ok(2) } -//! ``` +//! [2022-01-30T12:54:11Z INFO client] Event { remote_peer_id: +//! PeerId("12D3KooWPjceQrSwdWXPyLLeABRXmuqt69Rg3sBYbU1Nft9HyQ6X"), result: +//! Ok(2) } ``` diff --git a/libp2p/src/tutorials/ping.rs b/libp2p/src/tutorials/ping.rs index 31bf5ba3a14..e915c0ed1d9 100644 --- a/libp2p/src/tutorials/ping.rs +++ b/libp2p/src/tutorials/ping.rs @@ -55,8 +55,8 @@ //! edition = "2021" //! //! 
[dependencies] -//! libp2p = { version = "0.54", features = ["noise", "ping", "tcp", "tokio", "yamux"] } -//! futures = "0.3.30" +//! libp2p = { version = "0.54", features = ["noise", "ping", "tcp", +//! "tokio", "yamux"] } futures = "0.3.30" //! tokio = { version = "1.37.0", features = ["full"] } //! tracing-subscriber = { version = "0.3", features = ["env-filter"] } //! ``` @@ -72,6 +72,7 @@ //! //! ```rust //! use std::error::Error; +//! //! use tracing_subscriber::EnvFilter; //! //! #[tokio::main] @@ -86,20 +87,23 @@ //! } //! ``` //! -//! Go ahead and build and run the above code with: `cargo run`. Nothing happening thus far. +//! Go ahead and build and run the above code with: `cargo run`. Nothing +//! happening thus far. //! //! ## Transport //! -//! Next up we need to construct a transport. Each transport in libp2p provides encrypted streams. -//! E.g. combining TCP to establish connections, NOISE to encrypt these connections and Yamux to run -//! one or more streams on a connection. Another libp2p transport is QUIC, providing encrypted -//! streams out-of-the-box. We will stick to TCP for now. Each of these implement the [`Transport`] -//! trait. +//! Next up we need to construct a transport. Each transport in libp2p provides +//! encrypted streams. E.g. combining TCP to establish connections, NOISE to +//! encrypt these connections and Yamux to run one or more streams on a +//! connection. Another libp2p transport is QUIC, providing encrypted +//! streams out-of-the-box. We will stick to TCP for now. Each of these +//! implement the [`Transport`] trait. //! //! ```rust //! use std::error::Error; -//! use tracing_subscriber::EnvFilter; +//! //! use libp2p::{noise, tcp, yamux}; +//! use tracing_subscriber::EnvFilter; //! //! #[tokio::main] //! async fn main() -> Result<(), Box> { @@ -127,24 +131,28 @@ //! _what_ bytes and to _whom_ to send on the network. //! //! To make this more concrete, let's take a look at a simple implementation of -//! 
the [`NetworkBehaviour`] trait: the [`ping::Behaviour`](crate::ping::Behaviour). -//! As you might have guessed, similar to the good old ICMP `ping` network tool, -//! libp2p [`ping::Behaviour`](crate::ping::Behaviour) sends a ping to a peer and expects -//! to receive a pong in turn. The [`ping::Behaviour`](crate::ping::Behaviour) does not care _how_ -//! the ping and pong messages are sent on the network, whether they are sent via +//! the [`NetworkBehaviour`] trait: the +//! [`ping::Behaviour`](crate::ping::Behaviour). As you might have guessed, +//! similar to the good old ICMP `ping` network tool, +//! libp2p [`ping::Behaviour`](crate::ping::Behaviour) sends a ping to a peer +//! and expects to receive a pong in turn. The +//! [`ping::Behaviour`](crate::ping::Behaviour) does not care _how_ the ping and +//! pong messages are sent on the network, whether they are sent via //! TCP, whether they are encrypted via [noise](crate::noise) or just in -//! [plaintext](crate::plaintext). It only cares about _what_ messages and to _whom_ to sent on the -//! network. +//! [plaintext](crate::plaintext). It only cares about _what_ messages and to +//! _whom_ to sent on the network. //! //! The two traits [`Transport`] and [`NetworkBehaviour`] allow us to cleanly //! separate _how_ to send bytes from _what_ bytes and to _whom_ to send. //! -//! With the above in mind, let's extend our example, creating a [`ping::Behaviour`](crate::ping::Behaviour) at the end: +//! With the above in mind, let's extend our example, creating a +//! [`ping::Behaviour`](crate::ping::Behaviour) at the end: //! //! ```rust //! use std::error::Error; -//! use tracing_subscriber::EnvFilter; +//! //! use libp2p::{noise, ping, tcp, yamux}; +//! use tracing_subscriber::EnvFilter; //! //! #[tokio::main] //! async fn main() -> Result<(), Box> { @@ -167,15 +175,17 @@ //! //! ## Swarm //! -//! Now that we have a [`Transport`] and a [`NetworkBehaviour`], we can build the [`Swarm`] -//! 
which connects the two, allowing both to make progress. Put simply, a [`Swarm`] drives both a -//! [`Transport`] and a [`NetworkBehaviour`] forward, passing commands from the [`NetworkBehaviour`] -//! to the [`Transport`] as well as events from the [`Transport`] to the [`NetworkBehaviour`]. +//! Now that we have a [`Transport`] and a [`NetworkBehaviour`], we can build +//! the [`Swarm`] which connects the two, allowing both to make progress. Put +//! simply, a [`Swarm`] drives both a [`Transport`] and a [`NetworkBehaviour`] +//! forward, passing commands from the [`NetworkBehaviour`] to the [`Transport`] +//! as well as events from the [`Transport`] to the [`NetworkBehaviour`]. //! //! ```rust //! use std::error::Error; -//! use tracing_subscriber::EnvFilter; +//! //! use libp2p::{noise, ping, tcp, yamux}; +//! use tracing_subscriber::EnvFilter; //! //! #[tokio::main] //! async fn main() -> Result<(), Box> { @@ -199,18 +209,20 @@ //! //! ## Idle connection timeout //! -//! Now, for this example in particular, we need set the idle connection timeout. -//! Otherwise, the connection will be closed immediately. +//! Now, for this example in particular, we need set the idle connection +//! timeout. Otherwise, the connection will be closed immediately. //! -//! Whether you need to set this in your application too depends on your usecase. -//! Typically, connections are kept alive if they are "in use" by a certain protocol. -//! The ping protocol however is only an "auxiliary" kind of protocol. -//! Thus, without any other behaviour in place, we would not be able to observe the pings. +//! Whether you need to set this in your application too depends on your +//! usecase. Typically, connections are kept alive if they are "in use" by a +//! certain protocol. The ping protocol however is only an "auxiliary" kind of +//! protocol. Thus, without any other behaviour in place, we would not be able +//! to observe the pings. //! //! ```rust //! 
use std::{error::Error, time::Duration}; -//! use tracing_subscriber::EnvFilter; +//! //! use libp2p::{noise, ping, tcp, yamux}; +//! use tracing_subscriber::EnvFilter; //! //! #[tokio::main] //! async fn main() -> Result<(), Box> { @@ -226,7 +238,9 @@ //! yamux::Config::default, //! )? //! .with_behaviour(|_| ping::Behaviour::default())? -//! .with_swarm_config(|cfg| cfg.with_idle_connection_timeout(Duration::from_secs(u64::MAX))) +//! .with_swarm_config(|cfg| { +//! cfg.with_idle_connection_timeout(Duration::from_secs(u64::MAX)) +//! }) //! .build(); //! //! Ok(()) @@ -249,20 +263,21 @@ //! [github.com/multiformats/multiaddr](https://github.com/multiformats/multiaddr/). //! //! Let's make our local node listen on a new socket. -//! This socket is listening on multiple network interfaces at the same time. For -//! each network interface, a new listening address is created. These may change -//! over time as interfaces become available or unavailable. -//! For example, in case of our TCP transport it may (among others) listen on the -//! loopback interface (localhost) `/ip4/127.0.0.1/tcp/24915` as well as the local -//! network `/ip4/192.168.178.25/tcp/24915`. +//! This socket is listening on multiple network interfaces at the same time. +//! For each network interface, a new listening address is created. These may +//! change over time as interfaces become available or unavailable. +//! For example, in case of our TCP transport it may (among others) listen on +//! the loopback interface (localhost) `/ip4/127.0.0.1/tcp/24915` as well as the +//! local network `/ip4/192.168.178.25/tcp/24915`. //! //! In addition, if provided on the CLI, let's instruct our local node to dial a //! remote peer. //! //! ```rust //! use std::{error::Error, time::Duration}; -//! use tracing_subscriber::EnvFilter; +//! //! use libp2p::{noise, ping, tcp, yamux, Multiaddr}; +//! use tracing_subscriber::EnvFilter; //! //! #[tokio::main] //! 
async fn main() -> Result<(), Box> { @@ -278,7 +293,9 @@ //! yamux::Config::default, //! )? //! .with_behaviour(|_| ping::Behaviour::default())? -//! .with_swarm_config(|cfg| cfg.with_idle_connection_timeout(Duration::from_secs(u64::MAX))) +//! .with_swarm_config(|cfg| { +//! cfg.with_idle_connection_timeout(Duration::from_secs(u64::MAX)) +//! }) //! .build(); //! //! // Tell the swarm to listen on all interfaces and a random, OS-assigned @@ -305,9 +322,10 @@ //! //! ```no_run //! use std::{error::Error, time::Duration}; -//! use tracing_subscriber::EnvFilter; -//! use libp2p::{noise, ping, tcp, yamux, Multiaddr, swarm::SwarmEvent}; +//! //! use futures::prelude::*; +//! use libp2p::{noise, ping, swarm::SwarmEvent, tcp, yamux, Multiaddr}; +//! use tracing_subscriber::EnvFilter; //! //! #[tokio::main] //! async fn main() -> Result<(), Box> { @@ -323,7 +341,9 @@ //! yamux::Config::default, //! )? //! .with_behaviour(|_| ping::Behaviour::default())? -//! .with_swarm_config(|cfg| cfg.with_idle_connection_timeout(Duration::from_secs(u64::MAX))) +//! .with_swarm_config(|cfg| { +//! cfg.with_idle_connection_timeout(Duration::from_secs(u64::MAX)) +//! }) //! .build(); //! //! // Tell the swarm to listen on all interfaces and a random, OS-assigned @@ -378,9 +398,9 @@ //! //! Note: The [`Multiaddr`] at the end being one of the [`Multiaddr`] printed //! earlier in terminal window one. -//! Both peers have to be in the same network with which the address is associated. -//! In our case any printed addresses can be used, as both peers run on the same -//! device. +//! Both peers have to be in the same network with which the address is +//! associated. In our case any printed addresses can be used, as both peers run +//! on the same device. //! //! The two nodes will establish a connection and send each other ping and pong //! messages every 15 seconds. 
diff --git a/misc/allow-block-list/src/lib.rs b/misc/allow-block-list/src/lib.rs index f93cf4ffefa..0ad5617f8db 100644 --- a/misc/allow-block-list/src/lib.rs +++ b/misc/allow-block-list/src/lib.rs @@ -31,12 +31,12 @@ //! #[derive(NetworkBehaviour)] //! # #[behaviour(prelude = "libp2p_swarm::derive_prelude")] //! struct MyBehaviour { -//! allowed_peers: allow_block_list::Behaviour, +//! allowed_peers: allow_block_list::Behaviour, //! } //! //! # fn main() { //! let behaviour = MyBehaviour { -//! allowed_peers: allow_block_list::Behaviour::default() +//! allowed_peers: allow_block_list::Behaviour::default(), //! }; //! # } //! ``` @@ -51,27 +51,37 @@ //! #[derive(NetworkBehaviour)] //! # #[behaviour(prelude = "libp2p_swarm::derive_prelude")] //! struct MyBehaviour { -//! blocked_peers: allow_block_list::Behaviour, +//! blocked_peers: allow_block_list::Behaviour, //! } //! //! # fn main() { //! let behaviour = MyBehaviour { -//! blocked_peers: allow_block_list::Behaviour::default() +//! blocked_peers: allow_block_list::Behaviour::default(), //! }; //! # } //! ``` -use libp2p_core::transport::PortUse; -use libp2p_core::{Endpoint, Multiaddr}; +use std::{ + collections::{HashSet, VecDeque}, + convert::Infallible, + fmt, + task::{Context, Poll, Waker}, +}; + +use libp2p_core::{transport::PortUse, Endpoint, Multiaddr}; use libp2p_identity::PeerId; use libp2p_swarm::{ - dummy, CloseConnection, ConnectionDenied, ConnectionId, FromSwarm, NetworkBehaviour, THandler, - THandlerInEvent, THandlerOutEvent, ToSwarm, + dummy, + CloseConnection, + ConnectionDenied, + ConnectionId, + FromSwarm, + NetworkBehaviour, + THandler, + THandlerInEvent, + THandlerOutEvent, + ToSwarm, }; -use std::collections::{HashSet, VecDeque}; -use std::convert::Infallible; -use std::fmt; -use std::task::{Context, Poll, Waker}; /// A [`NetworkBehaviour`] that can act as an allow or block list. #[derive(Default, Debug)] @@ -101,7 +111,8 @@ impl Behaviour { /// Allow connections to the given peer. 
/// - /// Returns whether the peer was newly inserted. Does nothing if the peer was already present in the set. + /// Returns whether the peer was newly inserted. Does nothing if the peer + /// was already present in the set. pub fn allow_peer(&mut self, peer: PeerId) -> bool { let inserted = self.state.peers.insert(peer); if inserted { @@ -116,7 +127,8 @@ impl Behaviour { /// /// All active connections to this peer will be closed immediately. /// - /// Returns whether the peer was present in the set. Does nothing if the peer was not present in the set. + /// Returns whether the peer was present in the set. Does nothing if the + /// peer was not present in the set. pub fn disallow_peer(&mut self, peer: PeerId) -> bool { let removed = self.state.peers.remove(&peer); if removed { @@ -139,7 +151,8 @@ impl Behaviour { /// /// All active connections to this peer will be closed immediately. /// - /// Returns whether the peer was newly inserted. Does nothing if the peer was already present in the set. + /// Returns whether the peer was newly inserted. Does nothing if the peer + /// was already present in the set. pub fn block_peer(&mut self, peer: PeerId) -> bool { let inserted = self.state.peers.insert(peer); if inserted { @@ -153,7 +166,8 @@ impl Behaviour { /// Unblock connections to a given peer. /// - /// Returns whether the peer was present in the set. Does nothing if the peer was not present in the set. + /// Returns whether the peer was present in the set. Does nothing if the + /// peer was not present in the set. pub fn unblock_peer(&mut self, peer: PeerId) -> bool { let removed = self.state.peers.remove(&peer); if removed { @@ -165,7 +179,8 @@ impl Behaviour { } } -/// A connection to this peer is not explicitly allowed and was thus [`denied`](ConnectionDenied). +/// A connection to this peer is not explicitly allowed and was thus +/// [`denied`](ConnectionDenied). 
#[derive(Debug)] pub struct NotAllowed { peer: PeerId, @@ -179,7 +194,8 @@ impl fmt::Display for NotAllowed { impl std::error::Error for NotAllowed {} -/// A connection to this peer was explicitly blocked and was thus [`denied`](ConnectionDenied). +/// A connection to this peer was explicitly blocked and was thus +/// [`denied`](ConnectionDenied). #[derive(Debug)] pub struct Blocked { peer: PeerId, @@ -294,10 +310,11 @@ where #[cfg(test)] mod tests { - use super::*; use libp2p_swarm::{dial_opts::DialOpts, DialError, ListenError, Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt; + use super::*; + #[async_std::test] async fn cannot_dial_blocked_peer() { let mut dialer = Swarm::new_ephemeral(|_| Behaviour::::default()); diff --git a/misc/connection-limits/src/lib.rs b/misc/connection-limits/src/lib.rs index 016a7f2cfd4..c13ddcbacfb 100644 --- a/misc/connection-limits/src/lib.rs +++ b/misc/connection-limits/src/lib.rs @@ -18,29 +18,45 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. +use std::{ + collections::{HashMap, HashSet}, + convert::Infallible, + fmt, + task::{Context, Poll}, +}; + use libp2p_core::{transport::PortUse, ConnectedPoint, Endpoint, Multiaddr}; use libp2p_identity::PeerId; use libp2p_swarm::{ behaviour::{ConnectionEstablished, DialFailure, ListenFailure}, - dummy, ConnectionClosed, ConnectionDenied, ConnectionId, FromSwarm, NetworkBehaviour, THandler, - THandlerInEvent, THandlerOutEvent, ToSwarm, + dummy, + ConnectionClosed, + ConnectionDenied, + ConnectionId, + FromSwarm, + NetworkBehaviour, + THandler, + THandlerInEvent, + THandlerOutEvent, + ToSwarm, }; -use std::collections::{HashMap, HashSet}; -use std::convert::Infallible; -use std::fmt; -use std::task::{Context, Poll}; /// A [`NetworkBehaviour`] that enforces a set of [`ConnectionLimits`]. /// -/// For these limits to take effect, this needs to be composed into the behaviour tree of your application. 
+/// For these limits to take effect, this needs to be composed into the +/// behaviour tree of your application. /// -/// If a connection is denied due to a limit, either a [`SwarmEvent::IncomingConnectionError`](libp2p_swarm::SwarmEvent::IncomingConnectionError) +/// If a connection is denied due to a limit, either a +/// [`SwarmEvent::IncomingConnectionError`](libp2p_swarm::SwarmEvent::IncomingConnectionError) /// or [`SwarmEvent::OutgoingConnectionError`](libp2p_swarm::SwarmEvent::OutgoingConnectionError) will be emitted. -/// The [`ListenError::Denied`](libp2p_swarm::ListenError::Denied) and respectively the [`DialError::Denied`](libp2p_swarm::DialError::Denied) variant -/// contain a [`ConnectionDenied`] type that can be downcast to [`Exceeded`] error if (and only if) **this** -/// behaviour denied the connection. +/// The [`ListenError::Denied`](libp2p_swarm::ListenError::Denied) and +/// respectively the [`DialError::Denied`](libp2p_swarm::DialError::Denied) +/// variant contain a [`ConnectionDenied`] type that can be downcast to +/// [`Exceeded`] error if (and only if) **this** behaviour denied the +/// connection. /// -/// If you employ multiple [`NetworkBehaviour`]s that manage connections, it may also be a different error. +/// If you employ multiple [`NetworkBehaviour`]s that manage connections, it may +/// also be a different error. /// /// # Example /// @@ -53,9 +69,9 @@ use std::task::{Context, Poll}; /// #[derive(NetworkBehaviour)] /// # #[behaviour(prelude = "libp2p_swarm::derive_prelude")] /// struct MyBehaviour { -/// identify: identify::Behaviour, -/// ping: ping::Behaviour, -/// limits: connection_limits::Behaviour +/// identify: identify::Behaviour, +/// ping: ping::Behaviour, +/// limits: connection_limits::Behaviour, /// } /// ``` pub struct Behaviour { @@ -81,7 +97,8 @@ impl Behaviour { } /// Returns a mutable reference to [`ConnectionLimits`]. - /// > **Note**: A new limit will not be enforced against existing connections. 
+ /// > **Note**: A new limit will not be enforced against existing + /// > connections. pub fn limits_mut(&mut self) -> &mut ConnectionLimits { &mut self.limits } @@ -158,32 +175,36 @@ pub struct ConnectionLimits { } impl ConnectionLimits { - /// Configures the maximum number of concurrently incoming connections being established. + /// Configures the maximum number of concurrently incoming connections being + /// established. pub fn with_max_pending_incoming(mut self, limit: Option) -> Self { self.max_pending_incoming = limit; self } - /// Configures the maximum number of concurrently outgoing connections being established. + /// Configures the maximum number of concurrently outgoing connections being + /// established. pub fn with_max_pending_outgoing(mut self, limit: Option) -> Self { self.max_pending_outgoing = limit; self } - /// Configures the maximum number of concurrent established inbound connections. + /// Configures the maximum number of concurrent established inbound + /// connections. pub fn with_max_established_incoming(mut self, limit: Option) -> Self { self.max_established_incoming = limit; self } - /// Configures the maximum number of concurrent established outbound connections. + /// Configures the maximum number of concurrent established outbound + /// connections. pub fn with_max_established_outgoing(mut self, limit: Option) -> Self { self.max_established_outgoing = limit; self } - /// Configures the maximum number of concurrent established connections (both - /// inbound and outbound). + /// Configures the maximum number of concurrent established connections + /// (both inbound and outbound). /// /// Note: This should be used in conjunction with /// [`ConnectionLimits::with_max_established_incoming`] to prevent possible @@ -193,8 +214,8 @@ impl ConnectionLimits { self } - /// Configures the maximum number of concurrent established connections per peer, - /// regardless of direction (incoming or outgoing). 
+ /// Configures the maximum number of concurrent established connections per + /// peer, regardless of direction (incoming or outgoing). pub fn with_max_established_per_peer(mut self, limit: Option) -> Self { self.max_established_per_peer = limit; self @@ -367,14 +388,19 @@ impl NetworkBehaviour for Behaviour { #[cfg(test)] mod tests { - use super::*; use libp2p_swarm::{ - behaviour::toggle::Toggle, dial_opts::DialOpts, dial_opts::PeerCondition, DialError, - ListenError, Swarm, SwarmEvent, + behaviour::toggle::Toggle, + dial_opts::{DialOpts, PeerCondition}, + DialError, + ListenError, + Swarm, + SwarmEvent, }; use libp2p_swarm_test::SwarmExt; use quickcheck::*; + use super::*; + #[test] fn max_outgoing() { use rand::Rng; @@ -480,13 +506,17 @@ mod tests { quickcheck(prop as fn(_)); } - /// Another sibling [`NetworkBehaviour`] implementation might deny established connections in - /// [`handle_established_outbound_connection`] or [`handle_established_inbound_connection`]. - /// [`Behaviour`] must not increase the established counters in - /// [`handle_established_outbound_connection`] or [`handle_established_inbound_connection`], but - /// in [`SwarmEvent::ConnectionEstablished`] as the connection might still be denied by a - /// sibling [`NetworkBehaviour`] in the former case. Only in the latter case - /// ([`SwarmEvent::ConnectionEstablished`]) can the connection be seen as established. + /// Another sibling [`NetworkBehaviour`] implementation might deny + /// established connections in + /// [`handle_established_outbound_connection`] or + /// [`handle_established_inbound_connection`]. [`Behaviour`] must not + /// increase the established counters in + /// [`handle_established_outbound_connection`] or + /// [`handle_established_inbound_connection`], but + /// in [`SwarmEvent::ConnectionEstablished`] as the connection might still + /// be denied by a sibling [`NetworkBehaviour`] in the former case. 
Only + /// in the latter case ([`SwarmEvent::ConnectionEstablished`]) can the + /// connection be seen as established. #[test] fn support_other_behaviour_denying_connection() { let mut swarm1 = Swarm::new_ephemeral(|_| { @@ -520,7 +550,8 @@ mod tests { .limits .established_inbound_connections .len(), - "swarm1 connection limit behaviour to not count denied established connection as established connection" + "swarm1 connection limit behaviour to not count denied established connection as \ + established connection" ) }); } diff --git a/misc/keygen/src/config.rs b/misc/keygen/src/config.rs index e6c563b3c32..7d46b1849bd 100644 --- a/misc/keygen/src/config.rs +++ b/misc/keygen/src/config.rs @@ -1,10 +1,8 @@ +use std::{error::Error, path::Path}; + use base64::prelude::*; +use libp2p_identity::{Keypair, PeerId}; use serde::{Deserialize, Serialize}; -use std::error::Error; -use std::path::Path; - -use libp2p_identity::Keypair; -use libp2p_identity::PeerId; #[derive(Clone, Serialize, Deserialize)] #[serde(rename_all = "PascalCase")] diff --git a/misc/keygen/src/main.rs b/misc/keygen/src/main.rs index 64d98005369..974edff7ba2 100644 --- a/misc/keygen/src/main.rs +++ b/misc/keygen/src/main.rs @@ -1,9 +1,12 @@ +use std::{ + error::Error, + path::PathBuf, + str::{self, FromStr}, + sync::mpsc, + thread, +}; + use base64::prelude::*; -use std::error::Error; -use std::path::PathBuf; -use std::str::{self, FromStr}; -use std::sync::mpsc; -use std::thread; mod config; @@ -39,8 +42,8 @@ enum Command { }, } -// Due to the fact that a peer id uses a SHA-256 multihash, it always starts with the -// bytes 0x1220, meaning that only some characters are valid. +// Due to the fact that a peer id uses a SHA-256 multihash, it always starts +// with the bytes 0x1220, meaning that only some characters are valid. const ALLOWED_FIRST_BYTE: &[u8] = b"NPQRSTUVWXYZ"; // The base58 alphabet is not necessarily obvious. 
@@ -60,10 +63,11 @@ fn main() -> Result<(), Box> { let peer_id = keypair.public().into(); assert_eq!( - PeerId::from_str(&config.identity.peer_id)?, - peer_id, - "Expect peer id derived from private key and peer id retrieved from config to match." - ); + PeerId::from_str(&config.identity.peer_id)?, + peer_id, + "Expect peer id derived from private key and peer id retrieved from config to \ + match." + ); (peer_id, keypair) } diff --git a/misc/memory-connection-limits/src/lib.rs b/misc/memory-connection-limits/src/lib.rs index e2a89977991..00720c019a2 100644 --- a/misc/memory-connection-limits/src/lib.rs +++ b/misc/memory-connection-limits/src/lib.rs @@ -18,35 +18,48 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use libp2p_core::{transport::PortUse, Endpoint, Multiaddr}; -use libp2p_identity::PeerId; -use libp2p_swarm::{ - dummy, ConnectionDenied, ConnectionId, FromSwarm, NetworkBehaviour, THandler, THandlerInEvent, - THandlerOutEvent, ToSwarm, -}; -use std::convert::Infallible; - use std::{ + convert::Infallible, fmt, task::{Context, Poll}, time::{Duration, Instant}, }; + +use libp2p_core::{transport::PortUse, Endpoint, Multiaddr}; +use libp2p_identity::PeerId; +use libp2p_swarm::{ + dummy, + ConnectionDenied, + ConnectionId, + FromSwarm, + NetworkBehaviour, + THandler, + THandlerInEvent, + THandlerOutEvent, + ToSwarm, +}; use sysinfo::MemoryRefreshKind; /// A [`NetworkBehaviour`] that enforces a set of memory usage based limits. /// -/// For these limits to take effect, this needs to be composed into the behaviour tree of your application. +/// For these limits to take effect, this needs to be composed into the +/// behaviour tree of your application. 
/// -/// If a connection is denied due to a limit, either a [`SwarmEvent::IncomingConnectionError`](libp2p_swarm::SwarmEvent::IncomingConnectionError) +/// If a connection is denied due to a limit, either a +/// [`SwarmEvent::IncomingConnectionError`](libp2p_swarm::SwarmEvent::IncomingConnectionError) /// or [`SwarmEvent::OutgoingConnectionError`](libp2p_swarm::SwarmEvent::OutgoingConnectionError) will be emitted. -/// The [`ListenError::Denied`](libp2p_swarm::ListenError::Denied) and respectively the [`DialError::Denied`](libp2p_swarm::DialError::Denied) variant -/// contain a [`ConnectionDenied`] type that can be downcast to [`MemoryUsageLimitExceeded`] error if (and only if) **this** +/// The [`ListenError::Denied`](libp2p_swarm::ListenError::Denied) and +/// respectively the [`DialError::Denied`](libp2p_swarm::DialError::Denied) +/// variant contain a [`ConnectionDenied`] type that can be downcast to +/// [`MemoryUsageLimitExceeded`] error if (and only if) **this** /// behaviour denied the connection. /// -/// If you employ multiple [`NetworkBehaviour`]s that manage connections, it may also be a different error. +/// If you employ multiple [`NetworkBehaviour`]s that manage connections, it may +/// also be a different error. /// -/// [Behaviour::with_max_bytes] and [Behaviour::with_max_percentage] are mutually exclusive. -/// If you need to employ both of them, compose two instances of [Behaviour] into your custom behaviour. +/// [Behaviour::with_max_bytes] and [Behaviour::with_max_percentage] are +/// mutually exclusive. If you need to employ both of them, compose two +/// instances of [Behaviour] into your custom behaviour. 
/// /// # Example /// @@ -58,8 +71,8 @@ use sysinfo::MemoryRefreshKind; /// #[derive(NetworkBehaviour)] /// # #[behaviour(prelude = "libp2p_swarm::derive_prelude")] /// struct MyBehaviour { -/// identify: identify::Behaviour, -/// limits: memory_connection_limits::Behaviour +/// identify: identify::Behaviour, +/// limits: memory_connection_limits::Behaviour, /// } /// ``` pub struct Behaviour { @@ -68,7 +81,8 @@ pub struct Behaviour { last_refreshed: Instant, } -/// The maximum duration for which the retrieved memory-stats of the process are allowed to be stale. +/// The maximum duration for which the retrieved memory-stats of the process are +/// allowed to be stale. /// /// Once exceeded, we will retrieve new stats. const MAX_STALE_DURATION: Duration = Duration::from_millis(100); @@ -76,7 +90,8 @@ const MAX_STALE_DURATION: Duration = Duration::from_millis(100); impl Behaviour { /// Sets the process memory usage threshold in absolute bytes. /// - /// New inbound and outbound connections will be denied when the threshold is reached. + /// New inbound and outbound connections will be denied when the threshold + /// is reached. pub fn with_max_bytes(max_allowed_bytes: usize) -> Self { Self { max_allowed_bytes, @@ -87,9 +102,11 @@ impl Behaviour { } } - /// Sets the process memory usage threshold in the percentage of the total physical memory. + /// Sets the process memory usage threshold in the percentage of the total + /// physical memory. /// - /// New inbound and outbound connections will be denied when the threshold is reached. + /// New inbound and outbound connections will be denied when the threshold + /// is reached. 
pub fn with_max_percentage(percentage: f64) -> Self { use sysinfo::{RefreshKind, System}; @@ -223,9 +240,9 @@ impl fmt::Display for MemoryUsageLimitExceeded { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, - "process physical memory usage limit exceeded: process memory: {} bytes, max allowed: {} bytes", - self.process_physical_memory_bytes, - self.max_allowed_bytes, + "process physical memory usage limit exceeded: process memory: {} bytes, max allowed: \ + {} bytes", + self.process_physical_memory_bytes, self.max_allowed_bytes, ) } } diff --git a/misc/memory-connection-limits/tests/max_bytes.rs b/misc/memory-connection-limits/tests/max_bytes.rs index 7f89e2c7a9a..3859386a303 100644 --- a/misc/memory-connection-limits/tests/max_bytes.rs +++ b/misc/memory-connection-limits/tests/max_bytes.rs @@ -20,14 +20,14 @@ mod util; +use std::time::Duration; + use libp2p_core::Multiaddr; use libp2p_identity::PeerId; use libp2p_memory_connection_limits::*; -use std::time::Duration; -use util::*; - use libp2p_swarm::{dial_opts::DialOpts, DialError, Swarm}; use libp2p_swarm_test::SwarmExt; +use util::*; #[test] fn max_bytes() { @@ -69,7 +69,8 @@ fn max_bytes() { .expect("Unexpected connection limit."); } - std::thread::sleep(Duration::from_millis(100)); // Memory stats are only updated every 100ms internally, ensure they are up-to-date when we try to exceed it. + std::thread::sleep(Duration::from_millis(100)); // Memory stats are only updated every 100ms internally, ensure they are + // up-to-date when we try to exceed it. 
match network .dial( diff --git a/misc/memory-connection-limits/tests/max_percentage.rs b/misc/memory-connection-limits/tests/max_percentage.rs index bfb1b504af5..6131f7a1e2c 100644 --- a/misc/memory-connection-limits/tests/max_percentage.rs +++ b/misc/memory-connection-limits/tests/max_percentage.rs @@ -20,18 +20,19 @@ mod util; +use std::time::Duration; + use libp2p_core::Multiaddr; use libp2p_identity::PeerId; use libp2p_memory_connection_limits::*; -use std::time::Duration; -use sysinfo::{MemoryRefreshKind, RefreshKind}; -use util::*; - use libp2p_swarm::{ dial_opts::{DialOpts, PeerCondition}, - DialError, Swarm, + DialError, + Swarm, }; use libp2p_swarm_test::SwarmExt; +use sysinfo::{MemoryRefreshKind, RefreshKind}; +use util::*; #[test] fn max_percentage() { @@ -76,7 +77,8 @@ fn max_percentage() { .expect("Unexpected connection limit."); } - std::thread::sleep(Duration::from_millis(100)); // Memory stats are only updated every 100ms internally, ensure they are up-to-date when we try to exceed it. + std::thread::sleep(Duration::from_millis(100)); // Memory stats are only updated every 100ms internally, ensure they are + // up-to-date when we try to exceed it. match network .dial( diff --git a/misc/memory-connection-limits/tests/util/mod.rs b/misc/memory-connection-limits/tests/util/mod.rs index 333b0ee135f..1a3a16d090f 100644 --- a/misc/memory-connection-limits/tests/util/mod.rs +++ b/misc/memory-connection-limits/tests/util/mod.rs @@ -18,15 +18,24 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use std::task::{Context, Poll}; +use std::{ + convert::Infallible, + task::{Context, Poll}, +}; use libp2p_core::{transport::PortUse, Endpoint, Multiaddr}; use libp2p_identity::PeerId; use libp2p_swarm::{ - dummy, ConnectionDenied, ConnectionId, FromSwarm, NetworkBehaviour, THandler, THandlerInEvent, - THandlerOutEvent, ToSwarm, + dummy, + ConnectionDenied, + ConnectionId, + FromSwarm, + NetworkBehaviour, + THandler, + THandlerInEvent, + THandlerOutEvent, + ToSwarm, }; -use std::convert::Infallible; #[derive(libp2p_swarm_derive::NetworkBehaviour)] #[behaviour(prelude = "libp2p_swarm::derive_prelude")] diff --git a/misc/metrics/src/bandwidth.rs b/misc/metrics/src/bandwidth.rs index 8a0f54e5b65..c7b85025e4b 100644 --- a/misc/metrics/src/bandwidth.rs +++ b/misc/metrics/src/bandwidth.rs @@ -1,4 +1,10 @@ -use crate::protocol_stack; +use std::{ + convert::TryFrom as _, + io, + pin::Pin, + task::{Context, Poll}, +}; + use futures::{ future::{MapOk, TryFutureExt}, io::{IoSlice, IoSliceMut}, @@ -16,12 +22,8 @@ use prometheus_client::{ metrics::{counter::Counter, family::Family}, registry::{Registry, Unit}, }; -use std::{ - convert::TryFrom as _, - io, - pin::Pin, - task::{Context, Poll}, -}; + +use crate::protocol_stack; #[derive(Debug, Clone)] #[pin_project::pin_project] @@ -160,8 +162,8 @@ impl ConnectionMetrics { } } -/// Wraps around a [`StreamMuxer`] and counts the number of bytes that go through all the opened -/// streams. +/// Wraps around a [`StreamMuxer`] and counts the number of bytes that go +/// through all the opened streams. #[derive(Clone)] #[pin_project::pin_project] pub struct Muxer { @@ -224,7 +226,8 @@ where } } -/// Wraps around an [`AsyncRead`] + [`AsyncWrite`] and logs the bandwidth that goes through it. +/// Wraps around an [`AsyncRead`] + [`AsyncWrite`] and logs the bandwidth that +/// goes through it. 
#[pin_project::pin_project] pub struct InstrumentedStream { #[pin] diff --git a/misc/metrics/src/dcutr.rs b/misc/metrics/src/dcutr.rs index 3e60dca2cab..6a0f27394e9 100644 --- a/misc/metrics/src/dcutr.rs +++ b/misc/metrics/src/dcutr.rs @@ -18,10 +18,11 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use prometheus_client::encoding::{EncodeLabelSet, EncodeLabelValue}; -use prometheus_client::metrics::counter::Counter; -use prometheus_client::metrics::family::Family; -use prometheus_client::registry::Registry; +use prometheus_client::{ + encoding::{EncodeLabelSet, EncodeLabelValue}, + metrics::{counter::Counter, family::Family}, + registry::Registry, +}; pub(crate) struct Metrics { events: Family, diff --git a/misc/metrics/src/gossipsub.rs b/misc/metrics/src/gossipsub.rs index 2d90b92fbc6..b3e2e11f0b0 100644 --- a/misc/metrics/src/gossipsub.rs +++ b/misc/metrics/src/gossipsub.rs @@ -18,8 +18,7 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use prometheus_client::metrics::counter::Counter; -use prometheus_client::registry::Registry; +use prometheus_client::{metrics::counter::Counter, registry::Registry}; pub(crate) struct Metrics { messages: Counter, diff --git a/misc/metrics/src/identify.rs b/misc/metrics/src/identify.rs index 03ac3f9634e..f46e157a779 100644 --- a/misc/metrics/src/identify.rs +++ b/misc/metrics/src/identify.rs @@ -18,17 +18,21 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::protocol_stack; +use std::{ + collections::HashMap, + sync::{Arc, Mutex}, +}; + use libp2p_identity::PeerId; use libp2p_swarm::StreamProtocol; -use prometheus_client::collector::Collector; -use prometheus_client::encoding::{DescriptorEncoder, EncodeMetric}; -use prometheus_client::metrics::counter::Counter; -use prometheus_client::metrics::gauge::ConstGauge; -use prometheus_client::metrics::MetricType; -use prometheus_client::registry::Registry; -use std::collections::HashMap; -use std::sync::{Arc, Mutex}; +use prometheus_client::{ + collector::Collector, + encoding::{DescriptorEncoder, EncodeMetric}, + metrics::{counter::Counter, gauge::ConstGauge, MetricType}, + registry::Registry, +}; + +use crate::protocol_stack; const ALLOWED_PROTOCOLS: &[StreamProtocol] = &[ #[cfg(feature = "dcutr")] @@ -72,24 +76,23 @@ impl Metrics { let pushed = Counter::default(); sub_registry.register( "pushed", - "Number of times identification information of the local node has \ - been actively pushed to a peer.", + "Number of times identification information of the local node has been actively \ + pushed to a peer.", pushed.clone(), ); let received = Counter::default(); sub_registry.register( "received", - "Number of times identification information has been received from \ - a peer", + "Number of times identification information has been received from a peer", received.clone(), ); let sent = Counter::default(); sub_registry.register( "sent", - "Number of times identification information of the local node has \ - been sent to a peer in response to an identification request", + "Number of times identification information of the local node has been sent to a peer \ + in response to an identification request", sent.clone(), ); @@ -205,7 +208,8 @@ impl Collector for Peers { { let mut family_encoder = encoder.encode_descriptor( "remote_protocols", - "Number of connected nodes supporting a specific protocol, with \"unrecognized\" for each peer supporting one or more unrecognized 
protocols", + "Number of connected nodes supporting a specific protocol, with \"unrecognized\" \ + for each peer supporting one or more unrecognized protocols", None, MetricType::Gauge, )?; diff --git a/misc/metrics/src/kad.rs b/misc/metrics/src/kad.rs index bd5a6526737..481c315282c 100644 --- a/misc/metrics/src/kad.rs +++ b/misc/metrics/src/kad.rs @@ -18,11 +18,15 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use prometheus_client::encoding::{EncodeLabelSet, EncodeLabelValue}; -use prometheus_client::metrics::counter::Counter; -use prometheus_client::metrics::family::Family; -use prometheus_client::metrics::histogram::{exponential_buckets, Histogram}; -use prometheus_client::registry::{Registry, Unit}; +use prometheus_client::{ + encoding::{EncodeLabelSet, EncodeLabelValue}, + metrics::{ + counter::Counter, + family::Family, + histogram::{exponential_buckets, Histogram}, + }, + registry::{Registry, Unit}, +}; pub(crate) struct Metrics { query_result_get_record_ok: Counter, @@ -126,7 +130,8 @@ impl Metrics { let routing_updated = Family::default(); sub_registry.register( "routing_updated", - "Number of peers added, updated or evicted to, in or from a specific kbucket in the routing table", + "Number of peers added, updated or evicted to, in or from a specific kbucket in the \ + routing table", routing_updated.clone(), ); diff --git a/misc/metrics/src/lib.rs b/misc/metrics/src/lib.rs index 74fd15e2181..1fd79e7846f 100644 --- a/misc/metrics/src/lib.rs +++ b/misc/metrics/src/lib.rs @@ -67,8 +67,8 @@ impl Metrics { /// Create a new set of Swarm and protocol [`Metrics`]. 
/// /// ``` - /// use prometheus_client::registry::Registry; /// use libp2p_metrics::Metrics; + /// use prometheus_client::registry::Registry; /// let mut registry = Registry::default(); /// let metrics = Metrics::new(&mut registry); /// ``` diff --git a/misc/metrics/src/ping.rs b/misc/metrics/src/ping.rs index afdd05134a6..ce653c72ea1 100644 --- a/misc/metrics/src/ping.rs +++ b/misc/metrics/src/ping.rs @@ -18,11 +18,15 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use prometheus_client::encoding::{EncodeLabelSet, EncodeLabelValue}; -use prometheus_client::metrics::counter::Counter; -use prometheus_client::metrics::family::Family; -use prometheus_client::metrics::histogram::{exponential_buckets, Histogram}; -use prometheus_client::registry::{Registry, Unit}; +use prometheus_client::{ + encoding::{EncodeLabelSet, EncodeLabelValue}, + metrics::{ + counter::Counter, + family::Family, + histogram::{exponential_buckets, Histogram}, + }, + registry::{Registry, Unit}, +}; #[derive(Clone, Hash, PartialEq, Eq, EncodeLabelSet, Debug)] struct FailureLabels { diff --git a/misc/metrics/src/protocol_stack.rs b/misc/metrics/src/protocol_stack.rs index 57760df79a1..5e3745bdd45 100644 --- a/misc/metrics/src/protocol_stack.rs +++ b/misc/metrics/src/protocol_stack.rs @@ -18,13 +18,21 @@ mod tests { #[test] fn ip6_tcp_wss_p2p() { - let ma = Multiaddr::try_from("/ip6/2001:8a0:7ac5:4201:3ac9:86ff:fe31:7095/tcp/8000/wss/p2p/QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSupNKC").expect("testbad"); + let ma = Multiaddr::try_from( + "/ip6/2001:8a0:7ac5:4201:3ac9:86ff:fe31:7095/tcp/8000/wss/p2p/\ + QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSupNKC", + ) + .expect("testbad"); let protocol_stack = as_string(&ma); assert_eq!(protocol_stack, "/ip6/tcp/wss/p2p"); - let ma = Multiaddr::try_from("/ip6/2001:8a0:7ac5:4201:3ac9:86ff:fe31:7095/tcp/8000/tls/ws/p2p/QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSupNKC").expect("testbad"); + let ma = 
Multiaddr::try_from( + "/ip6/2001:8a0:7ac5:4201:3ac9:86ff:fe31:7095/tcp/8000/tls/ws/p2p/\ + QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSupNKC", + ) + .expect("testbad"); let protocol_stack = as_string(&ma); diff --git a/misc/metrics/src/relay.rs b/misc/metrics/src/relay.rs index 607daf3f1e1..d4c25b6eb3e 100644 --- a/misc/metrics/src/relay.rs +++ b/misc/metrics/src/relay.rs @@ -18,10 +18,11 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use prometheus_client::encoding::{EncodeLabelSet, EncodeLabelValue}; -use prometheus_client::metrics::counter::Counter; -use prometheus_client::metrics::family::Family; -use prometheus_client::registry::Registry; +use prometheus_client::{ + encoding::{EncodeLabelSet, EncodeLabelValue}, + metrics::{counter::Counter, family::Family}, + registry::Registry, +}; pub(crate) struct Metrics { events: Family, diff --git a/misc/metrics/src/swarm.rs b/misc/metrics/src/swarm.rs index 51c0a0af253..6e95d082de6 100644 --- a/misc/metrics/src/swarm.rs +++ b/misc/metrics/src/swarm.rs @@ -18,18 +18,25 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use std::collections::HashMap; -use std::sync::{Arc, Mutex}; +use std::{ + collections::HashMap, + sync::{Arc, Mutex}, +}; -use crate::protocol_stack; use libp2p_swarm::{ConnectionId, DialError, SwarmEvent}; -use prometheus_client::encoding::{EncodeLabelSet, EncodeLabelValue}; -use prometheus_client::metrics::counter::Counter; -use prometheus_client::metrics::family::Family; -use prometheus_client::metrics::histogram::{exponential_buckets, Histogram}; -use prometheus_client::registry::{Registry, Unit}; +use prometheus_client::{ + encoding::{EncodeLabelSet, EncodeLabelValue}, + metrics::{ + counter::Counter, + family::Family, + histogram::{exponential_buckets, Histogram}, + }, + registry::{Registry, Unit}, +}; use web_time::Instant; +use crate::protocol_stack; + pub(crate) struct Metrics { connections_incoming: Family, connections_incoming_error: Family, diff --git a/misc/multistream-select/src/dialer_select.rs b/misc/multistream-select/src/dialer_select.rs index 83bb4909041..3f2eccf1a11 100644 --- a/misc/multistream-select/src/dialer_select.rs +++ b/misc/multistream-select/src/dialer_select.rs @@ -20,17 +20,23 @@ //! Protocol negotiation strategies for the peer acting as the dialer. -use crate::protocol::{HeaderLine, Message, MessageIO, Protocol, ProtocolError}; -use crate::{Negotiated, NegotiationError, Version}; - -use futures::prelude::*; use std::{ convert::TryFrom as _, - iter, mem, + iter, + mem, pin::Pin, task::{Context, Poll}, }; +use futures::prelude::*; + +use crate::{ + protocol::{HeaderLine, Message, MessageIO, Protocol, ProtocolError}, + Negotiated, + NegotiationError, + Version, +}; + /// Returns a `Future` that negotiates a protocol on the given I/O stream /// for a peer acting as the _dialer_ (or _initiator_). /// @@ -84,8 +90,9 @@ enum State { impl Future for DialerSelectFuture where - // The Unpin bound here is required because we produce a `Negotiated` as the output. - // It also makes the implementation considerably easier to write. 
+ // The Unpin bound here is required because we produce + // a `Negotiated` as the output. It also makes + // the implementation considerably easier to write. R: AsyncRead + AsyncWrite + Unpin, I: Iterator, I::Item: AsRef, @@ -204,15 +211,19 @@ where #[cfg(test)] mod tests { - use super::*; - use crate::listener_select_proto; - use async_std::future::timeout; - use async_std::net::{TcpListener, TcpStream}; - use quickcheck::{Arbitrary, Gen, GenRange}; use std::time::Duration; + + use async_std::{ + future::timeout, + net::{TcpListener, TcpStream}, + }; + use quickcheck::{Arbitrary, Gen, GenRange}; use tracing::metadata::LevelFilter; use tracing_subscriber::EnvFilter; + use super::*; + use crate::listener_select_proto; + #[test] fn select_proto_basic() { async fn run(version: Version) { @@ -353,8 +364,8 @@ mod tests { .unwrap(); assert_eq!(proto, "/proto1"); - // client can close the connection even though protocol negotiation is not yet done, i.e. - // `_server_connection` had been untouched. + // client can close the connection even though protocol negotiation is not yet + // done, i.e. `_server_connection` had been untouched. io.close().await.unwrap(); }); diff --git a/misc/multistream-select/src/length_delimited.rs b/misc/multistream-select/src/length_delimited.rs index 3a7988d0548..5efaeec8b1f 100644 --- a/misc/multistream-select/src/length_delimited.rs +++ b/misc/multistream-select/src/length_delimited.rs @@ -18,8 +18,6 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use bytes::{Buf as _, BufMut as _, Bytes, BytesMut}; -use futures::{io::IoSlice, prelude::*}; use std::{ convert::TryFrom as _, io, @@ -27,6 +25,9 @@ use std::{ task::{Context, Poll}, }; +use bytes::{Buf as _, BufMut as _, Bytes, BytesMut}; +use futures::{io::IoSlice, prelude::*}; + const MAX_LEN_BYTES: u16 = 2; const MAX_FRAME_SIZE: u16 = (1 << (MAX_LEN_BYTES * 8 - MAX_LEN_BYTES)) - 1; const DEFAULT_BUFFER_SIZE: usize = 64; @@ -35,15 +36,16 @@ const DEFAULT_BUFFER_SIZE: usize = 64; /// wrapping an underlying `AsyncRead + AsyncWrite` I/O resource. /// /// We purposely only support a frame sizes up to 16KiB (2 bytes unsigned varint -/// frame length). Frames mostly consist in a short protocol name, which is highly -/// unlikely to be more than 16KiB long. +/// frame length). Frames mostly consist in a short protocol name, which is +/// highly unlikely to be more than 16KiB long. #[pin_project::pin_project] #[derive(Debug)] pub(crate) struct LengthDelimited { /// The inner I/O resource. #[pin] inner: R, - /// Read buffer for a single incoming unsigned-varint length-delimited frame. + /// Read buffer for a single incoming unsigned-varint length-delimited + /// frame. read_buffer: BytesMut, /// Write buffer for outgoing unsigned-varint length-delimited frames. write_buffer: BytesMut, @@ -84,7 +86,8 @@ impl LengthDelimited { } } - /// Drops the [`LengthDelimited`] resource, yielding the underlying I/O stream. + /// Drops the [`LengthDelimited`] resource, yielding the underlying I/O + /// stream. /// /// # Panic /// @@ -98,9 +101,9 @@ impl LengthDelimited { self.inner } - /// Converts the [`LengthDelimited`] into a [`LengthDelimitedReader`], dropping the - /// uvi-framed `Sink` in favour of direct `AsyncWrite` access to the underlying - /// I/O stream. + /// Converts the [`LengthDelimited`] into a [`LengthDelimitedReader`], + /// dropping the uvi-framed `Sink` in favour of direct `AsyncWrite` + /// access to the underlying I/O stream. 
/// /// This is typically done if further uvi-framed messages are expected to be /// received but no more such messages are written, allowing the writing of @@ -293,7 +296,8 @@ where } /// A `LengthDelimitedReader` implements a `Stream` of uvi-length-delimited -/// frames on an underlying I/O resource combined with direct `AsyncWrite` access. +/// frames on an underlying I/O resource combined with direct `AsyncWrite` +/// access. #[pin_project::pin_project] #[derive(Debug)] pub(crate) struct LengthDelimitedReader { @@ -302,7 +306,8 @@ pub(crate) struct LengthDelimitedReader { } impl LengthDelimitedReader { - /// Destroys the `LengthDelimitedReader` and returns the underlying I/O stream. + /// Destroys the `LengthDelimitedReader` and returns the underlying I/O + /// stream. /// /// This method is guaranteed not to drop any data read from or not yet /// submitted to the underlying I/O stream. @@ -311,9 +316,10 @@ impl LengthDelimitedReader { /// /// Will panic if called while there is data in the read or write buffer. /// The read buffer is guaranteed to be empty whenever [`Stream::poll_next`] - /// yield a new `Message`. The write buffer is guaranteed to be empty whenever - /// [`LengthDelimited::poll_write_buffer`] yields [`Poll::Ready`] or after - /// the [`Sink`] has been completely flushed via [`Sink::poll_flush`]. + /// yields a new `Message`. The write buffer is guaranteed to be empty + /// whenever [`LengthDelimited::poll_write_buffer`] yields + /// [`Poll::Ready`] or after the [`Sink`] has been completely flushed + /// via [`Sink::poll_flush`]. 
pub(crate) fn into_inner(self) -> R { self.inner.into_inner() } @@ -383,10 +389,12 @@ where #[cfg(test)] mod tests { - use crate::length_delimited::LengthDelimited; + use std::io::ErrorKind; + use futures::{io::Cursor, prelude::*}; use quickcheck::*; - use std::io::ErrorKind; + + use crate::length_delimited::LengthDelimited; #[test] fn basic_read() { diff --git a/misc/multistream-select/src/lib.rs b/misc/multistream-select/src/lib.rs index 5565623f25e..55cb34ce7e5 100644 --- a/misc/multistream-select/src/lib.rs +++ b/misc/multistream-select/src/lib.rs @@ -20,28 +20,29 @@ //! # Multistream-select Protocol Negotiation //! -//! This crate implements the `multistream-select` protocol, which is the protocol -//! used by libp2p to negotiate which application-layer protocol to use with the -//! remote on a connection or substream. +//! This crate implements the `multistream-select` protocol, which is the +//! protocol used by libp2p to negotiate which application-layer protocol to use +//! with the remote on a connection or substream. //! -//! > **Note**: This crate is used primarily by core components of *libp2p* and it +//! > **Note**: This crate is used primarily by core components of *libp2p* and +//! > it //! > is usually not used directly on its own. //! //! ## Roles //! //! Two peers using the multistream-select negotiation protocol on an I/O stream -//! are distinguished by their role as a _dialer_ (or _initiator_) or as a _listener_ -//! (or _responder_). Thereby the dialer plays the active part, driving the protocol, -//! whereas the listener reacts to the messages received. +//! are distinguished by their role as a _dialer_ (or _initiator_) or as a +//! _listener_ (or _responder_). Thereby the dialer plays the active part, +//! driving the protocol, whereas the listener reacts to the messages received. //! -//! The dialer has two options: it can either pick a protocol from the complete list -//! 
of protocols that the listener supports, or it can directly suggest a protocol. -//! Either way, a selected protocol is sent to the listener who can either accept (by -//! echoing the same protocol) or reject (by responding with a message stating -//! "not available"). If a suggested protocol is not available, the dialer may -//! suggest another protocol. This process continues until a protocol is agreed upon, -//! yielding a [`Negotiated`] stream, or the dialer has run out of -//! alternatives. +//! The dialer has two options: it can either pick a protocol from the complete +//! list of protocols that the listener supports, or it can directly suggest a +//! protocol. Either way, a selected protocol is sent to the listener who can +//! either accept (by echoing the same protocol) or reject (by responding with a +//! message stating "not available"). If a suggested protocol is not available, +//! the dialer may suggest another protocol. This process continues until a +//! protocol is agreed upon, yielding a [`Negotiated`] stream, or the dialer has +//! run out of alternatives. //! //! See [`dialer_select_proto`] and [`listener_select_proto`]. //! @@ -57,12 +58,13 @@ //! [`Negotiated`] I/O stream before the negotiation //! data has been flushed. It is then expecting confirmation for that protocol //! as the first messages read from the stream. This behaviour allows the dialer -//! to immediately send data relating to the negotiated protocol together with the -//! remaining negotiation message(s). Note, however, that a dialer that performs -//! multiple 0-RTT negotiations in sequence for different protocols layered on -//! top of each other may trigger undesirable behaviour for a listener not -//! supporting one of the intermediate protocols. See -//! [`dialer_select_proto`] and the documentation of [`Version::V1Lazy`] for further details. +//! to immediately send data relating to the negotiated protocol together with +//! the remaining negotiation message(s). 
Note, however, that a dialer that +//! performs multiple 0-RTT negotiations in sequence for different protocols +//! layered on top of each other may trigger undesirable behaviour for a +//! listener not supporting one of the intermediate protocols. See +//! [`dialer_select_proto`] and the documentation of [`Version::V1Lazy`] for +//! further details. //! //! ## Examples //! @@ -70,20 +72,21 @@ //! //! ```no_run //! use async_std::net::TcpStream; -//! use multistream_select::{dialer_select_proto, Version}; //! use futures::prelude::*; +//! use multistream_select::{dialer_select_proto, Version}; //! //! async_std::task::block_on(async move { //! let socket = TcpStream::connect("127.0.0.1:10333").await.unwrap(); //! //! let protos = vec!["/echo/1.0.0", "/echo/2.5.0"]; -//! let (protocol, _io) = dialer_select_proto(socket, protos, Version::V1).await.unwrap(); +//! let (protocol, _io) = dialer_select_proto(socket, protos, Version::V1) +//! .await +//! .unwrap(); //! //! println!("Negotiated protocol: {:?}", protocol); //! // You can now use `_io` to communicate with the remote. //! }); //! ``` -//! #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] @@ -93,10 +96,12 @@ mod listener_select; mod negotiated; mod protocol; -pub use self::dialer_select::{dialer_select_proto, DialerSelectFuture}; -pub use self::listener_select::{listener_select_proto, ListenerSelectFuture}; -pub use self::negotiated::{Negotiated, NegotiatedComplete, NegotiationError}; -pub use self::protocol::ProtocolError; +pub use self::{ + dialer_select::{dialer_select_proto, DialerSelectFuture}, + listener_select::{listener_select_proto, ListenerSelectFuture}, + negotiated::{Negotiated, NegotiatedComplete, NegotiationError}, + protocol::ProtocolError, +}; /// Supported multistream-select versions. 
#[derive(Clone, Copy, Debug, PartialEq, Eq, Default)] @@ -118,21 +123,31 @@ pub enum Version { /// with the first round of application protocol data (or an attempt /// is made to read from the `Negotiated` I/O stream). /// - /// A listener will behave identically to `V1`. This ensures interoperability with `V1`. - /// Notably, it will immediately send the multistream header as well as the protocol - /// confirmation, resulting in multiple frames being sent on the underlying transport. - /// Nevertheless, if the listener supports the protocol that the dialer optimistically - /// settled on, it can be a 0-RTT negotiation. + /// A listener will behave identically to `V1`. This ensures + /// interoperability with `V1`. Notably, it will immediately send the + /// multistream header as well as the protocol confirmation, resulting + /// in multiple frames being sent on the underlying transport. + /// Nevertheless, if the listener supports the protocol that the dialer + /// optimistically settled on, it can be a 0-RTT negotiation. /// - /// > **Note**: `V1Lazy` is specific to `rust-libp2p`. The wire protocol is identical to `V1` - /// > and generally interoperable with peers only supporting `V1`. Nevertheless, there is a - /// > pitfall that is rarely encountered: When nesting multiple protocol negotiations, the - /// > listener should either be known to support all of the dialer's optimistically chosen - /// > protocols or there is must be no intermediate protocol without a payload and none of - /// > the protocol payloads must have the potential for being mistaken for a multistream-select - /// > protocol message. This avoids rare edge-cases whereby the listener may not recognize - /// > upgrade boundaries and erroneously process a request despite not supporting one of - /// > the intermediate protocols that the dialer committed to. See [1] and [2]. + /// > **Note**: `V1Lazy` is specific to `rust-libp2p`. 
The wire protocol is + /// > identical to `V1` + /// > and generally interoperable with peers only supporting `V1`. + /// > Nevertheless, there is a + /// > pitfall that is rarely encountered: When nesting multiple protocol + /// > negotiations, the + /// > listener should either be known to support all of the dialer's + /// > optimistically chosen + /// > protocols or there must be no intermediate protocol without a + /// > payload and none of + /// > the protocol payloads must have the potential for being mistaken for a + /// > multistream-select + /// > protocol message. This avoids rare edge-cases whereby the listener may + /// > not recognize + /// > upgrade boundaries and erroneously process a request despite not + /// > supporting one of + /// > the intermediate protocols that the dialer committed to. See [1] and + /// > [2]. /// /// [1]: https://github.com/multiformats/go-multistream/issues/20 /// [2]: https://github.com/libp2p/rust-libp2p/pull/1212 diff --git a/misc/multistream-select/src/listener_select.rs b/misc/multistream-select/src/listener_select.rs index b4236310a1d..8a9ec289669 100644 --- a/misc/multistream-select/src/listener_select.rs +++ b/misc/multistream-select/src/listener_select.rs @@ -21,11 +21,6 @@ //! Protocol negotiation strategies for the peer acting as the listener //! in a multistream-select protocol negotiation. -use crate::protocol::{HeaderLine, Message, MessageIO, Protocol, ProtocolError}; -use crate::{Negotiated, NegotiationError}; - -use futures::prelude::*; -use smallvec::SmallVec; use std::{ convert::TryFrom as _, mem, @@ -33,6 +28,15 @@ use std::{ task::{Context, Poll}, }; +use futures::prelude::*; +use smallvec::SmallVec; + +use crate::{ + protocol::{HeaderLine, Message, MessageIO, Protocol, ProtocolError}, + Negotiated, + NegotiationError, +}; + /// Returns a `Future` that negotiates a protocol on the given I/O stream /// for a peer acting as the _listener_ (or _responder_). 
/// @@ -109,8 +113,10 @@ enum State { impl Future for ListenerSelectFuture where - // The Unpin bound here is required because we produce a `Negotiated` as the output. - // It also makes the implementation considerably easier to write. + // The Unpin bound here is required because we + // produce a `Negotiated` as the output. + // It also makes the implementation considerably + // easier to write. R: AsyncRead + AsyncWrite + Unpin, N: AsRef + Clone, { @@ -186,16 +192,16 @@ where // reading the `N/A` response. if let ProtocolError::InvalidMessage = &err { tracing::trace!( - "Listener: Negotiation failed with invalid \ - message after protocol rejection." + "Listener: Negotiation failed with invalid message after \ + protocol rejection." ); return Poll::Ready(Err(NegotiationError::Failed)); } if let ProtocolError::IoError(e) = &err { if e.kind() == std::io::ErrorKind::UnexpectedEof { tracing::trace!( - "Listener: Negotiation failed with EOF \ - after protocol rejection." + "Listener: Negotiation failed with EOF after protocol \ + rejection." ); return Poll::Ready(Err(NegotiationError::Failed)); } diff --git a/misc/multistream-select/src/negotiated.rs b/misc/multistream-select/src/negotiated.rs index a24014a4f5f..de5051d2929 100644 --- a/misc/multistream-select/src/negotiated.rs +++ b/misc/multistream-select/src/negotiated.rs @@ -18,7 +18,14 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::protocol::{HeaderLine, Message, MessageReader, Protocol, ProtocolError}; +use std::{ + error::Error, + fmt, + io, + mem, + pin::Pin, + task::{Context, Poll}, +}; use futures::{ io::{IoSlice, IoSliceMut}, @@ -26,21 +33,17 @@ use futures::{ ready, }; use pin_project::pin_project; -use std::{ - error::Error, - fmt, io, mem, - pin::Pin, - task::{Context, Poll}, -}; + +use crate::protocol::{HeaderLine, Message, MessageReader, Protocol, ProtocolError}; /// An I/O stream that has settled on an (application-layer) protocol to use. /// /// A `Negotiated` represents an I/O stream that has _settled_ on a protocol -/// to use. In particular, it is not implied that all of the protocol negotiation -/// frames have yet been sent and / or received, just that the selected protocol -/// is fully determined. This is to allow the last protocol negotiation frames -/// sent by a peer to be combined in a single write, possibly piggy-backing -/// data from the negotiated protocol on top. +/// to use. In particular, it is not implied that all of the protocol +/// negotiation frames have yet been sent and / or received, just that the +/// selected protocol is fully determined. This is to allow the last protocol +/// negotiation frames sent by a peer to be combined in a single write, possibly +/// piggy-backing data from the negotiated protocol on top. /// /// Reading from a `Negotiated` I/O stream that still has pending negotiation /// protocol data to send implicitly triggers flushing of all yet unsent data. @@ -59,8 +62,10 @@ pub struct NegotiatedComplete { impl Future for NegotiatedComplete where - // `Unpin` is required not because of implementation details but because we produce the - // `Negotiated` as the output of the future. + // `Unpin` is required not because of + // implementation details but because we produce + // the `Negotiated` as the output of the + // future. 
TInner: AsyncRead + AsyncWrite + Unpin, { type Output = Result, NegotiationError>; @@ -205,8 +210,8 @@ enum State { /// The underlying I/O stream. #[pin] io: MessageReader, - /// The expected negotiation header/preamble (i.e. multistream-select version), - /// if one is still expected to be received. + /// The expected negotiation header/preamble (i.e. multistream-select + /// version), if one is still expected to be received. header: Option, /// The expected application protocol (i.e. name and version). protocol: Protocol, @@ -250,13 +255,13 @@ where } // TODO: implement once method is stabilized in the futures crate - /*unsafe fn initializer(&self) -> Initializer { - match &self.state { - State::Completed { io, .. } => io.initializer(), - State::Expecting { io, .. } => io.inner_ref().initializer(), - State::Invalid => panic!("Negotiated: Invalid state"), - } - }*/ + // unsafe fn initializer(&self) -> Initializer { + // match &self.state { + // State::Completed { io, .. } => io.initializer(), + // State::Expecting { io, .. } => io.inner_ref().initializer(), + // State::Invalid => panic!("Negotiated: Invalid state"), + // } + // } fn poll_read_vectored( mut self: Pin<&mut Self>, @@ -305,7 +310,8 @@ where } fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - // Ensure all data has been flushed, including optimistic multistream-select messages. + // Ensure all data has been flushed, including optimistic multistream-select + // messages. ready!(self .as_mut() .poll_flush(cx) @@ -317,7 +323,10 @@ where StateProj::Expecting { io, .. } => { let close_poll = io.poll_close(cx); if let Poll::Ready(Ok(())) = close_poll { - tracing::debug!("Stream closed. Confirmation from remote for optimstic protocol negotiation still pending") + tracing::debug!( + "Stream closed. 
Confirmation from remote for optimstic protocol \ + negotiation still pending" + ) } close_poll } diff --git a/misc/multistream-select/src/protocol.rs b/misc/multistream-select/src/protocol.rs index 92b6acedaeb..687c5b7a49f 100644 --- a/misc/multistream-select/src/protocol.rs +++ b/misc/multistream-select/src/protocol.rs @@ -25,19 +25,23 @@ //! `Stream` and `Sink` implementations of `MessageIO` and //! `MessageReader`. -use crate::length_delimited::{LengthDelimited, LengthDelimitedReader}; -use crate::Version; - -use bytes::{BufMut, Bytes, BytesMut}; -use futures::{io::IoSlice, prelude::*, ready}; use std::{ error::Error, - fmt, io, + fmt, + io, pin::Pin, task::{Context, Poll}, }; + +use bytes::{BufMut, Bytes, BytesMut}; +use futures::{io::IoSlice, prelude::*, ready}; use unsigned_varint as uvi; +use crate::{ + length_delimited::{LengthDelimited, LengthDelimitedReader}, + Version, +}; + /// The maximum number of supported protocols that can be processed. const MAX_PROTOCOLS: usize = 1000; @@ -248,9 +252,9 @@ impl MessageIO { /// [`Message`]-oriented `Sink` in favour of direct `AsyncWrite` access /// to the underlying I/O stream. /// - /// This is typically done if further negotiation messages are expected to be - /// received but no more messages are written, allowing the writing of - /// follow-up protocol data to commence. + /// This is typically done if further negotiation messages are expected to + /// be received but no more messages are written, allowing the writing + /// of follow-up protocol data to commence. pub(crate) fn into_reader(self) -> MessageReader { MessageReader { inner: self.inner.into_reader(), @@ -261,11 +265,12 @@ impl MessageIO { /// /// # Panics /// - /// Panics if the read buffer or write buffer is not empty, meaning that an incoming - /// protocol negotiation frame has been partially read or an outgoing frame - /// has not yet been flushed. The read buffer is guaranteed to be empty whenever - /// `MessageIO::poll` returned a message. 
The write buffer is guaranteed to be empty - /// when the sink has been flushed. + /// Panics if the read buffer or write buffer is not empty, meaning that an + /// incoming protocol negotiation frame has been partially read or an + /// outgoing frame has not yet been flushed. The read buffer is + /// guaranteed to be empty whenever `MessageIO::poll` returned a + /// message. The write buffer is guaranteed to be empty when the sink + /// has been flushed. pub(crate) fn into_inner(self) -> R { self.inner.into_inner() } @@ -331,11 +336,12 @@ impl MessageReader { /// /// # Panics /// - /// Panics if the read buffer or write buffer is not empty, meaning that either - /// an incoming protocol negotiation frame has been partially read, or an - /// outgoing frame has not yet been flushed. The read buffer is guaranteed to - /// be empty whenever `MessageReader::poll` returned a message. The write - /// buffer is guaranteed to be empty whenever the sink has been flushed. + /// Panics if the read buffer or write buffer is not empty, meaning that + /// either an incoming protocol negotiation frame has been partially + /// read, or an outgoing frame has not yet been flushed. The read buffer + /// is guaranteed to be empty whenever `MessageReader::poll` returned a + /// message. The write buffer is guaranteed to be empty whenever the + /// sink has been flushed. 
pub(crate) fn into_inner(self) -> R { self.inner.into_inner() } @@ -461,10 +467,12 @@ impl fmt::Display for ProtocolError { #[cfg(test)] mod tests { - use super::*; - use quickcheck::*; use std::iter; + use quickcheck::*; + + use super::*; + impl Arbitrary for Protocol { fn arbitrary(g: &mut Gen) -> Protocol { let n = g.gen_range(1..g.size()); diff --git a/misc/quick-protobuf-codec/src/lib.rs b/misc/quick-protobuf-codec/src/lib.rs index c57b7da7db8..01ee2cccdc4 100644 --- a/misc/quick-protobuf-codec/src/lib.rs +++ b/misc/quick-protobuf-codec/src/lib.rs @@ -1,10 +1,10 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +use std::{io, marker::PhantomData}; + use asynchronous_codec::{Decoder, Encoder}; use bytes::{Buf, BufMut, BytesMut}; use quick_protobuf::{BytesReader, MessageRead, MessageWrite, Writer, WriterBackend}; -use std::io; -use std::marker::PhantomData; mod generated; @@ -13,8 +13,9 @@ pub use generated::test as proto; /// [`Codec`] implements [`Encoder`] and [`Decoder`], uses [`unsigned_varint`] /// -/// to prefix messages with their length and uses [`quick_protobuf`] and a provided -/// `struct` implementing [`MessageRead`] and [`MessageWrite`] to do the encoding. +/// to prefix messages with their length and uses [`quick_protobuf`] and a +/// provided `struct` implementing [`MessageRead`] and [`MessageWrite`] to do +/// the encoding. pub struct Codec { max_message_len_bytes: usize, phantom: PhantomData<(In, Out)>, @@ -46,7 +47,8 @@ impl Encoder for Codec { } } -/// Write the message's length (i.e. `size`) to `dst` as a variable-length integer. +/// Write the message's length (i.e. `size`) to `dst` as a variable-length +/// integer. 
fn write_length(message: &impl MessageWrite, dst: &mut BytesMut) { let message_length = message.get_size(); @@ -182,12 +184,13 @@ impl From for io::Error { #[cfg(test)] mod tests { - use super::*; + use std::error::Error; + use asynchronous_codec::FramedRead; - use futures::io::Cursor; - use futures::{FutureExt, StreamExt}; + use futures::{io::Cursor, FutureExt, StreamExt}; use quickcheck::{Arbitrary, Gen, QuickCheck}; - use std::error::Error; + + use super::*; #[test] fn honors_max_message_length() { @@ -244,7 +247,8 @@ mod tests { QuickCheck::new().quickcheck(prop as fn(_, _) -> _) } - /// Constructs a [`BytesMut`] of the provided length where the message is all zeros. + /// Constructs a [`BytesMut`] of the provided length where the message is + /// all zeros. fn varint_zeroes(length: usize) -> BytesMut { let mut buf = unsigned_varint::encode::usize_buffer(); let encoded_length = unsigned_varint::encode::usize(length, &mut buf); diff --git a/misc/quick-protobuf-codec/tests/large_message.rs b/misc/quick-protobuf-codec/tests/large_message.rs index 65dafe065d1..a434d3ce17f 100644 --- a/misc/quick-protobuf-codec/tests/large_message.rs +++ b/misc/quick-protobuf-codec/tests/large_message.rs @@ -1,7 +1,6 @@ use asynchronous_codec::Encoder; use bytes::BytesMut; -use quick_protobuf_codec::proto; -use quick_protobuf_codec::Codec; +use quick_protobuf_codec::{proto, Codec}; #[test] fn encode_large_message() { diff --git a/misc/quickcheck-ext/src/lib.rs b/misc/quickcheck-ext/src/lib.rs index 4ada7e73ba1..9c2deec8743 100644 --- a/misc/quickcheck-ext/src/lib.rs +++ b/misc/quickcheck-ext/src/lib.rs @@ -1,9 +1,9 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -pub use quickcheck::*; - use core::ops::Range; + use num_traits::sign::Unsigned; +pub use quickcheck::*; pub trait GenRange { fn gen_range(&mut self, _range: Range) -> T; diff --git a/misc/rw-stream-sink/src/lib.rs b/misc/rw-stream-sink/src/lib.rs index f10e683ad33..4bbd4b6e64f 100644 --- 
a/misc/rw-stream-sink/src/lib.rs +++ b/misc/rw-stream-sink/src/lib.rs @@ -23,11 +23,11 @@ //! [`AsyncRead`] and [`AsyncWrite`]. //! //! Each call to [`AsyncWrite::poll_write`] will send one packet to the sink. -//! Calls to [`AsyncRead::poll_read`] will read from the stream's incoming packets. +//! Calls to [`AsyncRead::poll_read`] will read from the stream's incoming +//! packets. #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -use futures::{prelude::*, ready}; use std::{ io::{self, Read}, mem, @@ -35,6 +35,8 @@ use std::{ task::{Context, Poll}, }; +use futures::{prelude::*, ready}; + static_assertions::const_assert!(mem::size_of::() <= mem::size_of::()); /// Wraps a [`Stream`] and [`Sink`] whose items are buffers. @@ -115,14 +117,16 @@ where #[cfg(test)] mod tests { - use super::RwStreamSink; - use async_std::task; - use futures::{channel::mpsc, prelude::*}; use std::{ pin::Pin, task::{Context, Poll}, }; + use async_std::task; + use futures::{channel::mpsc, prelude::*}; + + use super::RwStreamSink; + // This struct merges a stream and a sink and is quite useful for tests. 
struct Wrapper(St, Si); diff --git a/misc/server/src/behaviour.rs b/misc/server/src/behaviour.rs index 36b18c9798d..526b1f89364 100644 --- a/misc/server/src/behaviour.rs +++ b/misc/server/src/behaviour.rs @@ -1,13 +1,16 @@ -use libp2p::autonat; -use libp2p::identify; -use libp2p::kad; -use libp2p::ping; -use libp2p::relay; -use libp2p::swarm::behaviour::toggle::Toggle; -use libp2p::swarm::{NetworkBehaviour, StreamProtocol}; -use libp2p::{identity, Multiaddr, PeerId}; -use std::str::FromStr; -use std::time::Duration; +use std::{str::FromStr, time::Duration}; + +use libp2p::{ + autonat, + identify, + identity, + kad, + ping, + relay, + swarm::{behaviour::toggle::Toggle, NetworkBehaviour, StreamProtocol}, + Multiaddr, + PeerId, +}; const BOOTNODES: [&str; 4] = [ "QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN", diff --git a/misc/server/src/config.rs b/misc/server/src/config.rs index c3e3ec529c1..2e4b2746d09 100644 --- a/misc/server/src/config.rs +++ b/misc/server/src/config.rs @@ -1,7 +1,7 @@ +use std::{error::Error, path::Path}; + use libp2p::Multiaddr; use serde_derive::Deserialize; -use std::error::Error; -use std::path::Path; #[derive(Clone, Deserialize)] #[serde(rename_all = "PascalCase")] diff --git a/misc/server/src/http_service.rs b/misc/server/src/http_service.rs index cee1aa96e28..87a8adb94e0 100644 --- a/misc/server/src/http_service.rs +++ b/misc/server/src/http_service.rs @@ -18,15 +18,13 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use axum::extract::State; -use axum::http::StatusCode; -use axum::response::IntoResponse; -use axum::routing::get; -use axum::Router; -use prometheus_client::encoding::text::encode; -use prometheus_client::registry::Registry; -use std::net::SocketAddr; -use std::sync::{Arc, Mutex}; +use std::{ + net::SocketAddr, + sync::{Arc, Mutex}, +}; + +use axum::{extract::State, http::StatusCode, response::IntoResponse, routing::get, Router}; +use prometheus_client::{encoding::text::encode, registry::Registry}; use tokio::net::TcpListener; const METRICS_CONTENT_TYPE: &str = "application/openmetrics-text;charset=utf-8;version=1.0.0"; diff --git a/misc/server/src/main.rs b/misc/server/src/main.rs index 820921beaed..ad355c0c267 100644 --- a/misc/server/src/main.rs +++ b/misc/server/src/main.rs @@ -1,18 +1,20 @@ +use std::{error::Error, path::PathBuf, str::FromStr}; + use base64::Engine; use clap::Parser; use futures::stream::StreamExt; -use libp2p::identity; -use libp2p::identity::PeerId; -use libp2p::kad; -use libp2p::metrics::{Metrics, Recorder}; -use libp2p::swarm::SwarmEvent; -use libp2p::tcp; -use libp2p::{identify, noise, yamux}; -use prometheus_client::metrics::info::Info; -use prometheus_client::registry::Registry; -use std::error::Error; -use std::path::PathBuf; -use std::str::FromStr; +use libp2p::{ + identify, + identity, + identity::PeerId, + kad, + metrics::{Metrics, Recorder}, + noise, + swarm::SwarmEvent, + tcp, + yamux, +}; +use prometheus_client::{metrics::info::Info, registry::Registry}; use tracing_subscriber::EnvFilter; use zeroize::Zeroizing; diff --git a/misc/webrtc-utils/src/fingerprint.rs b/misc/webrtc-utils/src/fingerprint.rs index a02c4d1116d..babd0a84e27 100644 --- a/misc/webrtc-utils/src/fingerprint.rs +++ b/misc/webrtc-utils/src/fingerprint.rs @@ -19,16 +19,18 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
+use std::fmt; + use libp2p_core::multihash; use sha2::Digest as _; -use std::fmt; pub const SHA256: &str = "sha-256"; const MULTIHASH_SHA256_CODE: u64 = 0x12; type Multihash = multihash::Multihash<64>; -/// A certificate fingerprint that is assumed to be created using the SHA256 hash algorithm. +/// A certificate fingerprint that is assumed to be created using the SHA256 +/// hash algorithm. #[derive(Eq, PartialEq, Copy, Clone)] pub struct Fingerprint([u8; 32]); @@ -39,7 +41,8 @@ impl Fingerprint { Fingerprint(digest) } - /// Creates a new [Fingerprint] from a raw certificate by hashing the given bytes with SHA256. + /// Creates a new [Fingerprint] from a raw certificate by hashing the given + /// bytes with SHA256. pub fn from_certificate(bytes: &[u8]) -> Self { Fingerprint(sha2::Sha256::digest(bytes).into()) } @@ -85,7 +88,8 @@ impl fmt::Debug for Fingerprint { mod tests { use super::*; - const SDP_FORMAT: &str = "7D:E3:D8:3F:81:A6:80:59:2A:47:1E:6B:6A:BB:07:47:AB:D3:53:85:A8:09:3F:DF:E1:12:C1:EE:BB:6C:C6:AC"; + const SDP_FORMAT: &str = "7D:E3:D8:3F:81:A6:80:59:2A:47:1E:6B:6A:BB:07:47:AB:D3:53:85:A8:09:\ + 3F:DF:E1:12:C1:EE:BB:6C:C6:AC"; const REGULAR_FORMAT: [u8; 32] = hex_literal::hex!("7DE3D83F81A680592A471E6B6ABB0747ABD35385A8093FDFE112C1EEBB6CC6AC"); diff --git a/misc/webrtc-utils/src/noise.rs b/misc/webrtc-utils/src/noise.rs index 9180acfc1ca..7964c1694b9 100644 --- a/misc/webrtc-utils/src/noise.rs +++ b/misc/webrtc-utils/src/noise.rs @@ -19,16 +19,17 @@ // DEALINGS IN THE SOFTWARE. 
use futures::{AsyncRead, AsyncWrite, AsyncWriteExt}; -use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}; -use libp2p_core::UpgradeInfo; +use libp2p_core::{ + upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}, + UpgradeInfo, +}; use libp2p_identity as identity; use libp2p_identity::PeerId; use libp2p_noise as noise; +pub use noise::Error; use crate::fingerprint::Fingerprint; -pub use noise::Error; - pub async fn inbound( id_keys: identity::Keypair, stream: T, @@ -42,8 +43,8 @@ where .unwrap() .with_prologue(noise_prologue(client_fingerprint, server_fingerprint)); let info = noise.protocol_info().next().unwrap(); - // Note the roles are reversed because it allows the server (webrtc connection responder) to - // send application data 0.5 RTT earlier. + // Note the roles are reversed because it allows the server (webrtc connection + // responder) to send application data 0.5 RTT earlier. let (peer_id, mut channel) = noise.upgrade_outbound(stream, info).await?; channel.close().await?; @@ -64,8 +65,8 @@ where .unwrap() .with_prologue(noise_prologue(client_fingerprint, server_fingerprint)); let info = noise.protocol_info().next().unwrap(); - // Note the roles are reversed because it allows the server (webrtc connection responder) to - // send application data 0.5 RTT earlier. + // Note the roles are reversed because it allows the server (webrtc connection + // responder) to send application data 0.5 RTT earlier. 
let (peer_id, mut channel) = noise.upgrade_inbound(stream, info).await?; channel.close().await?; @@ -89,9 +90,10 @@ pub(crate) fn noise_prologue( #[cfg(test)] mod tests { - use super::*; use hex_literal::hex; + use super::*; + #[test] fn noise_prologue_tests() { let a = Fingerprint::raw(hex!( diff --git a/misc/webrtc-utils/src/sdp.rs b/misc/webrtc-utils/src/sdp.rs index 0796548f449..0a189c06a35 100644 --- a/misc/webrtc-utils/src/sdp.rs +++ b/misc/webrtc-utils/src/sdp.rs @@ -18,13 +18,13 @@ // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::fingerprint::Fingerprint; -use serde::Serialize; use std::net::{IpAddr, SocketAddr}; + +use rand::{distributions::Alphanumeric, thread_rng, Rng}; +use serde::Serialize; use tinytemplate::TinyTemplate; -use rand::distributions::Alphanumeric; -use rand::{thread_rng, Rng}; +use crate::fingerprint::Fingerprint; pub fn answer(addr: SocketAddr, server_fingerprint: Fingerprint, client_ufrag: &str) -> String { let answer = render_description( @@ -43,37 +43,41 @@ pub fn answer(addr: SocketAddr, server_fingerprint: Fingerprint, client_ufrag: & // // a=ice-lite // -// A lite implementation is only appropriate for devices that will *always* be connected to -// the public Internet and have a public IP address at which it can receive packets from any -// correspondent. ICE will not function when a lite implementation is placed behind a NAT -// (RFC8445). +// A lite implementation is only appropriate for devices that will *always* +// be connected to the public Internet and have a public IP address at which +// it can receive packets from any correspondent. ICE will not function when +// a lite implementation is placed behind a NAT (RFC8445). // // a=tls-id: // // "TLS ID" uniquely identifies a TLS association. 
-// The ICE protocol uses a "TLS ID" system to indicate whether a fresh DTLS connection -// must be reopened in case of ICE renegotiation. Considering that ICE renegotiations -// never happen in our use case, we can simply put a random value and not care about -// it. Note however that the TLS ID in the answer must be present if and only if the -// offer contains one. (RFC8842) -// TODO: is it true that renegotiations never happen? what about a connection closing? -// "tls-id" attribute MUST be present in the initial offer and respective answer (RFC8839). -// XXX: but right now browsers don't send it. +// The ICE protocol uses a "TLS ID" system to indicate whether a fresh DTLS +// connection must be reopened in case of ICE renegotiation. Considering +// that ICE renegotiations never happen in our use case, we can simply put a +// random value and not care about it. Note however that the TLS ID in the +// answer must be present if and only if the offer contains one. (RFC8842) +// TODO: is it true that renegotiations never happen? what about a +// connection closing? "tls-id" attribute MUST be present in the initial +// offer and respective answer (RFC8839). XXX: but right now browsers don't +// send it. // // a=setup:passive // -// "passive" indicates that the remote DTLS server will only listen for incoming -// connections. (RFC5763) +// "passive" indicates that the remote DTLS server will only listen for +// incoming connections. (RFC5763) // The answerer (server) MUST not be located behind a NAT (RFC6135). // -// The answerer MUST use either a setup attribute value of setup:active or setup:passive. -// Note that if the answerer uses setup:passive, then the DTLS handshake will not begin until -// the answerer is received, which adds additional latency. setup:active allows the answer and -// the DTLS handshake to occur in parallel. Thus, setup:active is RECOMMENDED. +// The answerer MUST use either a setup attribute value of setup:active or +// setup:passive. 
Note that if the answerer uses setup:passive, then the +// DTLS handshake will not begin until the answerer is received, which adds +// additional latency. setup:active allows the answer and the DTLS handshake +// to occur in parallel. Thus, setup:active is RECOMMENDED. // -// a=candidate: +// a=candidate: +// // -// A transport address for a candidate that can be used for connectivity checks (RFC8839). +// A transport address for a candidate that can be used for connectivity +// checks (RFC8839). // // a=end-of-candidates const SERVER_SESSION_DESCRIPTION: &str = "v=0 @@ -102,8 +106,8 @@ enum IpVersion { IP6, } -/// Context passed to the templating engine, which replaces the above placeholders (e.g. -/// `{IP_VERSION}`) with real values. +/// Context passed to the templating engine, which replaces the above +/// placeholders (e.g. `{IP_VERSION}`) with real values. #[derive(Serialize)] struct DescriptionContext { pub(crate) ip_version: IpVersion, diff --git a/misc/webrtc-utils/src/stream.rs b/misc/webrtc-utils/src/stream.rs index 17f746a92a1..99d04470d4c 100644 --- a/misc/webrtc-utils/src/stream.rs +++ b/misc/webrtc-utils/src/stream.rs @@ -19,20 +19,22 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use bytes::Bytes; -use futures::{channel::oneshot, prelude::*, ready}; - use std::{ io, pin::Pin, task::{Context, Poll}, }; -use crate::proto::{Flag, Message}; +use bytes::Bytes; +use futures::{channel::oneshot, prelude::*, ready}; + use crate::{ - stream::drop_listener::GracefullyClosed, - stream::framed_dc::FramedDc, - stream::state::{Closing, State}, + proto::{Flag, Message}, + stream::{ + drop_listener::GracefullyClosed, + framed_dc::FramedDc, + state::{Closing, State}, + }, }; mod drop_listener; @@ -41,8 +43,8 @@ mod state; /// Maximum length of a message. /// -/// "As long as message interleaving is not supported, the sender SHOULD limit the maximum message -/// size to 16 KB to avoid monopolization." 
+/// "As long as message interleaving is not supported, the sender SHOULD limit +/// the maximum message size to 16 KB to avoid monopolization." /// Source: pub const MAX_MSG_LEN: usize = 16 * 1024; /// Length of varint, in bytes. @@ -55,13 +57,15 @@ const MAX_DATA_LEN: usize = MAX_MSG_LEN - VARINT_LEN - PROTO_OVERHEAD; pub use drop_listener::DropListener; /// A stream backed by a WebRTC data channel. /// -/// To be a proper libp2p stream, we need to implement [`AsyncRead`] and [`AsyncWrite`] as well -/// as support a half-closed state which we do by framing messages in a protobuf envelope. +/// To be a proper libp2p stream, we need to implement [`AsyncRead`] and +/// [`AsyncWrite`] as well as support a half-closed state which we do by framing +/// messages in a protobuf envelope. pub struct Stream { io: FramedDc, state: State, read_buffer: Bytes, - /// Dropping this will close the oneshot and notify the receiver by emitting `Canceled`. + /// Dropping this will close the oneshot and notify the receiver by emitting + /// `Canceled`. drop_notifier: Option>, } @@ -69,7 +73,8 @@ impl Stream where T: AsyncRead + AsyncWrite + Unpin + Clone, { - /// Returns a new [`Stream`] and a [`DropListener`], which will notify the receiver when/if the stream is dropped. + /// Returns a new [`Stream`] and a [`DropListener`], which will notify the + /// receiver when/if the stream is dropped. pub fn new(data_channel: T) -> (Self, DropListener) { let (sender, receiver) = oneshot::channel(); @@ -175,8 +180,9 @@ where buf: &[u8], ) -> Poll> { while self.state.read_flags_in_async_write() { - // TODO: In case AsyncRead::poll_read encountered an error or returned None earlier, we will poll the - // underlying I/O resource once more. Is that allowed? How about introducing a state IoReadClosed? + // TODO: In case AsyncRead::poll_read encountered an error or returned None + // earlier, we will poll the underlying I/O resource once more. Is + // that allowed? 
How about introducing a state IoReadClosed? let Self { read_buffer, @@ -265,11 +271,12 @@ where #[cfg(test)] mod tests { - use super::*; - use crate::stream::framed_dc::codec; use asynchronous_codec::Encoder; use bytes::BytesMut; + use super::*; + use crate::stream::framed_dc::codec; + #[test] fn max_data_len() { // Largest possible message. @@ -285,8 +292,8 @@ mod tests { let mut dst = BytesMut::new(); codec.encode(protobuf, &mut dst).unwrap(); - // Ensure the varint prefixed and protobuf encoded largest message is no longer than the - // maximum limit specified in the libp2p WebRTC specification. + // Ensure the varint prefixed and protobuf encoded largest message is no longer + // than the maximum limit specified in the libp2p WebRTC specification. assert_eq!(dst.len(), MAX_MSG_LEN); } } diff --git a/misc/webrtc-utils/src/stream/drop_listener.rs b/misc/webrtc-utils/src/stream/drop_listener.rs index 9745e3d4364..f45a8d924a1 100644 --- a/misc/webrtc-utils/src/stream/drop_listener.rs +++ b/misc/webrtc-utils/src/stream/drop_listener.rs @@ -18,17 +18,25 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use futures::channel::oneshot; -use futures::channel::oneshot::Canceled; -use futures::{AsyncRead, AsyncWrite, FutureExt, SinkExt}; +use std::{ + future::Future, + io, + pin::Pin, + task::{Context, Poll}, +}; -use std::future::Future; -use std::io; -use std::pin::Pin; -use std::task::{Context, Poll}; +use futures::{ + channel::{oneshot, oneshot::Canceled}, + AsyncRead, + AsyncWrite, + FutureExt, + SinkExt, +}; -use crate::proto::{Flag, Message}; -use crate::stream::framed_dc::FramedDc; +use crate::{ + proto::{Flag, Message}, + stream::framed_dc::FramedDc, +}; #[must_use] pub struct DropListener { diff --git a/misc/webrtc-utils/src/stream/framed_dc.rs b/misc/webrtc-utils/src/stream/framed_dc.rs index 721178fdcd3..fc2ef81baa7 100644 --- a/misc/webrtc-utils/src/stream/framed_dc.rs +++ b/misc/webrtc-utils/src/stream/framed_dc.rs @@ -21,8 +21,10 @@ use asynchronous_codec::Framed; use futures::{AsyncRead, AsyncWrite}; -use crate::proto::Message; -use crate::stream::{MAX_DATA_LEN, MAX_MSG_LEN, VARINT_LEN}; +use crate::{ + proto::Message, + stream::{MAX_DATA_LEN, MAX_MSG_LEN, VARINT_LEN}, +}; pub(crate) type FramedDc = Framed>; pub(crate) fn new(inner: T) -> FramedDc @@ -30,8 +32,8 @@ where T: AsyncRead + AsyncWrite, { let mut framed = Framed::new(inner, codec()); - // If not set, `Framed` buffers up to 131kB of data before sending, which leads to "outbound - // packet larger than maximum message size" error in webrtc-rs. + // If not set, `Framed` buffers up to 131kB of data before sending, which leads + // to "outbound packet larger than maximum message size" error in webrtc-rs. framed.set_send_high_water_mark(MAX_DATA_LEN); framed } diff --git a/misc/webrtc-utils/src/stream/state.rs b/misc/webrtc-utils/src/stream/state.rs index 082325e4d47..1d4fe2bc9af 100644 --- a/misc/webrtc-utils/src/stream/state.rs +++ b/misc/webrtc-utils/src/stream/state.rs @@ -18,10 +18,10 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use bytes::Bytes; - use std::io; +use bytes::Bytes; + use crate::proto::Flag; #[derive(Debug, Copy, Clone)] @@ -44,10 +44,11 @@ pub(crate) enum State { }, } -/// Represents the state of closing one half (either read or write) of the connection. +/// Represents the state of closing one half (either read or write) of the +/// connection. /// -/// Gracefully closing the read or write requires sending the `STOP_SENDING` or `FIN` flag respectively -/// and flushing the underlying connection. +/// Gracefully closing the read or write requires sending the `STOP_SENDING` or +/// `FIN` flag respectively and flushing the underlying connection. #[derive(Debug, Copy, Clone)] pub(crate) enum Closing { Requested, @@ -179,10 +180,11 @@ impl State { } } - /// Whether we should read from the stream in the [`futures::AsyncWrite`] implementation. + /// Whether we should read from the stream in the [`futures::AsyncWrite`] + /// implementation. /// - /// This is necessary for read-closed streams because we would otherwise not read any more flags from - /// the socket. + /// This is necessary for read-closed streams because we would otherwise not + /// read any more flags from the socket. pub(crate) fn read_flags_in_async_write(&self) -> bool { matches!(self, Self::ReadClosed) } @@ -277,7 +279,8 @@ impl State { } } - /// Acts as a "barrier" for [`Stream::poll_close_read`](super::Stream::poll_close_read). + /// Acts as a "barrier" for + /// [`Stream::poll_close_read`](super::Stream::poll_close_read). 
pub(crate) fn close_read_barrier(&mut self) -> io::Result> { loop { match self { @@ -324,9 +327,10 @@ impl State { #[cfg(test)] mod tests { - use super::*; use std::io::ErrorKind; + use super::*; + #[test] fn cannot_read_after_receiving_fin() { let mut open = State::Open; diff --git a/misc/webrtc-utils/src/transport.rs b/misc/webrtc-utils/src/transport.rs index 440ad73ed02..26fcc4d155a 100644 --- a/misc/webrtc-utils/src/transport.rs +++ b/misc/webrtc-utils/src/transport.rs @@ -1,8 +1,11 @@ -use crate::fingerprint::Fingerprint; -use libp2p_core::{multiaddr::Protocol, Multiaddr}; use std::net::{IpAddr, SocketAddr}; -/// Parse the given [`Multiaddr`] into a [`SocketAddr`] and a [`Fingerprint`] for dialing. +use libp2p_core::{multiaddr::Protocol, Multiaddr}; + +use crate::fingerprint::Fingerprint; + +/// Parse the given [`Multiaddr`] into a [`SocketAddr`] and a [`Fingerprint`] +/// for dialing. pub fn parse_webrtc_dial_addr(addr: &Multiaddr) -> Option<(SocketAddr, Fingerprint)> { let mut iter = addr.iter(); @@ -38,12 +41,15 @@ pub fn parse_webrtc_dial_addr(addr: &Multiaddr) -> Option<(SocketAddr, Fingerpri #[cfg(test)] mod tests { - use super::*; use std::net::{Ipv4Addr, Ipv6Addr}; + use super::*; + #[test] fn parse_valid_address_with_certhash_and_p2p() { - let addr = "/ip4/127.0.0.1/udp/39901/webrtc-direct/certhash/uEiDikp5KVUgkLta1EjUN-IKbHk-dUBg8VzKgf5nXxLK46w/p2p/12D3KooWNpDk9w6WrEEcdsEH1y47W71S36yFjw4sd3j7omzgCSMS" + let addr = "/ip4/127.0.0.1/udp/39901/webrtc-direct/certhash/\ + uEiDikp5KVUgkLta1EjUN-IKbHk-dUBg8VzKgf5nXxLK46w/p2p/\ + 12D3KooWNpDk9w6WrEEcdsEH1y47W71S36yFjw4sd3j7omzgCSMS" .parse() .unwrap(); @@ -62,7 +68,8 @@ mod tests { #[test] fn peer_id_is_not_required() { - let addr = "/ip4/127.0.0.1/udp/39901/webrtc-direct/certhash/uEiDikp5KVUgkLta1EjUN-IKbHk-dUBg8VzKgf5nXxLK46w" + let addr = "/ip4/127.0.0.1/udp/39901/webrtc-direct/certhash/\ + uEiDikp5KVUgkLta1EjUN-IKbHk-dUBg8VzKgf5nXxLK46w" .parse() .unwrap(); @@ -81,10 +88,11 @@ mod tests { #[test] fn 
parse_ipv6() { - let addr = - "/ip6/::1/udp/12345/webrtc-direct/certhash/uEiDikp5KVUgkLta1EjUN-IKbHk-dUBg8VzKgf5nXxLK46w/p2p/12D3KooWNpDk9w6WrEEcdsEH1y47W71S36yFjw4sd3j7omzgCSMS" - .parse() - .unwrap(); + let addr = "/ip6/::1/udp/12345/webrtc-direct/certhash/\ + uEiDikp5KVUgkLta1EjUN-IKbHk-dUBg8VzKgf5nXxLK46w/p2p/\ + 12D3KooWNpDk9w6WrEEcdsEH1y47W71S36yFjw4sd3j7omzgCSMS" + .parse() + .unwrap(); let maybe_parsed = parse_webrtc_dial_addr(&addr); diff --git a/muxers/mplex/benches/split_send_size.rs b/muxers/mplex/benches/split_send_size.rs index 44eafa884ac..052d5dfeb60 100644 --- a/muxers/mplex/benches/split_send_size.rs +++ b/muxers/mplex/benches/split_send_size.rs @@ -21,21 +21,30 @@ //! A benchmark for the `split_send_size` configuration option //! using different transports. +use std::{pin::Pin, time::Duration}; + use async_std::task; use criterion::{black_box, criterion_group, criterion_main, Criterion, Throughput}; -use futures::future::poll_fn; -use futures::prelude::*; -use futures::{channel::oneshot, future::join}; -use libp2p_core::muxing::StreamMuxerExt; -use libp2p_core::transport::ListenerId; -use libp2p_core::Endpoint; -use libp2p_core::{multiaddr::multiaddr, muxing, transport, upgrade, Multiaddr, Transport}; +use futures::{ + channel::oneshot, + future::{join, poll_fn}, + prelude::*, +}; +use libp2p_core::{ + multiaddr::multiaddr, + muxing, + muxing::StreamMuxerExt, + transport, + transport::ListenerId, + upgrade, + Endpoint, + Multiaddr, + Transport, +}; use libp2p_identity as identity; use libp2p_identity::PeerId; use libp2p_mplex as mplex; use libp2p_plaintext as plaintext; -use std::pin::Pin; -use std::time::Duration; use tracing_subscriber::EnvFilter; type BenchTransport = transport::Boxed<(PeerId, muxing::StreamMuxerBox)>; @@ -120,7 +129,8 @@ fn run( } transport::TransportEvent::Incoming { upgrade, .. 
} => { let (_peer, mut conn) = upgrade.await.unwrap(); - // Just calling `poll_inbound` without `poll` is fine here because mplex makes progress through all `poll_` functions. It is hacky though. + // Just calling `poll_inbound` without `poll` is fine here because mplex makes + // progress through all `poll_` functions. It is hacky though. let mut s = poll_fn(|cx| conn.poll_inbound_unpin(cx)) .await .expect("unexpected error"); @@ -158,7 +168,8 @@ fn run( .unwrap() .await .unwrap(); - // Just calling `poll_outbound` without `poll` is fine here because mplex makes progress through all `poll_` functions. It is hacky though. + // Just calling `poll_outbound` without `poll` is fine here because mplex makes + // progress through all `poll_` functions. It is hacky though. let mut stream = poll_fn(|cx| conn.poll_outbound_unpin(cx)).await.unwrap(); let mut off = 0; loop { diff --git a/muxers/mplex/src/codec.rs b/muxers/mplex/src/codec.rs index 014ee899280..4f0ae725092 100644 --- a/muxers/mplex/src/codec.rs +++ b/muxers/mplex/src/codec.rs @@ -18,19 +18,22 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use asynchronous_codec::{Decoder, Encoder}; -use bytes::{BufMut, Bytes, BytesMut}; -use libp2p_core::Endpoint; use std::{ fmt, hash::{Hash, Hasher}, - io, mem, + io, + mem, }; + +use asynchronous_codec::{Decoder, Encoder}; +use bytes::{BufMut, Bytes, BytesMut}; +use libp2p_core::Endpoint; use unsigned_varint::{codec, encode}; // Maximum size for a packet: 1MB as per the spec. -// Since data is entirely buffered before being dispatched, we need a limit or remotes could just -// send a 4 TB-long packet full of zeroes that we kill our process with an OOM error. +// Since data is entirely buffered before being dispatched, we need a limit or +// remotes could just send a 4 TB-long packet full of zeroes that we kill our +// process with an OOM error. 
pub(crate) const MAX_FRAME_SIZE: usize = 1024 * 1024; /// A unique identifier used by the local node for a substream. @@ -41,13 +44,18 @@ pub(crate) const MAX_FRAME_SIZE: usize = 1024 * 1024; /// > **Note**: Streams are identified by a number and a role encoded as a flag /// > on each frame that is either odd (for receivers) or even (for initiators). /// > `Open` frames do not have a flag, but are sent unidirectionally. As a -/// > consequence, we need to remember if a stream was initiated by us or remotely +/// > consequence, we need to remember if a stream was initiated by us or +/// > remotely /// > and we store the information from our point of view as a `LocalStreamId`, -/// > i.e. receiving an `Open` frame results in a local ID with role `Endpoint::Listener`, -/// > whilst sending an `Open` frame results in a local ID with role `Endpoint::Dialer`. -/// > Receiving a frame with a flag identifying the remote as a "receiver" means that +/// > i.e. receiving an `Open` frame results in a local ID with role +/// > `Endpoint::Listener`, +/// > whilst sending an `Open` frame results in a local ID with role +/// > `Endpoint::Dialer`. +/// > Receiving a frame with a flag identifying the remote as a "receiver" means +/// > that /// > we initiated the stream, so the local ID has the role `Endpoint::Dialer`. -/// > Conversely, when receiving a frame with a flag identifying the remote as a "sender", +/// > Conversely, when receiving a frame with a flag identifying the remote as a +/// > "sender", /// > the corresponding local ID has the role `Endpoint::Listener`. #[derive(Copy, Clone, Eq, Debug)] pub(crate) struct LocalStreamId { @@ -66,11 +74,11 @@ impl fmt::Display for LocalStreamId { /// Manual implementation of [`PartialEq`]. /// -/// This is equivalent to the derived one but we purposely don't derive it because it triggers the -/// `clippy::derive_hash_xor_eq` lint. 
+/// This is equivalent to the derived one but we purposely don't derive it +/// because it triggers the `clippy::derive_hash_xor_eq` lint. /// -/// This [`PartialEq`] implementation satisfies the rule of v1 == v2 -> hash(v1) == hash(v2). -/// The inverse is not true but does not have to be. +/// This [`PartialEq`] implementation satisfies the rule of v1 == v2 -> hash(v1) +/// == hash(v2). The inverse is not true but does not have to be. impl PartialEq for LocalStreamId { fn eq(&self, other: &Self) -> bool { self.num.eq(&other.num) && self.role.eq(&other.role) diff --git a/muxers/mplex/src/config.rs b/muxers/mplex/src/config.rs index 3bf5e703a18..45bb05b2240 100644 --- a/muxers/mplex/src/config.rs +++ b/muxers/mplex/src/config.rs @@ -18,9 +18,10 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::codec::MAX_FRAME_SIZE; use std::cmp; +use crate::codec::MAX_FRAME_SIZE; + pub(crate) const DEFAULT_MPLEX_PROTOCOL_NAME: &str = "/mplex/6.7.0"; /// Configuration for the multiplexer. diff --git a/muxers/mplex/src/io.rs b/muxers/mplex/src/io.rs index 50fc0fc1d3f..27d5a3ef6fe 100644 --- a/muxers/mplex/src/io.rs +++ b/muxers/mplex/src/io.rs @@ -18,23 +18,34 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::codec::{Codec, Frame, LocalStreamId, RemoteStreamId}; -use crate::{MaxBufferBehaviour, MplexConfig}; +pub(crate) use std::io::{Error, Result}; +use std::{ + cmp, + collections::VecDeque, + fmt, + io, + mem, + sync::Arc, + task::{Context, Poll, Waker}, +}; + use asynchronous_codec::Framed; use bytes::Bytes; -use futures::task::{waker_ref, ArcWake, AtomicWaker, WakerRef}; -use futures::{prelude::*, ready, stream::Fuse}; +use futures::{ + prelude::*, + ready, + stream::Fuse, + task::{waker_ref, ArcWake, AtomicWaker, WakerRef}, +}; use nohash_hasher::{IntMap, IntSet}; use parking_lot::Mutex; use smallvec::SmallVec; -use std::collections::VecDeque; -use std::{ - cmp, fmt, io, mem, - sync::Arc, - task::{Context, Poll, Waker}, -}; -pub(crate) use std::io::{Error, Result}; +use crate::{ + codec::{Codec, Frame, LocalStreamId, RemoteStreamId}, + MaxBufferBehaviour, + MplexConfig, +}; /// A connection identifier. /// /// Randomly generated and mainly intended to improve log output @@ -194,7 +205,8 @@ where } } - /// Waits for a new inbound substream, returning the corresponding `LocalStreamId`. + /// Waits for a new inbound substream, returning the corresponding + /// `LocalStreamId`. /// /// If the number of already used substreams (i.e. substreams that have not /// yet been dropped via `drop_substream`) reaches the configured @@ -302,13 +314,13 @@ where /// reading and writing immediately. The remote is informed /// based on the current state of the substream: /// - /// * If the substream was open, a `Reset` frame is sent at - /// the next opportunity. - /// * If the substream was half-closed, i.e. a `Close` frame - /// has already been sent, nothing further happens. - /// * If the substream was half-closed by the remote, i.e. - /// a `Close` frame has already been received, a `Close` - /// frame is sent at the next opportunity. + /// * If the substream was open, a `Reset` frame is sent at the next + /// opportunity. 
+ /// * If the substream was half-closed, i.e. a `Close` frame has already + /// been sent, nothing further happens. + /// * If the substream was half-closed by the remote, i.e. a `Close` frame + /// has already been received, a `Close` frame is sent at the next + /// opportunity. /// /// If the multiplexed stream is closed or encountered /// an error earlier, or there is no known substream with @@ -424,7 +436,8 @@ where /// this method call are buffered up to the configured `max_substreams` /// and under consideration of the number of already used substreams, /// thereby waking the task that last called `poll_next_stream`, if any. - /// Inbound substreams received in excess of that limit are immediately reset. + /// Inbound substreams received in excess of that limit are immediately + /// reset. pub(crate) fn poll_read_stream( &mut self, cx: &Context<'_>, @@ -669,8 +682,8 @@ where // reading `Data` frames from the current stream when unblocked. debug_assert!( blocked_id != &id, - "Unexpected attempt at reading a new \ - frame from a substream with a full buffer." + "Unexpected attempt at reading a new frame from a substream with a full \ + buffer." 
); let _ = NotifierRead::register_read_stream(&self.notifier_read, cx.waker(), id); } else { @@ -1146,15 +1159,14 @@ const EXTRA_PENDING_FRAMES: usize = 1000; #[cfg(test)] mod tests { - use super::*; + use std::{collections::HashSet, num::NonZeroU8, ops::DerefMut, pin::Pin}; + use async_std::task; use asynchronous_codec::{Decoder, Encoder}; use bytes::BytesMut; use quickcheck::*; - use std::collections::HashSet; - use std::num::NonZeroU8; - use std::ops::DerefMut; - use std::pin::Pin; + + use super::*; impl Arbitrary for MaxBufferBehaviour { fn arbitrary(g: &mut Gen) -> MaxBufferBehaviour { diff --git a/muxers/mplex/src/lib.rs b/muxers/mplex/src/lib.rs index 17ca9ad46f6..2e6bbdfbf26 100644 --- a/muxers/mplex/src/lib.rs +++ b/muxers/mplex/src/lib.rs @@ -26,15 +26,23 @@ mod codec; mod config; mod io; -pub use config::{MaxBufferBehaviour, MplexConfig}; +use std::{ + cmp, + iter, + pin::Pin, + sync::Arc, + task::{Context, Poll}, +}; use bytes::Bytes; use codec::LocalStreamId; +pub use config::{MaxBufferBehaviour, MplexConfig}; use futures::{prelude::*, ready}; -use libp2p_core::muxing::{StreamMuxer, StreamMuxerEvent}; -use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade, UpgradeInfo}; +use libp2p_core::{ + muxing::{StreamMuxer, StreamMuxerEvent}, + upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade, UpgradeInfo}, +}; use parking_lot::Mutex; -use std::{cmp, iter, pin::Pin, sync::Arc, task::Context, task::Poll}; impl UpgradeInfo for MplexConfig { type Info = &'static str; diff --git a/muxers/test-harness/src/lib.rs b/muxers/test-harness/src/lib.rs index d03bdbdfed7..377ba523891 100644 --- a/muxers/test-harness/src/lib.rs +++ b/muxers/test-harness/src/lib.rs @@ -1,15 +1,21 @@ +use std::{ + fmt, + future::Future, + mem, + pin::Pin, + task::{Context, Poll}, + time::Duration, +}; + +use futures::{future, AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt, Stream, StreamExt}; +use libp2p_core::{ + muxing::StreamMuxerExt, + 
upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}, + StreamMuxer, + UpgradeInfo, +}; + use crate::future::{BoxFuture, Either, FutureExt}; -use futures::{future, AsyncRead, AsyncWrite}; -use futures::{AsyncReadExt, Stream}; -use futures::{AsyncWriteExt, StreamExt}; -use libp2p_core::muxing::StreamMuxerExt; -use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}; -use libp2p_core::{StreamMuxer, UpgradeInfo}; -use std::future::Future; -use std::pin::Pin; -use std::task::{Context, Poll}; -use std::time::Duration; -use std::{fmt, mem}; pub async fn connected_muxers_on_memory_ring_buffer() -> (M, M) where @@ -41,7 +47,8 @@ where .unwrap() } -/// Verifies that Alice can send a message and immediately close the stream afterwards and Bob can use `read_to_end` to read the entire message. +/// Verifies that Alice can send a message and immediately close the stream +/// afterwards and Bob can use `read_to_end` to read the entire message. pub async fn close_implies_flush(alice: A, bob: B) where A: StreamMuxer + Unpin, @@ -99,7 +106,8 @@ where .await; } -/// Runs the given protocol between the two parties, ensuring commutativity, i.e. either party can be the dialer and listener. +/// Runs the given protocol between the two parties, ensuring commutativity, +/// i.e. either party can be the dialer and listener. async fn run_commutative( mut alice: A, mut bob: B, @@ -119,8 +127,10 @@ async fn run_commutative( /// Runs a given protocol between the two parties. /// -/// The first party will open a new substream and the second party will wait for this. -/// The [`StreamMuxer`] is polled until both parties have completed the protocol to ensure that the underlying connection can make progress at all times. +/// The first party will open a new substream and the second party will wait for +/// this. 
The [`StreamMuxer`] is polled until both parties have completed the +/// protocol to ensure that the underlying connection can make progress at all +/// times. async fn run( dialer: &mut A, listener: &mut B, diff --git a/muxers/yamux/src/lib.rs b/muxers/yamux/src/lib.rs index bcfeb62fccf..614791c2dd3 100644 --- a/muxers/yamux/src/lib.rs +++ b/muxers/yamux/src/lib.rs @@ -22,17 +22,20 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -use either::Either; -use futures::{prelude::*, ready}; -use libp2p_core::muxing::{StreamMuxer, StreamMuxerEvent}; -use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade, UpgradeInfo}; -use std::collections::VecDeque; -use std::io::{IoSlice, IoSliceMut}; -use std::task::Waker; use std::{ - io, iter, + collections::VecDeque, + io, + io::{IoSlice, IoSliceMut}, + iter, pin::Pin, - task::{Context, Poll}, + task::{Context, Poll, Waker}, +}; + +use either::Either; +use futures::{prelude::*, ready}; +use libp2p_core::{ + muxing::{StreamMuxer, StreamMuxerEvent}, + upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade, UpgradeInfo}, }; use thiserror::Error; @@ -40,15 +43,20 @@ use thiserror::Error; #[derive(Debug)] pub struct Muxer { connection: Either, yamux013::Connection>, - /// Temporarily buffers inbound streams in case our node is performing backpressure on the remote. + /// Temporarily buffers inbound streams in case our node is performing + /// backpressure on the remote. /// - /// The only way how yamux can make progress is by calling [`yamux013::Connection::poll_next_inbound`]. However, the - /// [`StreamMuxer`] interface is designed to allow a caller to selectively make progress via - /// [`StreamMuxer::poll_inbound`] and [`StreamMuxer::poll_outbound`] whilst the more general - /// [`StreamMuxer::poll`] is designed to make progress on existing streams etc. + /// The only way how yamux can make progress is by calling + /// [`yamux013::Connection::poll_next_inbound`]. 
However, the + /// [`StreamMuxer`] interface is designed to allow a caller to selectively + /// make progress via [`StreamMuxer::poll_inbound`] and + /// [`StreamMuxer::poll_outbound`] whilst the more general + /// [`StreamMuxer::poll`] is designed to make progress on existing streams + /// etc. /// - /// This buffer stores inbound streams that are created whilst [`StreamMuxer::poll`] is called. - /// Once the buffer is full, new inbound streams are dropped. + /// This buffer stores inbound streams that are created whilst + /// [`StreamMuxer::poll`] is called. Once the buffer is full, new + /// inbound streams are dropped. inbound_stream_buffer: VecDeque, /// Waker to be called when new inbound streams are available. inbound_stream_waker: Option, @@ -57,7 +65,9 @@ pub struct Muxer { /// How many streams to buffer before we start resetting them. /// /// This is equal to the ACK BACKLOG in `rust-yamux`. -/// Thus, for peers running on a recent version of `rust-libp2p`, we should never need to reset streams because they'll voluntarily stop opening them once they hit the ACK backlog. +/// Thus, for peers running on a recent version of `rust-libp2p`, we should +/// never need to reset streams because they'll voluntarily stop opening them +/// once they hit the ACK backlog. const MAX_BUFFERED_INBOUND_STREAMS: usize = 256; impl Muxer @@ -310,7 +320,8 @@ impl Config { /// Sets the size (in bytes) of the receive window per substream. #[deprecated( - note = "Will be replaced in the next breaking release with a connection receive window size limit." + note = "Will be replaced in the next breaking release with a connection receive window \ + size limit." )] pub fn set_receive_window_size(&mut self, num_bytes: u32) -> &mut Self { self.set(|cfg| cfg.set_receive_window(num_bytes)) @@ -330,7 +341,10 @@ impl Config { /// Sets the window update mode that determines when the remote /// is given new credit for sending more data. 
#[deprecated( - note = "`WindowUpdate::OnRead` is the default. `WindowUpdate::OnReceive` breaks backpressure, is thus not recommended, and will be removed in the next breaking release. Thus this method becomes obsolete and will be removed with the next breaking release." + note = "`WindowUpdate::OnRead` is the default. `WindowUpdate::OnReceive` breaks \ + backpressure, is thus not recommended, and will be removed in the next breaking \ + release. Thus this method becomes obsolete and will be removed with the next \ + breaking release." )] pub fn set_window_update_mode(&mut self, mode: WindowUpdateMode) -> &mut Self { self.set(|cfg| cfg.set_window_update_mode(mode.0)) @@ -446,8 +460,9 @@ mod test { use super::*; #[test] fn config_set_switches_to_v012() { - // By default we use yamux v0.13. Thus we provide the benefits of yamux v0.13 to all users - // that do not depend on any of the behaviors (i.e. configuration options) of v0.12. + // By default we use yamux v0.13. Thus we provide the benefits of yamux v0.13 to + // all users that do not depend on any of the behaviors (i.e. + // configuration options) of v0.12. let mut cfg = Config::default(); assert!(matches!( cfg, diff --git a/protocols/autonat/src/v1.rs b/protocols/autonat/src/v1.rs index c60e4805f40..3cd1ea8ab3c 100644 --- a/protocols/autonat/src/v1.rs +++ b/protocols/autonat/src/v1.rs @@ -21,22 +21,30 @@ //! Implementation of the [AutoNAT](https://github.com/libp2p/specs/blob/master/autonat/README.md) protocol. //! //! ## Eventual Deprecation -//! This version of the protocol will eventually be deprecated in favor of [v2](crate::v2). -//! We recommend using v2 for new projects. +//! This version of the protocol will eventually be deprecated in favor of +//! [v2](crate::v2). We recommend using v2 for new projects. 
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] pub(crate) mod behaviour; pub(crate) mod protocol; +pub use libp2p_request_response::{InboundFailure, OutboundFailure}; + pub use self::{ behaviour::{ - Behaviour, Config, Event, InboundProbeError, InboundProbeEvent, NatStatus, - OutboundProbeError, OutboundProbeEvent, ProbeId, + Behaviour, + Config, + Event, + InboundProbeError, + InboundProbeEvent, + NatStatus, + OutboundProbeError, + OutboundProbeEvent, + ProbeId, }, protocol::{ResponseError, DEFAULT_PROTOCOL_NAME}, }; -pub use libp2p_request_response::{InboundFailure, OutboundFailure}; pub(crate) mod proto { #![allow(unreachable_pub)] diff --git a/protocols/autonat/src/v1/behaviour.rs b/protocols/autonat/src/v1/behaviour.rs index 7a717baed8d..98b6e916b3b 100644 --- a/protocols/autonat/src/v1/behaviour.rs +++ b/protocols/autonat/src/v1/behaviour.rs @@ -21,32 +21,45 @@ mod as_client; mod as_server; -use crate::protocol::{AutoNatCodec, DialRequest, DialResponse, ResponseError}; -use crate::DEFAULT_PROTOCOL_NAME; +use std::{ + collections::{HashMap, HashSet, VecDeque}, + iter, + task::{Context, Poll}, + time::Duration, +}; + use as_client::AsClient; pub use as_client::{OutboundProbeError, OutboundProbeEvent}; use as_server::AsServer; pub use as_server::{InboundProbeError, InboundProbeEvent}; use futures_timer::Delay; -use libp2p_core::transport::PortUse; -use libp2p_core::{multiaddr::Protocol, ConnectedPoint, Endpoint, Multiaddr}; +use libp2p_core::{multiaddr::Protocol, transport::PortUse, ConnectedPoint, Endpoint, Multiaddr}; use libp2p_identity::PeerId; use libp2p_request_response::{ - self as request_response, InboundRequestId, OutboundRequestId, ProtocolSupport, ResponseChannel, + self as request_response, + InboundRequestId, + OutboundRequestId, + ProtocolSupport, + ResponseChannel, }; use libp2p_swarm::{ behaviour::{AddressChange, ConnectionClosed, ConnectionEstablished, DialFailure, FromSwarm}, - ConnectionDenied, ConnectionId, ListenAddresses, 
NetworkBehaviour, THandler, THandlerInEvent, - THandlerOutEvent, ToSwarm, -}; -use std::{ - collections::{HashMap, HashSet, VecDeque}, - iter, - task::{Context, Poll}, - time::Duration, + ConnectionDenied, + ConnectionId, + ListenAddresses, + NetworkBehaviour, + THandler, + THandlerInEvent, + THandlerOutEvent, + ToSwarm, }; use web_time::Instant; +use crate::{ + protocol::{AutoNatCodec, DialRequest, DialResponse, ResponseError}, + DEFAULT_PROTOCOL_NAME, +}; + /// Config for the [`Behaviour`]. #[derive(Debug, Clone, PartialEq, Eq)] pub struct Config { @@ -56,10 +69,11 @@ pub struct Config { // Client Config /// Delay on init before starting the fist probe. pub boot_delay: Duration, - /// Interval in which the NAT should be tested again if max confidence was reached in a status. + /// Interval in which the NAT should be tested again if max confidence was + /// reached in a status. pub refresh_interval: Duration, - /// Interval in which the NAT status should be re-tried if it is currently unknown - /// or max confidence was not reached yet. + /// Interval in which the NAT status should be re-tried if it is currently + /// unknown or max confidence was not reached yet. pub retry_interval: Duration, /// Throttle period for re-using a peer as server for a dial-request. pub throttle_server_period: Duration, @@ -74,14 +88,15 @@ pub struct Config { pub max_peer_addresses: usize, /// Max total dial requests done in `[Config::throttle_clients_period`]. pub throttle_clients_global_max: usize, - /// Max dial requests done in `[Config::throttle_clients_period`] for a peer. + /// Max dial requests done in `[Config::throttle_clients_period`] for a + /// peer. pub throttle_clients_peer_max: usize, /// Period for throttling clients requests. pub throttle_clients_period: Duration, - /// As a server reject probes for clients that are observed at a non-global ip address. - /// Correspondingly as a client only pick peers as server that are not observed at a - /// private ip address. 
Note that this does not apply for servers that are added via - /// [`Behaviour::add_server`]. + /// As a server reject probes for clients that are observed at a non-global + /// ip address. Correspondingly as a client only pick peers as server + /// that are not observed at a private ip address. Note that this does + /// not apply for servers that are added via [`Behaviour::add_server`]. pub only_global_ips: bool, } @@ -148,17 +163,21 @@ pub enum Event { /// [`NetworkBehaviour`] for AutoNAT. /// -/// The behaviour frequently runs probes to determine whether the local peer is behind NAT and/ or a firewall, or -/// publicly reachable. -/// In a probe, a dial-back request is sent to a peer that is randomly selected from the list of fixed servers and -/// connected peers. Upon receiving a dial-back request, the remote tries to dial the included addresses. When a -/// first address was successfully dialed, a status Ok will be send back together with the dialed address. If no address -/// can be reached a dial-error is send back. -/// Based on the received response, the sender assumes themselves to be public or private. -/// The status is retried in a frequency of [`Config::retry_interval`] or [`Config::retry_interval`], depending on whether -/// enough confidence in the assumed NAT status was reached or not. -/// The confidence increases each time a probe confirms the assumed status, and decreases if a different status is reported. -/// If the confidence is 0, the status is flipped and the Behaviour will report the new status in an `OutEvent`. +/// The behaviour frequently runs probes to determine whether the local peer is +/// behind NAT and/ or a firewall, or publicly reachable. +/// In a probe, a dial-back request is sent to a peer that is randomly selected +/// from the list of fixed servers and connected peers. Upon receiving a +/// dial-back request, the remote tries to dial the included addresses. 
When a
+/// first address was successfully dialed, a status Ok will be sent back
+/// together with the dialed address. If no address can be reached a dial-error
+/// is sent back. Based on the received response, the sender assumes themselves
+/// to be public or private. The status is retried in a frequency of
+/// [`Config::refresh_interval`] or [`Config::retry_interval`], depending on
+/// whether enough confidence in the assumed NAT status was reached or not.
+/// The confidence increases each time a probe confirms the assumed status, and
+/// decreases if a different status is reported. If the confidence is 0, the
+/// status is flipped and the Behaviour will report the new status in an
+/// `OutEvent`.
 pub struct Behaviour {
     // Local peer id
     local_peer_id: PeerId,
@@ -195,11 +214,12 @@ pub struct Behaviour {
     ongoing_outbound: HashMap,

     // Connected peers with the observed address of each connection.
-    // If the endpoint of a connection is relayed or not global (in case of Config::only_global_ips),
-    // the observed address is `None`.
+    // If the endpoint of a connection is relayed or not global (in case of
+    // Config::only_global_ips), the observed address is `None`.
     connected: HashMap>>,

-    // Used servers in recent outbound probes that are throttled through Config::throttle_server_period.
+    // Used servers in recent outbound probes that are throttled through
+    // Config::throttle_server_period.
     throttled_servers: Vec<(PeerId, Instant)>,

     // Recent probes done for clients
@@ -245,7 +265,8 @@ impl Behaviour {
     }

     /// Assumed public address of the local peer.
-    /// Returns `None` in case of status [`NatStatus::Private`] or [`NatStatus::Unknown`].
+    /// Returns `None` in case of status [`NatStatus::Private`] or
+    /// [`NatStatus::Unknown`].
     pub fn public_address(&self) -> Option<&Multiaddr> {
         match &self.nat_status {
             NatStatus::Public(address) => Some(address),
@@ -264,8 +285,9 @@ impl Behaviour {
     }

     /// Add a peer to the list over servers that may be used for probes.
-    /// These peers are used for dial-request even if they are currently not connection, in which case a connection will be
-    /// establish before sending the dial-request.
+    /// These peers are used for dial-request even if they are currently not
+    /// connected, in which case a connection will be established before
+    /// sending the dial-request.
     pub fn add_server(&mut self, peer: PeerId, address: Option) {
         self.servers.insert(peer);
         if let Some(addr) = address {
@@ -351,9 +373,11 @@ impl Behaviour {
                 role_override: Endpoint::Listener,
                 port_use: _,
             } => {
-                // Outgoing connection was dialed as a listener. In other words outgoing connection
-                // was dialed as part of a hole punch. `libp2p-autonat` never attempts to hole
-                // punch, thus this connection has not been requested by this [`NetworkBehaviour`].
+                // Outgoing connection was dialed as a listener. In other words
+                // outgoing connection was dialed as part of a
+                // hole punch. `libp2p-autonat` never attempts to hole
+                // punch, thus this connection has not been requested by this
+                // [`NetworkBehaviour`].
             }
             ConnectedPoint::Listener { .. } => self.as_client().on_inbound_connection(),
         }
@@ -564,7 +588,8 @@ impl NetworkBehaviour for Behaviour {

 type Action = ToSwarm<::ToSwarm, THandlerInEvent>;

-// Trait implemented for `AsClient` and `AsServer` to handle events from the inner [`request_response::Behaviour`] Protocol.
+// Trait implemented for `AsClient` and `AsServer` to handle events from the
+// inner [`request_response::Behaviour`] Protocol.
 trait HandleInnerEvent {
     fn handle_event(
         &mut self,
@@ -587,12 +612,12 @@ impl GlobalIp for Multiaddr {
 }

 impl GlobalIp for std::net::Ipv4Addr {
-    // NOTE: The below logic is copied from `std::net::Ipv4Addr::is_global`, which is at the time of
-    // writing behind the unstable `ip` feature.
+    // NOTE: The below logic is copied from `std::net::Ipv4Addr::is_global`, which
+    // is at the time of writing behind the unstable `ip` feature.
// See https://github.com/rust-lang/rust/issues/27709 for more info. fn is_global_ip(&self) -> bool { - // Check if this address is 192.0.0.9 or 192.0.0.10. These addresses are the only two - // globally routable addresses in the 192.0.0.0/24 range. + // Check if this address is 192.0.0.9 or 192.0.0.10. These addresses are the + // only two globally routable addresses in the 192.0.0.0/24 range. if u32::from_be_bytes(self.octets()) == 0xc0000009 || u32::from_be_bytes(self.octets()) == 0xc000000a { @@ -635,12 +660,12 @@ impl GlobalIp for std::net::Ipv4Addr { } impl GlobalIp for std::net::Ipv6Addr { - // NOTE: The below logic is copied from `std::net::Ipv6Addr::is_global`, which is at the time of - // writing behind the unstable `ip` feature. + // NOTE: The below logic is copied from `std::net::Ipv6Addr::is_global`, which + // is at the time of writing behind the unstable `ip` feature. // See https://github.com/rust-lang/rust/issues/27709 for more info. // - // Note that contrary to `Ipv4Addr::is_global_ip` this currently checks for global scope - // rather than global reachability. + // Note that contrary to `Ipv4Addr::is_global_ip` this currently checks for + // global scope rather than global reachability. fn is_global_ip(&self) -> bool { // Copied from the unstable method `std::net::Ipv6Addr::is_unicast`. fn is_unicast(addr: &std::net::Ipv6Addr) -> bool { @@ -669,9 +694,11 @@ impl GlobalIp for std::net::Ipv6Addr { && !is_documentation(addr) } - // Variation of unstable method [`std::net::Ipv6Addr::multicast_scope`] that instead of the - // `Ipv6MulticastScope` just returns if the scope is global or not. - // Equivalent to `Ipv6Addr::multicast_scope(..).map(|scope| matches!(scope, Ipv6MulticastScope::Global))`. + // Variation of unstable method [`std::net::Ipv6Addr::multicast_scope`] that + // instead of the `Ipv6MulticastScope` just returns if the scope is + // global or not. 
Equivalent to + // `Ipv6Addr::multicast_scope(..).map(|scope| matches!(scope, + // Ipv6MulticastScope::Global))`. fn is_multicast_scope_global(addr: &std::net::Ipv6Addr) -> Option { match addr.segments()[0] & 0x000f { 14 => Some(true), // Global multicast scope. diff --git a/protocols/autonat/src/v1/behaviour/as_client.rs b/protocols/autonat/src/v1/behaviour/as_client.rs index 385dee50ee1..de413827b8f 100644 --- a/protocols/autonat/src/v1/behaviour/as_client.rs +++ b/protocols/autonat/src/v1/behaviour/as_client.rs @@ -18,12 +18,12 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::ResponseError; - -use super::{ - Action, AutoNatCodec, Config, DialRequest, DialResponse, Event, HandleInnerEvent, NatStatus, - ProbeId, +use std::{ + collections::{HashMap, HashSet, VecDeque}, + task::{Context, Poll}, + time::Duration, }; + use futures::FutureExt; use futures_timer::Delay; use libp2p_core::Multiaddr; @@ -31,13 +31,21 @@ use libp2p_identity::PeerId; use libp2p_request_response::{self as request_response, OutboundFailure, OutboundRequestId}; use libp2p_swarm::{ConnectionId, ListenAddresses, ToSwarm}; use rand::{seq::SliceRandom, thread_rng}; -use std::{ - collections::{HashMap, HashSet, VecDeque}, - task::{Context, Poll}, - time::Duration, -}; use web_time::Instant; +use super::{ + Action, + AutoNatCodec, + Config, + DialRequest, + DialResponse, + Event, + HandleInnerEvent, + NatStatus, + ProbeId, +}; +use crate::ResponseError; + /// Outbound probe failed or was aborted. #[derive(Debug)] pub enum OutboundProbeError { @@ -74,7 +82,8 @@ pub enum OutboundProbeEvent { Error { probe_id: ProbeId, /// Id of the peer used for the probe. - /// `None` if the probe was aborted due to no addresses or no qualified server. + /// `None` if the probe was aborted due to no addresses or no qualified + /// server. 
peer: Option, error: OutboundProbeError, }, @@ -210,7 +219,8 @@ impl AsClient<'_> { } } - // An inbound connection can indicate that we are public; adjust the delay to the next probe. + // An inbound connection can indicate that we are public; adjust the delay to + // the next probe. pub(crate) fn on_inbound_connection(&mut self) { if *self.confidence == self.config.confidence_max { if self.nat_status.is_public() { @@ -305,8 +315,8 @@ impl AsClient<'_> { .reset(schedule_next.saturating_duration_since(Instant::now())); } - // Adapt current confidence and NAT status to the status reported by the latest probe. - // Return the old status if it flipped. + // Adapt current confidence and NAT status to the status reported by the latest + // probe. Return the old status if it flipped. fn handle_reported_status(&mut self, reported_status: NatStatus) -> Option { self.schedule_next_probe(self.config.retry_interval); diff --git a/protocols/autonat/src/v1/behaviour/as_server.rs b/protocols/autonat/src/v1/behaviour/as_server.rs index 01148add6e8..d07d6f9f3e2 100644 --- a/protocols/autonat/src/v1/behaviour/as_server.rs +++ b/protocols/autonat/src/v1/behaviour/as_server.rs @@ -17,25 +17,39 @@ // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use super::{ - Action, AutoNatCodec, Config, DialRequest, DialResponse, Event, HandleInnerEvent, ProbeId, - ResponseError, +use std::{ + collections::{HashMap, HashSet, VecDeque}, + num::NonZeroU8, }; + use libp2p_core::{multiaddr::Protocol, Multiaddr}; use libp2p_identity::PeerId; use libp2p_request_response::{ - self as request_response, InboundFailure, InboundRequestId, ResponseChannel, + self as request_response, + InboundFailure, + InboundRequestId, + ResponseChannel, }; use libp2p_swarm::{ dial_opts::{DialOpts, PeerCondition}, - ConnectionId, DialError, ToSwarm, -}; -use std::{ - collections::{HashMap, HashSet, VecDeque}, - num::NonZeroU8, + ConnectionId, + DialError, + ToSwarm, }; use web_time::Instant; +use super::{ + Action, + AutoNatCodec, + Config, + DialRequest, + DialResponse, + Event, + HandleInnerEvent, + ProbeId, + ResponseError, +}; + /// Inbound probe failed. #[derive(Debug)] pub enum InboundProbeError { @@ -379,10 +393,10 @@ impl AsServer<'_> { #[cfg(test)] mod test { - use super::*; - use std::net::Ipv4Addr; + use super::*; + fn random_ip<'a>() -> Protocol<'a> { Protocol::Ip4(Ipv4Addr::new( rand::random(), diff --git a/protocols/autonat/src/v1/protocol.rs b/protocols/autonat/src/v1/protocol.rs index 2ce538fddf4..5f6fd466624 100644 --- a/protocols/autonat/src/v1/protocol.rs +++ b/protocols/autonat/src/v1/protocol.rs @@ -18,16 +18,21 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::proto; +use std::io; + use async_trait::async_trait; use asynchronous_codec::{FramedRead, FramedWrite}; -use futures::io::{AsyncRead, AsyncWrite}; -use futures::{SinkExt, StreamExt}; +use futures::{ + io::{AsyncRead, AsyncWrite}, + SinkExt, + StreamExt, +}; use libp2p_core::Multiaddr; use libp2p_identity::PeerId; use libp2p_request_response::{self as request_response}; use libp2p_swarm::StreamProtocol; -use std::io; + +use crate::proto; /// The protocol name used for negotiating with multistream-select. 
pub const DEFAULT_PROTOCOL_NAME: StreamProtocol = StreamProtocol::new("/libp2p/autonat/1.0.0"); diff --git a/protocols/autonat/src/v2.rs b/protocols/autonat/src/v2.rs index 48e9f25f890..cc28d8b9d19 100644 --- a/protocols/autonat/src/v2.rs +++ b/protocols/autonat/src/v2.rs @@ -3,18 +3,19 @@ //! The implementation follows the [libp2p spec](https://github.com/libp2p/specs/blob/03718ef0f2dea4a756a85ba716ee33f97e4a6d6c/autonat/autonat-v2.md). //! //! The new version fixes the issues of the first version: -//! - The server now always dials back over a newly allocated port. This greatly reduces the risk of -//! false positives that often occurred in the first version, when the clinet-server connection -//! occurred over a hole-punched port. -//! - The server protects against DoS attacks by requiring the client to send more data to the -//! server then the dial back puts on the client, thus making the protocol unatractive for an -//! attacker. +//! - The server now always dials back over a newly allocated port. This greatly +//! reduces the risk of false positives that often occurred in the first +//! version, when the client-server connection occurred over a hole-punched +//! port. +//! - The server protects against DoS attacks by requiring the client to send +//! more data to the server than the dial back puts on the client, thus making +//! the protocol unattractive for an attacker. //! //! The protocol is separated into two parts: -//! - The client part, which is implemented in the `client` module. (The client is the party that -//! wants to check if it is reachable from the outside.) -//! - The server part, which is implemented in the `server` module. (The server is the party -//! performing reachability checks on behalf of the client.) +//! - The client part, which is implemented in the `client` module. (The client +//! is the party that wants to check if it is reachable from the outside.) +//! - The server part, which is implemented in the `server` module. 
(The server +//! is the party performing reachability checks on behalf of the client.) //! //! The two can be used together. diff --git a/protocols/autonat/src/v2/client.rs b/protocols/autonat/src/v2/client.rs index d3272512f35..11ddb792839 100644 --- a/protocols/autonat/src/v2/client.rs +++ b/protocols/autonat/src/v2/client.rs @@ -1,5 +1,4 @@ mod behaviour; mod handler; -pub use behaviour::Event; -pub use behaviour::{Behaviour, Config}; +pub use behaviour::{Behaviour, Config, Event}; diff --git a/protocols/autonat/src/v2/client/behaviour.rs b/protocols/autonat/src/v2/client/behaviour.rs index 97509c05443..4c5cc4ca049 100644 --- a/protocols/autonat/src/v2/client/behaviour.rs +++ b/protocols/autonat/src/v2/client/behaviour.rs @@ -1,5 +1,6 @@ use std::{ collections::{HashMap, VecDeque}, + fmt::{Debug, Display, Formatter}, task::{Context, Poll}, time::Duration, }; @@ -10,26 +11,33 @@ use futures_timer::Delay; use libp2p_core::{transport::PortUse, Endpoint, Multiaddr}; use libp2p_identity::PeerId; use libp2p_swarm::{ - behaviour::ConnectionEstablished, ConnectionClosed, ConnectionDenied, ConnectionHandler, - ConnectionId, FromSwarm, NetworkBehaviour, NewExternalAddrCandidate, NotifyHandler, ToSwarm, + behaviour::ConnectionEstablished, + ConnectionClosed, + ConnectionDenied, + ConnectionHandler, + ConnectionId, + FromSwarm, + NetworkBehaviour, + NewExternalAddrCandidate, + NotifyHandler, + ToSwarm, }; use rand::prelude::*; use rand_core::OsRng; -use std::fmt::{Debug, Display, Formatter}; - -use crate::v2::{protocol::DialRequest, Nonce}; use super::handler::{ dial_back::{self, IncomingNonce}, dial_request, }; +use crate::v2::{protocol::DialRequest, Nonce}; #[derive(Debug, Clone, Copy)] pub struct Config { /// How many candidates we will test at most. pub(crate) max_candidates: usize, - /// The interval at which we will attempt to confirm candidates as external addresses. + /// The interval at which we will attempt to confirm candidates as external + /// addresses. 
pub(crate) probe_interval: Duration, } @@ -281,10 +289,13 @@ where } } - /// Issues dial requests to random AutoNAT servers for the most frequently reported, untested candidates. + /// Issues dial requests to random AutoNAT servers for the most frequently + /// reported, untested candidates. /// - /// In the current implementation, we only send a single address to each AutoNAT server. - /// This spreads our candidates out across all servers we are connected to which should give us pretty fast feedback on all of them. + /// In the current implementation, we only send a single address to each + /// AutoNAT server. This spreads our candidates out across all servers + /// we are connected to which should give us pretty fast feedback on all of + /// them. fn issue_dial_requests_for_untested_candidates(&mut self) { for addr in self.untested_candidates() { let Some((conn_id, peer_id)) = self.random_autonat_server() else { @@ -309,9 +320,11 @@ where } } - /// Returns all untested candidates, sorted by the frequency they were reported at. + /// Returns all untested candidates, sorted by the frequency they were + /// reported at. /// - /// More frequently reported candidates are considered to more likely be external addresses and thus tested first. + /// More frequently reported candidates are considered to more likely be + /// external addresses and thus tested first. fn untested_candidates(&self) -> impl Iterator { let mut entries = self .address_candidates @@ -333,7 +346,9 @@ where .map(|(addr, _)| addr) } - /// Chooses an active connection to one of our peers that reported support for the [`DIAL_REQUEST_PROTOCOL`](crate::v2::DIAL_REQUEST_PROTOCOL) protocol. + /// Chooses an active connection to one of our peers that reported support + /// for the [`DIAL_REQUEST_PROTOCOL`](crate::v2::DIAL_REQUEST_PROTOCOL) + /// protocol. 
fn random_autonat_server(&mut self) -> Option<(ConnectionId, PeerId)> { let (conn_id, info) = self .peer_info diff --git a/protocols/autonat/src/v2/client/handler/dial_back.rs b/protocols/autonat/src/v2/client/handler/dial_back.rs index b3b3a59c02d..44346a340b2 100644 --- a/protocols/autonat/src/v2/client/handler/dial_back.rs +++ b/protocols/autonat/src/v2/client/handler/dial_back.rs @@ -1,4 +1,5 @@ use std::{ + convert::Infallible, io, task::{Context, Poll}, time::Duration, @@ -9,9 +10,11 @@ use futures_bounded::StreamSet; use libp2p_core::upgrade::{DeniedUpgrade, ReadyUpgrade}; use libp2p_swarm::{ handler::{ConnectionEvent, FullyNegotiatedInbound, ListenUpgradeError}, - ConnectionHandler, ConnectionHandlerEvent, StreamProtocol, SubstreamProtocol, + ConnectionHandler, + ConnectionHandlerEvent, + StreamProtocol, + SubstreamProtocol, }; -use std::convert::Infallible; use crate::v2::{protocol, Nonce, DIAL_BACK_PROTOCOL}; @@ -83,7 +86,8 @@ impl ConnectionHandler for Handler { tracing::warn!("Dial back request dropped, too many requests in flight"); } } - // TODO: remove when Rust 1.82 is MSRVprotocols/autonat/src/v2/client/handler/dial_back.rs + // TODO: remove when Rust 1.82 is + // MSRV #[allow(unreachable_patterns)] ConnectionEvent::ListenUpgradeError(ListenUpgradeError { error, ..
}) => { libp2p_core::util::unreachable(error); diff --git a/protocols/autonat/src/v2/client/handler/dial_request.rs b/protocols/autonat/src/v2/client/handler/dial_request.rs index 0f303167523..e4e78e9f3ef 100644 --- a/protocols/autonat/src/v2/client/handler/dial_request.rs +++ b/protocols/autonat/src/v2/client/handler/dial_request.rs @@ -1,34 +1,48 @@ +use std::{ + collections::VecDeque, + convert::Infallible, + io, + iter::{once, repeat}, + task::{Context, Poll}, + time::Duration, +}; + use futures::{channel::oneshot, AsyncWrite}; use futures_bounded::FuturesMap; use libp2p_core::{ upgrade::{DeniedUpgrade, ReadyUpgrade}, Multiaddr, }; - use libp2p_swarm::{ handler::{ - ConnectionEvent, DialUpgradeError, FullyNegotiatedOutbound, OutboundUpgradeSend, + ConnectionEvent, + DialUpgradeError, + FullyNegotiatedOutbound, + OutboundUpgradeSend, ProtocolsChange, }, - ConnectionHandler, ConnectionHandlerEvent, Stream, StreamProtocol, StreamUpgradeError, + ConnectionHandler, + ConnectionHandlerEvent, + Stream, + StreamProtocol, + StreamUpgradeError, SubstreamProtocol, }; -use std::{ - collections::VecDeque, - convert::Infallible, - io, - iter::{once, repeat}, - task::{Context, Poll}, - time::Duration, -}; use crate::v2::{ generated::structs::{mod_DialResponse::ResponseStatus, DialStatus}, protocol::{ - Coder, DialDataRequest, DialDataResponse, DialRequest, Response, - DATA_FIELD_LEN_UPPER_BOUND, DATA_LEN_LOWER_BOUND, DATA_LEN_UPPER_BOUND, + Coder, + DialDataRequest, + DialDataResponse, + DialRequest, + Response, + DATA_FIELD_LEN_UPPER_BOUND, + DATA_LEN_LOWER_BOUND, + DATA_LEN_UPPER_BOUND, }, - Nonce, DIAL_REQUEST_PROTOCOL, + Nonce, + DIAL_REQUEST_PROTOCOL, }; #[derive(Debug)] @@ -261,8 +275,10 @@ async fn start_stream_handle( Ok(_) => {} Err(err) => { if err.kind() == io::ErrorKind::ConnectionReset { - // The AutoNAT server may have already closed the stream (this is normal because the probe is finished), in this case we have this error: - // Err(Custom { kind: 
ConnectionReset, error: Stopped(0) }) + // The AutoNAT server may have already closed the stream (this + // is normal because the probe is finished), in this case we + // have this error: Err(Custom { kind: + // ConnectionReset, error: Stopped(0) }) // so we silently ignore this error } else { return Err(err.into()); diff --git a/protocols/autonat/src/v2/protocol.rs b/protocols/autonat/src/v2/protocol.rs index 4077fd65f5d..52be84dba16 100644 --- a/protocols/autonat/src/v2/protocol.rs +++ b/protocols/autonat/src/v2/protocol.rs @@ -1,13 +1,10 @@ // change to quick-protobuf-codec -use std::io; -use std::io::ErrorKind; +use std::{io, io::ErrorKind}; use asynchronous_codec::{Framed, FramedRead, FramedWrite}; - use futures::{AsyncRead, AsyncWrite, SinkExt, StreamExt}; use libp2p_core::Multiaddr; - use quick_protobuf_codec::Codec; use rand::Rng; @@ -103,7 +100,9 @@ impl From for proto::Message { ); proto::Message { msg: proto::mod_Message::OneOfmsg::dialDataResponse(proto::DialDataResponse { - data: vec![0; val.data_count], // One could use Cow::Borrowed here, but it will require a modification of the generated code and that will fail the CI + data: vec![0; val.data_count], /* One could use Cow::Borrowed here, but it will + * require a modification of the generated code + * and that will fail the CI */ }), } } @@ -310,7 +309,9 @@ pub(crate) async fn recv_dial_back_response( #[cfg(test)] mod tests { use crate::v2::generated::structs::{ - mod_Message::OneOfmsg, DialDataResponse as GenDialDataResponse, Message, + mod_Message::OneOfmsg, + DialDataResponse as GenDialDataResponse, + Message, }; #[test] diff --git a/protocols/autonat/src/v2/server.rs b/protocols/autonat/src/v2/server.rs index 25819307784..cd9b1e46b18 100644 --- a/protocols/autonat/src/v2/server.rs +++ b/protocols/autonat/src/v2/server.rs @@ -1,5 +1,4 @@ mod behaviour; mod handler; -pub use behaviour::Behaviour; -pub use behaviour::Event; +pub use behaviour::{Behaviour, Event}; diff --git 
a/protocols/autonat/src/v2/server/behaviour.rs b/protocols/autonat/src/v2/server/behaviour.rs index 027cfff7c13..ed7faf66d7b 100644 --- a/protocols/autonat/src/v2/server/behaviour.rs +++ b/protocols/autonat/src/v2/server/behaviour.rs @@ -4,20 +4,25 @@ use std::{ task::{Context, Poll}, }; -use crate::v2::server::handler::dial_request::DialBackStatus; use either::Either; use libp2p_core::{transport::PortUse, Endpoint, Multiaddr}; use libp2p_identity::PeerId; -use libp2p_swarm::dial_opts::PeerCondition; use libp2p_swarm::{ - dial_opts::DialOpts, dummy, ConnectionDenied, ConnectionHandler, ConnectionId, DialFailure, - FromSwarm, NetworkBehaviour, ToSwarm, + dial_opts::{DialOpts, PeerCondition}, + dummy, + ConnectionDenied, + ConnectionHandler, + ConnectionId, + DialFailure, + FromSwarm, + NetworkBehaviour, + ToSwarm, }; use rand_core::{OsRng, RngCore}; use crate::v2::server::handler::{ dial_back, - dial_request::{self, DialBackCommand}, + dial_request::{self, DialBackCommand, DialBackStatus}, Handler, }; diff --git a/protocols/autonat/src/v2/server/handler/dial_back.rs b/protocols/autonat/src/v2/server/handler/dial_back.rs index 3cacd4ff32b..16e8f450053 100644 --- a/protocols/autonat/src/v2/server/handler/dial_back.rs +++ b/protocols/autonat/src/v2/server/handler/dial_back.rs @@ -10,17 +10,19 @@ use futures_bounded::FuturesSet; use libp2p_core::upgrade::{DeniedUpgrade, ReadyUpgrade}; use libp2p_swarm::{ handler::{ConnectionEvent, DialUpgradeError, FullyNegotiatedOutbound}, - ConnectionHandler, ConnectionHandlerEvent, StreamProtocol, StreamUpgradeError, + ConnectionHandler, + ConnectionHandlerEvent, + StreamProtocol, + StreamUpgradeError, SubstreamProtocol, }; +use super::dial_request::{DialBackCommand, DialBackStatus as DialBackRes}; use crate::v2::{ protocol::{dial_back, recv_dial_back_response}, DIAL_BACK_PROTOCOL, }; -use super::dial_request::{DialBackCommand, DialBackStatus as DialBackRes}; - pub(crate) type ToBehaviour = io::Result<()>; pub struct Handler { diff 
--git a/protocols/autonat/src/v2/server/handler/dial_request.rs b/protocols/autonat/src/v2/server/handler/dial_request.rs index 5058e0f3f42..9113a487314 100644 --- a/protocols/autonat/src/v2/server/handler/dial_request.rs +++ b/protocols/autonat/src/v2/server/handler/dial_request.rs @@ -8,7 +8,10 @@ use std::{ use either::Either; use futures::{ channel::{mpsc, oneshot}, - AsyncRead, AsyncWrite, SinkExt, StreamExt, + AsyncRead, + AsyncWrite, + SinkExt, + StreamExt, }; use futures_bounded::FuturesSet; use libp2p_core::{ @@ -18,7 +21,10 @@ use libp2p_core::{ use libp2p_identity::PeerId; use libp2p_swarm::{ handler::{ConnectionEvent, FullyNegotiatedInbound, ListenUpgradeError}, - ConnectionHandler, ConnectionHandlerEvent, StreamProtocol, SubstreamProtocol, + ConnectionHandler, + ConnectionHandlerEvent, + StreamProtocol, + SubstreamProtocol, }; use rand_core::RngCore; @@ -26,7 +32,8 @@ use crate::v2::{ generated::structs::{mod_DialResponse::ResponseStatus, DialStatus}, protocol::{Coder, DialDataRequest, DialRequest, DialResponse, Request, Response}, server::behaviour::Event, - Nonce, DIAL_REQUEST_PROTOCOL, + Nonce, + DIAL_REQUEST_PROTOCOL, }; #[derive(Debug, PartialEq)] @@ -225,7 +232,8 @@ async fn handle_request( data_amount, result: Err(io::Error::new( io::ErrorKind::Other, - "client is not conformint to protocol. the tested address is not the observed address", + "client is not conformint to protocol. 
the tested address is not the observed \ + address", )), }; }; diff --git a/protocols/autonat/tests/autonatv2.rs b/protocols/autonat/tests/autonatv2.rs index f22a2e51470..07a631c5b1c 100644 --- a/protocols/autonat/tests/autonatv2.rs +++ b/protocols/autonat/tests/autonatv2.rs @@ -1,15 +1,20 @@ -use libp2p_autonat::v2::client::{self, Config}; -use libp2p_autonat::v2::server; -use libp2p_core::multiaddr::Protocol; -use libp2p_core::transport::TransportError; -use libp2p_core::Multiaddr; +use std::{sync::Arc, time::Duration}; + +use libp2p_autonat::v2::{ + client::{self, Config}, + server, +}; +use libp2p_core::{multiaddr::Protocol, transport::TransportError, Multiaddr}; use libp2p_swarm::{ - DialError, FromSwarm, NetworkBehaviour, NewExternalAddrCandidate, Swarm, SwarmEvent, + DialError, + FromSwarm, + NetworkBehaviour, + NewExternalAddrCandidate, + Swarm, + SwarmEvent, }; use libp2p_swarm_test::SwarmExt; use rand_core::OsRng; -use std::sync::Arc; -use std::time::Duration; use tokio::sync::oneshot; use tracing_subscriber::EnvFilter; diff --git a/protocols/autonat/tests/test_client.rs b/protocols/autonat/tests/test_client.rs index f5c18e3f34e..2fbe608dc8b 100644 --- a/protocols/autonat/tests/test_client.rs +++ b/protocols/autonat/tests/test_client.rs @@ -18,14 +18,21 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
+use std::time::Duration; + use libp2p_autonat::{ - Behaviour, Config, Event, NatStatus, OutboundProbeError, OutboundProbeEvent, ResponseError, + Behaviour, + Config, + Event, + NatStatus, + OutboundProbeError, + OutboundProbeEvent, + ResponseError, }; use libp2p_core::Multiaddr; use libp2p_identity::PeerId; use libp2p_swarm::{Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt as _; -use std::time::Duration; use tokio::task::JoinHandle; const MAX_CONFIDENCE: usize = 3; @@ -115,8 +122,9 @@ async fn test_auto_probe() { } // It can happen that the server observed the established connection and - // returned a response before the inbound established connection was reported at the client. - // In this (rare) case the `ConnectionEstablished` event occurs after the `OutboundProbeEvent::Response`. + // returned a response before the inbound established connection was reported at + // the client. In this (rare) case the `ConnectionEstablished` event occurs + // after the `OutboundProbeEvent::Response`. if !had_connection_event { match client.next_swarm_event().await { SwarmEvent::ConnectionEstablished { diff --git a/protocols/autonat/tests/test_server.rs b/protocols/autonat/tests/test_server.rs index d43d14198d4..749154f3a4d 100644 --- a/protocols/autonat/tests/test_server.rs +++ b/protocols/autonat/tests/test_server.rs @@ -18,15 +18,20 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
+use std::{num::NonZeroU32, time::Duration}; + use libp2p_autonat::{ - Behaviour, Config, Event, InboundProbeError, InboundProbeEvent, ResponseError, + Behaviour, + Config, + Event, + InboundProbeError, + InboundProbeEvent, + ResponseError, }; use libp2p_core::{multiaddr::Protocol, ConnectedPoint, Endpoint, Multiaddr}; use libp2p_identity::PeerId; -use libp2p_swarm::DialError; -use libp2p_swarm::{Swarm, SwarmEvent}; +use libp2p_swarm::{DialError, Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt as _; -use std::{num::NonZeroU32, time::Duration}; #[tokio::test] async fn test_dial_back() { @@ -340,7 +345,8 @@ async fn test_global_ips_config() { client.listen().await; tokio::spawn(client.loop_on_next()); - // Expect the probe to be refused as both peers run on the same machine and thus in the same local network. + // Expect the probe to be refused as both peers run on the same machine and thus + // in the same local network. match server.next_behaviour_event().await { Event::InboundProbe(InboundProbeEvent::Error { error, .. }) => assert!(matches!( error, diff --git a/protocols/dcutr/src/behaviour.rs b/protocols/dcutr/src/behaviour.rs index 7d0366c98bc..f5b210c1e99 100644 --- a/protocols/dcutr/src/behaviour.rs +++ b/protocols/dcutr/src/behaviour.rs @@ -18,29 +18,45 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -//! [`NetworkBehaviour`] to act as a direct connection upgrade through relay node. +//! [`NetworkBehaviour`] to act as a direct connection upgrade through relay +//! node. 
+ +use std::{ + collections::{HashMap, HashSet, VecDeque}, + convert::Infallible, + num::NonZeroUsize, + task::{Context, Poll}, +}; -use crate::{handler, protocol}; use either::Either; -use libp2p_core::connection::ConnectedPoint; -use libp2p_core::multiaddr::Protocol; -use libp2p_core::transport::PortUse; -use libp2p_core::{Endpoint, Multiaddr}; +use libp2p_core::{ + connection::ConnectedPoint, + multiaddr::Protocol, + transport::PortUse, + Endpoint, + Multiaddr, +}; use libp2p_identity::PeerId; -use libp2p_swarm::behaviour::{ConnectionClosed, DialFailure, FromSwarm}; -use libp2p_swarm::dial_opts::{self, DialOpts}; use libp2p_swarm::{ - dummy, ConnectionDenied, ConnectionHandler, ConnectionId, NewExternalAddrCandidate, THandler, + behaviour::{ConnectionClosed, DialFailure, FromSwarm}, + dial_opts::{self, DialOpts}, + dummy, + ConnectionDenied, + ConnectionHandler, + ConnectionId, + NetworkBehaviour, + NewExternalAddrCandidate, + NotifyHandler, + THandler, + THandlerInEvent, THandlerOutEvent, + ToSwarm, }; -use libp2p_swarm::{NetworkBehaviour, NotifyHandler, THandlerInEvent, ToSwarm}; use lru::LruCache; -use std::collections::{HashMap, HashSet, VecDeque}; -use std::convert::Infallible; -use std::num::NonZeroUsize; -use std::task::{Context, Poll}; use thiserror::Error; +use crate::{handler, protocol}; + pub(crate) const MAX_NUMBER_OF_UPGRADE_ATTEMPTS: u8 = 3; /// The events produced by the [`Behaviour`]. @@ -184,7 +200,9 @@ impl NetworkBehaviour for Behaviour { handler::relayed::Handler::new(connected_point, self.observed_addresses()); handler.on_behaviour_event(handler::relayed::Command::Connect); - return Ok(Either::Left(handler)); // TODO: We could make two `handler::relayed::Handler` here, one inbound one outbound. + return Ok(Either::Left(handler)); // TODO: We could make two + // `handler::relayed::Handler` + // here, one inbound one outbound. 
} self.direct_connections .entry(peer) @@ -217,7 +235,8 @@ impl NetworkBehaviour for Behaviour { port_use, }, self.observed_addresses(), - ))); // TODO: We could make two `handler::relayed::Handler` here, one inbound one outbound. + ))); // TODO: We could make two `handler::relayed::Handler` here, + // one inbound one outbound. } self.direct_connections @@ -255,7 +274,8 @@ impl NetworkBehaviour for Behaviour { Either::Left(_) => connection_id, Either::Right(_) => match self.direct_to_relayed_connections.get(&connection_id) { None => { - // If the connection ID is unknown to us, it means we didn't create it so ignore any event coming from it. + // If the connection ID is unknown to us, it means we didn't create it so ignore + // any event coming from it. return; } Some(relayed_connection_id) => *relayed_connection_id, @@ -347,8 +367,9 @@ impl NetworkBehaviour for Behaviour { /// /// We use an [`LruCache`] to favor addresses that are reported more often. /// When attempting a hole-punch, we will try more frequent addresses first. -/// Most of these addresses will come from observations by other nodes (via e.g. the identify protocol). -/// More common observations mean a more likely stable port-mapping and thus a higher chance of a successful hole-punch. +/// Most of these addresses will come from observations by other nodes (via e.g. +/// the identify protocol). More common observations mean a more likely stable +/// port-mapping and thus a higher chance of a successful hole-punch. struct Candidates { inner: LruCache, me: PeerId, diff --git a/protocols/dcutr/src/handler/relayed.rs b/protocols/dcutr/src/handler/relayed.rs index ad12a196cb9..52f82655561 100644 --- a/protocols/dcutr/src/handler/relayed.rs +++ b/protocols/dcutr/src/handler/relayed.rs @@ -18,28 +18,40 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -//! [`ConnectionHandler`] handling relayed connection potentially upgraded to a direct connection. +//! 
[`ConnectionHandler`] handling relayed connection potentially upgraded to a +//! direct connection. + +use std::{ + collections::VecDeque, + io, + task::{Context, Poll}, + time::Duration, +}; -use crate::behaviour::MAX_NUMBER_OF_UPGRADE_ATTEMPTS; -use crate::{protocol, PROTOCOL_NAME}; use either::Either; use futures::future; -use libp2p_core::multiaddr::Multiaddr; -use libp2p_core::upgrade::{DeniedUpgrade, ReadyUpgrade}; -use libp2p_core::ConnectedPoint; -use libp2p_swarm::handler::{ - ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound, - ListenUpgradeError, +use libp2p_core::{ + multiaddr::Multiaddr, + upgrade::{DeniedUpgrade, ReadyUpgrade}, + ConnectedPoint, }; use libp2p_swarm::{ - ConnectionHandler, ConnectionHandlerEvent, StreamProtocol, StreamUpgradeError, + handler::{ + ConnectionEvent, + DialUpgradeError, + FullyNegotiatedInbound, + FullyNegotiatedOutbound, + ListenUpgradeError, + }, + ConnectionHandler, + ConnectionHandlerEvent, + StreamProtocol, + StreamUpgradeError, SubstreamProtocol, }; use protocol::{inbound, outbound}; -use std::collections::VecDeque; -use std::io; -use std::task::{Context, Poll}; -use std::time::Duration; + +use crate::{behaviour::MAX_NUMBER_OF_UPGRADE_ATTEMPTS, protocol, PROTOCOL_NAME}; #[derive(Debug)] pub enum Command { @@ -71,7 +83,8 @@ pub struct Handler { // Outbound DCUtR handshake. outbound_stream: futures_bounded::FuturesSet, outbound::Error>>, - /// The addresses we will send to the other party for hole-punching attempts. + /// The addresses we will send to the other party for hole-punching + /// attempts. holepunch_candidates: Vec, attempts: u8, @@ -109,13 +122,14 @@ impl Handler { .is_err() { tracing::warn!( - "New inbound connect stream while still upgrading previous one. Replacing previous with new.", + "New inbound connect stream while still upgrading previous one. 
Replacing \ + previous with new.", ); } self.attempts += 1; } - // A connection listener denies all incoming substreams, thus none can ever be fully negotiated. - // TODO: remove when Rust 1.82 is MSRV + // A connection listener denies all incoming substreams, thus none can ever be fully + // negotiated. TODO: remove when Rust 1.82 is MSRV #[allow(unreachable_patterns)] future::Either::Right(output) => libp2p_core::util::unreachable(output), } @@ -143,7 +157,8 @@ impl Handler { .is_err() { tracing::warn!( - "New outbound connect stream while still upgrading previous one. Replacing previous with new.", + "New outbound connect stream while still upgrading previous one. Replacing \ + previous with new.", ); } } @@ -198,10 +213,11 @@ impl ConnectionHandler for Handler { } ConnectedPoint::Listener { .. } => { // By the protocol specification the listening side of a relayed connection - // initiates the _direct connection upgrade_. In other words the listening side of - // the relayed connection opens a substream to the dialing side. (Connection roles - // and substream roles are reversed.) The listening side on a relayed connection - // never expects incoming substreams, hence the denied upgrade below. + // initiates the _direct connection upgrade_. In other words the listening side + // of the relayed connection opens a substream to the dialing + // side. (Connection roles and substream roles are reversed.) + // The listening side on a relayed connection never expects + // incoming substreams, hence the denied upgrade below. SubstreamProtocol::new(Either::Right(DeniedUpgrade), ()) } } diff --git a/protocols/dcutr/src/protocol/inbound.rs b/protocols/dcutr/src/protocol/inbound.rs index 005d8394f5e..c5209930ca2 100644 --- a/protocols/dcutr/src/protocol/inbound.rs +++ b/protocols/dcutr/src/protocol/inbound.rs @@ -18,14 +18,16 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::proto; +use std::io; + use asynchronous_codec::Framed; use futures::prelude::*; use libp2p_core::{multiaddr::Protocol, Multiaddr}; use libp2p_swarm::Stream; -use std::io; use thiserror::Error; +use crate::proto; + pub(crate) async fn handshake( stream: Stream, candidates: Vec, diff --git a/protocols/dcutr/src/protocol/outbound.rs b/protocols/dcutr/src/protocol/outbound.rs index 8639ff4f053..cdd3d5fbf0b 100644 --- a/protocols/dcutr/src/protocol/outbound.rs +++ b/protocols/dcutr/src/protocol/outbound.rs @@ -18,17 +18,18 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::proto; -use crate::PROTOCOL_NAME; +use std::io; + use asynchronous_codec::Framed; use futures::prelude::*; use futures_timer::Delay; use libp2p_core::{multiaddr::Protocol, Multiaddr}; use libp2p_swarm::Stream; -use std::io; use thiserror::Error; use web_time::Instant; +use crate::{proto, PROTOCOL_NAME}; + pub(crate) async fn handshake( stream: Stream, candidates: Vec, diff --git a/protocols/dcutr/tests/lib.rs b/protocols/dcutr/tests/lib.rs index 36f168fb04a..a35c9a50cfe 100644 --- a/protocols/dcutr/tests/lib.rs +++ b/protocols/dcutr/tests/lib.rs @@ -18,9 +18,12 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use libp2p_core::multiaddr::{Multiaddr, Protocol}; -use libp2p_core::transport::upgrade::Version; -use libp2p_core::transport::{MemoryTransport, Transport}; +use std::time::Duration; + +use libp2p_core::{ + multiaddr::{Multiaddr, Protocol}, + transport::{upgrade::Version, MemoryTransport, Transport}, +}; use libp2p_dcutr as dcutr; use libp2p_identify as identify; use libp2p_identity as identity; @@ -29,7 +32,6 @@ use libp2p_plaintext as plaintext; use libp2p_relay as relay; use libp2p_swarm::{Config, NetworkBehaviour, Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt as _; -use std::time::Duration; use tracing_subscriber::EnvFilter; #[tokio::test] diff --git a/protocols/floodsub/src/layer.rs b/protocols/floodsub/src/layer.rs index 1a70d2213b2..6e159ed963d 100644 --- a/protocols/floodsub/src/layer.rs +++ b/protocols/floodsub/src/layer.rs @@ -18,27 +18,47 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::protocol::{ - FloodsubMessage, FloodsubProtocol, FloodsubRpc, FloodsubSubscription, - FloodsubSubscriptionAction, +use std::{ + collections::{ + hash_map::{DefaultHasher, HashMap}, + VecDeque, + }, + iter, + task::{Context, Poll}, }; -use crate::topic::Topic; -use crate::FloodsubConfig; + use bytes::Bytes; use cuckoofilter::{CuckooError, CuckooFilter}; use fnv::FnvHashSet; -use libp2p_core::transport::PortUse; -use libp2p_core::{Endpoint, Multiaddr}; +use libp2p_core::{transport::PortUse, Endpoint, Multiaddr}; use libp2p_identity::PeerId; -use libp2p_swarm::behaviour::{ConnectionClosed, ConnectionEstablished, FromSwarm}; use libp2p_swarm::{ - dial_opts::DialOpts, CloseConnection, ConnectionDenied, ConnectionId, NetworkBehaviour, - NotifyHandler, OneShotHandler, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, + behaviour::{ConnectionClosed, ConnectionEstablished, FromSwarm}, + dial_opts::DialOpts, + CloseConnection, + ConnectionDenied, + ConnectionId, + NetworkBehaviour, + NotifyHandler, + 
OneShotHandler, + THandler, + THandlerInEvent, + THandlerOutEvent, + ToSwarm, }; use smallvec::SmallVec; -use std::collections::hash_map::{DefaultHasher, HashMap}; -use std::task::{Context, Poll}; -use std::{collections::VecDeque, iter}; + +use crate::{ + protocol::{ + FloodsubMessage, + FloodsubProtocol, + FloodsubRpc, + FloodsubSubscription, + FloodsubSubscriptionAction, + }, + topic::Topic, + FloodsubConfig, +}; /// Network behaviour that handles the floodsub protocol. pub struct Floodsub { @@ -50,7 +70,8 @@ pub struct Floodsub { /// List of peers to send messages to. target_peers: FnvHashSet, - /// List of peers the network is connected to, and the topics that they're subscribed to. + /// List of peers the network is connected to, and the topics that they're + /// subscribed to. // TODO: filter out peers that don't support floodsub, so that we avoid hammering them with // opened substreams connected_peers: HashMap>, @@ -117,7 +138,8 @@ impl Floodsub { /// Subscribes to a topic. /// - /// Returns true if the subscription worked. Returns false if we were already subscribed. + /// Returns true if the subscription worked. Returns false if we were + /// already subscribed. pub fn subscribe(&mut self, topic: Topic) -> bool { if self.subscribed_topics.iter().any(|t| t.id() == topic.id()) { return false; @@ -170,12 +192,14 @@ impl Floodsub { true } - /// Publishes a message to the network, if we're subscribed to the topic only. + /// Publishes a message to the network, if we're subscribed to the topic + /// only. pub fn publish(&mut self, topic: impl Into, data: impl Into) { self.publish_many(iter::once(topic), data) } - /// Publishes a message to the network, even if we're not subscribed to the topic. + /// Publishes a message to the network, even if we're not subscribed to the + /// topic. 
pub fn publish_any(&mut self, topic: impl Into, data: impl Into) { self.publish_many_any(iter::once(topic), data) } @@ -183,7 +207,8 @@ impl Floodsub { /// Publishes a message with multiple topics to the network. /// /// - /// > **Note**: Doesn't do anything if we're not subscribed to any of the topics. + /// > **Note**: Doesn't do anything if we're not subscribed to any of the + /// > topics. pub fn publish_many( &mut self, topic: impl IntoIterator>, @@ -192,7 +217,8 @@ impl Floodsub { self.publish_many_inner(topic, data, true) } - /// Publishes a message with multiple topics to the network, even if we're not subscribed to any of the topics. + /// Publishes a message with multiple topics to the network, even if we're + /// not subscribed to any of the topics. pub fn publish_many_any( &mut self, topic: impl IntoIterator>, @@ -224,8 +250,8 @@ impl Floodsub { if self_subscribed { if let Err(e @ CuckooError::NotEnoughSpace) = self.received.add(&message) { tracing::warn!( - "Message was added to 'received' Cuckoofilter but some \ - other message was removed as a consequence: {}", + "Message was added to 'received' Cuckoofilter but some other message was \ + removed as a consequence: {}", e, ); } @@ -317,8 +343,8 @@ impl Floodsub { let was_in = self.connected_peers.remove(&peer_id); debug_assert!(was_in.is_some()); - // We can be disconnected by the remote in case of inactivity for example, so we always - // try to reconnect. + // We can be disconnected by the remote in case of inactivity for example, so we + // always try to reconnect. 
if self.target_peers.contains(&peer_id) { self.events.push_back(ToSwarm::Dial { opts: DialOpts::peer_id(peer_id).build(), @@ -374,9 +400,10 @@ impl NetworkBehaviour for Floodsub { // Update connected peers topics for subscription in event.subscriptions { - let remote_peer_topics = self.connected_peers - .get_mut(&propagation_source) - .expect("connected_peers is kept in sync with the peers we are connected to; we are guaranteed to only receive events from connected peers; QED"); + let remote_peer_topics = self.connected_peers.get_mut(&propagation_source).expect( + "connected_peers is kept in sync with the peers we are connected to; we are \ + guaranteed to only receive events from connected peers; QED", + ); match subscription.action { FloodsubSubscriptionAction::Subscribe => { if !remote_peer_topics.contains(&subscription.topic) { @@ -408,16 +435,16 @@ impl NetworkBehaviour for Floodsub { let mut rpcs_to_dispatch: Vec<(PeerId, FloodsubRpc)> = Vec::new(); for message in event.messages { - // Use `self.received` to skip the messages that we have already received in the past. - // Note that this can result in false positives. + // Use `self.received` to skip the messages that we have already received in the + // past. Note that this can result in false positives. match self.received.test_and_add(&message) { Ok(true) => {} // Message was added. Ok(false) => continue, // Message already existed. Err(e @ CuckooError::NotEnoughSpace) => { // Message added, but some other removed. tracing::warn!( - "Message was added to 'received' Cuckoofilter but some \ - other message was removed as a consequence: {}", + "Message was added to 'received' Cuckoofilter but some other message was \ + removed as a consequence: {}", e, ); } @@ -433,7 +460,8 @@ impl NetworkBehaviour for Floodsub { self.events.push_back(ToSwarm::GenerateEvent(event)); } - // Propagate the message to everyone else who is subscribed to any of the topics. 
+ // Propagate the message to everyone else who is subscribed to any of the + // topics. for (peer_id, subscr_topics) in self.connected_peers.iter() { if peer_id == &propagation_source { continue; diff --git a/protocols/floodsub/src/lib.rs b/protocols/floodsub/src/lib.rs index 94766d5fdca..6a699a00a60 100644 --- a/protocols/floodsub/src/lib.rs +++ b/protocols/floodsub/src/lib.rs @@ -35,18 +35,21 @@ mod proto { pub(crate) use self::floodsub::pb::{mod_RPC::SubOpts, Message, RPC}; } -pub use self::layer::{Floodsub, FloodsubEvent}; -pub use self::protocol::{FloodsubMessage, FloodsubRpc}; -pub use self::topic::Topic; +pub use self::{ + layer::{Floodsub, FloodsubEvent}, + protocol::{FloodsubMessage, FloodsubRpc}, + topic::Topic, +}; /// Configuration options for the Floodsub protocol. #[derive(Debug, Clone)] pub struct FloodsubConfig { - /// Peer id of the local node. Used for the source of the messages that we publish. + /// Peer id of the local node. Used for the source of the messages that we + /// publish. pub local_peer_id: PeerId, - /// `true` if messages published by local node should be propagated as messages received from - /// the network, `false` by default. + /// `true` if messages published by local node should be propagated as + /// messages received from the network, `false` by default. pub subscribe_local_messages: bool, } diff --git a/protocols/floodsub/src/protocol.rs b/protocols/floodsub/src/protocol.rs index edc842be8ce..57bd76f6dec 100644 --- a/protocols/floodsub/src/protocol.rs +++ b/protocols/floodsub/src/protocol.rs @@ -18,19 +18,21 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::proto; -use crate::topic::Topic; +use std::{io, iter, pin::Pin}; + use asynchronous_codec::Framed; use bytes::Bytes; use futures::{ io::{AsyncRead, AsyncWrite}, Future, + SinkExt, + StreamExt, }; -use futures::{SinkExt, StreamExt}; use libp2p_core::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}; use libp2p_identity::PeerId; use libp2p_swarm::StreamProtocol; -use std::{io, iter, pin::Pin}; + +use crate::{proto, topic::Topic}; const MAX_MESSAGE_LEN_BYTES: usize = 2048; diff --git a/protocols/gossipsub/src/backoff.rs b/protocols/gossipsub/src/backoff.rs index 4414ffb00e6..0360cfe21ff 100644 --- a/protocols/gossipsub/src/backoff.rs +++ b/protocols/gossipsub/src/backoff.rs @@ -19,26 +19,32 @@ // DEALINGS IN THE SOFTWARE. //! Data structure for efficiently storing known back-off's when pruning peers. -use crate::topic::TopicHash; -use libp2p_identity::PeerId; -use std::collections::{ - hash_map::{Entry, HashMap}, - HashSet, +use std::{ + collections::{ + hash_map::{Entry, HashMap}, + HashSet, + }, + time::Duration, }; -use std::time::Duration; + +use libp2p_identity::PeerId; use web_time::Instant; +use crate::topic::TopicHash; + #[derive(Copy, Clone)] struct HeartbeatIndex(usize); /// Stores backoffs in an efficient manner. pub(crate) struct BackoffStorage { - /// Stores backoffs and the index in backoffs_by_heartbeat per peer per topic. + /// Stores backoffs and the index in backoffs_by_heartbeat per peer per + /// topic. backoffs: HashMap>, - /// Stores peer topic pairs per heartbeat (this is cyclic the current index is - /// heartbeat_index). + /// Stores peer topic pairs per heartbeat (this is cyclic the current index + /// is heartbeat_index). backoffs_by_heartbeat: Vec>, - /// The index in the backoffs_by_heartbeat vector corresponding to the current heartbeat. + /// The index in the backoffs_by_heartbeat vector corresponding to the + /// current heartbeat. heartbeat_index: HeartbeatIndex, /// The heartbeat interval duration from the config. 
heartbeat_interval: Duration, @@ -68,8 +74,8 @@ impl BackoffStorage { } } - /// Updates the backoff for a peer (if there is already a more restrictive backoff then this call - /// doesn't change anything). + /// Updates the backoff for a peer (if there is already a more restrictive + /// backoff then this call doesn't change anything). pub(crate) fn update_backoff(&mut self, topic: &TopicHash, peer: &PeerId, time: Duration) { let instant = Instant::now() + time; let insert_into_backoffs_by_heartbeat = @@ -114,13 +120,14 @@ impl BackoffStorage { }; } - /// Checks if a given peer is backoffed for the given topic. This method respects the - /// configured BACKOFF_SLACK and may return true even if the backup is already over. - /// It is guaranteed to return false if the backoff is not over and eventually if enough time - /// passed true if the backoff is over. + /// Checks if a given peer is backoffed for the given topic. This method + /// respects the configured BACKOFF_SLACK and may return true even if + /// the backoff is already over. It is guaranteed to return false if the + /// backoff is not over and eventually if enough time passed true if the + /// backoff is over. /// - /// This method should be used for deciding if we can already send a GRAFT to a previously - /// backoffed peer. + /// This method should be used for deciding if we can already send a GRAFT + /// to a previously backoffed peer. pub(crate) fn is_backoff_with_slack(&self, topic: &TopicHash, peer: &PeerId) -> bool { self.backoffs .get(topic) @@ -141,8 +148,8 @@ impl BackoffStorage { .and_then(|m| m.get(peer).map(|(i, _)| *i)) } - /// Applies a heartbeat. That should be called regularly in intervals of length - /// `heartbeat_interval`. + /// Applies a heartbeat. That should be called regularly in intervals of + /// length `heartbeat_interval`. 
pub(crate) fn heartbeat(&mut self) { // Clean up backoffs_by_heartbeat if let Some(s) = self.backoffs_by_heartbeat.get_mut(self.heartbeat_index.0) { @@ -155,7 +162,7 @@ impl BackoffStorage { None => false, }; if !keep { - //remove from backoffs + // remove from backoffs if let Entry::Occupied(mut m) = backoffs.entry(topic.clone()) { if m.get_mut().remove(peer).is_some() && m.get().is_empty() { m.remove(); diff --git a/protocols/gossipsub/src/behaviour.rs b/protocols/gossipsub/src/behaviour.rs index fae45ed452e..df9ee9d8ac7 100644 --- a/protocols/gossipsub/src/behaviour.rs +++ b/protocols/gossipsub/src/behaviour.rs @@ -19,11 +19,10 @@ // DEALINGS IN THE SOFTWARE. use std::{ - cmp::{max, Ordering}, - collections::HashSet, - collections::VecDeque, - collections::{BTreeSet, HashMap}, + cmp::{max, Ordering, Ordering::Equal}, + collections::{BTreeSet, HashMap, HashSet, VecDeque}, fmt, + fmt::Debug, net::IpAddr, task::{Context, Poll}, time::Duration, @@ -31,52 +30,68 @@ use std::{ use futures::FutureExt; use futures_timer::Delay; -use prometheus_client::registry::Registry; -use rand::{seq::SliceRandom, thread_rng}; - use libp2p_core::{ - multiaddr::Protocol::Ip4, multiaddr::Protocol::Ip6, transport::PortUse, Endpoint, Multiaddr, + multiaddr::Protocol::{Ip4, Ip6}, + transport::PortUse, + Endpoint, + Multiaddr, }; -use libp2p_identity::Keypair; -use libp2p_identity::PeerId; +use libp2p_identity::{Keypair, PeerId}; use libp2p_swarm::{ behaviour::{AddressChange, ConnectionClosed, ConnectionEstablished, FromSwarm}, dial_opts::DialOpts, - ConnectionDenied, ConnectionId, NetworkBehaviour, NotifyHandler, THandler, THandlerInEvent, - THandlerOutEvent, ToSwarm, + ConnectionDenied, + ConnectionId, + NetworkBehaviour, + NotifyHandler, + THandler, + THandlerInEvent, + THandlerOutEvent, + ToSwarm, }; +use prometheus_client::registry::Registry; +use quick_protobuf::{MessageWrite, Writer}; +use rand::{seq::SliceRandom, thread_rng}; use web_time::{Instant, SystemTime}; -use 
crate::peer_score::{PeerScore, PeerScoreParams, PeerScoreThresholds, RejectReason}; -use crate::protocol::SIGNING_PREFIX; -use crate::subscription_filter::{AllowAllSubscriptionFilter, TopicSubscriptionFilter}; -use crate::time_cache::DuplicateCache; -use crate::topic::{Hasher, Topic, TopicHash}; -use crate::transform::{DataTransform, IdentityTransform}; -use crate::types::{ - ControlAction, Message, MessageAcceptance, MessageId, PeerInfo, RawMessage, Subscription, - SubscriptionAction, -}; -use crate::types::{PeerConnections, PeerKind, RpcOut}; -use crate::{backoff::BackoffStorage, FailedMessages}; use crate::{ + backoff::BackoffStorage, config::{Config, ValidationMode}, - types::Graft, -}; -use crate::{gossip_promises::GossipPromises, types::Prune}; -use crate::{ + gossip_promises::GossipPromises, handler::{Handler, HandlerEvent, HandlerIn}, - types::IWant, -}; -use crate::{mcache::MessageCache, types::IHave}; -use crate::{ + mcache::MessageCache, metrics::{Churn, Config as MetricsConfig, Inclusion, Metrics, Penalty}, + peer_score::{PeerScore, PeerScoreParams, PeerScoreThresholds, RejectReason}, + protocol::SIGNING_PREFIX, rpc::Sender, + rpc_proto::proto, + subscription_filter::{AllowAllSubscriptionFilter, TopicSubscriptionFilter}, + time_cache::DuplicateCache, + topic::{Hasher, Topic, TopicHash}, + transform::{DataTransform, IdentityTransform}, + types::{ + ControlAction, + Graft, + IHave, + IWant, + Message, + MessageAcceptance, + MessageId, + PeerConnections, + PeerInfo, + PeerKind, + Prune, + RawMessage, + RpcOut, + Subscription, + SubscriptionAction, + }, + FailedMessages, + PublishError, + SubscriptionError, + TopicScoreParams, + ValidationError, }; -use crate::{rpc_proto::proto, TopicScoreParams}; -use crate::{PublishError, SubscriptionError, ValidationError}; -use quick_protobuf::{MessageWrite, Writer}; -use std::{cmp::Ordering::Equal, fmt::Debug}; #[cfg(test)] mod tests; @@ -85,32 +100,34 @@ mod tests; /// /// Without signing, a number of privacy 
preserving modes can be selected. /// -/// NOTE: The default validation settings are to require signatures. The [`ValidationMode`] -/// should be updated in the [`Config`] to allow for unsigned messages. +/// NOTE: The default validation settings are to require signatures. The +/// [`ValidationMode`] should be updated in the [`Config`] to allow for unsigned +/// messages. #[derive(Clone)] pub enum MessageAuthenticity { - /// Message signing is enabled. The author will be the owner of the key and the sequence number - /// will be linearly increasing. + /// Message signing is enabled. The author will be the owner of the key and + /// the sequence number will be linearly increasing. Signed(Keypair), /// Message signing is disabled. /// - /// The specified [`PeerId`] will be used as the author of all published messages. The sequence - /// number will be randomized. + /// The specified [`PeerId`] will be used as the author of all published + /// messages. The sequence number will be randomized. Author(PeerId), /// Message signing is disabled. /// - /// A random [`PeerId`] will be used when publishing each message. The sequence number will be - /// randomized. + /// A random [`PeerId`] will be used when publishing each message. The + /// sequence number will be randomized. RandomAuthor, /// Message signing is disabled. /// - /// The author of the message and the sequence numbers are excluded from the message. + /// The author of the message and the sequence numbers are excluded from the + /// message. /// - /// NOTE: Excluding these fields may make these messages invalid by other nodes who - /// enforce validation of these fields. See [`ValidationMode`] in the [`Config`] - /// for how to customise this for rust-libp2p gossipsub. A custom `message_id` - /// function will need to be set to prevent all messages from a peer being filtered - /// as duplicates. 
+ /// NOTE: Excluding these fields may make these messages invalid by other + /// nodes who enforce validation of these fields. See [`ValidationMode`] + /// in the [`Config`] for how to customise this for rust-libp2p + /// gossipsub. A custom `message_id` function will need to be set to + /// prevent all messages from a peer being filtered as duplicates. Anonymous, } @@ -132,8 +149,8 @@ pub enum Event { Message { /// The peer that forwarded us this message. propagation_source: PeerId, - /// The [`MessageId`] of the message. This should be referenced by the application when - /// validating a message (if required). + /// The [`MessageId`] of the message. This should be referenced by the + /// application when validating a message (if required). message_id: MessageId, /// The decompressed message itself. message: Message, @@ -158,13 +175,14 @@ pub enum Event { SlowPeer { /// The peer_id peer_id: PeerId, - /// The types and amounts of failed messages that are occurring for this peer. + /// The types and amounts of failed messages that are occurring for this + /// peer. failed_messages: FailedMessages, }, } -/// A data structure for storing configuration for publishing messages. See [`MessageAuthenticity`] -/// for further details. +/// A data structure for storing configuration for publishing messages. See +/// [`MessageAuthenticity`] for further details. #[allow(clippy::large_enum_variant)] enum PublishConfig { Signing { @@ -221,8 +239,8 @@ impl From for PublishConfig { let public_key = keypair.public(); let key_enc = public_key.encode_protobuf(); let key = if key_enc.len() <= 42 { - // The public key can be inlined in [`rpc_proto::proto::::Message::from`], so we don't include it - // specifically in the [`rpc_proto::proto::Message::key`] field. + // The public key can be inlined in [`rpc_proto::proto::Message::from`], so we + // don't include it specifically in the + // [`rpc_proto::proto::Message::key`] field. 
None } else { // Include the protobuf encoding of the public key in the message. @@ -245,15 +264,18 @@ impl From for PublishConfig { /// Network behaviour that handles the gossipsub protocol. /// -/// NOTE: Initialisation requires a [`MessageAuthenticity`] and [`Config`] instance. If -/// message signing is disabled, the [`ValidationMode`] in the config should be adjusted to an -/// appropriate level to accept unsigned messages. +/// NOTE: Initialisation requires a [`MessageAuthenticity`] and [`Config`] +/// instance. If message signing is disabled, the [`ValidationMode`] in the +/// config should be adjusted to an appropriate level to accept unsigned +/// messages. /// -/// The DataTransform trait allows applications to optionally add extra encoding/decoding -/// functionality to the underlying messages. This is intended for custom compression algorithms. +/// The DataTransform trait allows applications to optionally add extra +/// encoding/decoding functionality to the underlying messages. This is intended +/// for custom compression algorithms. /// -/// The TopicSubscriptionFilter allows applications to implement specific filters on topics to -/// prevent unwanted messages being propagated and evaluated. +/// The TopicSubscriptionFilter allows applications to implement specific +/// filters on topics to prevent unwanted messages being propagated and +/// evaluated. pub struct Behaviour { /// Configuration providing gossipsub performance parameters. config: Config, @@ -264,32 +286,36 @@ pub struct Behaviour { /// Information used for publishing messages. publish_config: PublishConfig, - /// An LRU Time cache for storing seen messages (based on their ID). This cache prevents - /// duplicates from being propagated to the application and on the network. + /// An LRU Time cache for storing seen messages (based on their ID). This + /// cache prevents duplicates from being propagated to the application + /// and on the network. 
duplicate_cache: DuplicateCache, - /// A set of connected peers, indexed by their [`PeerId`] tracking both the [`PeerKind`] and - /// the set of [`ConnectionId`]s. + /// A set of connected peers, indexed by their [`PeerId`] tracking both the + /// [`PeerKind`] and the set of [`ConnectionId`]s. connected_peers: HashMap, - /// A set of all explicit peers. These are peers that remain connected and we unconditionally - /// forward messages to, outside of the scoring system. + /// A set of all explicit peers. These are peers that remain connected and + /// we unconditionally forward messages to, outside of the scoring + /// system. explicit_peers: HashSet, /// A list of peers that have been blacklisted by the user. /// Messages are not sent to and are rejected from these peers. blacklisted_peers: HashSet, - /// Overlay network of connected peers - Maps topics to connected gossipsub peers. + /// Overlay network of connected peers - Maps topics to connected gossipsub + /// peers. mesh: HashMap>, - /// Map of topics to list of peers that we publish to, but don't subscribe to. + /// Map of topics to list of peers that we publish to, but don't subscribe + /// to. fanout: HashMap>, /// The last publish time for fanout topics. fanout_last_pub: HashMap, - ///Storage for backoffs + /// Storage for backoffs backoffs: BackoffStorage, /// Message cache for the last few heartbeats. @@ -298,40 +324,45 @@ pub struct Behaviour { /// Heartbeat interval stream. heartbeat: Delay, - /// Number of heartbeats since the beginning of time; this allows us to amortize some resource - /// clean up -- eg backoff clean up. + /// Number of heartbeats since the beginning of time; this allows us to + /// amortize some resource clean up -- eg backoff clean up. heartbeat_ticks: u64, - /// We remember all peers we found through peer exchange, since those peers are not considered - /// as safe as randomly discovered outbound peers. 
This behaviour diverges from the go - /// implementation to avoid possible love bombing attacks in PX. When disconnecting peers will - /// be removed from this list which may result in a true outbound rediscovery. + /// We remember all peers we found through peer exchange, since those peers + /// are not considered as safe as randomly discovered outbound peers. + /// This behaviour diverges from the go implementation to avoid possible + /// love bombing attacks in PX. When disconnecting peers will be removed + /// from this list which may result in a true outbound rediscovery. px_peers: HashSet, - /// Set of connected outbound peers (we only consider true outbound peers found through - /// discovery and not by PX). + /// Set of connected outbound peers (we only consider true outbound peers + /// found through discovery and not by PX). outbound_peers: HashSet, - /// Stores optional peer score data together with thresholds, decay interval and gossip - /// promises. + /// Stores optional peer score data together with thresholds, decay interval + /// and gossip promises. peer_score: Option<(PeerScore, PeerScoreThresholds, Delay, GossipPromises)>, - /// Counts the number of `IHAVE` received from each peer since the last heartbeat. + /// Counts the number of `IHAVE` received from each peer since the last + /// heartbeat. count_received_ihave: HashMap, - /// Counts the number of `IWANT` that we sent the each peer since the last heartbeat. + /// Counts the number of `IWANT` that we sent the each peer since the last + /// heartbeat. count_sent_iwant: HashMap, - /// Short term cache for published message ids. This is used for penalizing peers sending - /// our own messages back if the messages are anonymous or use a random author. + /// Short term cache for published message ids. This is used for penalizing + /// peers sending our own messages back if the messages are anonymous or + /// use a random author. 
published_message_ids: DuplicateCache, /// The filter used to handle message subscriptions. subscription_filter: F, - /// A general transformation function that can be applied to data received from the wire before - /// calculating the message-id and sending to the application. This is designed to allow the - /// user to implement arbitrary topic-based compression algorithms. + /// A general transformation function that can be applied to data received + /// from the wire before calculating the message-id and sending to the + /// application. This is designed to allow the user to implement + /// arbitrary topic-based compression algorithms. data_transform: D, /// Keep track of a set of internal metrics relating to gossipsub. @@ -346,8 +377,9 @@ where D: DataTransform + Default, F: TopicSubscriptionFilter + Default, { - /// Creates a Gossipsub [`Behaviour`] struct given a set of parameters specified via a - /// [`Config`]. This has no subscription filter and uses no compression. + /// Creates a Gossipsub [`Behaviour`] struct given a set of parameters + /// specified via a [`Config`]. This has no subscription filter and uses + /// no compression. pub fn new(privacy: MessageAuthenticity, config: Config) -> Result { Self::new_with_subscription_filter_and_transform( privacy, @@ -358,9 +390,10 @@ where ) } - /// Creates a Gossipsub [`Behaviour`] struct given a set of parameters specified via a - /// [`Config`]. This has no subscription filter and uses no compression. - /// Metrics can be evaluated by passing a reference to a [`Registry`]. + /// Creates a Gossipsub [`Behaviour`] struct given a set of parameters + /// specified via a [`Config`]. This has no subscription filter and uses + /// no compression. Metrics can be evaluated by passing a reference to a + /// [`Registry`]. 
pub fn new_with_metrics( privacy: MessageAuthenticity, config: Config, @@ -382,8 +415,8 @@ where D: DataTransform + Default, F: TopicSubscriptionFilter, { - /// Creates a Gossipsub [`Behaviour`] struct given a set of parameters specified via a - /// [`Config`] and a custom subscription filter. + /// Creates a Gossipsub [`Behaviour`] struct given a set of parameters + /// specified via a [`Config`] and a custom subscription filter. pub fn new_with_subscription_filter( privacy: MessageAuthenticity, config: Config, @@ -405,8 +438,8 @@ where D: DataTransform, F: TopicSubscriptionFilter + Default, { - /// Creates a Gossipsub [`Behaviour`] struct given a set of parameters specified via a - /// [`Config`] and a custom data transform. + /// Creates a Gossipsub [`Behaviour`] struct given a set of parameters + /// specified via a [`Config`] and a custom data transform. pub fn new_with_transform( privacy: MessageAuthenticity, config: Config, @@ -428,8 +461,9 @@ where D: DataTransform, F: TopicSubscriptionFilter, { - /// Creates a Gossipsub [`Behaviour`] struct given a set of parameters specified via a - /// [`Config`] and a custom subscription filter and data transform. + /// Creates a Gossipsub [`Behaviour`] struct given a set of parameters + /// specified via a [`Config`] and a custom subscription filter and data + /// transform. pub fn new_with_subscription_filter_and_transform( privacy: MessageAuthenticity, config: Config, @@ -439,8 +473,8 @@ where ) -> Result { // Set up the router given the configuration settings. - // We do not allow configurations where a published message would also be rejected if it - // were received locally. + // We do not allow configurations where a published message would also be + // rejected if it were received locally. validate_config(&privacy, config.validation_mode())?; Ok(Behaviour { @@ -520,8 +554,8 @@ where /// Subscribe to a topic. /// - /// Returns [`Ok(true)`] if the subscription worked. 
Returns [`Ok(false)`] if we were already - /// subscribed. + /// Returns [`Ok(true)`] if the subscription worked. Returns [`Ok(false)`] + /// if we were already subscribed. pub fn subscribe(&mut self, topic: &Topic) -> Result { tracing::debug!(%topic, "Subscribing to topic"); let topic_hash = topic.hash(); @@ -607,8 +641,8 @@ where // Check the if the message has been published before if self.duplicate_cache.contains(&msg_id) { - // This message has already been seen. We don't re-publish messages that have already - // been published on the network. + // This message has already been seen. We don't re-publish messages that have + // already been published on the network. tracing::warn!( message=%msg_id, "Not publishing a message that has already been published" @@ -717,13 +751,13 @@ where } } - // If the message isn't a duplicate and we have sent it to some peers add it to the - // duplicate cache and memcache. + // If the message isn't a duplicate and we have sent it to some peers add it to + // the duplicate cache and memcache. self.duplicate_cache.insert(msg_id.clone()); self.mcache.put(&msg_id, raw_message.clone()); - // If the message is anonymous or has a random author add it to the published message ids - // cache. + // If the message is anonymous or has a random author add it to the published + // message ids cache. if let PublishConfig::RandomAuthor | PublishConfig::Anonymous = self.publish_config { if !self.config.allow_self_origin() { self.published_message_ids.insert(msg_id.clone()); @@ -762,23 +796,27 @@ where Ok(msg_id) } - /// This function should be called when [`Config::validate_messages()`] is `true` after - /// the message got validated by the caller. Messages are stored in the ['Memcache'] and - /// validation is expected to be fast enough that the messages should still exist in the cache. - /// There are three possible validation outcomes and the outcome is given in acceptance. 
+ /// This function should be called when [`Config::validate_messages()`] is + /// `true` after the message got validated by the caller. Messages are + /// stored in the ['Memcache'] and validation is expected to be fast + /// enough that the messages should still exist in the cache. + /// There are three possible validation outcomes and the outcome is given in + /// acceptance. /// - /// If acceptance = [`MessageAcceptance::Accept`] the message will get propagated to the - /// network. The `propagation_source` parameter indicates who the message was received by and - /// will not be forwarded back to that peer. + /// If acceptance = [`MessageAcceptance::Accept`] the message will get + /// propagated to the network. The `propagation_source` parameter + /// indicates who the message was received by and will not be forwarded + /// back to that peer. /// - /// If acceptance = [`MessageAcceptance::Reject`] the message will be deleted from the memcache - /// and the P₄ penalty will be applied to the `propagation_source`. + /// If acceptance = [`MessageAcceptance::Reject`] the message will be + /// deleted from the memcache and the P₄ penalty will be applied to the + /// `propagation_source`. // - /// If acceptance = [`MessageAcceptance::Ignore`] the message will be deleted from the memcache - /// but no P₄ penalty will be applied. + /// If acceptance = [`MessageAcceptance::Ignore`] the message will be + /// deleted from the memcache but no P₄ penalty will be applied. /// - /// This function will return true if the message was found in the cache and false if was not - /// in the cache anymore. + /// This function will return true if the message was found in the cache and + /// false if was not in the cache anymore. /// /// This should only be called once per message. 
pub fn report_message_validation_result( @@ -855,15 +893,15 @@ where self.check_explicit_peer_connection(peer_id); } - /// This removes the peer from explicitly connected peers, note that this does not disconnect - /// the peer. + /// This removes the peer from explicitly connected peers, note that this + /// does not disconnect the peer. pub fn remove_explicit_peer(&mut self, peer_id: &PeerId) { tracing::debug!(peer=%peer_id, "Removing explicit peer"); self.explicit_peers.remove(peer_id); } - /// Blacklists a peer. All messages from this peer will be rejected and any message that was - /// created by this peer will be rejected. + /// Blacklists a peer. All messages from this peer will be rejected and any + /// message that was created by this peer will be rejected. pub fn blacklist_peer(&mut self, peer_id: &PeerId) { if self.blacklisted_peers.insert(*peer_id) { tracing::debug!(peer=%peer_id, "Peer has been blacklisted"); @@ -877,9 +915,10 @@ where } } - /// Activates the peer scoring system with the given parameters. This will reset all scores - /// if there was already another peer scoring system activated. Returns an error if the - /// params are not valid or if they got already set. + /// Activates the peer scoring system with the given parameters. This will + /// reset all scores if there was already another peer scoring system + /// activated. Returns an error if the params are not valid or if they + /// got already set. pub fn with_peer_score( &mut self, params: PeerScoreParams, @@ -888,8 +927,9 @@ where self.with_peer_score_and_message_delivery_time_callback(params, threshold, None) } - /// Activates the peer scoring system with the given parameters and a message delivery time - /// callback. Returns an error if the parameters got already set. + /// Activates the peer scoring system with the given parameters and a + /// message delivery time callback. Returns an error if the parameters + /// got already set. 
pub fn with_peer_score_and_message_delivery_time_callback( &mut self, params: PeerScoreParams, @@ -911,7 +951,8 @@ where /// Sets scoring parameters for a topic. /// - /// The [`Self::with_peer_score()`] must first be called to initialise peer scoring. + /// The [`Self::with_peer_score()`] must first be called to initialise peer + /// scoring. pub fn set_topic_params( &mut self, topic: Topic, @@ -930,8 +971,9 @@ where self.peer_score.as_ref()?.0.get_topic_params(&topic.hash()) } - /// Sets the application specific score for a peer. Returns true if scoring is active and - /// the peer is connected or if the score of the peer is not yet expired, false otherwise. + /// Sets the application specific score for a peer. Returns true if scoring + /// is active and the peer is connected or if the score of the peer is + /// not yet expired, false otherwise. pub fn set_application_score(&mut self, peer_id: &PeerId, new_score: f64) -> bool { if let Some((peer_score, ..)) = &mut self.peer_score { peer_score.set_application_score(peer_id, new_score) @@ -940,7 +982,8 @@ where } } - /// Gossipsub JOIN(topic) - adds topic peers to mesh and sends them GRAFT messages. + /// Gossipsub JOIN(topic) - adds topic peers to mesh and sends them GRAFT + /// messages. fn join(&mut self, topic_hash: &TopicHash) { tracing::debug!(topic=%topic_hash, "Running JOIN for topic"); @@ -956,8 +999,8 @@ where m.joined(topic_hash) } - // check if we have mesh_n peers in fanout[topic] and add them to the mesh if we do, - // removing the fanout entry. + // check if we have mesh_n peers in fanout[topic] and add them to the mesh if we + // do, removing the fanout entry. if let Some((_, mut peers)) = self.fanout.remove_entry(topic_hash) { tracing::debug!( topic=%topic_hash, @@ -1116,11 +1159,13 @@ where } } - /// Gossipsub LEAVE(topic) - Notifies mesh\[topic\] peers with PRUNE messages. + /// Gossipsub LEAVE(topic) - Notifies mesh\[topic\] peers with PRUNE + /// messages. 
fn leave(&mut self, topic_hash: &TopicHash) { tracing::debug!(topic=%topic_hash, "Running LEAVE for topic"); - // If our mesh contains the topic, send prune to peers and delete it from the mesh + // If our mesh contains the topic, send prune to peers and delete it from the + // mesh if let Some((_, peers)) = self.mesh.remove_entry(topic_hash) { if let Some(m) = self.metrics.as_mut() { m.left(topic_hash) @@ -1147,7 +1192,8 @@ where tracing::debug!(topic=%topic_hash, "Completed LEAVE for topic"); } - /// Checks if the given peer is still connected and if not dials the peer again. + /// Checks if the given peer is still connected and if not dials the peer + /// again. fn check_explicit_peer_connection(&mut self, peer_id: &PeerId) { if !self.connected_peers.contains_key(peer_id) { // Connect to peer @@ -1158,8 +1204,8 @@ where } } - /// Determines if a peer's score is below a given `PeerScoreThreshold` chosen via the - /// `threshold` parameter. + /// Determines if a peer's score is below a given `PeerScoreThreshold` + /// chosen via the `threshold` parameter. fn score_below_threshold( &self, peer_id: &PeerId, @@ -1184,10 +1230,11 @@ where } } - /// Handles an IHAVE control message. Checks our cache of messages. If the message is unknown, - /// requests it with an IWANT control message. + /// Handles an IHAVE control message. Checks our cache of messages. If the + /// message is unknown, requests it with an IWANT control message. fn handle_ihave(&mut self, peer_id: &PeerId, ihave_msgs: Vec<(TopicHash, Vec)>) { - // We ignore IHAVE gossip from any peer whose score is below the gossip threshold + // We ignore IHAVE gossip from any peer whose score is below the gossip + // threshold if let (true, score) = self.score_below_threshold(peer_id, |pst| pst.gossip_threshold) { tracing::debug!( peer=%peer_id, @@ -1303,10 +1350,11 @@ where tracing::trace!(peer=%peer_id, "Completed IHAVE handling for peer"); } - /// Handles an IWANT control message. Checks our cache of messages. 
If the message exists it is - /// forwarded to the requesting peer. + /// Handles an IWANT control message. Checks our cache of messages. If the + /// message exists it is forwarded to the requesting peer. fn handle_iwant(&mut self, peer_id: &PeerId, iwant_msgs: Vec) { - // We ignore IWANT gossip from any peer whose score is below the gossip threshold + // We ignore IWANT gossip from any peer whose score is below the gossip + // threshold if let (true, score) = self.score_below_threshold(peer_id, |pst| pst.gossip_threshold) { tracing::debug!( peer=%peer_id, @@ -1347,8 +1395,8 @@ where tracing::debug!(peer=%peer_id, "Completed IWANT handling for peer"); } - /// Handles GRAFT control messages. If subscribed to the topic, adds the peer to mesh, if not, - /// responds with PRUNE messages. + /// Handles GRAFT control messages. If subscribed to the topic, adds the + /// peer to mesh, if not, responds with PRUNE messages. fn handle_graft(&mut self, peer_id: &PeerId, topics: Vec) { tracing::debug!(peer=%peer_id, "Handling GRAFT message for peer"); @@ -1361,8 +1409,9 @@ where return; }; - // For each topic, if a peer has grafted us, then we necessarily must be in their mesh - // and they must be subscribed to the topic. Ensure we have recorded the mapping. + // For each topic, if a peer has grafted us, then we necessarily must be in + // their mesh and they must be subscribed to the topic. Ensure we have + // recorded the mapping. 
for topic in &topics { if connected_peer.topics.insert(topic.clone()) { if let Some(m) = self.metrics.as_mut() { @@ -1374,7 +1423,8 @@ where // we don't GRAFT to/from explicit peers; complain loudly if this happens if self.explicit_peers.contains(peer_id) { tracing::warn!(peer=%peer_id, "GRAFT: ignoring request from direct peer"); - // this is possibly a bug from non-reciprocal configuration; send a PRUNE for all topics + // this is possibly a bug from non-reciprocal configuration; send a PRUNE for + // all topics to_prune_topics = topics.into_iter().collect(); // but don't PX do_px = false @@ -1415,7 +1465,7 @@ where + self.config.graft_flood_threshold()) - self.config.prune_backoff(); if flood_cutoff > now { - //extra penalty + // extra penalty peer_score.add_penalty(peer_id, 1); } } @@ -1436,15 +1486,16 @@ where topic=%topic_hash, "GRAFT: ignoring peer with negative score" ); - // we do send them PRUNE however, because it's a matter of protocol correctness + // we do send them PRUNE however, because it's a matter of protocol + // correctness to_prune_topics.insert(topic_hash.clone()); // but we won't PX to them do_px = false; continue; } - // check mesh upper bound and only allow graft if the upper bound is not reached or - // if it is an outbound peer + // check mesh upper bound and only allow graft if the upper bound is not reached + // or if it is an outbound peer if peers.len() >= self.config.mesh_n_high() && !self.outbound_peers.contains(peer_id) { @@ -1572,7 +1623,7 @@ where self.remove_peer_from_mesh(peer_id, &topic_hash, backoff, true, Churn::Prune); if self.mesh.contains_key(&topic_hash) { - //connect to px peers + // connect to px peers if !px.is_empty() { // we ignore PX from peers with insufficient score if below_threshold { @@ -1604,8 +1655,8 @@ where let n = self.config.prune_peers(); // Ignore peerInfo with no ID // - //TODO: Once signed records are spec'd: Can we use peerInfo without any IDs if they have a - // signed peer record? 
+ // TODO: Once signed records are spec'd: Can we use peerInfo without any IDs if + // they have a signed peer record? px.retain(|p| p.peer_id.is_some()); if px.len() > n { // only use at most prune_peers many random peers @@ -1615,8 +1666,8 @@ where } for p in px { - // TODO: Once signed records are spec'd: extract signed peer record if given and handle - // it, see https://github.com/libp2p/specs/pull/217 + // TODO: Once signed records are spec'd: extract signed peer record if given and + // handle it, see https://github.com/libp2p/specs/pull/217 if let Some(peer_id) = p.peer_id { // mark as px peer self.px_peers.insert(peer_id); @@ -1629,8 +1680,8 @@ where } } - /// Applies some basic checks to whether this message is valid. Does not apply user validation - /// checks. + /// Applies some basic checks to whether this message is valid. Does not + /// apply user validation checks. fn message_is_valid( &mut self, msg_id: &MessageId, @@ -1720,7 +1771,8 @@ where metrics.msg_recvd_unfiltered(&raw_message.topic, raw_message.raw_protobuf_len()); } - // Try and perform the data transform to the message. If it fails, consider it invalid. + // Try and perform the data transform to the message. If it fails, consider it + // invalid. let message = match self.data_transform.inbound_transform(raw_message.clone()) { Ok(message) => message, Err(e) => { @@ -1739,8 +1791,9 @@ where let msg_id = self.config.message_id(&message); // Check the validity of the message - // Peers get penalized if this message is invalid. We don't add it to the duplicate cache - // and instead continually penalize peers that repeatedly send this message. + // Peers get penalized if this message is invalid. We don't add it to the + // duplicate cache and instead continually penalize peers that + // repeatedly send this message. 
if !self.message_is_valid(&msg_id, &mut raw_message, propagation_source) { return; } @@ -1826,9 +1879,10 @@ where gossip_promises.reject_message(&message_id, &reject_reason); } else { - // The message is invalid, we reject it ignoring any gossip promises. If a peer is - // advertising this message via an IHAVE and it's invalid it will be double - // penalized, one for sending us an invalid and again for breaking a promise. + // The message is invalid, we reject it ignoring any gossip promises. If a peer + // is advertising this message via an IHAVE and it's invalid it + // will be double penalized, one for sending us an invalid and + // again for breaking a promise. peer_score.reject_invalid_message(propagation_source, &raw_message.topic); } } @@ -1878,7 +1932,8 @@ where }; for subscription in filtered_topics { - // get the peers from the mapping, or insert empty lists if the topic doesn't exist + // get the peers from the mapping, or insert empty lists if the topic doesn't + // exist let topic_hash = &subscription.topic_hash; match subscription.action { @@ -1967,7 +2022,8 @@ where self.remove_peer_from_mesh(&peer_id, &topic_hash, None, false, Churn::Unsub); } - // Potentially inform the handler if we have added this peer to a mesh for the first time. + // Potentially inform the handler if we have added this peer to a mesh for the + // first time. let topics_joined = topics_to_graft.iter().collect::>(); if !topics_joined.is_empty() { peer_added_to_mesh( @@ -1979,8 +2035,8 @@ where ); } - // If we need to send grafts to peer, do so immediately, rather than waiting for the - // heartbeat. + // If we need to send grafts to peer, do so immediately, rather than waiting for + // the heartbeat. 
for topic_hash in topics_to_graft.into_iter() { self.send_message(*propagation_source, RpcOut::Graft(Graft { topic_hash })); } @@ -2013,9 +2069,9 @@ where tracing::debug!("Starting heartbeat"); let start = Instant::now(); - // Every heartbeat we sample the send queues to add to our metrics. We do this intentionally - // before we add all the gossip from this heartbeat in order to gain a true measure of - // steady-state size of the queues. + // Every heartbeat we sample the send queues to add to our metrics. We do this + // intentionally before we add all the gossip from this heartbeat in + // order to gain a true measure of steady-state size of the queues. if let Some(m) = &mut self.metrics { for sender_queue in self.connected_peers.values().map(|v| &v.sender) { m.observe_priority_queue_size(sender_queue.priority_queue_len()); @@ -2046,7 +2102,8 @@ where } } - // Cache the scores of all connected peers, and record metrics for current penalties. + // Cache the scores of all connected peers, and record metrics for current + // penalties. let mut scores = HashMap::with_capacity(self.connected_peers.len()); if let Some((peer_score, ..)) = &self.peer_score { for peer_id in self.connected_peers.keys() { @@ -2063,8 +2120,8 @@ where let outbound_peers = &self.outbound_peers; // drop all peers with negative score, without PX - // if there is at some point a stable retain method for BTreeSet the following can be - // written more efficiently with retain. + // if there is at some point a stable retain method for BTreeSet the following + // can be written more efficiently with retain. 
let mut to_remove_peers = Vec::new(); for peer_id in peers.iter() { let peer_score = *scores.get(peer_id).unwrap_or(&0.0); @@ -2158,8 +2215,8 @@ where .count() }; - // remove the first excess_peer_no allowed (by outbound restrictions) peers adding - // them to to_prune + // remove the first excess_peer_no allowed (by outbound restrictions) peers + // adding them to to_prune let mut removed = 0; for peer in shuffled { if removed == excess_peer_no { @@ -2413,8 +2470,8 @@ where } } - /// Emits gossip - Send IHAVE messages to a random set of gossip peers. This is applied to mesh - /// and fanout peers + /// Emits gossip - Send IHAVE messages to a random set of gossip peers. This + /// is applied to mesh and fanout peers fn emit_gossip(&mut self) { let mut rng = thread_rng(); let mut messages = Vec::new(); @@ -2424,7 +2481,8 @@ where continue; } - // if we are emitting more than GossipSubMaxIHaveLength message_ids, truncate the list + // if we are emitting more than GossipSubMaxIHaveLength message_ids, truncate + // the list if message_ids.len() > self.config.max_ihave_length() { // we do the truncation (with shuffling) per peer below tracing::debug!( @@ -2436,7 +2494,8 @@ where message_ids.shuffle(&mut rng); } - // dynamic number of peers to gossip based on `gossip_factor` with minimum `gossip_lazy` + // dynamic number of peers to gossip based on `gossip_factor` with minimum + // `gossip_lazy` let n_map = |m| { max( self.config.gossip_lazy(), @@ -2479,8 +2538,8 @@ where } } - /// Handles multiple GRAFT/PRUNE messages and coalesces them into chunked gossip control - /// messages. + /// Handles multiple GRAFT/PRUNE messages and coalesces them into chunked + /// gossip control messages. fn send_graft_prune( &mut self, to_graft: HashMap>, @@ -2512,9 +2571,9 @@ where }); // If there are prunes associated with the same peer add them. - // NOTE: In this case a peer has been added to a topic mesh, and removed from another. 
- // It therefore must be in at least one mesh and we do not need to inform the handler - // of its removal from another. + // NOTE: In this case a peer has been added to a topic mesh, and removed from + // another. It therefore must be in at least one mesh and we do not + // need to inform the handler of its removal from another. // The following prunes are not due to unsubscribing. let prune_msgs = to_prune @@ -2724,8 +2783,8 @@ where /// /// Returns `true` if sending was successful, `false` otherwise. /// The method will update the peer score and failed message counter if - /// sending the message failed due to the channel to the connection handler being - /// full (which indicates a slow peer). + /// sending the message failed due to the channel to the connection handler + /// being full (which indicates a slow peer). fn send_message(&mut self, peer_id: PeerId, rpc: RpcOut) -> bool { if let Some(m) = self.metrics.as_mut() { if let RpcOut::Publish { ref message, .. } | RpcOut::Forward { ref message, .. } = rpc { @@ -2765,7 +2824,10 @@ where | RpcOut::Prune(_) | RpcOut::Subscribe(_) | RpcOut::Unsubscribe(_) => { - unreachable!("Channel for highpriority contorl messages is unbounded and should always be open.") + unreachable!( + "Channel for high priority control messages is unbounded and should \ + always be open." + ) } } @@ -2788,12 +2850,12 @@ where .. }: ConnectionEstablished, ) { - // Diverging from the go implementation we only want to consider a peer as outbound peer - // if its first connection is outbound. + // Diverging from the go implementation we only want to consider a peer as + // outbound peer
if endpoint.is_dialer() && other_established == 0 && !self.px_peers.contains(&peer_id) { - // The first connection is outbound and it is not a peer from peer exchange => mark - // it as outbound peer + // The first connection is outbound and it is not a peer from peer exchange => + // mark it as outbound peer self.outbound_peers.insert(peer_id); } @@ -2811,7 +2873,8 @@ where } if other_established > 0 { - return; // Not our first connection to this peer, hence nothing to do. + return; // Not our first connection to this peer, hence nothing to + // do. } if let Some((peer_score, ..)) = &mut self.peer_score { @@ -2864,8 +2927,8 @@ where .expect("Previously established connection to peer must be present"); peer.connections.remove(index); - // If there are more connections and this peer is in a mesh, inform the first connection - // handler. + // If there are more connections and this peer is in a mesh, inform the first + // connection handler. if !peer.connections.is_empty() { for topic in &peer.topics { if let Some(mesh_peers) = self.mesh.get(topic) { @@ -2916,7 +2979,8 @@ where self.px_peers.remove(&peer_id); self.outbound_peers.remove(&peer_id); - // If metrics are enabled, register the disconnection of a peer based on its protocol. + // If metrics are enabled, register the disconnection of a peer based on its + // protocol. if let Some(metrics) = self.metrics.as_mut() { metrics.peer_protocol_disconnected(connected_peer.kind.clone()); } @@ -2987,9 +3051,9 @@ where ) -> Result, ConnectionDenied> { // By default we assume a peer is only a floodsub peer. // - // The protocol negotiation occurs once a message is sent/received. Once this happens we - // update the type of peer that this is in order to determine which kind of routing should - // occur. + // The protocol negotiation occurs once a message is sent/received. Once this + // happens we update the type of peer that this is in order to determine + // which kind of routing should occur. 
let connected_peer = self .connected_peers .entry(peer_id) @@ -3152,14 +3216,19 @@ where if self.config.max_messages_per_rpc().is_some() && Some(count) >= self.config.max_messages_per_rpc() { - tracing::warn!("Received more messages than permitted. Ignoring further messages. Processed: {}", count); + tracing::warn!( + "Received more messages than permitted. Ignoring further messages. \ + Processed: {}", + count + ); break; } self.handle_received_message(raw_message, &propagation_source); } // Handle control messages - // group some control messages, this minimises SendEvents (code is simplified to handle each event at a time however) + // group some control messages, this minimises SendEvents (code is simplified to + // handle each event at a time however) let mut ihave_msgs = vec![]; let mut graft_msgs = vec![]; let mut prune_msgs = vec![]; @@ -3234,9 +3303,10 @@ where } } -/// This is called when peers are added to any mesh. It checks if the peer existed -/// in any other mesh. If this is the first mesh they have joined, it queues a message to notify -/// the appropriate connection handler to maintain a connection. +/// This is called when peers are added to any mesh. It checks if the peer +/// existed in any other mesh. If this is the first mesh they have joined, it +/// queues a message to notify the appropriate connection handler to maintain a +/// connection. fn peer_added_to_mesh( peer_id: PeerId, new_topics: Vec<&TopicHash>, @@ -3276,9 +3346,10 @@ fn peer_added_to_mesh( }); } -/// This is called when peers are removed from a mesh. It checks if the peer exists -/// in any other mesh. If this is the last mesh they have joined, we return true, in order to -/// notify the handler to no longer maintain a connection. +/// This is called when peers are removed from a mesh. It checks if the peer +/// exists in any other mesh. If this is the last mesh they have joined, we +/// return true, in order to notify the handler to no longer maintain a +/// connection. 
fn peer_removed_from_mesh( peer_id: PeerId, old_topic: &TopicHash, @@ -3319,8 +3390,8 @@ fn peer_removed_from_mesh( } /// Helper function to get a subset of random gossipsub peers for a `topic_hash` -/// filtered by the function `f`. The number of peers to get equals the output of `n_map` -/// that gets as input the number of filtered peers. +/// filtered by the function `f`. The number of peers to get equals the output +/// of `n_map` that gets as input the number of filtered peers. fn get_random_peers_dynamic( connected_peers: &HashMap, topic_hash: &TopicHash, @@ -3352,8 +3423,8 @@ fn get_random_peers_dynamic( gossip_peers.into_iter().take(n).collect() } -/// Helper function to get a set of `n` random gossipsub peers for a `topic_hash` -/// filtered by the function `f`. +/// Helper function to get a set of `n` random gossipsub peers for a +/// `topic_hash` filtered by the function `f`. fn get_random_peers( connected_peers: &HashMap, topic_hash: &TopicHash, @@ -3363,8 +3434,8 @@ fn get_random_peers( get_random_peers_dynamic(connected_peers, topic_hash, |_| n, f) } -/// Validates the combination of signing, privacy and message validation to ensure the -/// configuration will not reject published messages. +/// Validates the combination of signing, privacy and message validation to +/// ensure the configuration will not reject published messages. fn validate_config( authenticity: &MessageAuthenticity, validation_mode: &ValidationMode, @@ -3372,20 +3443,26 @@ fn validate_config( match validation_mode { ValidationMode::Anonymous => { if authenticity.is_signing() { - return Err("Cannot enable message signing with an Anonymous validation mode. Consider changing either the ValidationMode or MessageAuthenticity"); + return Err( + "Cannot enable message signing with an Anonymous validation mode. 
Consider \ + changing either the ValidationMode or MessageAuthenticity", + ); } if !authenticity.is_anonymous() { - return Err("Published messages contain an author but incoming messages with an author will be rejected. Consider adjusting the validation or privacy settings in the config"); + return Err( + "Published messages contain an author but incoming messages with an author \ + will be rejected. Consider adjusting the validation or privacy settings in \ + the config", + ); } } ValidationMode::Strict => { if !authenticity.is_signing() { - return Err( - "Messages will be - published unsigned and incoming unsigned messages will be rejected. Consider adjusting - the validation or privacy settings in the config" - ); + return Err("Messages will be + published unsigned and incoming unsigned messages will be rejected. Consider \ + adjusting + the validation or privacy settings in the config"); } } _ => {} diff --git a/protocols/gossipsub/src/behaviour/tests.rs b/protocols/gossipsub/src/behaviour/tests.rs index 9567150382a..fd3f8480afe 100644 --- a/protocols/gossipsub/src/behaviour/tests.rs +++ b/protocols/gossipsub/src/behaviour/tests.rs @@ -20,16 +20,20 @@ // Collection of tests for the gossipsub network behaviour -use super::*; -use crate::rpc::Receiver; -use crate::subscription_filter::WhitelistSubscriptionFilter; -use crate::{config::ConfigBuilder, types::Rpc, IdentTopic as Topic}; +use std::{future, net::Ipv4Addr, thread::sleep}; + use byteorder::{BigEndian, ByteOrder}; use libp2p_core::ConnectedPoint; use rand::Rng; -use std::future; -use std::net::Ipv4Addr; -use std::thread::sleep; + +use super::*; +use crate::{ + config::ConfigBuilder, + rpc::Receiver, + subscription_filter::WhitelistSubscriptionFilter, + types::Rpc, + IdentTopic as Topic, +}; #[derive(Default, Debug)] struct InjectNodes @@ -302,7 +306,8 @@ where } } -// Converts a protobuf message into a gossipsub message for reading the Gossipsub event queue. 
+// Converts a protobuf message into a gossipsub message for reading the +// Gossipsub event queue. fn proto_to_message(rpc: &proto::RPC) -> Rpc { // Store valid messages. let mut messages = Vec::with_capacity(rpc.publish.len()); @@ -311,7 +316,8 @@ fn proto_to_message(rpc: &proto::RPC) -> Rpc { messages.push(RawMessage { source: message.from.map(|x| PeerId::from_bytes(&x).unwrap()), data: message.data.unwrap_or_default(), - sequence_number: message.seqno.map(|x| BigEndian::read_u64(&x)), // don't inform the application + sequence_number: message.seqno.map(|x| BigEndian::read_u64(&x)), /* don't inform the + * application */ topic: TopicHash::from_raw(message.topic), signature: message.signature, // don't inform the application key: None, @@ -677,7 +683,7 @@ fn test_publish_without_flood_publishing() { // - Send publish message to all peers // - Insert message into gs.mcache and gs.received - //turn off flood publish to test old behaviour + // turn off flood publish to test old behaviour let config = ConfigBuilder::default() .flood_publish(false) .build() @@ -757,7 +763,7 @@ fn test_fanout() { // - Send publish message to fanout peers // - Insert message into gs.mcache and gs.received - //turn off flood publish to test fanout behaviour + // turn off flood publish to test fanout behaviour let config = ConfigBuilder::default() .flood_publish(false) .build() @@ -869,7 +875,8 @@ fn test_inject_connected() { // check that there are 20 send events created assert_eq!(subscriptions.len(), 20); - // should add the new peers to `peer_topics` with an empty vec as a gossipsub node + // should add the new peers to `peer_topics` with an empty vec as a gossipsub + // node for peer in peers { let peer = gs.connected_peers.get(&peer).unwrap(); assert!( @@ -1047,7 +1054,8 @@ fn test_get_random_peers() { assert!(random_peers.len() == 10, "Expected 10 peers to be returned"); } -/// Tests that the correct message is sent when a peer asks for a message in our cache. 
+/// Tests that the correct message is sent when a peer asks for a message in our +/// cache. #[test] fn test_handle_iwant_msg_cached() { let (mut gs, peers, receivers, _) = inject_nodes1() @@ -1099,7 +1107,8 @@ fn test_handle_iwant_msg_cached() { ); } -/// Tests that messages are sent correctly depending on the shifting of the message cache. +/// Tests that messages are sent correctly depending on the shifting of the +/// message cache. #[test] fn test_handle_iwant_msg_cached_shifted() { let (mut gs, peers, mut receivers, _) = inject_nodes1() @@ -1173,7 +1182,8 @@ fn test_handle_iwant_msg_cached_shifted() { } } -/// tests that an event is not created when a peers asks for a message not in our cache +/// tests that an event is not created when a peer asks for a message not in +/// our cache #[test] fn test_handle_iwant_msg_not_cached() { let (mut gs, peers, _, _) = inject_nodes1() @@ -1192,7 +1202,8 @@ fn test_handle_iwant_msg_not_cached() { ); } -/// tests that an event is created when a peer shares that it has a message we want +/// tests that an event is created when a peer shares that it has a message we +/// want #[test] fn test_handle_ihave_subscribed_and_msg_not_cached() { let (mut gs, peers, mut receivers, topic_hashes) = inject_nodes1() @@ -1228,8 +1239,8 @@ fn test_handle_ihave_subscribed_and_msg_not_cached() { ); } -/// tests that an event is not created when a peer shares that it has a message that -/// we already have +/// tests that an event is not created when a peer shares that it has a message +/// that we already have #[test] fn test_handle_ihave_subscribed_and_msg_cached() { let (mut gs, peers, _, topic_hashes) = inject_nodes1() @@ -1250,8 +1261,8 @@ fn test_handle_ihave_subscribed_and_msg_cached() { ) } -/// test that an event is not created when a peer shares that it has a message in -/// a topic that we are not subscribed to +/// test that an event is not created when a peer shares that it has a message +/// in a topic that we are not
subscribed to #[test] fn test_handle_ihave_not_subscribed() { let (mut gs, peers, _, _) = inject_nodes1() @@ -1447,10 +1458,10 @@ fn test_explicit_peer_gets_connected() { .to_subscribe(true) .create_network(); - //create new peer + // create new peer let peer = PeerId::random(); - //add peer as explicit peer + // add peer as explicit peer gs.add_explicit_peer(&peer); let num_events = gs @@ -1483,17 +1494,18 @@ fn test_explicit_peer_reconnects() { let peer = others.first().unwrap(); - //add peer as explicit peer + // add peer as explicit peer gs.add_explicit_peer(peer); flush_events(&mut gs, receivers); - //disconnect peer + // disconnect peer disconnect_peer(&mut gs, peer); gs.heartbeat(); - //check that no reconnect after first heartbeat since `explicit_peer_ticks == 2` + // check that no reconnect after first heartbeat since `explicit_peer_ticks == + // 2` assert_eq!( gs.events .iter() @@ -1508,7 +1520,7 @@ fn test_explicit_peer_reconnects() { gs.heartbeat(); - //check that there is a reconnect after second heartbeat + // check that there is a reconnect after second heartbeat assert!( gs.events .iter() @@ -1536,11 +1548,11 @@ fn test_handle_graft_explicit_peer() { gs.handle_graft(peer, topic_hashes.clone()); - //peer got not added to mesh + // peer got not added to mesh assert!(gs.mesh[&topic_hashes[0]].is_empty()); assert!(gs.mesh[&topic_hashes[1]].is_empty()); - //check prunes + // check prunes let (control_msgs, _) = count_control_msgs(receivers, |peer_id, m| { peer_id == peer && match m { @@ -1566,13 +1578,13 @@ fn explicit_peers_not_added_to_mesh_on_receiving_subscription() { .explicit(1) .create_network(); - //only peer 1 is in the mesh not peer 0 (which is an explicit peer) + // only peer 1 is in the mesh not peer 0 (which is an explicit peer) assert_eq!( gs.mesh[&topic_hashes[0]], vec![peers[1]].into_iter().collect() ); - //assert that graft gets created to non-explicit peer + // assert that graft gets created to non-explicit peer let (control_msgs, 
receivers) = count_control_msgs(receivers, |peer_id, m| { peer_id == &peers[1] && matches!(m, RpcOut::Graft { .. }) }); @@ -1581,7 +1593,7 @@ fn explicit_peers_not_added_to_mesh_on_receiving_subscription() { "No graft message got created to non-explicit peer" ); - //assert that no graft gets created to explicit peer + // assert that no graft gets created to explicit peer let (control_msgs, _) = count_control_msgs(receivers, |peer_id, m| { peer_id == &peers[0] && matches!(m, RpcOut::Graft { .. }) }); @@ -1603,10 +1615,10 @@ fn do_not_graft_explicit_peer() { gs.heartbeat(); - //mesh stays empty + // mesh stays empty assert_eq!(gs.mesh[&topic_hashes[0]], BTreeSet::new()); - //assert that no graft gets created to explicit peer + // assert that no graft gets created to explicit peer let (control_msgs, _) = count_control_msgs(receivers, |peer_id, m| { peer_id == &others[0] && matches!(m, RpcOut::Graft { .. }) }); @@ -1663,7 +1675,7 @@ fn explicit_peers_not_added_to_mesh_on_subscribe() { .explicit(1) .create_network(); - //create new topic, both peers subscribing to it but we do not subscribe to it + // create new topic, both peers subscribing to it but we do not subscribe to it let topic = Topic::new(String::from("t")); let topic_hash = topic.hash(); for peer in peers.iter().take(2) { @@ -1676,13 +1688,13 @@ fn explicit_peers_not_added_to_mesh_on_subscribe() { ); } - //subscribe now to topic + // subscribe now to topic gs.subscribe(&topic).unwrap(); - //only peer 1 is in the mesh not peer 0 (which is an explicit peer) + // only peer 1 is in the mesh not peer 0 (which is an explicit peer) assert_eq!(gs.mesh[&topic_hash], vec![peers[1]].into_iter().collect()); - //assert that graft gets created to non-explicit peer + // assert that graft gets created to non-explicit peer let (control_msgs, receivers) = count_control_msgs(receivers, |peer_id, m| { peer_id == &peers[1] && matches!(m, RpcOut::Graft { .. 
}) }); @@ -1691,7 +1703,7 @@ fn explicit_peers_not_added_to_mesh_on_subscribe() { "No graft message got created to non-explicit peer" ); - //assert that no graft gets created to explicit peer + // assert that no graft gets created to explicit peer let (control_msgs, _) = count_control_msgs(receivers, |peer_id, m| { peer_id == &peers[0] && matches!(m, RpcOut::Graft { .. }) }); @@ -1711,7 +1723,7 @@ fn explicit_peers_not_added_to_mesh_from_fanout_on_subscribe() { .explicit(1) .create_network(); - //create new topic, both peers subscribing to it but we do not subscribe to it + // create new topic, both peers subscribing to it but we do not subscribe to it let topic = Topic::new(String::from("t")); let topic_hash = topic.hash(); for peer in peers.iter().take(2) { @@ -1724,16 +1736,16 @@ fn explicit_peers_not_added_to_mesh_from_fanout_on_subscribe() { ); } - //we send a message for this topic => this will initialize the fanout + // we send a message for this topic => this will initialize the fanout gs.publish(topic.clone(), vec![1, 2, 3]).unwrap(); - //subscribe now to topic + // subscribe now to topic gs.subscribe(&topic).unwrap(); - //only peer 1 is in the mesh not peer 0 (which is an explicit peer) + // only peer 1 is in the mesh not peer 0 (which is an explicit peer) assert_eq!(gs.mesh[&topic_hash], vec![peers[1]].into_iter().collect()); - //assert that graft gets created to non-explicit peer + // assert that graft gets created to non-explicit peer let (control_msgs, receivers) = count_control_msgs(receivers, |peer_id, m| { peer_id == &peers[1] && matches!(m, RpcOut::Graft { .. }) }); @@ -1742,7 +1754,7 @@ fn explicit_peers_not_added_to_mesh_from_fanout_on_subscribe() { "No graft message got created to non-explicit peer" ); - //assert that no graft gets created to explicit peer + // assert that no graft gets created to explicit peer let (control_msgs, _) = count_control_msgs(receivers, |peer_id, m| { peer_id == &peers[0] && matches!(m, RpcOut::Graft { .. 
}) }); @@ -1774,15 +1786,15 @@ fn no_gossip_gets_sent_to_explicit_peers() { validated: true, }; - //forward the message + // forward the message gs.handle_received_message(message, &local_id); - //simulate multiple gossip calls (for randomness) + // simulate multiple gossip calls (for randomness) for _ in 0..3 { gs.emit_gossip(); } - //assert that no gossip gets sent to explicit peer + // assert that no gossip gets sent to explicit peer let receiver = receivers.remove(&peers[0]).unwrap(); let mut gossips = 0; let non_priority = receiver.non_priority.get_ref(); @@ -1835,7 +1847,7 @@ fn test_mesh_subtraction() { // Adds mesh_low peers and PRUNE 2 giving us a deficit. let n = config.mesh_n_high() + 10; - //make all outbound connections so that we allow grafting to all + // make all outbound connections so that we allow grafting to all let (mut gs, peers, _receivers, topics) = inject_nodes1() .peer_no(n) .topics(vec!["test".into()]) @@ -1866,10 +1878,10 @@ fn test_connect_to_px_peers_on_handle_prune() { .to_subscribe(true) .create_network(); - //handle prune from single peer with px peers + // handle prune from single peer with px peers let mut px = Vec::new(); - //propose more px peers than config.prune_peers() + // propose more px peers than config.prune_peers() for _ in 0..config.prune_peers() + 5 { px.push(PeerInfo { peer_id: Some(PeerId::random()), @@ -1885,7 +1897,7 @@ fn test_connect_to_px_peers_on_handle_prune() { )], ); - //Check DialPeer events for px peers + // Check DialPeer events for px peers let dials: Vec<_> = gs .events .iter() @@ -1903,7 +1915,7 @@ fn test_connect_to_px_peers_on_handle_prune() { // No duplicates assert_eq!(dials_set.len(), config.prune_peers()); - //all dial peers must be in px + // all dial peers must be in px assert!(dials_set.is_subset( &px.iter() .map(|i| *i.peer_id.as_ref().unwrap()) @@ -1915,14 +1927,14 @@ fn test_connect_to_px_peers_on_handle_prune() { fn test_send_px_and_backoff_in_prune() { let config: Config = 
Config::default(); - //build mesh with enough peers for px + // build mesh with enough peers for px let (mut gs, peers, receivers, topics) = inject_nodes1() .peer_no(config.prune_peers() + 1) .topics(vec!["test".into()]) .to_subscribe(true) .create_network(); - //send prune to peer + // send prune to peer gs.send_graft_prune( HashMap::new(), vec![(peers[0], vec![topics[0].clone()])] @@ -1931,7 +1943,7 @@ fn test_send_px_and_backoff_in_prune() { HashSet::new(), ); - //check prune message + // check prune message let (control_msgs, _) = count_control_msgs(receivers, |peer_id, m| { peer_id == &peers[0] && match m { @@ -1957,14 +1969,15 @@ fn test_send_px_and_backoff_in_prune() { fn test_prune_backoffed_peer_on_graft() { let config: Config = Config::default(); - //build mesh with enough peers for px + // build mesh with enough peers for px let (mut gs, peers, receivers, topics) = inject_nodes1() .peer_no(config.prune_peers() + 1) .topics(vec!["test".into()]) .to_subscribe(true) .create_network(); - //remove peer from mesh and send prune to peer => this adds a backoff for this peer + // remove peer from mesh and send prune to peer => this adds a backoff for this + // peer gs.mesh.get_mut(&topics[0]).unwrap().remove(&peers[0]); gs.send_graft_prune( HashMap::new(), @@ -1974,13 +1987,13 @@ fn test_prune_backoffed_peer_on_graft() { HashSet::new(), ); - //ignore all messages until now + // ignore all messages until now let receivers = flush_events(&mut gs, receivers); - //handle graft + // handle graft gs.handle_graft(&peers[0], vec![topics[0].clone()]); - //check prune message + // check prune message let (control_msgs, _) = count_control_msgs(receivers, |peer_id, m| { peer_id == &peers[0] && match m { @@ -2007,7 +2020,7 @@ fn test_do_not_graft_within_backoff_period() { .heartbeat_interval(Duration::from_millis(100)) .build() .unwrap(); - //only one peer => mesh too small and will try to regraft as early as possible + // only one peer => mesh too small and will try to 
regraft as early as possible let (mut gs, peers, receivers, topics) = inject_nodes1() .peer_no(1) .topics(vec!["test".into()]) @@ -2015,23 +2028,23 @@ fn test_do_not_graft_within_backoff_period() { .gs_config(config) .create_network(); - //handle prune from peer with backoff of one second + // handle prune from peer with backoff of one second gs.handle_prune(&peers[0], vec![(topics[0].clone(), Vec::new(), Some(1))]); - //forget all events until now + // forget all events until now let receivers = flush_events(&mut gs, receivers); - //call heartbeat + // call heartbeat gs.heartbeat(); - //Sleep for one second and apply 10 regular heartbeats (interval = 100ms). + // Sleep for one second and apply 10 regular heartbeats (interval = 100ms). for _ in 0..10 { sleep(Duration::from_millis(100)); gs.heartbeat(); } - //Check that no graft got created (we have backoff_slack = 1 therefore one more heartbeat - // is needed). + // Check that no graft got created (we have backoff_slack = 1 therefore one more + // heartbeat is needed). let (control_msgs, receivers) = count_control_msgs(receivers, |_, m| matches!(m, RpcOut::Graft { .. })); assert_eq!( @@ -2039,11 +2052,11 @@ fn test_do_not_graft_within_backoff_period() { "Graft message created too early within backoff period" ); - //Heartbeat one more time this should graft now + // Heartbeat one more time this should graft now sleep(Duration::from_millis(100)); gs.heartbeat(); - //check that graft got created + // check that graft got created let (control_msgs, _) = count_control_msgs(receivers, |_, m| matches!(m, RpcOut::Graft { .. 
})); assert!( control_msgs > 0, @@ -2053,14 +2066,14 @@ fn test_do_not_graft_within_backoff_period() { #[test] fn test_do_not_graft_within_default_backoff_period_after_receiving_prune_without_backoff() { - //set default backoff period to 1 second + // set default backoff period to 1 second let config = ConfigBuilder::default() .prune_backoff(Duration::from_millis(90)) .backoff_slack(1) .heartbeat_interval(Duration::from_millis(100)) .build() .unwrap(); - //only one peer => mesh too small and will try to regraft as early as possible + // only one peer => mesh too small and will try to regraft as early as possible let (mut gs, peers, receivers, topics) = inject_nodes1() .peer_no(1) .topics(vec!["test".into()]) @@ -2068,21 +2081,21 @@ fn test_do_not_graft_within_default_backoff_period_after_receiving_prune_without .gs_config(config) .create_network(); - //handle prune from peer without a specified backoff + // handle prune from peer without a specified backoff gs.handle_prune(&peers[0], vec![(topics[0].clone(), Vec::new(), None)]); - //forget all events until now + // forget all events until now let receivers = flush_events(&mut gs, receivers); - //call heartbeat + // call heartbeat gs.heartbeat(); - //Apply one more heartbeat + // Apply one more heartbeat sleep(Duration::from_millis(100)); gs.heartbeat(); - //Check that no graft got created (we have backoff_slack = 1 therefore one more heartbeat - // is needed). + // Check that no graft got created (we have backoff_slack = 1 therefore one more + // heartbeat is needed). let (control_msgs, receivers) = count_control_msgs(receivers, |_, m| matches!(m, RpcOut::Graft { .. 
})); assert_eq!( @@ -2090,11 +2103,11 @@ fn test_do_not_graft_within_default_backoff_period_after_receiving_prune_without "Graft message created too early within backoff period" ); - //Heartbeat one more time this should graft now + // Heartbeat one more time this should graft now sleep(Duration::from_millis(100)); gs.heartbeat(); - //check that graft got created + // check that graft got created let (control_msgs, _) = count_control_msgs(receivers, |_, m| matches!(m, RpcOut::Graft { .. })); assert!( control_msgs > 0, @@ -2148,8 +2161,8 @@ fn test_unsubscribe_backoff() { gs.heartbeat(); } - // Check that no graft got created (we have backoff_slack = 1 therefore one more heartbeat - // is needed). + // Check that no graft got created (we have backoff_slack = 1 therefore one more + // heartbeat is needed). let (control_msgs, receivers) = count_control_msgs(receivers, |_, m| matches!(m, RpcOut::Graft { .. })); assert_eq!( @@ -2181,7 +2194,7 @@ fn test_flood_publish() { .to_subscribe(true) .create_network(); - //publish message + // publish message let publish_data = vec![0; 42]; gs.publish(Topic::new(topic), publish_data).unwrap(); @@ -2228,15 +2241,15 @@ fn test_flood_publish() { fn test_gossip_to_at_least_gossip_lazy_peers() { let config: Config = Config::default(); - //add more peers than in mesh to test gossipping - //by default only mesh_n_low peers will get added to mesh + // add more peers than in mesh to test gossipping + // by default only mesh_n_low peers will get added to mesh let (mut gs, _, receivers, topic_hashes) = inject_nodes1() .peer_no(config.mesh_n_low() + config.gossip_lazy() + 1) .topics(vec!["topic".into()]) .to_subscribe(true) .create_network(); - //receive message + // receive message let raw_message = RawMessage { source: Some(PeerId::random()), data: vec![], @@ -2248,7 +2261,7 @@ fn test_gossip_to_at_least_gossip_lazy_peers() { }; gs.handle_received_message(raw_message.clone(), &PeerId::random()); - //emit gossip + // emit gossip 
gs.emit_gossip(); // Transform the inbound message @@ -2256,7 +2269,7 @@ fn test_gossip_to_at_least_gossip_lazy_peers() { let msg_id = gs.config.message_id(message); - //check that exactly config.gossip_lazy() many gossip messages were sent. + // check that exactly config.gossip_lazy() many gossip messages were sent. let (control_msgs, _) = count_control_msgs(receivers, |_, action| match action { RpcOut::IHave(IHave { topic_hash, @@ -2271,7 +2284,7 @@ fn test_gossip_to_at_least_gossip_lazy_peers() { fn test_gossip_to_at_most_gossip_factor_peers() { let config: Config = Config::default(); - //add a lot of peers + // add a lot of peers let m = config.mesh_n_low() + config.gossip_lazy() * (2.0 / config.gossip_factor()) as usize; let (mut gs, _, receivers, topic_hashes) = inject_nodes1() .peer_no(m) @@ -2279,7 +2292,7 @@ fn test_gossip_to_at_most_gossip_factor_peers() { .to_subscribe(true) .create_network(); - //receive message + // receive message let raw_message = RawMessage { source: Some(PeerId::random()), data: vec![], @@ -2291,14 +2304,14 @@ fn test_gossip_to_at_most_gossip_factor_peers() { }; gs.handle_received_message(raw_message.clone(), &PeerId::random()); - //emit gossip + // emit gossip gs.emit_gossip(); // Transform the inbound message let message = &gs.data_transform.inbound_transform(raw_message).unwrap(); let msg_id = gs.config.message_id(message); - //check that exactly config.gossip_lazy() many gossip messages were sent. + // check that exactly config.gossip_lazy() many gossip messages were sent. 
let (control_msgs, _) = count_control_msgs(receivers, |_, action| match action { RpcOut::IHave(IHave { topic_hash, @@ -2316,7 +2329,7 @@ fn test_gossip_to_at_most_gossip_factor_peers() { fn test_accept_only_outbound_peer_grafts_when_mesh_full() { let config: Config = Config::default(); - //enough peers to fill the mesh + // enough peers to fill the mesh let (mut gs, peers, _, topics) = inject_nodes1() .peer_no(config.mesh_n_high()) .topics(vec!["test".into()]) @@ -2328,30 +2341,30 @@ fn test_accept_only_outbound_peer_grafts_when_mesh_full() { gs.handle_graft(&peer, topics.clone()); } - //assert current mesh size + // assert current mesh size assert_eq!(gs.mesh[&topics[0]].len(), config.mesh_n_high()); - //create an outbound and an inbound peer + // create an outbound and an inbound peer let (inbound, _in_reciver) = add_peer(&mut gs, &topics, false, false); let (outbound, _out_receiver) = add_peer(&mut gs, &topics, true, false); - //send grafts + // send grafts gs.handle_graft(&inbound, vec![topics[0].clone()]); gs.handle_graft(&outbound, vec![topics[0].clone()]); - //assert mesh size + // assert mesh size assert_eq!(gs.mesh[&topics[0]].len(), config.mesh_n_high() + 1); - //inbound is not in mesh + // inbound is not in mesh assert!(!gs.mesh[&topics[0]].contains(&inbound)); - //outbound is in mesh + // outbound is in mesh assert!(gs.mesh[&topics[0]].contains(&outbound)); } #[test] fn test_do_not_remove_too_many_outbound_peers() { - //use an extreme case to catch errors with high probability + // use an extreme case to catch errors with high probability let m = 50; let n = 2 * m; let config = ConfigBuilder::default() @@ -2362,7 +2375,7 @@ fn test_do_not_remove_too_many_outbound_peers() { .build() .unwrap(); - //fill the mesh with inbound connections + // fill the mesh with inbound connections let (mut gs, peers, _receivers, topics) = inject_nodes1() .peer_no(n) .topics(vec!["test".into()]) @@ -2375,7 +2388,7 @@ fn test_do_not_remove_too_many_outbound_peers() { 
gs.handle_graft(&peer, topics.clone()); } - //create m outbound connections and graft (we will accept the graft) + // create m outbound connections and graft (we will accept the graft) let mut outbound = HashSet::new(); for _ in 0..m { let (peer, _) = add_peer(&mut gs, &topics, true, false); @@ -2383,7 +2396,7 @@ fn test_do_not_remove_too_many_outbound_peers() { gs.handle_graft(&peer, topics.clone()); } - //mesh is overly full + // mesh is overly full assert_eq!(gs.mesh.get(&topics[0]).unwrap().len(), n + m); // run a heartbeat @@ -2392,7 +2405,7 @@ fn test_do_not_remove_too_many_outbound_peers() { // Peers should be removed to reach n assert_eq!(gs.mesh.get(&topics[0]).unwrap().len(), n); - //all outbound peers are still in the mesh + // all outbound peers are still in the mesh assert!(outbound.iter().all(|p| gs.mesh[&topics[0]].contains(p))); } @@ -2412,7 +2425,7 @@ fn test_add_outbound_peers_if_min_is_not_satisfied() { gs.handle_graft(&peer, topics.clone()); } - //create config.mesh_outbound_min() many outbound connections without grafting + // create config.mesh_outbound_min() many outbound connections without grafting let mut peers = vec![]; for _ in 0..config.mesh_outbound_min() { peers.push(add_peer(&mut gs, &topics, true, false)); @@ -2435,7 +2448,7 @@ fn test_add_outbound_peers_if_min_is_not_satisfied() { fn test_prune_negative_scored_peers() { let config = Config::default(); - //build mesh with one peer + // build mesh with one peer let (mut gs, peers, receivers, topics) = inject_nodes1() .peer_no(1) .topics(vec!["test".into()]) @@ -2449,16 +2462,16 @@ fn test_prune_negative_scored_peers() { ))) .create_network(); - //add penalty to peer + // add penalty to peer gs.peer_score.as_mut().unwrap().0.add_penalty(&peers[0], 1); - //execute heartbeat + // execute heartbeat gs.heartbeat(); - //peer should not be in mesh anymore + // peer should not be in mesh anymore assert!(gs.mesh[&topics[0]].is_empty()); - //check prune message + // check prune message let 
(control_msgs, _) = count_control_msgs(receivers, |peer_id, m| { peer_id == &peers[0] && match m { @@ -2481,7 +2494,7 @@ fn test_prune_negative_scored_peers() { #[test] fn test_dont_graft_to_negative_scored_peers() { let config = Config::default(); - //init full mesh + // init full mesh let (mut gs, peers, _, topics) = inject_nodes1() .peer_no(config.mesh_n_high()) .topics(vec!["test".into()]) @@ -2493,34 +2506,35 @@ fn test_dont_graft_to_negative_scored_peers() { ))) .create_network(); - //add two additional peers that will not be part of the mesh + // add two additional peers that will not be part of the mesh let (p1, _receiver1) = add_peer(&mut gs, &topics, false, false); let (p2, _receiver2) = add_peer(&mut gs, &topics, false, false); - //reduce score of p1 to negative + // reduce score of p1 to negative gs.peer_score.as_mut().unwrap().0.add_penalty(&p1, 1); - //handle prunes of all other peers + // handle prunes of all other peers for p in peers { gs.handle_prune(&p, vec![(topics[0].clone(), Vec::new(), None)]); } - //heartbeat + // heartbeat gs.heartbeat(); - //assert that mesh only contains p2 + // assert that mesh only contains p2 assert_eq!(gs.mesh.get(&topics[0]).unwrap().len(), 1); assert!(gs.mesh.get(&topics[0]).unwrap().contains(&p2)); } -///Note that in this test also without a penalty the px would be ignored because of the -/// acceptPXThreshold, but the spec still explicitly states the rule that px from negative -/// peers should get ignored, therefore we test it here. +/// Note that in this test also without a penalty the px would be ignored +/// because of the acceptPXThreshold, but the spec still explicitly states the +/// rule that px from negative peers should get ignored, therefore we test it +/// here. 
#[test] fn test_ignore_px_from_negative_scored_peer() { let config = Config::default(); - //build mesh with one peer + // build mesh with one peer let (mut gs, peers, _, topics) = inject_nodes1() .peer_no(1) .topics(vec!["test".into()]) @@ -2532,10 +2546,10 @@ fn test_ignore_px_from_negative_scored_peer() { ))) .create_network(); - //penalize peer + // penalize peer gs.peer_score.as_mut().unwrap().0.add_penalty(&peers[0], 1); - //handle prune from single peer with px peers + // handle prune from single peer with px peers let px = vec![PeerInfo { peer_id: Some(PeerId::random()), }]; @@ -2549,7 +2563,7 @@ fn test_ignore_px_from_negative_scored_peer() { )], ); - //assert no dials + // assert no dials assert_eq!( gs.events .iter() @@ -2646,7 +2660,8 @@ fn test_do_not_gossip_to_peers_below_gossip_threshold() { // 4 * peer_score_params.behaviour_penalty_weight. gs.peer_score.as_mut().unwrap().0.add_penalty(&p1, 2); - // Reduce score of p2 below 0 but not below peer_score_thresholds.gossip_threshold + // Reduce score of p2 below 0 but not below + // peer_score_thresholds.gossip_threshold gs.peer_score.as_mut().unwrap().0.add_penalty(&p2, 1); // Receive message @@ -2723,7 +2738,8 @@ fn test_iwant_msg_from_peer_below_gossip_threshold_gets_ignored() { // 4 * peer_score_params.behaviour_penalty_weight. 
gs.peer_score.as_mut().unwrap().0.add_penalty(&p1, 2); - // Reduce score of p2 below 0 but not below peer_score_thresholds.gossip_threshold + // Reduce score of p2 below 0 but not below + // peer_score_thresholds.gossip_threshold gs.peer_score.as_mut().unwrap().0.add_penalty(&p2, 1); // Receive message @@ -2760,7 +2776,7 @@ fn test_iwant_msg_from_peer_below_gossip_threshold_gets_ignored() { collected_messages }); - //the message got sent to p2 + // the message got sent to p2 assert!(sent_messages .iter() .map(|(peer_id, msg)| ( @@ -2768,7 +2784,7 @@ fn test_iwant_msg_from_peer_below_gossip_threshold_gets_ignored() { gs.data_transform.inbound_transform(msg.clone()).unwrap() )) .any(|(peer_id, msg)| peer_id == &p2 && gs.config.message_id(&msg) == msg_id)); - //the message got not sent to p1 + // the message got not sent to p1 assert!(sent_messages .iter() .map(|(peer_id, msg)| ( @@ -2786,7 +2802,7 @@ fn test_ihave_msg_from_peer_below_gossip_threshold_gets_ignored() { gossip_threshold: 3.0 * peer_score_params.behaviour_penalty_weight, ..PeerScoreThresholds::default() }; - //build full mesh + // build full mesh let (mut gs, peers, mut receivers, topics) = inject_nodes1() .peer_no(config.mesh_n_high()) .topics(vec!["test".into()]) @@ -2802,21 +2818,22 @@ fn test_ihave_msg_from_peer_below_gossip_threshold_gets_ignored() { gs.handle_graft(&peer, topics.clone()); } - //add two additional peers that will not be part of the mesh + // add two additional peers that will not be part of the mesh let (p1, receiver1) = add_peer(&mut gs, &topics, false, false); receivers.insert(p1, receiver1); let (p2, receiver2) = add_peer(&mut gs, &topics, false, false); receivers.insert(p2, receiver2); - //reduce score of p1 below peer_score_thresholds.gossip_threshold - //note that penalties get squared so two penalties means a score of + // reduce score of p1 below peer_score_thresholds.gossip_threshold + // note that penalties get squared so two penalties means a score of // 4 * 
peer_score_params.behaviour_penalty_weight. gs.peer_score.as_mut().unwrap().0.add_penalty(&p1, 2); - //reduce score of p2 below 0 but not below peer_score_thresholds.gossip_threshold + // reduce score of p2 below 0 but not below + // peer_score_thresholds.gossip_threshold gs.peer_score.as_mut().unwrap().0.add_penalty(&p2, 1); - //message that other peers have + // message that other peers have let raw_message = RawMessage { source: Some(PeerId::random()), data: vec![], @@ -2863,31 +2880,32 @@ fn test_do_not_publish_to_peer_below_publish_threshold() { ..PeerScoreThresholds::default() }; - //build mesh with no peers and no subscribed topics + // build mesh with no peers and no subscribed topics let (mut gs, _, mut receivers, _) = inject_nodes1() .gs_config(config) .scoring(Some((peer_score_params, peer_score_thresholds))) .create_network(); - //create a new topic for which we are not subscribed + // create a new topic for which we are not subscribed let topic = Topic::new("test"); let topics = vec![topic.hash()]; - //add two additional peers that will be added to the mesh + // add two additional peers that will be added to the mesh let (p1, receiver1) = add_peer(&mut gs, &topics, false, false); receivers.insert(p1, receiver1); let (p2, receiver2) = add_peer(&mut gs, &topics, false, false); receivers.insert(p2, receiver2); - //reduce score of p1 below peer_score_thresholds.publish_threshold - //note that penalties get squared so two penalties means a score of + // reduce score of p1 below peer_score_thresholds.publish_threshold + // note that penalties get squared so two penalties means a score of // 4 * peer_score_params.behaviour_penalty_weight. 
gs.peer_score.as_mut().unwrap().0.add_penalty(&p1, 2); - //reduce score of p2 below 0 but not below peer_score_thresholds.publish_threshold + // reduce score of p2 below 0 but not below + // peer_score_thresholds.publish_threshold gs.peer_score.as_mut().unwrap().0.add_penalty(&p2, 1); - //a heartbeat will remove the peers from the mesh + // a heartbeat will remove the peers from the mesh gs.heartbeat(); // publish on topic @@ -2907,7 +2925,7 @@ fn test_do_not_publish_to_peer_below_publish_threshold() { collected_publish }); - //assert only published to p2 + // assert only published to p2 assert_eq!(publishes.len(), 1); assert_eq!(publishes[0].0, p2); } @@ -2921,28 +2939,29 @@ fn test_do_not_flood_publish_to_peer_below_publish_threshold() { publish_threshold: 3.0 * peer_score_params.behaviour_penalty_weight, ..PeerScoreThresholds::default() }; - //build mesh with no peers + // build mesh with no peers let (mut gs, _, mut receivers, topics) = inject_nodes1() .topics(vec!["test".into()]) .gs_config(config) .scoring(Some((peer_score_params, peer_score_thresholds))) .create_network(); - //add two additional peers that will be added to the mesh + // add two additional peers that will be added to the mesh let (p1, receiver1) = add_peer(&mut gs, &topics, false, false); receivers.insert(p1, receiver1); let (p2, receiver2) = add_peer(&mut gs, &topics, false, false); receivers.insert(p2, receiver2); - //reduce score of p1 below peer_score_thresholds.publish_threshold - //note that penalties get squared so two penalties means a score of + // reduce score of p1 below peer_score_thresholds.publish_threshold + // note that penalties get squared so two penalties means a score of // 4 * peer_score_params.behaviour_penalty_weight. 
gs.peer_score.as_mut().unwrap().0.add_penalty(&p1, 2); - //reduce score of p2 below 0 but not below peer_score_thresholds.publish_threshold + // reduce score of p2 below 0 but not below + // peer_score_thresholds.publish_threshold gs.peer_score.as_mut().unwrap().0.add_penalty(&p2, 1); - //a heartbeat will remove the peers from the mesh + // a heartbeat will remove the peers from the mesh gs.heartbeat(); // publish on topic @@ -2962,7 +2981,7 @@ fn test_do_not_flood_publish_to_peer_below_publish_threshold() { collected_publish }); - //assert only published to p2 + // assert only published to p2 assert_eq!(publishes.len(), 1); assert!(publishes[0].0 == p2); } @@ -2978,23 +2997,23 @@ fn test_ignore_rpc_from_peers_below_graylist_threshold() { ..PeerScoreThresholds::default() }; - //build mesh with no peers + // build mesh with no peers let (mut gs, _, _, topics) = inject_nodes1() .topics(vec!["test".into()]) .gs_config(config.clone()) .scoring(Some((peer_score_params, peer_score_thresholds))) .create_network(); - //add two additional peers that will be added to the mesh + // add two additional peers that will be added to the mesh let (p1, _receiver1) = add_peer(&mut gs, &topics, false, false); let (p2, _receiver2) = add_peer(&mut gs, &topics, false, false); - //reduce score of p1 below peer_score_thresholds.graylist_threshold - //note that penalties get squared so two penalties means a score of + // reduce score of p1 below peer_score_thresholds.graylist_threshold + // note that penalties get squared so two penalties means a score of // 4 * peer_score_params.behaviour_penalty_weight. 
gs.peer_score.as_mut().unwrap().0.add_penalty(&p1, 2); - //reduce score of p2 below publish_threshold but not below graylist_threshold + // reduce score of p2 below publish_threshold but not below graylist_threshold gs.peer_score.as_mut().unwrap().0.add_penalty(&p2, 1); let raw_message1 = RawMessage { @@ -3053,10 +3072,10 @@ fn test_ignore_rpc_from_peers_below_graylist_threshold() { message_ids: vec![config.message_id(message2)], }); - //clear events + // clear events gs.events.clear(); - //receive from p1 + // receive from p1 gs.on_connection_handler_event( p1, ConnectionId::new_unchecked(0), @@ -3070,7 +3089,7 @@ fn test_ignore_rpc_from_peers_below_graylist_threshold() { }, ); - //only the subscription event gets processed, the rest is dropped + // only the subscription event gets processed, the rest is dropped assert_eq!(gs.events.len(), 1); assert!(matches!( gs.events[0], @@ -3082,7 +3101,7 @@ fn test_ignore_rpc_from_peers_below_graylist_threshold() { message_ids: vec![config.message_id(message4)], }); - //receive from p2 + // receive from p2 gs.on_connection_handler_event( p2, ConnectionId::new_unchecked(0), @@ -3096,7 +3115,7 @@ fn test_ignore_rpc_from_peers_below_graylist_threshold() { }, ); - //events got processed + // events got processed assert!(gs.events.len() > 1); } @@ -3145,7 +3164,7 @@ fn test_ignore_px_from_peers_below_accept_px_threshold() { 0 ); - //handle prune from peer peers[1] with px peers + // handle prune from peer peers[1] with px peers let px = vec![PeerInfo { peer_id: Some(PeerId::random()), }]; @@ -3158,7 +3177,7 @@ fn test_ignore_px_from_peers_below_accept_px_threshold() { )], ); - //assert there are dials now + // assert there are dials now assert!( gs.events .iter() @@ -3178,7 +3197,7 @@ fn test_keep_best_scoring_peers_on_oversubscription() { .build() .unwrap(); - //build mesh with more peers than mesh can hold + // build mesh with more peers than mesh can hold let n = config.mesh_n_high() + 1; let (mut gs, peers, _receivers, 
topics) = inject_nodes1() .peer_no(n) @@ -3198,21 +3217,21 @@ fn test_keep_best_scoring_peers_on_oversubscription() { gs.handle_graft(peer, topics.clone()); } - //assign scores to peers equalling their index + // assign scores to peers equalling their index - //set random positive scores + // set random positive scores for (index, peer) in peers.iter().enumerate() { gs.set_application_score(peer, index as f64); } assert_eq!(gs.mesh[&topics[0]].len(), n); - //heartbeat to prune some peers + // heartbeat to prune some peers gs.heartbeat(); assert_eq!(gs.mesh[&topics[0]].len(), config.mesh_n()); - //mesh contains retain_scores best peers + // mesh contains retain_scores best peers assert!(gs.mesh[&topics[0]].is_superset( &peers[(n - config.retain_scores())..] .iter() @@ -3239,7 +3258,7 @@ fn test_scoring_p1() { .insert(topic_hash, topic_params.clone()); let peer_score_thresholds = PeerScoreThresholds::default(); - //build mesh with one peer + // build mesh with one peer let (mut gs, peers, _, _) = inject_nodes1() .peer_no(1) .topics(vec!["test".into()]) @@ -3250,9 +3269,9 @@ fn test_scoring_p1() { .scoring(Some((peer_score_params, peer_score_thresholds))) .create_network(); - //sleep for 2 times the mesh_quantum + // sleep for 2 times the mesh_quantum sleep(topic_params.time_in_mesh_quantum * 2); - //refresh scores + // refresh scores gs.peer_score.as_mut().unwrap().0.refresh_scores(); assert!( gs.peer_score.as_ref().unwrap().0.score(&peers[0]) @@ -3265,9 +3284,9 @@ fn test_scoring_p1() { "score should be less than 3 * time_in_mesh_weight * topic_weight" ); - //sleep again for 2 times the mesh_quantum + // sleep again for 2 times the mesh_quantum sleep(topic_params.time_in_mesh_quantum * 2); - //refresh scores + // refresh scores gs.peer_score.as_mut().unwrap().0.refresh_scores(); assert!( gs.peer_score.as_ref().unwrap().0.score(&peers[0]) @@ -3275,9 +3294,9 @@ fn test_scoring_p1() { "score should be at least 4 * time_in_mesh_weight * topic_weight" ); - //sleep for 
enough periods to reach maximum + // sleep for enough periods to reach maximum sleep(topic_params.time_in_mesh_quantum * (topic_params.time_in_mesh_cap - 3.0) as u32); - //refresh scores + // refresh scores gs.peer_score.as_mut().unwrap().0.refresh_scores(); assert_eq!( gs.peer_score.as_ref().unwrap().0.score(&peers[0]), @@ -3309,7 +3328,7 @@ fn test_scoring_p2() { let topic = Topic::new("test"); let topic_hash = topic.hash(); let topic_params = TopicScoreParams { - time_in_mesh_weight: 0.0, //deactivate time in mesh + time_in_mesh_weight: 0.0, // deactivate time in mesh first_message_deliveries_weight: 2.0, first_message_deliveries_cap: 10.0, first_message_deliveries_decay: 0.9, @@ -3321,7 +3340,7 @@ fn test_scoring_p2() { .insert(topic_hash, topic_params.clone()); let peer_score_thresholds = PeerScoreThresholds::default(); - //build mesh with one peer + // build mesh with one peer let (mut gs, peers, _, topics) = inject_nodes1() .peer_no(2) .topics(vec!["test".into()]) @@ -3338,9 +3357,9 @@ fn test_scoring_p2() { }; let m1 = random_message(&mut seq, &topics); - //peer 0 delivers message first + // peer 0 delivers message first deliver_message(&mut gs, 0, m1.clone()); - //peer 1 delivers message second + // peer 1 delivers message second deliver_message(&mut gs, 1, m1); assert_eq!( @@ -3355,7 +3374,7 @@ fn test_scoring_p2() { "there should be no score for second message deliveries * topic_weight" ); - //peer 2 delivers two new messages + // peer 2 delivers two new messages deliver_message(&mut gs, 1, random_message(&mut seq, &topics)); deliver_message(&mut gs, 1, random_message(&mut seq, &topics)); assert_eq!( @@ -3364,7 +3383,7 @@ fn test_scoring_p2() { "score should be exactly 2 * first_message_deliveries_weight * topic_weight" ); - //test decaying + // test decaying gs.peer_score.as_mut().unwrap().0.refresh_scores(); assert_eq!( @@ -3372,8 +3391,8 @@ fn test_scoring_p2() { 1.0 * topic_params.first_message_deliveries_decay * 
topic_params.first_message_deliveries_weight * topic_params.topic_weight, - "score should be exactly first_message_deliveries_decay * \ - first_message_deliveries_weight * topic_weight" + "score should be exactly first_message_deliveries_decay * first_message_deliveries_weight \ + * topic_weight" ); assert_eq!( @@ -3382,10 +3401,10 @@ fn test_scoring_p2() { * topic_params.first_message_deliveries_weight * topic_params.topic_weight, "score should be exactly 2 * first_message_deliveries_decay * \ - first_message_deliveries_weight * topic_weight" + first_message_deliveries_weight * topic_weight" ); - //test cap + // test cap for _ in 0..topic_params.first_message_deliveries_cap as u64 { deliver_message(&mut gs, 1, random_message(&mut seq, &topics)); } @@ -3395,8 +3414,8 @@ fn test_scoring_p2() { topic_params.first_message_deliveries_cap * topic_params.first_message_deliveries_weight * topic_params.topic_weight, - "score should be exactly first_message_deliveries_cap * \ - first_message_deliveries_weight * topic_weight" + "score should be exactly first_message_deliveries_cap * first_message_deliveries_weight * \ + topic_weight" ); } @@ -3407,8 +3426,8 @@ fn test_scoring_p3() { let topic = Topic::new("test"); let topic_hash = topic.hash(); let topic_params = TopicScoreParams { - time_in_mesh_weight: 0.0, //deactivate time in mesh - first_message_deliveries_weight: 0.0, //deactivate first time deliveries + time_in_mesh_weight: 0.0, // deactivate time in mesh + first_message_deliveries_weight: 0.0, // deactivate first time deliveries mesh_message_deliveries_weight: -2.0, mesh_message_deliveries_decay: 0.9, mesh_message_deliveries_cap: 10.0, @@ -3421,7 +3440,7 @@ fn test_scoring_p3() { peer_score_params.topics.insert(topic_hash, topic_params); let peer_score_thresholds = PeerScoreThresholds::default(); - //build mesh with two peers + // build mesh with two peers let (mut gs, peers, _, topics) = inject_nodes1() .peer_no(2) .topics(vec!["test".into()]) @@ -3439,42 +3458,43 
@@ fn test_scoring_p3() { let mut expected_message_deliveries = 0.0; - //messages used to test window + // messages used to test window let m1 = random_message(&mut seq, &topics); let m2 = random_message(&mut seq, &topics); - //peer 1 delivers m1 + // peer 1 delivers m1 deliver_message(&mut gs, 1, m1.clone()); - //peer 0 delivers two message + // peer 0 delivers two message deliver_message(&mut gs, 0, random_message(&mut seq, &topics)); deliver_message(&mut gs, 0, random_message(&mut seq, &topics)); expected_message_deliveries += 2.0; sleep(Duration::from_millis(60)); - //peer 1 delivers m2 + // peer 1 delivers m2 deliver_message(&mut gs, 1, m2.clone()); sleep(Duration::from_millis(70)); - //peer 0 delivers m1 and m2 only m2 gets counted + // peer 0 delivers m1 and m2 only m2 gets counted deliver_message(&mut gs, 0, m1); deliver_message(&mut gs, 0, m2); expected_message_deliveries += 1.0; sleep(Duration::from_millis(900)); - //message deliveries penalties get activated, peer 0 has only delivered 3 messages and - // therefore gets a penalty + // message deliveries penalties get activated, peer 0 has only delivered 3 + // messages and therefore gets a penalty gs.peer_score.as_mut().unwrap().0.refresh_scores(); - expected_message_deliveries *= 0.9; //decay + expected_message_deliveries *= 0.9; // decay assert_eq!( gs.peer_score.as_ref().unwrap().0.score(&peers[0]), (5f64 - expected_message_deliveries).powi(2) * -2.0 * 0.7 ); - // peer 0 delivers a lot of messages => message_deliveries should be capped at 10 + // peer 0 delivers a lot of messages => message_deliveries should be capped at + // 10 for _ in 0..20 { deliver_message(&mut gs, 0, random_message(&mut seq, &topics)); } @@ -3483,10 +3503,10 @@ fn test_scoring_p3() { assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); - //apply 10 decays + // apply 10 decays for _ in 0..10 { gs.peer_score.as_mut().unwrap().0.refresh_scores(); - expected_message_deliveries *= 0.9; //decay + 
expected_message_deliveries *= 0.9; // decay } assert_eq!( @@ -3505,8 +3525,8 @@ fn test_scoring_p3b() { let topic = Topic::new("test"); let topic_hash = topic.hash(); let topic_params = TopicScoreParams { - time_in_mesh_weight: 0.0, //deactivate time in mesh - first_message_deliveries_weight: 0.0, //deactivate first time deliveries + time_in_mesh_weight: 0.0, // deactivate time in mesh + first_message_deliveries_weight: 0.0, // deactivate first time deliveries mesh_message_deliveries_weight: -2.0, mesh_message_deliveries_decay: 0.9, mesh_message_deliveries_cap: 10.0, @@ -3522,7 +3542,7 @@ fn test_scoring_p3b() { peer_score_params.app_specific_weight = 1.0; let peer_score_thresholds = PeerScoreThresholds::default(); - //build mesh with one peer + // build mesh with one peer let (mut gs, peers, _, topics) = inject_nodes1() .peer_no(1) .topics(vec!["test".into()]) @@ -3540,49 +3560,49 @@ fn test_scoring_p3b() { let mut expected_message_deliveries = 0.0; - //add some positive score + // add some positive score gs.peer_score .as_mut() .unwrap() .0 .set_application_score(&peers[0], 100.0); - //peer 0 delivers two message + // peer 0 delivers two message deliver_message(&mut gs, 0, random_message(&mut seq, &topics)); deliver_message(&mut gs, 0, random_message(&mut seq, &topics)); expected_message_deliveries += 2.0; sleep(Duration::from_millis(1050)); - //activation kicks in + // activation kicks in gs.peer_score.as_mut().unwrap().0.refresh_scores(); - expected_message_deliveries *= 0.9; //decay + expected_message_deliveries *= 0.9; // decay - //prune peer + // prune peer gs.handle_prune(&peers[0], vec![(topics[0].clone(), vec![], None)]); - //wait backoff + // wait backoff sleep(Duration::from_millis(130)); - //regraft peer + // regraft peer gs.handle_graft(&peers[0], topics.clone()); - //the score should now consider p3b + // the score should now consider p3b let mut expected_b3 = (5f64 - expected_message_deliveries).powi(2); assert_eq!( 
gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 100.0 + expected_b3 * -3.0 * 0.7 ); - //we can also add a new p3 to the score + // we can also add a new p3 to the score - //peer 0 delivers one message + // peer 0 delivers one message deliver_message(&mut gs, 0, random_message(&mut seq, &topics)); expected_message_deliveries += 1.0; sleep(Duration::from_millis(1050)); gs.peer_score.as_mut().unwrap().0.refresh_scores(); - expected_message_deliveries *= 0.9; //decay + expected_message_deliveries *= 0.9; // decay expected_b3 *= 0.95; assert_eq!( @@ -3601,10 +3621,10 @@ fn test_scoring_p4_valid_message() { let topic = Topic::new("test"); let topic_hash = topic.hash(); let topic_params = TopicScoreParams { - time_in_mesh_weight: 0.0, //deactivate time in mesh - first_message_deliveries_weight: 0.0, //deactivate first time deliveries - mesh_message_deliveries_weight: 0.0, //deactivate message deliveries - mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties + time_in_mesh_weight: 0.0, // deactivate time in mesh + first_message_deliveries_weight: 0.0, // deactivate first time deliveries + mesh_message_deliveries_weight: 0.0, // deactivate message deliveries + mesh_failure_penalty_weight: 0.0, // deactivate mesh failure penalties invalid_message_deliveries_weight: -2.0, invalid_message_deliveries_decay: 0.9, topic_weight: 0.7, @@ -3614,7 +3634,7 @@ fn test_scoring_p4_valid_message() { peer_score_params.app_specific_weight = 1.0; let peer_score_thresholds = PeerScoreThresholds::default(); - //build mesh with two peers + // build mesh with two peers let (mut gs, peers, _, topics) = inject_nodes1() .peer_no(1) .topics(vec!["test".into()]) @@ -3630,7 +3650,7 @@ fn test_scoring_p4_valid_message() { gs.handle_received_message(msg, &peers[index]); }; - //peer 0 delivers valid message + // peer 0 delivers valid message let m1 = random_message(&mut seq, &topics); deliver_message(&mut gs, 0, m1.clone()); @@ -3639,7 +3659,7 @@ fn test_scoring_p4_valid_message() { 
assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); - //message m1 gets validated + // message m1 gets validated gs.report_message_validation_result( &config.message_id(message1), &peers[0], @@ -3659,10 +3679,10 @@ fn test_scoring_p4_invalid_signature() { let topic = Topic::new("test"); let topic_hash = topic.hash(); let topic_params = TopicScoreParams { - time_in_mesh_weight: 0.0, //deactivate time in mesh - first_message_deliveries_weight: 0.0, //deactivate first time deliveries - mesh_message_deliveries_weight: 0.0, //deactivate message deliveries - mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties + time_in_mesh_weight: 0.0, // deactivate time in mesh + first_message_deliveries_weight: 0.0, // deactivate first time deliveries + mesh_message_deliveries_weight: 0.0, // deactivate message deliveries + mesh_failure_penalty_weight: 0.0, // deactivate mesh failure penalties invalid_message_deliveries_weight: -2.0, invalid_message_deliveries_decay: 0.9, topic_weight: 0.7, @@ -3672,7 +3692,7 @@ fn test_scoring_p4_invalid_signature() { peer_score_params.app_specific_weight = 1.0; let peer_score_thresholds = PeerScoreThresholds::default(); - //build mesh with one peer + // build mesh with one peer let (mut gs, peers, _, topics) = inject_nodes1() .peer_no(1) .topics(vec!["test".into()]) @@ -3685,7 +3705,7 @@ fn test_scoring_p4_invalid_signature() { let mut seq = 0; - //peer 0 delivers message with invalid signature + // peer 0 delivers message with invalid signature let m = random_message(&mut seq, &topics); gs.on_connection_handler_event( @@ -3717,10 +3737,10 @@ fn test_scoring_p4_message_from_self() { let topic = Topic::new("test"); let topic_hash = topic.hash(); let topic_params = TopicScoreParams { - time_in_mesh_weight: 0.0, //deactivate time in mesh - first_message_deliveries_weight: 0.0, //deactivate first time deliveries - mesh_message_deliveries_weight: 0.0, //deactivate message deliveries - mesh_failure_penalty_weight: 0.0, 
//deactivate mesh failure penalties + time_in_mesh_weight: 0.0, // deactivate time in mesh + first_message_deliveries_weight: 0.0, // deactivate first time deliveries + mesh_message_deliveries_weight: 0.0, // deactivate message deliveries + mesh_failure_penalty_weight: 0.0, // deactivate mesh failure penalties invalid_message_deliveries_weight: -2.0, invalid_message_deliveries_decay: 0.9, topic_weight: 0.7, @@ -3730,7 +3750,7 @@ fn test_scoring_p4_message_from_self() { peer_score_params.app_specific_weight = 1.0; let peer_score_thresholds = PeerScoreThresholds::default(); - //build mesh with two peers + // build mesh with two peers let (mut gs, peers, _, topics) = inject_nodes1() .peer_no(1) .topics(vec!["test".into()]) @@ -3746,7 +3766,7 @@ fn test_scoring_p4_message_from_self() { gs.handle_received_message(msg, &peers[index]); }; - //peer 0 delivers invalid message from self + // peer 0 delivers invalid message from self let mut m = random_message(&mut seq, &topics); m.source = Some(*gs.publish_config.get_own_id().unwrap()); @@ -3767,10 +3787,10 @@ fn test_scoring_p4_ignored_message() { let topic = Topic::new("test"); let topic_hash = topic.hash(); let topic_params = TopicScoreParams { - time_in_mesh_weight: 0.0, //deactivate time in mesh - first_message_deliveries_weight: 0.0, //deactivate first time deliveries - mesh_message_deliveries_weight: 0.0, //deactivate message deliveries - mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties + time_in_mesh_weight: 0.0, // deactivate time in mesh + first_message_deliveries_weight: 0.0, // deactivate first time deliveries + mesh_message_deliveries_weight: 0.0, // deactivate message deliveries + mesh_failure_penalty_weight: 0.0, // deactivate mesh failure penalties invalid_message_deliveries_weight: -2.0, invalid_message_deliveries_decay: 0.9, topic_weight: 0.7, @@ -3780,7 +3800,7 @@ fn test_scoring_p4_ignored_message() { peer_score_params.app_specific_weight = 1.0; let peer_score_thresholds = 
PeerScoreThresholds::default(); - //build mesh with two peers + // build mesh with two peers let (mut gs, peers, _, topics) = inject_nodes1() .peer_no(1) .topics(vec!["test".into()]) @@ -3796,7 +3816,7 @@ fn test_scoring_p4_ignored_message() { gs.handle_received_message(msg, &peers[index]); }; - //peer 0 delivers ignored message + // peer 0 delivers ignored message let m1 = random_message(&mut seq, &topics); deliver_message(&mut gs, 0, m1.clone()); @@ -3805,7 +3825,7 @@ fn test_scoring_p4_ignored_message() { // Transform the inbound message let message1 = &gs.data_transform.inbound_transform(m1).unwrap(); - //message m1 gets ignored + // message m1 gets ignored gs.report_message_validation_result( &config.message_id(message1), &peers[0], @@ -3825,10 +3845,10 @@ fn test_scoring_p4_application_invalidated_message() { let topic = Topic::new("test"); let topic_hash = topic.hash(); let topic_params = TopicScoreParams { - time_in_mesh_weight: 0.0, //deactivate time in mesh - first_message_deliveries_weight: 0.0, //deactivate first time deliveries - mesh_message_deliveries_weight: 0.0, //deactivate message deliveries - mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties + time_in_mesh_weight: 0.0, // deactivate time in mesh + first_message_deliveries_weight: 0.0, // deactivate first time deliveries + mesh_message_deliveries_weight: 0.0, // deactivate message deliveries + mesh_failure_penalty_weight: 0.0, // deactivate mesh failure penalties invalid_message_deliveries_weight: -2.0, invalid_message_deliveries_decay: 0.9, topic_weight: 0.7, @@ -3838,7 +3858,7 @@ fn test_scoring_p4_application_invalidated_message() { peer_score_params.app_specific_weight = 1.0; let peer_score_thresholds = PeerScoreThresholds::default(); - //build mesh with two peers + // build mesh with two peers let (mut gs, peers, _, topics) = inject_nodes1() .peer_no(1) .topics(vec!["test".into()]) @@ -3854,7 +3874,7 @@ fn test_scoring_p4_application_invalidated_message() { 
gs.handle_received_message(msg, &peers[index]); }; - //peer 0 delivers invalid message + // peer 0 delivers invalid message let m1 = random_message(&mut seq, &topics); deliver_message(&mut gs, 0, m1.clone()); @@ -3863,7 +3883,7 @@ fn test_scoring_p4_application_invalidated_message() { // Transform the inbound message let message1 = &gs.data_transform.inbound_transform(m1).unwrap(); - //message m1 gets rejected + // message m1 gets rejected gs.report_message_validation_result( &config.message_id(message1), &peers[0], @@ -3886,10 +3906,10 @@ fn test_scoring_p4_application_invalid_message_from_two_peers() { let topic = Topic::new("test"); let topic_hash = topic.hash(); let topic_params = TopicScoreParams { - time_in_mesh_weight: 0.0, //deactivate time in mesh - first_message_deliveries_weight: 0.0, //deactivate first time deliveries - mesh_message_deliveries_weight: 0.0, //deactivate message deliveries - mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties + time_in_mesh_weight: 0.0, // deactivate time in mesh + first_message_deliveries_weight: 0.0, // deactivate first time deliveries + mesh_message_deliveries_weight: 0.0, // deactivate message deliveries + mesh_failure_penalty_weight: 0.0, // deactivate mesh failure penalties invalid_message_deliveries_weight: -2.0, invalid_message_deliveries_decay: 0.9, topic_weight: 0.7, @@ -3899,7 +3919,7 @@ fn test_scoring_p4_application_invalid_message_from_two_peers() { peer_score_params.app_specific_weight = 1.0; let peer_score_thresholds = PeerScoreThresholds::default(); - //build mesh with two peers + // build mesh with two peers let (mut gs, peers, _, topics) = inject_nodes1() .peer_no(2) .topics(vec!["test".into()]) @@ -3915,20 +3935,20 @@ fn test_scoring_p4_application_invalid_message_from_two_peers() { gs.handle_received_message(msg, &peers[index]); }; - //peer 0 delivers invalid message + // peer 0 delivers invalid message let m1 = random_message(&mut seq, &topics); deliver_message(&mut gs, 0, 
m1.clone()); // Transform the inbound message let message1 = &gs.data_transform.inbound_transform(m1.clone()).unwrap(); - //peer 1 delivers same message + // peer 1 delivers same message deliver_message(&mut gs, 1, m1); assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[1]), 0.0); - //message m1 gets rejected + // message m1 gets rejected gs.report_message_validation_result( &config.message_id(message1), &peers[0], @@ -3955,10 +3975,10 @@ fn test_scoring_p4_three_application_invalid_messages() { let topic = Topic::new("test"); let topic_hash = topic.hash(); let topic_params = TopicScoreParams { - time_in_mesh_weight: 0.0, //deactivate time in mesh - first_message_deliveries_weight: 0.0, //deactivate first time deliveries - mesh_message_deliveries_weight: 0.0, //deactivate message deliveries - mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties + time_in_mesh_weight: 0.0, // deactivate time in mesh + first_message_deliveries_weight: 0.0, // deactivate first time deliveries + mesh_message_deliveries_weight: 0.0, // deactivate message deliveries + mesh_failure_penalty_weight: 0.0, // deactivate mesh failure penalties invalid_message_deliveries_weight: -2.0, invalid_message_deliveries_decay: 0.9, topic_weight: 0.7, @@ -3968,7 +3988,7 @@ fn test_scoring_p4_three_application_invalid_messages() { peer_score_params.app_specific_weight = 1.0; let peer_score_thresholds = PeerScoreThresholds::default(); - //build mesh with one peer + // build mesh with one peer let (mut gs, peers, _, topics) = inject_nodes1() .peer_no(1) .topics(vec!["test".into()]) @@ -3984,7 +4004,7 @@ fn test_scoring_p4_three_application_invalid_messages() { gs.handle_received_message(msg, &peers[index]); }; - //peer 0 delivers two invalid message + // peer 0 delivers two invalid message let m1 = random_message(&mut seq, &topics); let m2 = random_message(&mut seq, &topics); let m3 = random_message(&mut seq, 
&topics); @@ -4002,7 +4022,7 @@ fn test_scoring_p4_three_application_invalid_messages() { assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); - //messages gets rejected + // messages gets rejected gs.report_message_validation_result( &config.message_id(message1), &peers[0], @@ -4021,7 +4041,7 @@ fn test_scoring_p4_three_application_invalid_messages() { MessageAcceptance::Reject, ); - //number of invalid messages gets squared + // number of invalid messages gets squared assert_eq!( gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 9.0 * -2.0 * 0.7 @@ -4038,10 +4058,10 @@ fn test_scoring_p4_decay() { let topic = Topic::new("test"); let topic_hash = topic.hash(); let topic_params = TopicScoreParams { - time_in_mesh_weight: 0.0, //deactivate time in mesh - first_message_deliveries_weight: 0.0, //deactivate first time deliveries - mesh_message_deliveries_weight: 0.0, //deactivate message deliveries - mesh_failure_penalty_weight: 0.0, //deactivate mesh failure penalties + time_in_mesh_weight: 0.0, // deactivate time in mesh + first_message_deliveries_weight: 0.0, // deactivate first time deliveries + mesh_message_deliveries_weight: 0.0, // deactivate message deliveries + mesh_failure_penalty_weight: 0.0, // deactivate mesh failure penalties invalid_message_deliveries_weight: -2.0, invalid_message_deliveries_decay: 0.9, topic_weight: 0.7, @@ -4051,7 +4071,7 @@ fn test_scoring_p4_decay() { peer_score_params.app_specific_weight = 1.0; let peer_score_thresholds = PeerScoreThresholds::default(); - //build mesh with one peer + // build mesh with one peer let (mut gs, peers, _, topics) = inject_nodes1() .peer_no(1) .topics(vec!["test".into()]) @@ -4067,7 +4087,7 @@ fn test_scoring_p4_decay() { gs.handle_received_message(msg, &peers[index]); }; - //peer 0 delivers invalid message + // peer 0 delivers invalid message let m1 = random_message(&mut seq, &topics); deliver_message(&mut gs, 0, m1.clone()); @@ -4075,7 +4095,7 @@ fn test_scoring_p4_decay() { let 
message1 = &gs.data_transform.inbound_transform(m1).unwrap(); assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 0.0); - //message m1 gets rejected + // message m1 gets rejected gs.report_message_validation_result( &config.message_id(message1), &peers[0], @@ -4087,7 +4107,7 @@ fn test_scoring_p4_decay() { -2.0 * 0.7 ); - //we decay + // we decay gs.peer_score.as_mut().unwrap().0.refresh_scores(); // the number of invalids gets decayed to 0.9 and then squared in the score @@ -4104,7 +4124,7 @@ fn test_scoring_p5() { ..PeerScoreParams::default() }; - //build mesh with one peer + // build mesh with one peer let (mut gs, peers, _, _) = inject_nodes1() .peer_no(1) .topics(vec!["test".into()]) @@ -4141,7 +4161,7 @@ fn test_scoring_p6() { .scoring(Some((peer_score_params, PeerScoreThresholds::default()))) .create_network(); - //create 5 peers with the same ip + // create 5 peers with the same ip let addr = Multiaddr::from(Ipv4Addr::new(10, 1, 2, 3)); let peers = vec![ add_peer_with_addr(&mut gs, &[], false, false, addr.clone()).0, @@ -4151,7 +4171,7 @@ fn test_scoring_p6() { add_peer_with_addr(&mut gs, &[], true, true, addr.clone()).0, ]; - //create 4 other peers with other ip + // create 4 other peers with other ip let addr2 = Multiaddr::from(Ipv4Addr::new(10, 1, 2, 4)); let others = vec![ add_peer_with_addr(&mut gs, &[], false, false, addr2.clone()).0, @@ -4160,12 +4180,12 @@ fn test_scoring_p6() { add_peer_with_addr(&mut gs, &[], true, false, addr2.clone()).0, ]; - //no penalties yet + // no penalties yet for peer in peers.iter().chain(others.iter()) { assert_eq!(gs.peer_score.as_ref().unwrap().0.score(peer), 0.0); } - //add additional connection for 3 others with addr + // add additional connection for 3 others with addr for id in others.iter().take(3) { gs.on_swarm_event(FromSwarm::ConnectionEstablished(ConnectionEstablished { peer_id: *id, @@ -4180,14 +4200,14 @@ fn test_scoring_p6() { })); } - //penalties apply squared + // penalties apply squared for 
peer in peers.iter().chain(others.iter().take(3)) { assert_eq!(gs.peer_score.as_ref().unwrap().0.score(peer), 9.0 * -2.0); } - //fourth other peer still no penalty + // fourth other peer still no penalty assert_eq!(gs.peer_score.as_ref().unwrap().0.score(&others[3]), 0.0); - //add additional connection for 3 of the peers to addr2 + // add additional connection for 3 of the peers to addr2 for peer in peers.iter().take(3) { gs.on_swarm_event(FromSwarm::ConnectionEstablished(ConnectionEstablished { peer_id: *peer, @@ -4202,7 +4222,7 @@ fn test_scoring_p6() { })); } - //double penalties for the first three of each + // double penalties for the first three of each for peer in peers.iter().take(3).chain(others.iter().take(3)) { assert_eq!( gs.peer_score.as_ref().unwrap().0.score(peer), @@ -4210,7 +4230,7 @@ fn test_scoring_p6() { ); } - //single penalties for the rest + // single penalties for the rest for peer in peers.iter().skip(3) { assert_eq!(gs.peer_score.as_ref().unwrap().0.score(peer), 9.0 * -2.0); } @@ -4219,7 +4239,7 @@ fn test_scoring_p6() { 4.0 * -2.0 ); - //two times same ip doesn't count twice + // two times same ip doesn't count twice gs.on_swarm_event(FromSwarm::ConnectionEstablished(ConnectionEstablished { peer_id: peers[0], connection_id: ConnectionId::new_unchecked(0), @@ -4232,8 +4252,8 @@ fn test_scoring_p6() { other_established: 2, })); - //nothing changed - //double penalties for the first three of each + // nothing changed + // double penalties for the first three of each for peer in peers.iter().take(3).chain(others.iter().take(3)) { assert_eq!( gs.peer_score.as_ref().unwrap().0.score(peer), @@ -4241,7 +4261,7 @@ fn test_scoring_p6() { ); } - //single penalties for the rest + // single penalties for the rest for peer in peers.iter().skip(3) { assert_eq!(gs.peer_score.as_ref().unwrap().0.score(peer), 9.0 * -2.0); } @@ -4274,7 +4294,8 @@ fn test_scoring_p7_grafts_before_backoff() { .scoring(Some((peer_score_params, PeerScoreThresholds::default()))) 
.create_network(); - //remove peers from mesh and send prune to them => this adds a backoff for the peers + // remove peers from mesh and send prune to them => this adds a backoff for the + // peers for peer in peers.iter().take(2) { gs.mesh.get_mut(&topics[0]).unwrap().remove(peer); gs.send_graft_prune( @@ -4284,31 +4305,31 @@ fn test_scoring_p7_grafts_before_backoff() { ); } - //wait 50 millisecs + // wait 50 millisecs sleep(Duration::from_millis(50)); - //first peer tries to graft + // first peer tries to graft gs.handle_graft(&peers[0], vec![topics[0].clone()]); - //double behaviour penalty for first peer (squared) + // double behaviour penalty for first peer (squared) assert_eq!( gs.peer_score.as_ref().unwrap().0.score(&peers[0]), 4.0 * -2.0 ); - //wait 100 millisecs + // wait 100 millisecs sleep(Duration::from_millis(100)); - //second peer tries to graft + // second peer tries to graft gs.handle_graft(&peers[1], vec![topics[0].clone()]); - //single behaviour penalty for second peer + // single behaviour penalty for second peer assert_eq!( gs.peer_score.as_ref().unwrap().0.score(&peers[1]), 1.0 * -2.0 ); - //test decay + // test decay gs.peer_score.as_mut().unwrap().0.refresh_scores(); assert_eq!( @@ -4327,7 +4348,7 @@ fn test_opportunistic_grafting() { .mesh_n_low(3) .mesh_n(5) .mesh_n_high(7) - .mesh_outbound_min(0) //deactivate outbound handling + .mesh_outbound_min(0) // deactivate outbound handling .opportunistic_graft_ticks(2) .opportunistic_graft_peers(2) .build() @@ -4351,30 +4372,30 @@ fn test_opportunistic_grafting() { .scoring(Some((peer_score_params, thresholds))) .create_network(); - //fill mesh with 5 peers + // fill mesh with 5 peers for peer in &peers { gs.handle_graft(peer, topics.clone()); } - //add additional 5 peers + // add additional 5 peers let others: Vec<_> = (0..5) .map(|_| add_peer(&mut gs, &topics, false, false)) .collect(); - //currently mesh equals peers + // currently mesh equals peers assert_eq!(gs.mesh[&topics[0]], 
peers.iter().cloned().collect()); - //give others high scores (but the first two have not high enough scores) + // give others high scores (but the first two have not high enough scores) for (i, peer) in peers.iter().enumerate().take(5) { gs.set_application_score(peer, 0.0 + i as f64); } - //set scores for peers in the mesh + // set scores for peers in the mesh for (i, (peer, _receiver)) in others.iter().enumerate().take(5) { gs.set_application_score(peer, 0.0 + i as f64); } - //this gives a median of exactly 2.0 => should not apply opportunistic grafting + // this gives a median of exactly 2.0 => should not apply opportunistic grafting gs.heartbeat(); gs.heartbeat(); @@ -4384,10 +4405,10 @@ fn test_opportunistic_grafting() { "should not apply opportunistic grafting" ); - //reduce middle score to 1.0 giving a median of 1.0 + // reduce middle score to 1.0 giving a median of 1.0 gs.set_application_score(&peers[2], 1.0); - //opportunistic grafting after two heartbeats + // opportunistic grafting after two heartbeats gs.heartbeat(); assert_eq!( @@ -4417,17 +4438,17 @@ fn test_opportunistic_grafting() { #[test] fn test_ignore_graft_from_unknown_topic() { - //build gossipsub without subscribing to any topics + // build gossipsub without subscribing to any topics let (mut gs, peers, receivers, _) = inject_nodes1() .peer_no(1) .topics(vec![]) .to_subscribe(false) .create_network(); - //handle an incoming graft for some topic + // handle an incoming graft for some topic gs.handle_graft(&peers[0], vec![Topic::new("test").hash()]); - //assert that no prune got created + // assert that no prune got created let (control_msgs, _) = count_control_msgs(receivers, |_, a| matches!(a, RpcOut::Prune { .. 
})); assert_eq!( control_msgs, 0, @@ -4438,18 +4459,18 @@ fn test_ignore_graft_from_unknown_topic() { #[test] fn test_ignore_too_many_iwants_from_same_peer_for_same_message() { let config = Config::default(); - //build gossipsub with full mesh + // build gossipsub with full mesh let (mut gs, _, mut receivers, topics) = inject_nodes1() .peer_no(config.mesh_n_high()) .topics(vec!["test".into()]) .to_subscribe(false) .create_network(); - //add another peer not in the mesh + // add another peer not in the mesh let (peer, receiver) = add_peer(&mut gs, &topics, false, false); receivers.insert(peer, receiver); - //receive a message + // receive a message let mut seq = 0; let m1 = random_message(&mut seq, &topics); @@ -4460,11 +4481,11 @@ fn test_ignore_too_many_iwants_from_same_peer_for_same_message() { gs.handle_received_message(m1, &PeerId::random()); - //clear events + // clear events let receivers = flush_events(&mut gs, receivers); - //the first gossip_retransimission many iwants return the valid message, all others are - // ignored. + // the first gossip_retransimission many iwants return the valid message, all + // others are ignored. 
for _ in 0..(2 * config.gossip_retransimission() + 10) { gs.handle_iwant(&peer, vec![id.clone()]); } @@ -4490,7 +4511,7 @@ fn test_ignore_too_many_ihaves() { .max_ihave_messages(10) .build() .unwrap(); - //build gossipsub with full mesh + // build gossipsub with full mesh let (mut gs, _, mut receivers, topics) = inject_nodes1() .peer_no(config.mesh_n_high()) .topics(vec!["test".into()]) @@ -4498,15 +4519,15 @@ fn test_ignore_too_many_ihaves() { .gs_config(config.clone()) .create_network(); - //add another peer not in the mesh + // add another peer not in the mesh let (peer, receiver) = add_peer(&mut gs, &topics, false, false); receivers.insert(peer, receiver); - //peer has 20 messages + // peer has 20 messages let mut seq = 0; let messages: Vec<_> = (0..20).map(|_| random_message(&mut seq, &topics)).collect(); - //peer sends us one ihave for each message in order + // peer sends us one ihave for each message in order for raw_message in &messages { // Transform the inbound message let message = &gs @@ -4527,7 +4548,7 @@ fn test_ignore_too_many_ihaves() { .map(|m| config.message_id(&m)) .collect(); - //we send iwant only for the first 10 messages + // we send iwant only for the first 10 messages let (control_msgs, receivers) = count_control_msgs(receivers, |p, action| { p == &peer && matches!(action, RpcOut::IWant(IWant { message_ids }) if message_ids.len() == 1 && first_ten.contains(&message_ids[0])) @@ -4537,7 +4558,7 @@ fn test_ignore_too_many_ihaves() { "exactly the first ten ihaves should be processed and one iwant for each created" ); - //after a heartbeat everything is forgotten + // after a heartbeat everything is forgotten gs.heartbeat(); for raw_message in messages[10..].iter() { @@ -4553,7 +4574,7 @@ fn test_ignore_too_many_ihaves() { ); } - //we sent iwant for all 10 messages + // we sent iwant for all 10 messages let (control_msgs, _) = count_control_msgs(receivers, |p, action| { p == &peer && matches!(action, RpcOut::IWant(IWant { message_ids }) if 
message_ids.len() == 1) @@ -4568,7 +4589,7 @@ fn test_ignore_too_many_messages_in_ihave() { .max_ihave_length(10) .build() .unwrap(); - //build gossipsub with full mesh + // build gossipsub with full mesh let (mut gs, _, mut receivers, topics) = inject_nodes1() .peer_no(config.mesh_n_high()) .topics(vec!["test".into()]) @@ -4576,11 +4597,11 @@ fn test_ignore_too_many_messages_in_ihave() { .gs_config(config.clone()) .create_network(); - //add another peer not in the mesh + // add another peer not in the mesh let (peer, receiver) = add_peer(&mut gs, &topics, false, false); receivers.insert(peer, receiver); - //peer has 20 messages + // peer has 20 messages let mut seq = 0; let message_ids: Vec<_> = (0..20) .map(|_| random_message(&mut seq, &topics)) @@ -4588,7 +4609,7 @@ fn test_ignore_too_many_messages_in_ihave() { .map(|msg| config.message_id(&msg)) .collect(); - //peer sends us three ihaves + // peer sends us three ihaves gs.handle_ihave(&peer, vec![(topics[0].clone(), message_ids[0..8].to_vec())]); gs.handle_ihave( &peer, @@ -4601,7 +4622,7 @@ fn test_ignore_too_many_messages_in_ihave() { let first_twelve: HashSet<_> = message_ids.iter().take(12).collect(); - //we send iwant only for the first 10 messages + // we send iwant only for the first 10 messages let mut sum = 0; let (control_msgs, receivers) = count_control_msgs(receivers, |p, rpc| match rpc { RpcOut::IWant(IWant { message_ids }) => { @@ -4620,14 +4641,14 @@ fn test_ignore_too_many_messages_in_ihave() { assert_eq!(sum, 10, "exactly the first ten ihaves should be processed"); - //after a heartbeat everything is forgotten + // after a heartbeat everything is forgotten gs.heartbeat(); gs.handle_ihave( &peer, vec![(topics[0].clone(), message_ids[10..20].to_vec())], ); - //we sent 10 iwant messages ids via a IWANT rpc. + // we sent 10 iwant messages ids via a IWANT rpc. 
let mut sum = 0; let (control_msgs, _) = count_control_msgs(receivers, |p, rpc| match rpc { RpcOut::IWant(IWant { message_ids }) => { @@ -4649,7 +4670,7 @@ fn test_limit_number_of_message_ids_inside_ihave() { .max_ihave_length(100) .build() .unwrap(); - //build gossipsub with full mesh + // build gossipsub with full mesh let (mut gs, peers, mut receivers, topics) = inject_nodes1() .peer_no(config.mesh_n_high()) .topics(vec!["test".into()]) @@ -4657,29 +4678,30 @@ fn test_limit_number_of_message_ids_inside_ihave() { .gs_config(config) .create_network(); - //graft to all peers to really fill the mesh with all the peers + // graft to all peers to really fill the mesh with all the peers for peer in peers { gs.handle_graft(&peer, topics.clone()); } - //add two other peers not in the mesh + // add two other peers not in the mesh let (p1, receiver1) = add_peer(&mut gs, &topics, false, false); receivers.insert(p1, receiver1); let (p2, receiver2) = add_peer(&mut gs, &topics, false, false); receivers.insert(p2, receiver2); - //receive 200 messages from another peer + // receive 200 messages from another peer let mut seq = 0; for _ in 0..200 { gs.handle_received_message(random_message(&mut seq, &topics), &PeerId::random()); } - //emit gossip + // emit gossip gs.emit_gossip(); - // both peers should have gotten 100 random ihave messages, to assert the randomness, we - // assert that both have not gotten the same set of messages, but have an intersection - // (which is the case with very high probability, the probabiltity of failure is < 10^-58). + // both peers should have gotten 100 random ihave messages, to assert the + // randomness, we assert that both have not gotten the same set of messages, + // but have an intersection (which is the case with very high probability, + // the probabiltity of failure is < 10^-58). 
let mut ihaves1 = HashSet::new(); let mut ihaves2 = HashSet::new(); @@ -4715,24 +4737,22 @@ fn test_limit_number_of_message_ids_inside_ihave() { ); assert!( ihaves1 != ihaves2, - "should have sent different random messages to p1 and p2 \ - (this may fail with a probability < 10^-58" + "should have sent different random messages to p1 and p2 (this may fail with a \ + probability < 10^-58" ); assert!( ihaves1.intersection(&ihaves2).count() > 0, - "should have sent random messages with some common messages to p1 and p2 \ - (this may fail with a probability < 10^-58" + "should have sent random messages with some common messages to p1 and p2 (this may fail \ + with a probability < 10^-58" ); } #[test] fn test_iwant_penalties() { - /* - use tracing_subscriber::EnvFilter; - let _ = tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .try_init(); - */ + // use tracing_subscriber::EnvFilter; + // let _ = tracing_subscriber::fmt() + // .with_env_filter(EnvFilter::from_default_env()) + // .try_init(); let config = ConfigBuilder::default() .iwant_followup_time(Duration::from_secs(4)) .build() @@ -4862,7 +4882,7 @@ fn test_publish_to_floodsub_peers_without_flood_publish() { .gs_config(config) .create_network(); - //add two floodsub peer, one explicit, one implicit + // add two floodsub peer, one explicit, one implicit let (p1, receiver1) = add_peer_with_addr_and_kind( &mut gs, &topics, @@ -4877,10 +4897,10 @@ fn test_publish_to_floodsub_peers_without_flood_publish() { add_peer_with_addr_and_kind(&mut gs, &topics, false, false, Multiaddr::empty(), None); receivers.insert(p2, receiver2); - //p1 and p2 are not in the mesh + // p1 and p2 are not in the mesh assert!(!gs.mesh[&topics[0]].contains(&p1) && !gs.mesh[&topics[0]].contains(&p2)); - //publish a message + // publish a message let publish_data = vec![0; 42]; gs.publish(Topic::new("test"), publish_data).unwrap(); @@ -4921,7 +4941,7 @@ fn test_do_not_use_floodsub_in_fanout() { let topic = 
Topic::new("test"); let topics = vec![topic.hash()]; - //add two floodsub peer, one explicit, one implicit + // add two floodsub peer, one explicit, one implicit let (p1, receiver1) = add_peer_with_addr_and_kind( &mut gs, &topics, @@ -4936,7 +4956,7 @@ fn test_do_not_use_floodsub_in_fanout() { add_peer_with_addr_and_kind(&mut gs, &topics, false, false, Multiaddr::empty(), None); receivers.insert(p2, receiver2); - //publish a message + // publish a message let publish_data = vec![0; 42]; gs.publish(Topic::new("test"), publish_data).unwrap(); @@ -4977,7 +4997,7 @@ fn test_dont_add_floodsub_peers_to_mesh_on_join() { let topic = Topic::new("test"); let topics = vec![topic.hash()]; - //add two floodsub peer, one explicit, one implicit + // add two floodsub peer, one explicit, one implicit let _p1 = add_peer_with_addr_and_kind( &mut gs, &topics, @@ -5004,7 +5024,7 @@ fn test_dont_send_px_to_old_gossipsub_peers() { .to_subscribe(false) .create_network(); - //add an old gossipsub peer + // add an old gossipsub peer let (p1, _receiver1) = add_peer_with_addr_and_kind( &mut gs, &topics, @@ -5014,14 +5034,14 @@ fn test_dont_send_px_to_old_gossipsub_peers() { Some(PeerKind::Gossipsub), ); - //prune the peer + // prune the peer gs.send_graft_prune( HashMap::new(), vec![(p1, topics.clone())].into_iter().collect(), HashSet::new(), ); - //check that prune does not contain px + // check that prune does not contain px let (control_msgs, _) = count_control_msgs(receivers, |_, m| match m { RpcOut::Prune(Prune { peers: px, .. 
}) => !px.is_empty(), _ => false, @@ -5031,14 +5051,14 @@ fn test_dont_send_px_to_old_gossipsub_peers() { #[test] fn test_dont_send_floodsub_peers_in_px() { - //build mesh with one peer + // build mesh with one peer let (mut gs, peers, receivers, topics) = inject_nodes1() .peer_no(1) .topics(vec!["test".into()]) .to_subscribe(true) .create_network(); - //add two floodsub peers + // add two floodsub peers let _p1 = add_peer_with_addr_and_kind( &mut gs, &topics, @@ -5049,14 +5069,14 @@ fn test_dont_send_floodsub_peers_in_px() { ); let _p2 = add_peer_with_addr_and_kind(&mut gs, &topics, false, false, Multiaddr::empty(), None); - //prune only mesh node + // prune only mesh node gs.send_graft_prune( HashMap::new(), vec![(peers[0], topics.clone())].into_iter().collect(), HashSet::new(), ); - //check that px in prune message is empty + // check that px in prune message is empty let (control_msgs, _) = count_control_msgs(receivers, |_, m| match m { RpcOut::Prune(Prune { peers: px, .. }) => !px.is_empty(), _ => false, @@ -5072,7 +5092,7 @@ fn test_dont_add_floodsub_peers_to_mesh_in_heartbeat() { .to_subscribe(false) .create_network(); - //add two floodsub peer, one explicit, one implicit + // add two floodsub peer, one explicit, one implicit let _p1 = add_peer_with_addr_and_kind( &mut gs, &topics, @@ -5139,7 +5159,7 @@ fn test_subscribe_to_invalid_topic() { #[test] fn test_subscribe_and_graft_with_negative_score() { - //simulate a communication between two gossipsub instances + // simulate a communication between two gossipsub instances let (mut gs1, _, _, topic_hashes) = inject_nodes1() .topics(vec!["test".into()]) .scoring(Some(( @@ -5157,12 +5177,12 @@ fn test_subscribe_and_graft_with_negative_score() { let (p2, _receiver1) = add_peer(&mut gs1, &Vec::new(), true, false); let (p1, _receiver2) = add_peer(&mut gs2, &topic_hashes, false, false); - //add penalty to peer p2 + // add penalty to peer p2 gs1.peer_score.as_mut().unwrap().0.add_penalty(&p2, 1); let original_score = 
gs1.peer_score.as_ref().unwrap().0.score(&p2); - //subscribe to topic in gs2 + // subscribe to topic in gs2 gs2.subscribe(&topic).unwrap(); let forward_messages_to_p1 = |gs1: &mut Behaviour<_, _>, @@ -5191,17 +5211,17 @@ fn test_subscribe_and_graft_with_negative_score() { new_receivers }; - //forward the subscribe message + // forward the subscribe message let receivers = forward_messages_to_p1(&mut gs1, p1, p2, connection_id, receivers); - //heartbeats on both + // heartbeats on both gs1.heartbeat(); gs2.heartbeat(); - //forward messages again + // forward messages again forward_messages_to_p1(&mut gs1, p1, p2, connection_id, receivers); - //nobody got penalized + // nobody got penalized assert!(gs1.peer_score.as_ref().unwrap().0.score(&p2) >= original_score); } diff --git a/protocols/gossipsub/src/config.rs b/protocols/gossipsub/src/config.rs index 6e7861bae10..0f97daad4a0 100644 --- a/protocols/gossipsub/src/config.rs +++ b/protocols/gossipsub/src/config.rs @@ -18,37 +18,42 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use std::borrow::Cow; -use std::sync::Arc; -use std::time::Duration; - -use crate::error::ConfigBuilderError; -use crate::protocol::{ProtocolConfig, ProtocolId, FLOODSUB_PROTOCOL}; -use crate::types::{Message, MessageId, PeerKind}; +use std::{borrow::Cow, sync::Arc, time::Duration}; use libp2p_identity::PeerId; use libp2p_swarm::StreamProtocol; +use crate::{ + error::ConfigBuilderError, + protocol::{ProtocolConfig, ProtocolId, FLOODSUB_PROTOCOL}, + types::{Message, MessageId, PeerKind}, +}; + /// The types of message validation that can be employed by gossipsub. #[derive(Debug, Clone)] pub enum ValidationMode { - /// This is the default setting. This requires the message author to be a valid [`PeerId`] and to - /// be present as well as the sequence number. All messages must have valid signatures. + /// This is the default setting. 
This requires the message author to be a + /// valid [`PeerId`] and to be present as well as the sequence number. + /// All messages must have valid signatures. /// /// NOTE: This setting will reject messages from nodes using - /// [`crate::behaviour::MessageAuthenticity::Anonymous`] and all messages that do not have - /// signatures. + /// [`crate::behaviour::MessageAuthenticity::Anonymous`] and all messages + /// that do not have signatures. Strict, - /// This setting permits messages that have no author, sequence number or signature. If any of - /// these fields exist in the message these are validated. + /// This setting permits messages that have no author, sequence number or + /// signature. If any of these fields exist in the message these are + /// validated. Permissive, - /// This setting requires the author, sequence number and signature fields of a message to be - /// empty. Any message that contains these fields is considered invalid. + /// This setting requires the author, sequence number and signature fields + /// of a message to be empty. Any message that contains these fields is + /// considered invalid. Anonymous, - /// This setting does not check the author, sequence number or signature fields of incoming - /// messages. If these fields contain data, they are simply ignored. + /// This setting does not check the author, sequence number or signature + /// fields of incoming messages. If these fields contain data, they are + /// simply ignored. /// - /// NOTE: This setting will consider messages with invalid signatures as valid messages. + /// NOTE: This setting will consider messages with invalid signatures as + /// valid messages. None, } @@ -59,7 +64,8 @@ pub enum Version { V1_1, } -/// Configuration parameters that define the performance of the gossipsub network. +/// Configuration parameters that define the performance of the gossipsub +/// network. 
#[derive(Clone)] pub struct Config { protocol: ProtocolConfig, @@ -116,40 +122,45 @@ impl Config { self.history_gossip } - /// Target number of peers for the mesh network (D in the spec, default is 6). + /// Target number of peers for the mesh network (D in the spec, default is + /// 6). pub fn mesh_n(&self) -> usize { self.mesh_n } - /// Minimum number of peers in mesh network before adding more (D_lo in the spec, default is 5). + /// Minimum number of peers in mesh network before adding more (D_lo in the + /// spec, default is 5). pub fn mesh_n_low(&self) -> usize { self.mesh_n_low } - /// Maximum number of peers in mesh network before removing some (D_high in the spec, default - /// is 12). + /// Maximum number of peers in mesh network before removing some (D_high in + /// the spec, default is 12). pub fn mesh_n_high(&self) -> usize { self.mesh_n_high } - /// Affects how peers are selected when pruning a mesh due to over subscription. + /// Affects how peers are selected when pruning a mesh due to over + /// subscription. /// - /// At least `retain_scores` of the retained peers will be high-scoring, while the remainder are - /// chosen randomly (D_score in the spec, default is 4). + /// At least `retain_scores` of the retained peers will be high-scoring, + /// while the remainder are chosen randomly (D_score in the spec, + /// default is 4). pub fn retain_scores(&self) -> usize { self.retain_scores } - /// Minimum number of peers to emit gossip to during a heartbeat (D_lazy in the spec, - /// default is 6). + /// Minimum number of peers to emit gossip to during a heartbeat (D_lazy in + /// the spec, default is 6). pub fn gossip_lazy(&self) -> usize { self.gossip_lazy } /// Affects how many peers we will emit gossip to at each heartbeat. /// - /// We will send gossip to `gossip_factor * (total number of non-mesh peers)`, or - /// `gossip_lazy`, whichever is greater. The default is 0.25. 
+ /// We will send gossip to `gossip_factor * (total number of non-mesh + /// peers)`, or `gossip_lazy`, whichever is greater. The default is + /// 0.25. pub fn gossip_factor(&self) -> f64 { self.gossip_factor } @@ -169,67 +180,74 @@ impl Config { self.fanout_ttl } - /// The number of heartbeat ticks until we recheck the connection to explicit peers and - /// reconnecting if necessary (default 300). + /// The number of heartbeat ticks until we recheck the connection to + /// explicit peers and reconnecting if necessary (default 300). pub fn check_explicit_peers_ticks(&self) -> u64 { self.check_explicit_peers_ticks } /// The maximum byte size for each gossipsub RPC (default is 65536 bytes). /// - /// This represents the maximum size of the published message. It is additionally wrapped - /// in a protobuf struct, so the actual wire size may be a bit larger. It must be at least - /// large enough to support basic control messages. If Peer eXchange is enabled, this - /// must be large enough to transmit the desired peer information on pruning. It must be at - /// least 100 bytes. Default is 65536 bytes. + /// This represents the maximum size of the published message. It is + /// additionally wrapped in a protobuf struct, so the actual wire size + /// may be a bit larger. It must be at least large enough to support + /// basic control messages. If Peer eXchange is enabled, this + /// must be large enough to transmit the desired peer information on + /// pruning. It must be at least 100 bytes. Default is 65536 bytes. pub fn max_transmit_size(&self) -> usize { self.protocol.max_transmit_size } - /// Duplicates are prevented by storing message id's of known messages in an LRU time cache. - /// This settings sets the time period that messages are stored in the cache. Duplicates can be - /// received if duplicate messages are sent at a time greater than this setting apart. 
The + /// Duplicates are prevented by storing message id's of known messages in an + /// LRU time cache. This settings sets the time period that messages are + /// stored in the cache. Duplicates can be received if duplicate + /// messages are sent at a time greater than this setting apart. The /// default is 1 minute. pub fn duplicate_cache_time(&self) -> Duration { self.duplicate_cache_time } - /// When set to `true`, prevents automatic forwarding of all received messages. This setting - /// allows a user to validate the messages before propagating them to their peers. If set to - /// true, the user must manually call [`crate::Behaviour::report_message_validation_result()`] + /// When set to `true`, prevents automatic forwarding of all received + /// messages. This setting allows a user to validate the messages before + /// propagating them to their peers. If set to true, the user must + /// manually call [`crate::Behaviour::report_message_validation_result()`] /// on the behaviour to forward message once validated (default is `false`). /// The default is `false`. pub fn validate_messages(&self) -> bool { self.validate_messages } - /// Determines the level of validation used when receiving messages. See [`ValidationMode`] - /// for the available types. The default is ValidationMode::Strict. + /// Determines the level of validation used when receiving messages. See + /// [`ValidationMode`] for the available types. The default is + /// ValidationMode::Strict. pub fn validation_mode(&self) -> &ValidationMode { &self.protocol.validation_mode } - /// A user-defined function allowing the user to specify the message id of a gossipsub message. - /// The default value is to concatenate the source peer id with a sequence number. Setting this - /// parameter allows the user to address packets arbitrarily. One example is content based - /// addressing, where this function may be set to `hash(message)`. 
This would prevent messages - /// of the same content from being duplicated. + /// A user-defined function allowing the user to specify the message id of a + /// gossipsub message. The default value is to concatenate the source + /// peer id with a sequence number. Setting this parameter allows the + /// user to address packets arbitrarily. One example is content based + /// addressing, where this function may be set to `hash(message)`. This + /// would prevent messages of the same content from being duplicated. /// - /// The function takes a [`Message`] as input and outputs a String to be interpreted as - /// the message id. + /// The function takes a [`Message`] as input and outputs a String to be + /// interpreted as the message id. pub fn message_id(&self, message: &Message) -> MessageId { (self.message_id_fn)(message) } - /// By default, gossipsub will reject messages that are sent to us that have the same message - /// source as we have specified locally. Enabling this, allows these messages and prevents - /// penalizing the peer that sent us the message. Default is false. + /// By default, gossipsub will reject messages that are sent to us that have + /// the same message source as we have specified locally. Enabling this, + /// allows these messages and prevents penalizing the peer that sent us + /// the message. Default is false. pub fn allow_self_origin(&self) -> bool { self.allow_self_origin } - /// Whether Peer eXchange is enabled; this should be enabled in bootstrappers and other well - /// connected/trusted nodes. The default is false. + /// Whether Peer eXchange is enabled; this should be enabled in + /// bootstrappers and other well connected/trusted nodes. The default is + /// false. /// /// Note: Peer exchange is not implemented today, see /// . @@ -238,20 +256,22 @@ impl Config { } /// Controls the number of peers to include in prune Peer eXchange. 
- /// When we prune a peer that's eligible for PX (has a good score, etc), we will try to - /// send them signed peer records for up to `prune_peers` other peers that we - /// know of. It is recommended that this value is larger than `mesh_n_high` so that the pruned - /// peer can reliably form a full mesh. The default is typically 16 however until signed + /// When we prune a peer that's eligible for PX (has a good score, etc), we + /// will try to send them signed peer records for up to `prune_peers` + /// other peers that we know of. It is recommended that this value is + /// larger than `mesh_n_high` so that the pruned peer can reliably form + /// a full mesh. The default is typically 16 however until signed /// records are spec'd this is disabled and set to 0. pub fn prune_peers(&self) -> usize { self.prune_peers } /// Controls the backoff time for pruned peers. This is how long - /// a peer must wait before attempting to graft into our mesh again after being pruned. - /// When pruning a peer, we send them our value of `prune_backoff` so they know - /// the minimum time to wait. Peers running older versions may not send a backoff time, - /// so if we receive a prune message without one, we will wait at least `prune_backoff` + /// a peer must wait before attempting to graft into our mesh again after + /// being pruned. When pruning a peer, we send them our value of + /// `prune_backoff` so they know the minimum time to wait. Peers running + /// older versions may not send a backoff time, so if we receive a prune + /// message without one, we will wait at least `prune_backoff` /// before attempting to re-graft. The default is one minute. pub fn prune_backoff(&self) -> Duration { self.prune_backoff @@ -259,88 +279,96 @@ impl Config { /// Controls the backoff time when unsubscribing from a topic. /// - /// This is how long to wait before resubscribing to the topic. 
A short backoff period in case - /// of an unsubscribe event allows reaching a healthy mesh in a more timely manner. The default - /// is 10 seconds. + /// This is how long to wait before resubscribing to the topic. A short + /// backoff period in case of an unsubscribe event allows reaching a + /// healthy mesh in a more timely manner. The default is 10 seconds. pub fn unsubscribe_backoff(&self) -> Duration { self.unsubscribe_backoff } - /// Number of heartbeat slots considered as slack for backoffs. This guarantees that we wait - /// at least backoff_slack heartbeats after a backoff is over before we try to graft. This - /// solves problems occurring through high latencies. In particular if - /// `backoff_slack * heartbeat_interval` is longer than any latencies between processing - /// prunes on our side and processing prunes on the receiving side this guarantees that we - /// get not punished for too early grafting. The default is 1. + /// Number of heartbeat slots considered as slack for backoffs. This + /// guarantees that we wait at least backoff_slack heartbeats after a + /// backoff is over before we try to graft. This solves problems + /// occurring through high latencies. In particular if `backoff_slack * + /// heartbeat_interval` is longer than any latencies between processing + /// prunes on our side and processing prunes on the receiving side this + /// guarantees that we get not punished for too early grafting. The + /// default is 1. pub fn backoff_slack(&self) -> u32 { self.backoff_slack } - /// Whether to do flood publishing or not. If enabled newly created messages will always be - /// sent to all peers that are subscribed to the topic and have a good enough score. - /// The default is true. + /// Whether to do flood publishing or not. If enabled newly created messages + /// will always be sent to all peers that are subscribed to the topic + /// and have a good enough score. The default is true. 
pub fn flood_publish(&self) -> bool { self.flood_publish } - /// If a GRAFT comes before `graft_flood_threshold` has elapsed since the last PRUNE, - /// then there is an extra score penalty applied to the peer through P7. + /// If a GRAFT comes before `graft_flood_threshold` has elapsed since the + /// last PRUNE, then there is an extra score penalty applied to the peer + /// through P7. pub fn graft_flood_threshold(&self) -> Duration { self.graft_flood_threshold } - /// Minimum number of outbound peers in the mesh network before adding more (D_out in the spec). - /// This value must be smaller or equal than `mesh_n / 2` and smaller than `mesh_n_low`. - /// The default is 2. + /// Minimum number of outbound peers in the mesh network before adding more + /// (D_out in the spec). This value must be smaller or equal than + /// `mesh_n / 2` and smaller than `mesh_n_low`. The default is 2. pub fn mesh_outbound_min(&self) -> usize { self.mesh_outbound_min } - /// Number of heartbeat ticks that specify the interval in which opportunistic grafting is - /// applied. Every `opportunistic_graft_ticks` we will attempt to select some high-scoring mesh - /// peers to replace lower-scoring ones, if the median score of our mesh peers falls below a - /// threshold (see ). + /// Number of heartbeat ticks that specify the interval in which + /// opportunistic grafting is applied. Every `opportunistic_graft_ticks` + /// we will attempt to select some high-scoring mesh peers to replace + /// lower-scoring ones, if the median score of our mesh peers falls below a threshold (see ). /// The default is 60. pub fn opportunistic_graft_ticks(&self) -> u64 { self.opportunistic_graft_ticks } - /// Controls how many times we will allow a peer to request the same message id through IWANT - /// gossip before we start ignoring them. This is designed to prevent peers from spamming us - /// with requests and wasting our resources. The default is 3. 
+ /// Controls how many times we will allow a peer to request the same message + /// id through IWANT gossip before we start ignoring them. This is + /// designed to prevent peers from spamming us with requests and wasting + /// our resources. The default is 3. pub fn gossip_retransimission(&self) -> u32 { self.gossip_retransimission } - /// The maximum number of new peers to graft to during opportunistic grafting. The default is 2. + /// The maximum number of new peers to graft to during opportunistic + /// grafting. The default is 2. pub fn opportunistic_graft_peers(&self) -> usize { self.opportunistic_graft_peers } - /// The maximum number of messages we will process in a given RPC. If this is unset, there is - /// no limit. The default is None. + /// The maximum number of messages we will process in a given RPC. If this + /// is unset, there is no limit. The default is None. pub fn max_messages_per_rpc(&self) -> Option { self.max_messages_per_rpc } /// The maximum number of messages to include in an IHAVE message. - /// Also controls the maximum number of IHAVE ids we will accept and request with IWANT from a - /// peer within a heartbeat, to protect from IHAVE floods. You should adjust this value from the - /// default if your system is pushing more than 5000 messages in GossipSubHistoryGossip - /// heartbeats; with the defaults this is 1666 messages/s. The default is 5000. + /// Also controls the maximum number of IHAVE ids we will accept and request + /// with IWANT from a peer within a heartbeat, to protect from IHAVE + /// floods. You should adjust this value from the default if your system + /// is pushing more than 5000 messages in GossipSubHistoryGossip + /// heartbeats; with the defaults this is 1666 messages/s. The default is + /// 5000. pub fn max_ihave_length(&self) -> usize { self.max_ihave_length } - /// GossipSubMaxIHaveMessages is the maximum number of IHAVE messages to accept from a peer - /// within a heartbeat. 
+ /// GossipSubMaxIHaveMessages is the maximum number of IHAVE messages to + /// accept from a peer within a heartbeat. pub fn max_ihave_messages(&self) -> usize { self.max_ihave_messages } - /// Time to wait for a message requested through IWANT following an IHAVE advertisement. - /// If the message is not received within this window, a broken promise is declared and - /// the router may apply behavioural penalties. The default is 3 seconds. + /// Time to wait for a message requested through IWANT following an IHAVE + /// advertisement. If the message is not received within this window, a + /// broken promise is declared and the router may apply behavioural + /// penalties. The default is 3 seconds. pub fn iwant_followup_time(&self) -> Duration { self.iwant_followup_time } @@ -355,19 +383,20 @@ impl Config { self.published_message_ids_cache_time } - /// The max number of messages a `ConnectionHandler` can buffer. The default is 5000. + /// The max number of messages a `ConnectionHandler` can buffer. The default + /// is 5000. pub fn connection_handler_queue_len(&self) -> usize { self.connection_handler_queue_len } - /// The duration a message to be published can wait to be sent before it is abandoned. The - /// default is 5 seconds. + /// The duration a message to be published can wait to be sent before it is + /// abandoned. The default is 5 seconds. pub fn publish_queue_duration(&self) -> Duration { self.connection_handler_publish_duration } - /// The duration a message to be forwarded can wait to be sent before it is abandoned. The - /// default is 1s. + /// The duration a message to be forwarded can wait to be sent before it is + /// abandoned. The default is 1s. pub fn forward_queue_duration(&self) -> Duration { self.connection_handler_forward_duration } @@ -423,7 +452,8 @@ impl Default for ConfigBuilder { }), allow_self_origin: false, do_px: false, - prune_peers: 0, // NOTE: Increasing this currently has little effect until Signed records are implemented. 
+ prune_peers: 0, /* NOTE: Increasing this currently has little effect until Signed + * records are implemented. */ prune_backoff: Duration::from_secs(60), unsubscribe_backoff: Duration::from_secs(10), backoff_slack: 1, @@ -457,7 +487,8 @@ impl From for ConfigBuilder { } impl ConfigBuilder { - /// The protocol id prefix to negotiate this protocol (default is `/meshsub/1.1.0` and `/meshsub/1.0.0`). + /// The protocol id prefix to negotiate this protocol (default is + /// `/meshsub/1.1.0` and `/meshsub/1.0.0`). pub fn protocol_id_prefix( &mut self, protocol_id_prefix: impl Into>, @@ -488,7 +519,8 @@ impl ConfigBuilder { self } - /// The full protocol id to negotiate this protocol (does not append `/1.0.0` or `/1.1.0`). + /// The full protocol id to negotiate this protocol (does not append + /// `/1.0.0` or `/1.1.0`). pub fn protocol_id( &mut self, protocol_id: impl Into>, @@ -526,36 +558,40 @@ impl ConfigBuilder { self } - /// Target number of peers for the mesh network (D in the spec, default is 6). + /// Target number of peers for the mesh network (D in the spec, default is + /// 6). pub fn mesh_n(&mut self, mesh_n: usize) -> &mut Self { self.config.mesh_n = mesh_n; self } - /// Minimum number of peers in mesh network before adding more (D_lo in the spec, default is 4). + /// Minimum number of peers in mesh network before adding more (D_lo in the + /// spec, default is 4). pub fn mesh_n_low(&mut self, mesh_n_low: usize) -> &mut Self { self.config.mesh_n_low = mesh_n_low; self } - /// Maximum number of peers in mesh network before removing some (D_high in the spec, default - /// is 12). + /// Maximum number of peers in mesh network before removing some (D_high in + /// the spec, default is 12). pub fn mesh_n_high(&mut self, mesh_n_high: usize) -> &mut Self { self.config.mesh_n_high = mesh_n_high; self } - /// Affects how peers are selected when pruning a mesh due to over subscription. 
+ /// Affects how peers are selected when pruning a mesh due to over + /// subscription. /// - /// At least [`Self::retain_scores`] of the retained peers will be high-scoring, while the remainder are - /// chosen randomly (D_score in the spec, default is 4). + /// At least [`Self::retain_scores`] of the retained peers will be + /// high-scoring, while the remainder are chosen randomly (D_score in + /// the spec, default is 4). pub fn retain_scores(&mut self, retain_scores: usize) -> &mut Self { self.config.retain_scores = retain_scores; self } - /// Minimum number of peers to emit gossip to during a heartbeat (D_lazy in the spec, - /// default is 6). + /// Minimum number of peers to emit gossip to during a heartbeat (D_lazy in + /// the spec, default is 6). pub fn gossip_lazy(&mut self, gossip_lazy: usize) -> &mut Self { self.config.gossip_lazy = gossip_lazy; self @@ -563,8 +599,9 @@ impl ConfigBuilder { /// Affects how many peers we will emit gossip to at each heartbeat. /// - /// We will send gossip to `gossip_factor * (total number of non-mesh peers)`, or - /// `gossip_lazy`, whichever is greater. The default is 0.25. + /// We will send gossip to `gossip_factor * (total number of non-mesh + /// peers)`, or `gossip_lazy`, whichever is greater. The default is + /// 0.25. pub fn gossip_factor(&mut self, gossip_factor: f64) -> &mut Self { self.config.gossip_factor = gossip_factor; self @@ -582,8 +619,8 @@ impl ConfigBuilder { self } - /// The number of heartbeat ticks until we recheck the connection to explicit peers and - /// reconnecting if necessary (default 300). + /// The number of heartbeat ticks until we recheck the connection to + /// explicit peers and reconnecting if necessary (default 300). 
pub fn check_explicit_peers_ticks(&mut self, check_explicit_peers_ticks: u64) -> &mut Self { self.config.check_explicit_peers_ticks = check_explicit_peers_ticks; self } @@ -601,36 +638,40 @@ impl ConfigBuilder { self } - /// Duplicates are prevented by storing message id's of known messages in an LRU time cache. - /// This settings sets the time period that messages are stored in the cache. Duplicates can be - /// received if duplicate messages are sent at a time greater than this setting apart. The + /// Duplicates are prevented by storing message id's of known messages in an + /// LRU time cache. This setting sets the time period that messages are + /// stored in the cache. Duplicates can be received if duplicate + /// messages are sent at a time greater than this setting apart. The /// default is 1 minute. pub fn duplicate_cache_time(&mut self, cache_size: Duration) -> &mut Self { self.config.duplicate_cache_time = cache_size; self } - /// When set, prevents automatic forwarding of all received messages. This setting - /// allows a user to validate the messages before propagating them to their peers. If set, - /// the user must manually call [`crate::Behaviour::report_message_validation_result()`] on the + /// When set, prevents automatic forwarding of all received messages. This + /// setting allows a user to validate the messages before propagating + /// them to their peers. If set, the user must manually call + /// [`crate::Behaviour::report_message_validation_result()`] on the /// behaviour to forward a message once validated. pub fn validate_messages(&mut self) -> &mut Self { self.config.validate_messages = true; self } - /// Determines the level of validation used when receiving messages. See [`ValidationMode`] - /// for the available types. The default is ValidationMode::Strict. + /// Determines the level of validation used when receiving messages. See + /// [`ValidationMode`] for the available types. The default is + /// ValidationMode::Strict. 
pub fn validation_mode(&mut self, validation_mode: ValidationMode) -> &mut Self { self.config.protocol.validation_mode = validation_mode; self } - /// A user-defined function allowing the user to specify the message id of a gossipsub message. - /// The default value is to concatenate the source peer id with a sequence number. Setting this - /// parameter allows the user to address packets arbitrarily. One example is content based - /// addressing, where this function may be set to `hash(message)`. This would prevent messages - /// of the same content from being duplicated. + /// A user-defined function allowing the user to specify the message id of a + /// gossipsub message. The default value is to concatenate the source + /// peer id with a sequence number. Setting this parameter allows the + /// user to address packets arbitrarily. One example is content based + /// addressing, where this function may be set to `hash(message)`. This + /// would prevent messages of the same content from being duplicated. /// /// The function takes a [`Message`] as input and outputs a String to be /// interpreted as the message id. @@ -642,8 +683,8 @@ impl ConfigBuilder { self } - /// Enables Peer eXchange. This should be enabled in bootstrappers and other well - /// connected/trusted nodes. The default is false. + /// Enables Peer eXchange. This should be enabled in bootstrappers and other + /// well connected/trusted nodes. The default is false. /// /// Note: Peer exchange is not implemented today, see /// . @@ -654,9 +695,10 @@ impl ConfigBuilder { /// Controls the number of peers to include in prune Peer eXchange. /// - /// When we prune a peer that's eligible for PX (has a good score, etc), we will try to - /// send them signed peer records for up to [`Self::prune_peers] other peers that we - /// know of. 
It is recommended that this value is larger than [`Self::mesh_n_high`] so that the + /// When we prune a peer that's eligible for PX (has a good score, etc), we + /// will try to send them signed peer records for up to + /// [`Self::prune_peers`] other peers that we know of. It is recommended + /// that this value is larger than [`Self::mesh_n_high`] so that the /// pruned peer can reliably form a full mesh. The default is 16. pub fn prune_peers(&mut self, prune_peers: usize) -> &mut Self { self.config.prune_peers = prune_peers; @@ -664,11 +706,13 @@ impl ConfigBuilder { } /// Controls the backoff time for pruned peers. This is how long - /// a peer must wait before attempting to graft into our mesh again after being pruned. - /// When pruning a peer, we send them our value of [`Self::prune_backoff`] so they know - /// the minimum time to wait. Peers running older versions may not send a backoff time, - /// so if we receive a prune message without one, we will wait at least [`Self::prune_backoff`] - /// before attempting to re-graft. The default is one minute. + /// a peer must wait before attempting to graft into our mesh again after + /// being pruned. When pruning a peer, we send them our value of + /// [`Self::prune_backoff`] so they know the minimum time to wait. Peers + /// running older versions may not send a backoff time, so if we receive + /// a prune message without one, we will wait at least + /// [`Self::prune_backoff`] before attempting to re-graft. The default + /// is one minute. pub fn prune_backoff(&mut self, prune_backoff: Duration) -> &mut Self { self.config.prune_backoff = prune_backoff; self @@ -676,107 +720,116 @@ impl ConfigBuilder { /// Controls the backoff time when unsubscribing from a topic. /// - /// This is how long to wait before resubscribing to the topic. A short backoff period in case - /// of an unsubscribe event allows reaching a healthy mesh in a more timely manner. The default - /// is 10 seconds. 
+ /// This is how long to wait before resubscribing to the topic. A short + /// backoff period in case of an unsubscribe event allows reaching a + /// healthy mesh in a more timely manner. The default is 10 seconds. pub fn unsubscribe_backoff(&mut self, unsubscribe_backoff: u64) -> &mut Self { self.config.unsubscribe_backoff = Duration::from_secs(unsubscribe_backoff); self } - /// Number of heartbeat slots considered as slack for backoffs. This guarantees that we wait - /// at least backoff_slack heartbeats after a backoff is over before we try to graft. This - /// solves problems occurring through high latencies. In particular if - /// `backoff_slack * heartbeat_interval` is longer than any latencies between processing - /// prunes on our side and processing prunes on the receiving side this guarantees that we - /// get not punished for too early grafting. The default is 1. + /// Number of heartbeat slots considered as slack for backoffs. This + /// guarantees that we wait at least backoff_slack heartbeats after a + /// backoff is over before we try to graft. This solves problems + /// occurring through high latencies. In particular if `backoff_slack * + /// heartbeat_interval` is longer than any latencies between processing + /// prunes on our side and processing prunes on the receiving side this + /// guarantees that we are not punished for grafting too early. The + /// default is 1. pub fn backoff_slack(&mut self, backoff_slack: u32) -> &mut Self { self.config.backoff_slack = backoff_slack; self } - /// Whether to do flood publishing or not. If enabled newly created messages will always be - /// sent to all peers that are subscribed to the topic and have a good enough score. - /// The default is true. + /// Whether to do flood publishing or not. If enabled newly created messages + /// will always be sent to all peers that are subscribed to the topic + /// and have a good enough score. The default is true. 
pub fn flood_publish(&mut self, flood_publish: bool) -> &mut Self { self.config.flood_publish = flood_publish; self } - /// If a GRAFT comes before `graft_flood_threshold` has elapsed since the last PRUNE, - /// then there is an extra score penalty applied to the peer through P7. + /// If a GRAFT comes before `graft_flood_threshold` has elapsed since the + /// last PRUNE, then there is an extra score penalty applied to the peer + /// through P7. pub fn graft_flood_threshold(&mut self, graft_flood_threshold: Duration) -> &mut Self { self.config.graft_flood_threshold = graft_flood_threshold; self } - /// Minimum number of outbound peers in the mesh network before adding more (D_out in the spec). - /// This value must be smaller or equal than `mesh_n / 2` and smaller than `mesh_n_low`. - /// The default is 2. + /// Minimum number of outbound peers in the mesh network before adding more + /// (D_out in the spec). This value must be smaller or equal than + /// `mesh_n / 2` and smaller than `mesh_n_low`. The default is 2. pub fn mesh_outbound_min(&mut self, mesh_outbound_min: usize) -> &mut Self { self.config.mesh_outbound_min = mesh_outbound_min; self } - /// Number of heartbeat ticks that specify the interval in which opportunistic grafting is - /// applied. Every `opportunistic_graft_ticks` we will attempt to select some high-scoring mesh - /// peers to replace lower-scoring ones, if the median score of our mesh peers falls below a - /// threshold (see ). + /// Number of heartbeat ticks that specify the interval in which + /// opportunistic grafting is applied. Every `opportunistic_graft_ticks` + /// we will attempt to select some high-scoring mesh peers to replace + /// lower-scoring ones, if the median score of our mesh peers falls below a threshold (see ). /// The default is 60. 
pub fn opportunistic_graft_ticks(&mut self, opportunistic_graft_ticks: u64) -> &mut Self { self.config.opportunistic_graft_ticks = opportunistic_graft_ticks; self } - /// Controls how many times we will allow a peer to request the same message id through IWANT - /// gossip before we start ignoring them. This is designed to prevent peers from spamming us - /// with requests and wasting our resources. + /// Controls how many times we will allow a peer to request the same message + /// id through IWANT gossip before we start ignoring them. This is + /// designed to prevent peers from spamming us with requests and wasting + /// our resources. pub fn gossip_retransimission(&mut self, gossip_retransimission: u32) -> &mut Self { self.config.gossip_retransimission = gossip_retransimission; self } - /// The maximum number of new peers to graft to during opportunistic grafting. The default is 2. + /// The maximum number of new peers to graft to during opportunistic + /// grafting. The default is 2. pub fn opportunistic_graft_peers(&mut self, opportunistic_graft_peers: usize) -> &mut Self { self.config.opportunistic_graft_peers = opportunistic_graft_peers; self } - /// The maximum number of messages we will process in a given RPC. If this is unset, there is - /// no limit. The default is None. + /// The maximum number of messages we will process in a given RPC. If this + /// is unset, there is no limit. The default is None. pub fn max_messages_per_rpc(&mut self, max: Option) -> &mut Self { self.config.max_messages_per_rpc = max; self } /// The maximum number of messages to include in an IHAVE message. - /// Also controls the maximum number of IHAVE ids we will accept and request with IWANT from a - /// peer within a heartbeat, to protect from IHAVE floods. You should adjust this value from the - /// default if your system is pushing more than 5000 messages in GossipSubHistoryGossip - /// heartbeats; with the defaults this is 1666 messages/s. The default is 5000. 
+ /// Also controls the maximum number of IHAVE ids we will accept and request + /// with IWANT from a peer within a heartbeat, to protect from IHAVE + /// floods. You should adjust this value from the default if your system + /// is pushing more than 5000 messages in GossipSubHistoryGossip + /// heartbeats; with the defaults this is 1666 messages/s. The default is + /// 5000. pub fn max_ihave_length(&mut self, max_ihave_length: usize) -> &mut Self { self.config.max_ihave_length = max_ihave_length; self } - /// GossipSubMaxIHaveMessages is the maximum number of IHAVE messages to accept from a peer - /// within a heartbeat. + /// GossipSubMaxIHaveMessages is the maximum number of IHAVE messages to + /// accept from a peer within a heartbeat. pub fn max_ihave_messages(&mut self, max_ihave_messages: usize) -> &mut Self { self.config.max_ihave_messages = max_ihave_messages; self } - /// By default, gossipsub will reject messages that are sent to us that has the same message - /// source as we have specified locally. Enabling this, allows these messages and prevents - /// penalizing the peer that sent us the message. Default is false. + /// By default, gossipsub will reject messages that are sent to us that have + /// the same message source as we have specified locally. Enabling this + /// allows these messages and prevents penalizing the peer that sent us + /// the message. Default is false. pub fn allow_self_origin(&mut self, allow_self_origin: bool) -> &mut Self { self.config.allow_self_origin = allow_self_origin; self } - /// Time to wait for a message requested through IWANT following an IHAVE advertisement. - /// If the message is not received within this window, a broken promise is declared and - /// the router may apply behavioural penalties. The default is 3 seconds. + /// Time to wait for a message requested through IWANT following an IHAVE + /// advertisement. 
If the message is not received within this window, a + /// broken promise is declared and the router may apply behavioural + /// penalties. The default is 3 seconds. pub fn iwant_followup_time(&mut self, iwant_followup_time: Duration) -> &mut Self { self.config.iwant_followup_time = iwant_followup_time; self @@ -806,27 +859,29 @@ impl ConfigBuilder { self } - /// The max number of messages a `ConnectionHandler` can buffer. The default is 5000. + /// The max number of messages a `ConnectionHandler` can buffer. The default + /// is 5000. pub fn connection_handler_queue_len(&mut self, len: usize) -> &mut Self { self.config.connection_handler_queue_len = len; self } - /// The duration a message to be published can wait to be sent before it is abandoned. The - /// default is 5 seconds. + /// The duration a message to be published can wait to be sent before it is + /// abandoned. The default is 5 seconds. pub fn publish_queue_duration(&mut self, duration: Duration) -> &mut Self { self.config.connection_handler_publish_duration = duration; self } - /// The duration a message to be forwarded can wait to be sent before it is abandoned. The - /// default is 1s. + /// The duration a message to be forwarded can wait to be sent before it is + /// abandoned. The default is 1s. pub fn forward_queue_duration(&mut self, duration: Duration) -> &mut Self { self.config.connection_handler_forward_duration = duration; self } - /// Constructs a [`Config`] from the given configuration and validates the settings. + /// Constructs a [`Config`] from the given configuration and validates the + /// settings. 
pub fn build(&self) -> Result { // check all constraints on config @@ -902,12 +957,15 @@ impl std::fmt::Debug for Config { #[cfg(test)] mod test { - use super::*; - use crate::topic::IdentityHash; - use crate::Topic; + use std::{ + collections::hash_map::DefaultHasher, + hash::{Hash, Hasher}, + }; + use libp2p_core::UpgradeInfo; - use std::collections::hash_map::DefaultHasher; - use std::hash::{Hash, Hasher}; + + use super::*; + use crate::{topic::IdentityHash, Topic}; #[test] fn create_config_with_message_id_as_plain_function() { diff --git a/protocols/gossipsub/src/error.rs b/protocols/gossipsub/src/error.rs index 047d50f2338..f70a169c317 100644 --- a/protocols/gossipsub/src/error.rs +++ b/protocols/gossipsub/src/error.rs @@ -31,13 +31,13 @@ pub enum PublishError { SigningError(SigningError), /// There were no peers to send this message to. InsufficientPeers, - /// The overall message was too large. This could be due to excessive topics or an excessive - /// message size. + /// The overall message was too large. This could be due to excessive topics + /// or an excessive message size. MessageTooLarge, /// The compression algorithm failed. TransformFailed(std::io::Error), - /// Messages could not be sent because the queues for all peers were full. The usize represents the - /// number of peers that were attempted. + /// Messages could not be sent because the queues for all peers were full. + /// The usize represents the number of peers that were attempted. AllQueuesFull(usize), } @@ -131,7 +131,8 @@ pub enum ConfigBuilderError { MaxTransmissionSizeTooSmall, /// History length less than history gossip length. 
HistoryLengthTooSmall, - /// The ineauality doesn't hold mesh_outbound_min <= mesh_n_low <= mesh_n <= mesh_n_high + /// The inequality doesn't hold mesh_outbound_min <= mesh_n_low <= mesh_n <= + /// mesh_n_high MeshParametersInvalid, /// The inequality doesn't hold mesh_outbound_min <= self.config.mesh_n / 2 MeshOutboundInvalid, @@ -149,9 +150,18 @@ impl std::fmt::Display for ConfigBuilderError { Self::MaxTransmissionSizeTooSmall => { write!(f, "Maximum transmission size is too small") } - Self::HistoryLengthTooSmall => write!(f, "History length less than history gossip length"), - Self::MeshParametersInvalid => write!(f, "The ineauality doesn't hold mesh_outbound_min <= mesh_n_low <= mesh_n <= mesh_n_high"), - Self::MeshOutboundInvalid => write!(f, "The inequality doesn't hold mesh_outbound_min <= self.config.mesh_n / 2"), + Self::HistoryLengthTooSmall => { + write!(f, "History length less than history gossip length") + } + Self::MeshParametersInvalid => write!( + f, + "The ineauality doesn't hold mesh_outbound_min <= mesh_n_low <= mesh_n <= \ + mesh_n_high" + ), + Self::MeshOutboundInvalid => write!( + f, + "The inequality doesn't hold mesh_outbound_min <= self.config.mesh_n / 2" + ), Self::UnsubscribeBackoffIsZero => write!(f, "unsubscribe_backoff is zero"), Self::InvalidProtocol => write!(f, "Invalid protocol"), } diff --git a/protocols/gossipsub/src/gossip_promises.rs b/protocols/gossipsub/src/gossip_promises.rs index bdf58b74fc2..8dabcc2214f 100644 --- a/protocols/gossipsub/src/gossip_promises.rs +++ b/protocols/gossipsub/src/gossip_promises.rs @@ -18,20 +18,21 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::peer_score::RejectReason; -use crate::MessageId; -use crate::ValidationError; -use libp2p_identity::PeerId; use std::collections::HashMap; + +use libp2p_identity::PeerId; use web_time::Instant; +use crate::{peer_score::RejectReason, MessageId, ValidationError}; + /// Tracks recently sent `IWANT` messages and checks if peers respond to them. #[derive(Default)] pub(crate) struct GossipPromises { - /// Stores for each tracked message id and peer the instant when this promise expires. + /// Stores for each tracked message id and peer the instant when this + /// promise expires. /// - /// If the peer didn't respond until then we consider the promise as broken and penalize the - /// peer. + /// If the peer didn't respond until then we consider the promise as broken + /// and penalize the peer. promises: HashMap>, } @@ -41,10 +42,12 @@ impl GossipPromises { self.promises.contains_key(message) } - /// Track a promise to deliver a message from a list of [`MessageId`]s we are requesting. + /// Track a promise to deliver a message from a list of [`MessageId`]s we + /// are requesting. pub(crate) fn add_promise(&mut self, peer: PeerId, messages: &[MessageId], expires: Instant) { for message_id in messages { - // If a promise for this message id and peer already exists we don't update the expiry! + // If a promise for this message id and peer already exists we don't update the + // expiry! self.promises .entry(message_id.clone()) .or_default() @@ -59,10 +62,10 @@ impl GossipPromises { } pub(crate) fn reject_message(&mut self, message_id: &MessageId, reason: &RejectReason) { - // A message got rejected, so we can stop tracking promises and let the score penalty apply - // from invalid message delivery. - // We do take exception and apply promise penalty regardless in the following cases, where - // the peer delivered an obviously invalid message. 
+ // A message got rejected, so we can stop tracking promises and let the score + // penalty apply from invalid message delivery. + // We do take exception and apply promise penalty regardless in the following + // cases, where the peer delivered an obviously invalid message. match reason { RejectReason::ValidationError(ValidationError::InvalidSignature) => (), RejectReason::SelfOrigin => (), @@ -72,10 +75,10 @@ impl GossipPromises { }; } - /// Returns the number of broken promises for each peer who didn't follow up on an IWANT - /// request. - /// This should be called not too often relative to the expire times, since it iterates over - /// the whole stored data. + /// Returns the number of broken promises for each peer who didn't follow up + /// on an IWANT request. + /// This should be called not too often relative to the expire times, since + /// it iterates over the whole stored data. pub(crate) fn get_broken_promises(&mut self) -> HashMap { let now = Instant::now(); let mut result = HashMap::new(); diff --git a/protocols/gossipsub/src/handler.rs b/protocols/gossipsub/src/handler.rs index 5f9669c02c2..88a0c18edef 100644 --- a/protocols/gossipsub/src/handler.rs +++ b/protocols/gossipsub/src/handler.rs @@ -18,44 +18,56 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::protocol::{GossipsubCodec, ProtocolConfig}; -use crate::rpc::Receiver; -use crate::rpc_proto::proto; -use crate::types::{PeerKind, RawMessage, Rpc, RpcOut}; -use crate::ValidationError; -use asynchronous_codec::Framed; -use futures::future::Either; -use futures::prelude::*; -use futures::StreamExt; -use libp2p_core::upgrade::DeniedUpgrade; -use libp2p_swarm::handler::{ - ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, DialUpgradeError, - FullyNegotiatedInbound, FullyNegotiatedOutbound, StreamUpgradeError, SubstreamProtocol, -}; -use libp2p_swarm::Stream; use std::{ pin::Pin, task::{Context, Poll}, }; + +use asynchronous_codec::Framed; +use futures::{future::Either, prelude::*, StreamExt}; +use libp2p_core::upgrade::DeniedUpgrade; +use libp2p_swarm::{ + handler::{ + ConnectionEvent, + ConnectionHandler, + ConnectionHandlerEvent, + DialUpgradeError, + FullyNegotiatedInbound, + FullyNegotiatedOutbound, + StreamUpgradeError, + SubstreamProtocol, + }, + Stream, +}; use web_time::Instant; -/// The event emitted by the Handler. This informs the behaviour of various events created -/// by the handler. +use crate::{ + protocol::{GossipsubCodec, ProtocolConfig}, + rpc::Receiver, + rpc_proto::proto, + types::{PeerKind, RawMessage, Rpc, RpcOut}, + ValidationError, +}; + +/// The event emitted by the Handler. This informs the behaviour of various +/// events created by the handler. #[derive(Debug)] pub enum HandlerEvent { - /// A GossipsubRPC message has been received. This also contains a list of invalid messages (if - /// any) that were received. + /// A GossipsubRPC message has been received. This also contains a list of + /// invalid messages (if any) that were received. Message { /// The GossipsubRPC message excluding any invalid messages. rpc: Rpc, - /// Any invalid messages that were received in the RPC, along with the associated - /// validation error. 
+ /// Any invalid messages that were received in the RPC, along with the + /// associated validation error. invalid_messages: Vec<(RawMessage, ValidationError)>, }, - /// An inbound or outbound substream has been established with the peer and this informs over - /// which protocol. This message only occurs once per connection. + /// An inbound or outbound substream has been established with the peer and + /// this informs over which protocol. This message only occurs once per + /// connection. PeerKind(PeerKind), - /// A message to be published was dropped because it could not be sent in time. + /// A message to be published was dropped because it could not be sent in + /// time. MessageDropped(RpcOut), } @@ -71,10 +83,10 @@ pub enum HandlerIn { /// The maximum number of inbound or outbound substreams attempts we allow. /// -/// Gossipsub is supposed to have a single long-lived inbound and outbound substream. On failure we -/// attempt to recreate these. This imposes an upper bound of new substreams before we consider the -/// connection faulty and disable the handler. This also prevents against potential substream -/// creation loops. +/// Gossipsub is supposed to have a single long-lived inbound and outbound +/// substream. On failure we attempt to recreate these. This imposes an upper +/// bound of new substreams before we consider the connection faulty and disable +/// the handler. This also prevents against potential substream creation loops. const MAX_SUBSTREAM_ATTEMPTS: usize = 5; #[allow(clippy::large_enum_variant)] @@ -97,8 +109,8 @@ pub struct EnabledHandler { /// Queue of values that we want to send to the remote send_queue: Receiver, - /// Flag indicating that an outbound substream is being established to prevent duplicate - /// requests. + /// Flag indicating that an outbound substream is being established to + /// prevent duplicate requests. outbound_substream_establishing: bool, /// The number of outbound substreams we have requested. 
@@ -111,33 +123,34 @@ pub struct EnabledHandler { peer_kind: Option, /// Keeps track on whether we have sent the peer kind to the behaviour. - // // NOTE: Use this flag rather than checking the substream count each poll. peer_kind_sent: bool, last_io_activity: Instant, - /// Keeps track of whether this connection is for a peer in the mesh. This is used to make - /// decisions about the keep alive state for this connection. + /// Keeps track of whether this connection is for a peer in the mesh. This + /// is used to make decisions about the keep alive state for this + /// connection. in_mesh: bool, } pub enum DisabledHandler { - /// If the peer doesn't support the gossipsub protocol we do not immediately disconnect. - /// Rather, we disable the handler and prevent any incoming or outgoing substreams from being - /// established. + /// If the peer doesn't support the gossipsub protocol we do not immediately + /// disconnect. Rather, we disable the handler and prevent any incoming + /// or outgoing substreams from being established. ProtocolUnsupported { /// Keeps track on whether we have sent the peer kind to the behaviour. peer_kind_sent: bool, }, - /// The maximum number of inbound or outbound substream attempts have happened and thereby the - /// handler has been disabled. + /// The maximum number of inbound or outbound substream attempts have + /// happened and thereby the handler has been disabled. MaxSubstreamAttempts, } /// State of the inbound substream, opened either by us or by the remote. enum InboundSubstreamState { - /// Waiting for a message from the remote. The idle state for an inbound substream. + /// Waiting for a message from the remote. The idle state for an inbound + /// substream. WaitingInput(Framed), /// The substream is being closed. Closing(Framed), @@ -147,7 +160,8 @@ enum InboundSubstreamState { /// State of the outbound substream, opened either by us or by the remote. 
enum OutboundSubstreamState { - /// Waiting for the user to send a message. The idle state for an outbound substream. + /// Waiting for the user to send a message. The idle state for an outbound + /// substream. WaitingOutput(Framed), /// Waiting to send a message to the remote. PendingSend(Framed, proto::RPC), diff --git a/protocols/gossipsub/src/lib.rs b/protocols/gossipsub/src/lib.rs index f6a51da4a51..c7f9293b0ca 100644 --- a/protocols/gossipsub/src/lib.rs +++ b/protocols/gossipsub/src/lib.rs @@ -20,8 +20,8 @@ //! Implementation of the [Gossipsub](https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/README.md) protocol. //! -//! Gossipsub is a P2P pubsub (publish/subscription) routing layer designed to extend upon -//! floodsub and meshsub routing protocols. +//! Gossipsub is a P2P pubsub (publish/subscription) routing layer designed to +//! extend upon floodsub and meshsub routing protocols. //! //! # Overview //! @@ -29,37 +29,43 @@ //! () provide an outline for the //! routing protocol. They should be consulted for further detail.* //! -//! Gossipsub is a blend of meshsub for data and randomsub for mesh metadata. It provides bounded -//! degree and amplification factor with the meshsub construction and augments it using gossip -//! propagation of metadata with the randomsub technique. +//! Gossipsub is a blend of meshsub for data and randomsub for mesh metadata. +//! It provides bounded degree and amplification factor with the meshsub +//! construction and augments it using gossip propagation of metadata with the +//! randomsub technique. //! -//! The router maintains an overlay mesh network of peers on which to efficiently send messages and -//! metadata. Peers use control messages to broadcast and request known messages and -//! subscribe/unsubscribe from topics in the mesh network. +//! The router maintains an overlay mesh network of peers on which to +//! efficiently send messages and metadata. Peers use control messages to +//! 
broadcast and request known messages and subscribe/unsubscribe from topics +//! in the mesh network. //! //! # Important Discrepancies //! -//! This section outlines the current implementation's potential discrepancies from that of other -//! implementations, due to undefined elements in the current specification. -//! -//! - **Topics** - In gossipsub, topics configurable by the `hash_topics` configuration parameter. -//! Topics are of type [`TopicHash`]. The current go implementation uses raw utf-8 strings, and this -//! is default configuration in rust-libp2p. Topics can be hashed (SHA256 hashed then base64 -//! encoded) by setting the `hash_topics` configuration parameter to true. -//! -//! - **Sequence Numbers** - A message on the gossipsub network is identified by the source -//! [`PeerId`](libp2p_identity::PeerId) and a nonce (sequence number) of the message. The sequence numbers in -//! this implementation are sent as raw bytes across the wire. They are 64-bit big-endian unsigned -//! integers. When messages are signed, they are monotonically increasing integers starting from a -//! random value and wrapping around u64::MAX. When messages are unsigned, they are chosen at random.
-//! NOTE: These numbers are sequential in the current go implementation. +//! This section outlines the current implementation's potential discrepancies +//! from that of other implementations, due to undefined elements in the current +//! specification. +//! +//! - **Topics** - In gossipsub, topics are configurable by the `hash_topics` +//! configuration parameter. Topics are of type [`TopicHash`]. The current go +//! implementation uses raw utf-8 strings, and this is default configuration +//! in rust-libp2p. Topics can be hashed (SHA256 hashed then base64 encoded) +//! by setting the `hash_topics` configuration parameter to true. +//! +//! - **Sequence Numbers** - A message on the gossipsub network is identified by +//! 
the source [`PeerId`](libp2p_identity::PeerId) and a nonce (sequence +//! number) of the message. The sequence numbers in this implementation are +//! sent as raw bytes across the wire. They are 64-bit big-endian unsigned +//! integers. When messages are signed, they are monotonically increasing +//! integers starting from a random value and wrapping around u64::MAX. When +//! messages are unsigned, they are chosen at random. NOTE: These numbers are +//! sequential in the current go implementation. //! //! # Peer Discovery //! -//! Gossipsub does not provide peer discovery by itself. Peer discovery is the process by which -//! peers in a p2p network exchange information about each other among other reasons to become resistant -//! against the failure or replacement of the -//! [boot nodes](https://docs.libp2p.io/reference/glossary/#boot-node) of the network. +//! Gossipsub does not provide peer discovery by itself. Peer discovery is the +//! process by which peers in a p2p network exchange information about each +//! other among other reasons to become resistant against the failure or +//! replacement of the [boot nodes](https://docs.libp2p.io/reference/glossary/#boot-node) of the network. //! //! Peer //! discovery can e.g. be implemented with the help of the [Kademlia](https://github.com/libp2p/specs/blob/master/kad-dht/README.md) protocol @@ -70,8 +76,8 @@ //! //! ## Gossipsub Config //! -//! The [`Config`] struct specifies various network performance/tuning configuration -//! parameters. Specifically it specifies: +//! The [`Config`] struct specifies various network performance/tuning +//! configuration parameters. Specifically it specifies: //! //! [`Config`]: struct.Config.html //! @@ -81,8 +87,9 @@ //! //! ## Behaviour //! -//! The [`Behaviour`] struct implements the [`libp2p_swarm::NetworkBehaviour`] trait allowing it to -//! act as the routing behaviour in a [`libp2p_swarm::Swarm`]. This struct requires an instance of +//! 
The [`Behaviour`] struct implements the [`libp2p_swarm::NetworkBehaviour`] +//! trait allowing it to act as the routing behaviour in a +//! [`libp2p_swarm::Swarm`]. This struct requires an instance of //! [`PeerId`](libp2p_identity::PeerId) and [`Config`]. //! //! [`Behaviour`]: struct.Behaviour.html @@ -111,22 +118,31 @@ mod topic; mod transform; mod types; -pub use self::behaviour::{Behaviour, Event, MessageAuthenticity}; -pub use self::config::{Config, ConfigBuilder, ValidationMode, Version}; -pub use self::error::{ConfigBuilderError, PublishError, SubscriptionError, ValidationError}; -pub use self::metrics::Config as MetricsConfig; -pub use self::peer_score::{ - score_parameter_decay, score_parameter_decay_with_base, PeerScoreParams, PeerScoreThresholds, - TopicScoreParams, -}; -pub use self::subscription_filter::{ - AllowAllSubscriptionFilter, CallbackSubscriptionFilter, CombinedSubscriptionFilters, - MaxCountSubscriptionFilter, RegexSubscriptionFilter, TopicSubscriptionFilter, - WhitelistSubscriptionFilter, +pub use self::{ + behaviour::{Behaviour, Event, MessageAuthenticity}, + config::{Config, ConfigBuilder, ValidationMode, Version}, + error::{ConfigBuilderError, PublishError, SubscriptionError, ValidationError}, + metrics::Config as MetricsConfig, + peer_score::{ + score_parameter_decay, + score_parameter_decay_with_base, + PeerScoreParams, + PeerScoreThresholds, + TopicScoreParams, + }, + subscription_filter::{ + AllowAllSubscriptionFilter, + CallbackSubscriptionFilter, + CombinedSubscriptionFilters, + MaxCountSubscriptionFilter, + RegexSubscriptionFilter, + TopicSubscriptionFilter, + WhitelistSubscriptionFilter, + }, + topic::{Hasher, Topic, TopicHash}, + transform::{DataTransform, IdentityTransform}, + types::{FailedMessages, Message, MessageAcceptance, MessageId, RawMessage}, }; -pub use self::topic::{Hasher, Topic, TopicHash}; -pub use self::transform::{DataTransform, IdentityTransform}; -pub use self::types::{FailedMessages, Message, 
MessageAcceptance, MessageId, RawMessage}; #[deprecated(note = "Will be removed from the public API.")] pub type Rpc = self::types::Rpc; diff --git a/protocols/gossipsub/src/mcache.rs b/protocols/gossipsub/src/mcache.rs index aa65e3b7f1d..fb5459411a5 100644 --- a/protocols/gossipsub/src/mcache.rs +++ b/protocols/gossipsub/src/mcache.rs @@ -18,14 +18,17 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::topic::TopicHash; -use crate::types::{MessageId, RawMessage}; -use libp2p_identity::PeerId; -use std::collections::hash_map::Entry; -use std::fmt::Debug; use std::{ - collections::{HashMap, HashSet}, + collections::{hash_map::Entry, HashMap, HashSet}, fmt, + fmt::Debug, +}; + +use libp2p_identity::PeerId; + +use crate::{ + topic::TopicHash, + types::{MessageId, RawMessage}, }; /// CacheEntry stored in the history. @@ -39,12 +42,13 @@ pub(crate) struct CacheEntry { #[derive(Clone)] pub(crate) struct MessageCache { msgs: HashMap)>, - /// For every message and peer the number of times this peer asked for the message + /// For every message and peer the number of times this peer asked for the + /// message iwant_counts: HashMap>, history: Vec>, - /// The number of indices in the cache history used for gossiping. That means that a message - /// won't get gossiped anymore when shift got called `gossip` many times after inserting the - /// message in the cache. + /// The number of indices in the cache history used for gossiping. That + /// means that a message won't get gossiped anymore when shift got + /// called `gossip` many times after inserting the message in the cache. gossip: usize, } @@ -92,11 +96,12 @@ impl MessageCache { } } - /// Keeps track of peers we know have received the message to prevent forwarding to said peers. + /// Keeps track of peers we know have received the message to prevent + /// forwarding to said peers. 
pub(crate) fn observe_duplicate(&mut self, message_id: &MessageId, source: &PeerId) { if let Some((message, originating_peers)) = self.msgs.get_mut(message_id) { - // if the message is already validated, we don't need to store extra peers sending us - // duplicates as the message has already been forwarded + // if the message is already validated, we don't need to store extra peers + // sending us duplicates as the message has already been forwarded if message.validated { return; } @@ -111,8 +116,8 @@ impl MessageCache { self.msgs.get(message_id).map(|(message, _)| message) } - /// Increases the iwant count for the given message by one and returns the message together - /// with the iwant if the message exists. + /// Increases the iwant count for the given message by one and returns the + /// message together with the iwant if the message exists. pub(crate) fn get_with_iwant_counts( &mut self, message_id: &MessageId, @@ -137,16 +142,17 @@ impl MessageCache { } /// Gets a message with [`MessageId`] and tags it as validated. - /// This function also returns the known peers that have sent us this message. This is used to - /// prevent us sending redundant messages to peers who have already propagated it. + /// This function also returns the known peers that have sent us this + /// message. This is used to prevent us sending redundant messages to + /// peers who have already propagated it. pub(crate) fn validate( &mut self, message_id: &MessageId, ) -> Option<(&RawMessage, HashSet)> { self.msgs.get_mut(message_id).map(|(message, known_peers)| { message.validated = true; - // Clear the known peers list (after a message is validated, it is forwarded and we no - // longer need to store the originating peers). + // Clear the known peers list (after a message is validated, it is forwarded and + // we no longer need to store the originating peers). 
let originating_peers = std::mem::take(known_peers); (&*message, originating_peers) }) @@ -210,8 +216,9 @@ impl MessageCache { &mut self, message_id: &MessageId, ) -> Option<(RawMessage, HashSet)> { - //We only remove the message from msgs and iwant_count and keep the message_id in the - // history vector. Zhe id in the history vector will simply be ignored on popping. + // We only remove the message from msgs and iwant_count and keep the message_id + // in the history vector. The id in the history vector will simply be + // ignored on popping. self.iwant_counts.remove(message_id); self.msgs.remove(message_id) diff --git a/protocols/gossipsub/src/metrics.rs b/protocols/gossipsub/src/metrics.rs index 40af1af2cac..0f237fd2aa2 100644 --- a/protocols/gossipsub/src/metrics.rs +++ b/protocols/gossipsub/src/metrics.rs @@ -18,36 +18,44 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -//! A set of metrics used to help track and diagnose the network behaviour of the gossipsub -//! protocol. +//! A set of metrics used to help track and diagnose the network behaviour of +//! the gossipsub protocol. 
use std::collections::HashMap; -use prometheus_client::encoding::{EncodeLabelSet, EncodeLabelValue}; -use prometheus_client::metrics::counter::Counter; -use prometheus_client::metrics::family::{Family, MetricConstructor}; -use prometheus_client::metrics::gauge::Gauge; -use prometheus_client::metrics::histogram::{linear_buckets, Histogram}; -use prometheus_client::registry::Registry; - -use crate::topic::TopicHash; -use crate::types::{MessageAcceptance, PeerKind}; +use prometheus_client::{ + encoding::{EncodeLabelSet, EncodeLabelValue}, + metrics::{ + counter::Counter, + family::{Family, MetricConstructor}, + gauge::Gauge, + histogram::{linear_buckets, Histogram}, + }, + registry::Registry, +}; + +use crate::{ + topic::TopicHash, + types::{MessageAcceptance, PeerKind}, +}; // Default value that limits for how many topics do we store metrics. const DEFAULT_MAX_TOPICS: usize = 300; -// Default value that limits how many topics for which there has never been a subscription do we -// store metrics. +// Default value that limits how many topics for which there has never been a +// subscription do we store metrics. const DEFAULT_MAX_NEVER_SUBSCRIBED_TOPICS: usize = 50; #[derive(Debug, Clone)] pub struct Config { - /// This provides an upper bound to the number of mesh topics we create metrics for. It - /// prevents unbounded labels being created in the metrics. + /// This provides an upper bound to the number of mesh topics we create + /// metrics for. It prevents unbounded labels being created in the + /// metrics. pub max_topics: usize, - /// Mesh topics are controlled by the user via subscriptions whereas non-mesh topics are - /// determined by users on the network. This limit permits a fixed amount of topics to allow, - /// in-addition to the mesh topics. + /// Mesh topics are controlled by the user via subscriptions whereas + /// non-mesh topics are determined by users on the network. 
This limit + /// permits a fixed amount of topics to allow, in-addition to the mesh + /// topics. pub max_never_subscribed_topics: usize, /// Buckets used for the score histograms. pub score_buckets: Vec, @@ -100,24 +108,26 @@ type EverSubscribed = bool; /// A collection of metrics used throughout the Gossipsub behaviour. pub(crate) struct Metrics { - /* Configuration parameters */ - /// Maximum number of topics for which we store metrics. This helps keep the metrics bounded. + // Configuration parameters + /// Maximum number of topics for which we store metrics. This helps keep the + /// metrics bounded. max_topics: usize, - /// Maximum number of topics for which we store metrics, where the topic in not one to which we - /// have subscribed at some point. This helps keep the metrics bounded, since these topics come - /// from received messages and not explicit application subscriptions. + /// Maximum number of topics for which we store metrics, where the topic is + /// not one to which we have subscribed at some point. This helps keep + /// the metrics bounded, since these topics come from received messages + /// and not explicit application subscriptions. max_never_subscribed_topics: usize, - /* Auxiliary variables */ + // Auxiliary variables /// Information needed to decide if a topic is allowed or not. topic_info: HashMap, - /* Metrics per known topic */ - /// Status of our subscription to this topic. This metric allows analyzing other topic metrics - /// filtered by our current subscription status. + // Metrics per known topic + /// Status of our subscription to this topic. This metric allows analyzing + /// other topic metrics filtered by our current subscription status. topic_subscription_status: Family, - /// Number of peers subscribed to each topic. This allows us to analyze a topic's behaviour - /// regardless of our subscription status. + /// Number of peers subscribed to each topic. 
This allows us to analyze a + /// topic's behaviour regardless of our subscription status. topic_peers_count: Family, /// The number of invalid messages received for a given topic. invalid_messages: Family, @@ -134,16 +144,17 @@ pub(crate) struct Metrics { /// The number of messages that timed out and could not be sent. timedout_messages_dropped: Family, - /* Metrics regarding mesh state */ - /// Number of peers in our mesh. This metric should be updated with the count of peers for a - /// topic in the mesh regardless of inclusion and churn events. + // Metrics regarding mesh state + /// Number of peers in our mesh. This metric should be updated with the + /// count of peers for a topic in the mesh regardless of inclusion and + /// churn events. mesh_peer_counts: Family, /// Number of times we include peers in a topic mesh for different reasons. mesh_peer_inclusion_events: Family, /// Number of times we remove peers in a topic mesh for different reasons. mesh_peer_churn_events: Family, - /* Metrics regarding messages sent/received */ + // Metrics regarding messages sent/received /// Number of gossip messages sent to each topic. topic_msg_sent_counts: Family, /// Bytes from gossip messages sent to each topic. @@ -151,34 +162,38 @@ pub(crate) struct Metrics { /// Number of gossipsub messages published to each topic. topic_msg_published: Family, - /// Number of gossipsub messages received on each topic (without filtering duplicates). + /// Number of gossipsub messages received on each topic (without filtering + /// duplicates). topic_msg_recv_counts_unfiltered: Family, - /// Number of gossipsub messages received on each topic (after filtering duplicates). + /// Number of gossipsub messages received on each topic (after filtering + /// duplicates). topic_msg_recv_counts: Family, /// Bytes received from gossip messages for each topic. 
topic_msg_recv_bytes: Family, - /* Metrics related to scoring */ + // Metrics related to scoring /// Histogram of the scores for each mesh topic. score_per_mesh: Family, /// A counter of the kind of penalties being applied to peers. scoring_penalties: Family, - /* General Metrics */ - /// Gossipsub supports floodsub, gossipsub v1.0 and gossipsub v1.1. Peers are classified based - /// on which protocol they support. This metric keeps track of the number of peers that are - /// connected of each type. + // General Metrics + /// Gossipsub supports floodsub, gossipsub v1.0 and gossipsub v1.1. Peers + /// are classified based on which protocol they support. This metric + /// keeps track of the number of peers that are connected of each type. peers_per_protocol: Family, /// The time it takes to complete one iteration of the heartbeat. heartbeat_duration: Histogram, - /* Performance metrics */ - /// When the user validates a message, it tries to re propagate it to its mesh peers. If the - /// message expires from the memcache before it can be validated, we count this a cache miss - /// and it is an indicator that the memcache size should be increased. + // Performance metrics + /// When the user validates a message, it tries to re propagate it to its + /// mesh peers. If the message expires from the memcache before it can + /// be validated, we count this a cache miss and it is an indicator that + /// the memcache size should be increased. memcache_misses: Counter, - /// The number of times we have decided that an IWANT control message is required for this - /// topic. A very high metric might indicate an underperforming network. + /// The number of times we have decided that an IWANT control message is + /// required for this topic. A very high metric might indicate an + /// underperforming network. topic_iwant_msgs: Family, /// The size of the priority queue. 
@@ -280,7 +295,8 @@ impl Metrics { let topic_msg_recv_counts = register_family!( "topic_msg_recv_counts", - "Number of gossip messages received on each topic (after duplicates have been filtered)" + "Number of gossip messages received on each topic (after duplicates have been \ + filtered)" ); let topic_msg_recv_bytes = register_family!( "topic_msg_recv_bytes", @@ -389,8 +405,8 @@ impl Metrics { } else if self.topic_info.len() < self.max_topics && self.non_subscription_topics_count() < self.max_never_subscribed_topics { - // This is a topic without an explicit subscription and we register it if we are within - // the configured bounds. + // This is a topic without an explicit subscription and we register it if we are + // within the configured bounds. self.topic_info.entry(topic.clone()).or_insert(false); self.topic_subscription_status.get_or_create(topic).set(0); Ok(()) @@ -414,7 +430,7 @@ impl Metrics { } } - /* Mesh related methods */ + // Mesh related methods /// Registers the subscription to a topic if the configured limits allow it. /// Sets the registered number of peers in the mesh to 0. @@ -427,8 +443,8 @@ impl Metrics { } } - /// Registers the unsubscription to a topic if the topic was previously allowed. - /// Sets the registered number of peers in the mesh to 0. + /// Registers the unsubscription to a topic if the topic was previously + /// allowed. Sets the registered number of peers in the mesh to 0. pub(crate) fn left(&mut self, topic: &TopicHash) { if self.topic_info.contains_key(topic) { // Depending on the configured topic bounds we could miss a mesh topic. @@ -597,7 +613,8 @@ impl Metrics { .inc(); } - /// Removes a peer from the counter based on its protocol when it disconnects. + /// Removes a peer from the counter based on its protocol when it + /// disconnects. 
pub(crate) fn peer_protocol_disconnected(&mut self, kind: PeerKind) { let metric = self .peers_per_protocol diff --git a/protocols/gossipsub/src/peer_score.rs b/protocols/gossipsub/src/peer_score.rs index e8d1a6e5f97..133296316bd 100644 --- a/protocols/gossipsub/src/peer_score.rs +++ b/protocols/gossipsub/src/peer_score.rs @@ -18,25 +18,36 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -//! -//! Manages and stores the Scoring logic of a particular peer on the gossipsub behaviour. +//! Manages and stores the Scoring logic of a particular peer on the gossipsub +//! behaviour. + +use std::{ + collections::{hash_map, HashMap, HashSet}, + net::IpAddr, + time::Duration, +}; -use crate::metrics::{Metrics, Penalty}; -use crate::time_cache::TimeCache; -use crate::{MessageId, TopicHash}; use libp2p_identity::PeerId; -use std::collections::{hash_map, HashMap, HashSet}; -use std::net::IpAddr; -use std::time::Duration; use web_time::Instant; +use crate::{ + metrics::{Metrics, Penalty}, + time_cache::TimeCache, + MessageId, + TopicHash, +}; + mod params; -use crate::ValidationError; pub use params::{ - score_parameter_decay, score_parameter_decay_with_base, PeerScoreParams, PeerScoreThresholds, + score_parameter_decay, + score_parameter_decay_with_base, + PeerScoreParams, + PeerScoreThresholds, TopicScoreParams, }; +use crate::ValidationError; + #[cfg(test)] mod tests; @@ -64,11 +75,14 @@ struct PeerStats { topics: HashMap, /// IP tracking for individual peers. known_ips: HashSet, - /// Behaviour penalty that is applied to the peer, assigned by the behaviour. + /// Behaviour penalty that is applied to the peer, assigned by the + /// behaviour. behaviour_penalty: f64, - /// Application specific score. Can be manipulated by calling PeerScore::set_application_score + /// Application specific score. 
Can be manipulated by calling + /// PeerScore::set_application_score application_score: f64, - /// Scoring based on how whether this peer consumes messages fast enough or not. + /// Scoring based on whether this peer consumes messages fast enough or + /// not. slow_peer_penalty: f64, } @@ -96,8 +110,9 @@ impl Default for PeerStats { } impl PeerStats { - /// Returns a mutable reference to topic stats if they exist, otherwise if the supplied parameters score the - /// topic, inserts the default stats and returns a reference to those. If neither apply, returns None. + /// Returns a mutable reference to topic stats if they exist, otherwise if + /// the supplied parameters score the topic, inserts the default stats + /// and returns a reference to those. If neither apply, returns None. pub(crate) fn stats_or_default_mut( &mut self, topic_hash: TopicHash, @@ -116,7 +131,8 @@ struct TopicStats { mesh_status: MeshStatus, /// Number of first message deliveries. first_message_deliveries: f64, - /// True if the peer has been in the mesh for enough time to activate mesh message deliveries. + /// True if the peer has been in the mesh for enough time to activate mesh + /// message deliveries. mesh_message_deliveries_active: bool, /// Number of message deliveries from the mesh. mesh_message_deliveries: f64, @@ -197,7 +213,8 @@ impl Default for DeliveryRecord { } impl PeerScore { - /// Creates a new [`PeerScore`] using a given set of peer scoring parameters. + /// Creates a new [`PeerScore`] using a given set of peer scoring + /// parameters. #[allow(dead_code)] pub(crate) fn new(params: PeerScoreParams) -> Self { Self::new_with_message_delivery_time_callback(params, None) } @@ -221,8 +238,8 @@ impl PeerScore { self.metric_score(peer_id, None) } - /// Returns the score for a peer, logging metrics. This is called from the heartbeat and - /// increments the metric counts for penalties. + /// Returns the score for a peer, logging metrics. 
This is called from the + /// heartbeat and increments the metric counts for penalties. pub(crate) fn metric_score(&self, peer_id: &PeerId, mut metrics: Option<&mut Metrics>) -> f64 { let Some(peer_stats) = self.peer_stats.get(peer_id) else { return 0.0; @@ -285,12 +302,14 @@ impl PeerScore { } // P3b: - // NOTE: the weight of P3b is negative (validated in TopicScoreParams.validate), so this detracts. + // NOTE: the weight of P3b is negative (validated in TopicScoreParams.validate), + // so this detracts. let p3b = topic_stats.mesh_failure_penalty; topic_score += p3b * topic_params.mesh_failure_penalty_weight; // P4: invalid messages - // NOTE: the weight of P4 is negative (validated in TopicScoreParams.validate), so this detracts. + // NOTE: the weight of P4 is negative (validated in TopicScoreParams.validate), + // so this detracts. let p4 = topic_stats.invalid_message_deliveries * topic_stats.invalid_message_deliveries; topic_score += p4 * topic_params.invalid_message_deliveries_weight; @@ -391,9 +410,10 @@ impl PeerScore { } // we don't decay retained scores, as the peer is not active. - // this way the peer cannot reset a negative score by simply disconnecting and reconnecting, - // unless the retention period has elapsed. - // similarly, a well behaved peer does not lose its score by getting disconnected. + // this way the peer cannot reset a negative score by simply disconnecting and + // reconnecting, unless the retention period has elapsed. + // similarly, a well behaved peer does not lose its score by getting + // disconnected. return true; } @@ -450,8 +470,8 @@ impl PeerScore { }); } - /// Adds a connected peer to [`PeerScore`], initialising with empty ips (ips get added later - /// through add_ip. + /// Adds a connected peer to [`PeerScore`], initialising with empty ips (ips + /// get added later through add_ip). 
pub(crate) fn add_peer(&mut self, peer_id: PeerId) { let peer_stats = self.peer_stats.entry(peer_id).or_default(); @@ -459,13 +479,14 @@ impl PeerScore { peer_stats.status = ConnectionStatus::Connected; } - /// Adds a new ip to a peer, if the peer is not yet known creates a new peer_stats entry for it + /// Adds a new ip to a peer, if the peer is not yet known creates a new + /// peer_stats entry for it pub(crate) fn add_ip(&mut self, peer_id: &PeerId, ip: IpAddr) { tracing::trace!(peer=%peer_id, %ip, "Add ip for peer"); let peer_stats = self.peer_stats.entry(*peer_id).or_default(); - // Mark the peer as connected (currently the default is connected, but we don't want to - // rely on the default). + // Mark the peer as connected (currently the default is connected, but we don't + // want to rely on the default). peer_stats.status = ConnectionStatus::Connected; // Insert the ip @@ -504,8 +525,8 @@ impl PeerScore { } } - /// Removes a peer from the score table. This retains peer statistics if their score is - /// non-positive. + /// Removes a peer from the score table. This retains peer statistics if + /// their score is non-positive. pub(crate) fn remove_peer(&mut self, peer_id: &PeerId) { // we only retain non-positive scores of peers if self.score(peer_id) > 0f64 { @@ -516,8 +537,8 @@ impl PeerScore { return; } - // if the peer is retained (including it's score) the `first_message_delivery` counters - // are reset to 0 and mesh delivery penalties applied. + // if the peer is retained (including it's score) the `first_message_delivery` + // counters are reset to 0 and mesh delivery penalties applied. 
if let Some(peer_stats) = self.peer_stats.get_mut(peer_id) { for (topic, topic_stats) in peer_stats.topics.iter_mut() { topic_stats.first_message_deliveries = 0f64; @@ -627,18 +648,20 @@ impl PeerScore { return; } - // mark the message as valid and reward mesh peers that have already forwarded it to us + // mark the message as valid and reward mesh peers that have already forwarded + // it to us record.status = DeliveryStatus::Valid(Instant::now()); for peer in record.peers.iter().cloned().collect::>() { - // this check is to make sure a peer can't send us a message twice and get a double - // count if it is a first delivery + // this check is to make sure a peer can't send us a message twice and get a + // double count if it is a first delivery if &peer != from { self.mark_duplicate_message_delivery(&peer, topic_hash, None); } } } - /// Similar to `reject_message` except does not require the message id or reason for an invalid message. + /// Similar to `reject_message` except does not require the message id or + /// reason for an invalid message. pub(crate) fn reject_invalid_message(&mut self, from: &PeerId, topic_hash: &TopicHash) { tracing::debug!( peer=%from, @@ -672,21 +695,22 @@ impl PeerScore { let peers: Vec<_> = { let record = self.deliveries.entry(msg_id.clone()).or_default(); - // Multiple peers can now reject the same message as we track which peers send us the - // message. If we have already updated the status, return. + // Multiple peers can now reject the same message as we track which peers send + // us the message. If we have already updated the status, return. 
if record.status != DeliveryStatus::Unknown { return; } if let RejectReason::ValidationIgnored = reason { - // we were explicitly instructed by the validator to ignore the message but not penalize - // the peer + // we were explicitly instructed by the validator to ignore the message but not + // penalize the peer record.status = DeliveryStatus::Ignored; record.peers.clear(); return; } - // mark the message as invalid and penalize peers that have already forwarded it. + // mark the message as invalid and penalize peers that have already forwarded + // it. record.status = DeliveryStatus::Invalid; // release the delivery time tracking map to free some memory early record.peers.drain().collect() } @@ -744,13 +768,15 @@ impl PeerScore { self.mark_invalid_message_delivery(from, topic_hash); } DeliveryStatus::Ignored => { - // the message was ignored; do nothing (we don't know if it was valid) + // the message was ignored; do nothing (we don't know if it was + // valid) } } } - /// Sets the application specific score for a peer. Returns true if the peer is the peer is - /// connected or if the score of the peer is not yet expired and false otherwise. + /// Sets the application specific score for a peer. Returns true if the peer + /// is connected or if the score of the peer is not yet + /// expired and false otherwise. pub(crate) fn set_application_score(&mut self, peer_id: &PeerId, new_score: f64) -> bool { if let Some(peer_stats) = self.peer_stats.get_mut(peer_id) { peer_stats.application_score = new_score; @@ -800,8 +826,8 @@ impl PeerScore { self.params.topics.get(topic_hash) } - /// Increments the "invalid message deliveries" counter for all scored topics the message - /// is published in. + /// Increments the "invalid message deliveries" counter for all scored + /// topics the message is published in. 
fn mark_invalid_message_delivery(&mut self, peer_id: &PeerId, topic_hash: &TopicHash) { if let Some(peer_stats) = self.peer_stats.get_mut(peer_id) { if let Some(topic_stats) = @@ -818,9 +844,9 @@ impl PeerScore { } } - /// Increments the "first message deliveries" counter for all scored topics the message is - /// published in, as well as the "mesh message deliveries" counter, if the peer is in the - /// mesh for the topic. + /// Increments the "first message deliveries" counter for all scored topics + /// the message is published in, as well as the "mesh message + /// deliveries" counter, if the peer is in the mesh for the topic. fn mark_first_message_delivery(&mut self, peer_id: &PeerId, topic_hash: &TopicHash) { if let Some(peer_stats) = self.peer_stats.get_mut(peer_id) { if let Some(topic_stats) = @@ -858,8 +884,8 @@ impl PeerScore { } } - /// Increments the "mesh message deliveries" counter for messages we've seen before, as long the - /// message was received within the P3 window. + /// Increments the "mesh message deliveries" counter for messages we've seen + /// before, as long the message was received within the P3 window. fn mark_duplicate_message_delivery( &mut self, peer_id: &PeerId, @@ -882,13 +908,14 @@ impl PeerScore { .get(topic_hash) .expect("Topic must exist if there are known topic_stats"); - // check against the mesh delivery window -- if the validated time is passed as 0, then - // the message was received before we finished validation and thus falls within the mesh + // check against the mesh delivery window -- if the validated time is passed as + // 0, then the message was received before we finished + // validation and thus falls within the mesh // delivery window. 
let mut falls_in_mesh_deliver_window = true; if let Some(validated_time) = validated_time { if let Some(now) = &now { - //should always be true + // should always be true let window_time = validated_time .checked_add(topic_params.mesh_message_deliveries_window) .unwrap_or(*now); diff --git a/protocols/gossipsub/src/peer_score/params.rs b/protocols/gossipsub/src/peer_score/params.rs index ae70991f7fb..8b6690a770e 100644 --- a/protocols/gossipsub/src/peer_score/params.rs +++ b/protocols/gossipsub/src/peer_score/params.rs @@ -18,18 +18,21 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. +use std::{ + collections::{HashMap, HashSet}, + net::IpAddr, + time::Duration, +}; + use crate::TopicHash; -use std::collections::{HashMap, HashSet}; -use std::net::IpAddr; -use std::time::Duration; /// The default number of seconds for a decay interval. const DEFAULT_DECAY_INTERVAL: u64 = 1; /// The default rate to decay to 0. const DEFAULT_DECAY_TO_ZERO: f64 = 0.1; -/// Computes the decay factor for a parameter, assuming the `decay_interval` is 1s -/// and that the value decays to zero if it drops below 0.01. +/// Computes the decay factor for a parameter, assuming the `decay_interval` is +/// 1s and that the value decays to zero if it drops below 0.01. pub fn score_parameter_decay(decay: Duration) -> f64 { score_parameter_decay_with_base( decay, @@ -38,7 +41,8 @@ pub fn score_parameter_decay(decay: Duration) -> f64 { ) } -/// Computes the decay factor for a parameter using base as the `decay_interval`. +/// Computes the decay factor for a parameter using base as the +/// `decay_interval`. 
pub fn score_parameter_decay_with_base(decay: Duration, base: Duration, decay_to_zero: f64) -> f64 { // the decay is linear, so after n ticks the value is factor^n // so factor^n = decay_to_zero => factor = decay_to_zero^(1/n) @@ -53,16 +57,18 @@ pub struct PeerScoreThresholds { pub gossip_threshold: f64, /// The score threshold below which we shouldn't publish when using flood - /// publishing (also applies to fanout peers); should be negative and <= `gossip_threshold`. + /// publishing (also applies to fanout peers); should be negative and <= + /// `gossip_threshold`. pub publish_threshold: f64, - /// The score threshold below which message processing is suppressed altogether, - /// implementing an effective graylist according to peer score; should be negative and - /// <= `publish_threshold`. + /// The score threshold below which message processing is suppressed + /// altogether, implementing an effective graylist according to peer + /// score; should be negative and <= `publish_threshold`. pub graylist_threshold: f64, - /// The score threshold below which px will be ignored; this should be positive - /// and limited to scores attainable by bootstrappers and other trusted nodes. + /// The score threshold below which px will be ignored; this should be + /// positive and limited to scores attainable by bootstrappers and other + /// trusted nodes. pub accept_px_threshold: f64, /// The median mesh score threshold before triggering opportunistic @@ -108,33 +114,38 @@ pub struct PeerScoreParams { /// Score parameters per topic. pub topics: HashMap, - /// Aggregate topic score cap; this limits the total contribution of topics towards a positive - /// score. It must be positive (or 0 for no cap). + /// Aggregate topic score cap; this limits the total contribution of topics + /// towards a positive score. It must be positive (or 0 for no cap). pub topic_score_cap: f64, /// P5: Application-specific peer scoring pub app_specific_weight: f64, /// P6: IP-colocation factor. 
- /// The parameter has an associated counter which counts the number of peers with the same IP. - /// If the number of peers in the same IP exceeds `ip_colocation_factor_threshold, then the value - /// is the square of the difference, ie `(peers_in_same_ip - ip_colocation_threshold)^2`. - /// If the number of peers in the same IP is less than the threshold, then the value is 0. - /// The weight of the parameter MUST be negative, unless you want to disable for testing. - /// Note: In order to simulate many IPs in a manageable manner when testing, you can set the weight to 0 - /// thus disabling the IP colocation penalty. + /// The parameter has an associated counter which counts the number of + /// peers with the same IP. If the number of peers in the same IP + /// exceeds `ip_colocation_factor_threshold, then the value + /// is the square of the difference, ie `(peers_in_same_ip - + /// ip_colocation_threshold)^2`. If the number of peers in the same IP + /// is less than the threshold, then the value is 0. The weight of the + /// parameter MUST be negative, unless you want to disable for testing. + /// Note: In order to simulate many IPs in a manageable manner when + /// testing, you can set the weight to 0 thus disabling the IP + /// colocation penalty. pub ip_colocation_factor_weight: f64, pub ip_colocation_factor_threshold: f64, pub ip_colocation_factor_whitelist: HashSet, /// P7: behavioural pattern penalties. - /// This parameter has an associated counter which tracks misbehaviour as detected by the - /// router. The router currently applies penalties for the following behaviors: + /// This parameter has an associated counter which tracks misbehaviour as + /// detected by the router. The router currently applies penalties for + /// the following behaviors: /// - attempting to re-graft before the prune backoff time has elapsed. - /// - not following up in IWANT requests for messages advertised with IHAVE. 
+ /// - not following up in IWANT requests for messages advertised with + /// IHAVE. /// - /// The value of the parameter is the square of the counter over the threshold, which decays - /// with BehaviourPenaltyDecay. + /// The value of the parameter is the square of the counter over the + /// threshold, which decays with BehaviourPenaltyDecay. /// The weight of the parameter MUST be negative (or zero to disable). pub behaviour_penalty_weight: f64, pub behaviour_penalty_threshold: f64, @@ -150,8 +161,9 @@ pub struct PeerScoreParams { pub retain_score: Duration, /// Slow peer penalty conditions, - /// by default `slow_peer_weight` is 50 times lower than `behaviour_penalty_weight` - /// i.e. 50 slow peer penalties match 1 behaviour penalty. + /// by default `slow_peer_weight` is 50 times lower than + /// `behaviour_penalty_weight` i.e. 50 slow peer penalties match 1 + /// behaviour penalty. pub slow_peer_weight: f64, pub slow_peer_threshold: f64, pub slow_peer_decay: f64, @@ -227,7 +239,8 @@ impl PeerScoreParams { return Err("Invalid decay_to_zero; must be between 0 and 1".into()); } - // no need to check the score retention; a value of 0 means that we don't retain scores + // no need to check the score retention; a value of 0 means that we don't retain + // scores Ok(()) } } @@ -239,15 +252,17 @@ pub struct TopicScoreParams { /// P1: time in the mesh /// This is the time the peer has been grafted in the mesh. - /// The value of the parameter is the `time/time_in_mesh_quantum`, capped by `time_in_mesh_cap` - /// The weight of the parameter must be positive (or zero to disable). + /// The value of the parameter is the `time/time_in_mesh_quantum`, capped + /// by `time_in_mesh_cap` The weight of the parameter must be positive + /// (or zero to disable). pub time_in_mesh_weight: f64, pub time_in_mesh_quantum: Duration, pub time_in_mesh_cap: f64, /// P2: first message deliveries /// This is the number of message deliveries in the topic. 
- /// The value of the parameter is a counter, decaying with `first_message_deliveries_decay`, and capped + /// The value of the parameter is a counter, decaying with + /// `first_message_deliveries_decay`, and capped /// by `first_message_deliveries_cap`. /// The weight of the parameter MUST be positive (or zero to disable). pub first_message_deliveries_weight: f64, @@ -256,18 +271,21 @@ pub struct TopicScoreParams { /// P3: mesh message deliveries /// This is the number of message deliveries in the mesh, within the - /// `mesh_message_deliveries_window` of message validation; deliveries during validation also - /// count and are retroactively applied when validation succeeds. - /// This window accounts for the minimum time before a hostile mesh peer trying to game the - /// score could replay back a valid message we just sent them. - /// It effectively tracks first and near-first deliveries, ie a message seen from a mesh peer + /// `mesh_message_deliveries_window` of message validation; deliveries + /// during validation also count and are retroactively applied when + /// validation succeeds. This window accounts for the minimum time + /// before a hostile mesh peer trying to game the score could replay + /// back a valid message we just sent them. It effectively tracks first + /// and near-first deliveries, ie a message seen from a mesh peer /// before we have forwarded it to them. - /// The parameter has an associated counter, decaying with `mesh_message_deliveries_decay`. - /// If the counter exceeds the threshold, its value is 0. - /// If the counter is below the `mesh_message_deliveries_threshold`, the value is the square of - /// the deficit, ie (`message_deliveries_threshold - counter)^2` - /// The penalty is only activated after `mesh_message_deliveries_activation` time in the mesh. - /// The weight of the parameter MUST be negative (or zero to disable). + /// The parameter has an associated counter, decaying with + /// `mesh_message_deliveries_decay`. 
If the counter exceeds the + /// threshold, its value is 0. If the counter is below the + /// `mesh_message_deliveries_threshold`, the value is the square of the + /// deficit, ie (`message_deliveries_threshold - counter)^2` + /// The penalty is only activated after + /// `mesh_message_deliveries_activation` time in the mesh. The weight + /// of the parameter MUST be negative (or zero to disable). pub mesh_message_deliveries_weight: f64, pub mesh_message_deliveries_decay: f64, pub mesh_message_deliveries_cap: f64, @@ -276,8 +294,8 @@ pub struct TopicScoreParams { pub mesh_message_deliveries_activation: Duration, /// P3b: sticky mesh propagation failures - /// This is a sticky penalty that applies when a peer gets pruned from the mesh with an active - /// mesh message delivery penalty. + /// This is a sticky penalty that applies when a peer gets pruned from the + /// mesh with an active mesh message delivery penalty. /// The weight of the parameter MUST be negative (or zero to disable) pub mesh_failure_penalty_weight: f64, pub mesh_failure_penalty_decay: f64, diff --git a/protocols/gossipsub/src/peer_score/tests.rs b/protocols/gossipsub/src/peer_score/tests.rs index 064e277eed7..7cfe70d1056 100644 --- a/protocols/gossipsub/src/peer_score/tests.rs +++ b/protocols/gossipsub/src/peer_score/tests.rs @@ -20,9 +20,7 @@ /// A collection of unit tests mostly ported from the go implementation. use super::*; - -use crate::types::RawMessage; -use crate::{IdentTopic as Topic, Message}; +use crate::{types::RawMessage, IdentTopic as Topic, Message}; // estimates a value within variance fn within_variance(value: f64, expected: f64, variance: f64) -> bool { @@ -325,8 +323,8 @@ fn test_score_mesh_message_deliveries() { // peer A always delivers the message first. // peer B delivers next (within the delivery window). // peer C delivers outside the delivery window. - // we expect peers A and B to have a score of zero, since all other parameter weights are zero. 
- // Peer C should have a negative score. + // we expect peers A and B to have a score of zero, since all other parameter + // weights are zero. Peer C should have a negative score. let peer_id_a = PeerId::random(); let peer_id_b = PeerId::random(); let peer_id_c = PeerId::random(); @@ -338,7 +336,8 @@ fn test_score_mesh_message_deliveries() { peer_score.graft(peer_id, topic.clone()); } - // assert that nobody has been penalized yet for not delivering messages before activation time + // assert that nobody has been penalized yet for not delivering messages before + // activation time peer_score.refresh_scores(); for peer_id in &peers { let score = peer_score.score(peer_id); @@ -351,8 +350,8 @@ fn test_score_mesh_message_deliveries() { // wait for the activation time to kick in std::thread::sleep(topic_params.mesh_message_deliveries_activation); - // deliver a bunch of messages from peer A, with duplicates within the window from peer B, - // and duplicates outside the window from peer C. + // deliver a bunch of messages from peer A, with duplicates within the window + // from peer B, and duplicates outside the window from peer C. let messages = 100; let mut messages_to_send = Vec::new(); for seq in 0..messages { @@ -384,8 +383,9 @@ fn test_score_mesh_message_deliveries() { "expected non-negative score for Peer B, got score {score_b}" ); - // the penalty is the difference between the threshold and the actual mesh deliveries, squared. - // since we didn't deliver anything, this is just the value of the threshold + // the penalty is the difference between the threshold and the actual mesh + // deliveries, squared. 
since we didn't deliver anything, this is just the + // value of the threshold let penalty = topic_params.mesh_message_deliveries_threshold * topic_params.mesh_message_deliveries_threshold; let expected = @@ -431,7 +431,8 @@ fn test_score_mesh_message_deliveries_decay() { peer_score.deliver_message(&peer_id_a, &id, &msg.topic); } - // we should have a positive score, since we delivered more messages than the threshold + // we should have a positive score, since we delivered more messages than the + // threshold peer_score.refresh_scores(); let score_a = peer_score.score(&peer_id_a); @@ -447,7 +448,8 @@ fn test_score_mesh_message_deliveries_decay() { } let score_a = peer_score.score(&peer_id_a); - // the penalty is the difference between the threshold and the (decayed) mesh deliveries, squared. + // the penalty is the difference between the threshold and the (decayed) mesh + // deliveries, squared. let deficit = topic_params.mesh_message_deliveries_threshold - decayed_delivery_count; let penalty = deficit * deficit; let expected = @@ -506,7 +508,8 @@ fn test_score_mesh_failure_penalty() { peer_score.deliver_message(&peer_id_a, &id, &msg.topic); } - // peers A and B should both have zero scores, since the failure penalty hasn't been applied yet + // peers A and B should both have zero scores, since the failure penalty hasn't + // been applied yet peer_score.refresh_scores(); let score_a = peer_score.score(&peer_id_a); let score_b = peer_score.score(&peer_id_b); @@ -526,8 +529,8 @@ fn test_score_mesh_failure_penalty() { assert_eq!(score_a, 0.0, "expected Peer A to have a 0"); - // penalty calculation is the same as for mesh_message_deliveries, but multiplied by - // mesh_failure_penalty_weigh + // penalty calculation is the same as for mesh_message_deliveries, but + // multiplied by mesh_failure_penalty_weigh // instead of mesh_message_deliveries_weight let penalty = topic_params.mesh_message_deliveries_threshold * topic_params.mesh_message_deliveries_threshold; @@ 
-693,8 +696,8 @@ fn test_score_reject_message_deliveries() { // insert a record in the message deliveries peer_score.validate_message(&peer_id_a, &id, &msg.topic); - // this should have no effect in the score, and subsequent duplicate messages should have no - // effect either + // this should have no effect in the score, and subsequent duplicate messages + // should have no effect either peer_score.reject_message(&peer_id_a, &id, &msg.topic, RejectReason::ValidationIgnored); peer_score.duplicated_message(&peer_id_b, &id, &msg.topic); @@ -711,8 +714,8 @@ fn test_score_reject_message_deliveries() { // insert a record in the message deliveries peer_score.validate_message(&peer_id_a, &id, &msg.topic); - // this should have no effect in the score, and subsequent duplicate messages should have no - // effect either + // this should have no effect in the score, and subsequent duplicate messages + // should have no effect either peer_score.reject_message(&peer_id_a, &id, &msg.topic, RejectReason::ValidationIgnored); peer_score.duplicated_message(&peer_id_b, &id, &msg.topic); @@ -837,7 +840,8 @@ fn test_score_ip_colocation() { peer_score.graft(peer_id, topic.clone()); } - // peerA should have no penalty, but B, C, and D should be penalized for sharing an IP + // peerA should have no penalty, but B, C, and D should be penalized for sharing + // an IP peer_score.add_ip(&peer_id_a, "1.2.3.4".parse().unwrap()); peer_score.add_ip(&peer_id_b, "2.3.4.5".parse().unwrap()); peer_score.add_ip(&peer_id_c, "2.3.4.5".parse().unwrap()); diff --git a/protocols/gossipsub/src/protocol.rs b/protocols/gossipsub/src/protocol.rs index 8d33fe51a90..76f9fd34396 100644 --- a/protocols/gossipsub/src/protocol.rs +++ b/protocols/gossipsub/src/protocol.rs @@ -18,15 +18,8 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::config::ValidationMode; -use crate::handler::HandlerEvent; -use crate::rpc_proto::proto; -use crate::topic::TopicHash; -use crate::types::{ - ControlAction, Graft, IHave, IWant, MessageId, PeerInfo, PeerKind, Prune, RawMessage, Rpc, - Subscription, SubscriptionAction, -}; -use crate::ValidationError; +use std::{convert::Infallible, pin::Pin}; + use asynchronous_codec::{Decoder, Encoder, Framed}; use byteorder::{BigEndian, ByteOrder}; use bytes::BytesMut; @@ -35,8 +28,28 @@ use libp2p_core::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}; use libp2p_identity::{PeerId, PublicKey}; use libp2p_swarm::StreamProtocol; use quick_protobuf::Writer; -use std::convert::Infallible; -use std::pin::Pin; + +use crate::{ + config::ValidationMode, + handler::HandlerEvent, + rpc_proto::proto, + topic::TopicHash, + types::{ + ControlAction, + Graft, + IHave, + IWant, + MessageId, + PeerInfo, + PeerKind, + Prune, + RawMessage, + Rpc, + Subscription, + SubscriptionAction, + }, + ValidationError, +}; pub(crate) const SIGNING_PREFIX: &[u8] = b"libp2p-pubsub:"; @@ -53,7 +66,8 @@ pub(crate) const FLOODSUB_PROTOCOL: ProtocolId = ProtocolId { kind: PeerKind::Floodsub, }; -/// Implementation of [`InboundUpgrade`] and [`OutboundUpgrade`] for the Gossipsub protocol. +/// Implementation of [`InboundUpgrade`] and [`OutboundUpgrade`] for the +/// Gossipsub protocol. #[derive(Debug, Clone)] pub struct ProtocolConfig { /// The Gossipsub protocol id to listen on. @@ -136,7 +150,7 @@ where } } -/* Gossip codec for the framing */ +// Gossip codec for the framing pub struct GossipsubCodec { /// Determines the level of validation performed on incoming messages. @@ -154,9 +168,10 @@ impl GossipsubCodec { } } - /// Verifies a gossipsub message. This returns either a success or failure. All errors - /// are logged, which prevents error handling in the codec and handler. We simply drop invalid - /// messages and log warnings, rather than propagating errors through the codec. 
+ /// Verifies a gossipsub message. This returns either a success or failure. + /// All errors are logged, which prevents error handling in the codec + /// and handler. We simply drop invalid messages and log warnings, + /// rather than propagating errors through the codec. fn verify_signature(message: &proto::Message) -> bool { use quick_protobuf::MessageWrite; @@ -175,8 +190,8 @@ impl GossipsubCodec { return false; }; - // If there is a key value in the protobuf, use that key otherwise the key must be - // obtained from the inlined source peer_id. + // If there is a key value in the protobuf, use that key otherwise the key must + // be obtained from the inlined source peer_id. let public_key = match message.key.as_deref().map(PublicKey::try_decode_protobuf) { Some(Ok(key)) => key, _ => match PublicKey::try_decode_protobuf(&source.to_bytes()[2..]) { @@ -271,15 +286,18 @@ impl Decoder for GossipsubCodec { ); invalid_kind = Some(ValidationError::SequenceNumberPresent); } else if message.from.is_some() { - tracing::warn!("Message dropped. Message source was non-empty and anonymous validation mode is set"); + tracing::warn!( + "Message dropped. Message source was non-empty and anonymous \ + validation mode is set" + ); invalid_kind = Some(ValidationError::MessageSourcePresent); } } ValidationMode::None => {} } - // If the initial validation logic failed, add the message to invalid messages and - // continue processing the others. + // If the initial validation logic failed, add the message to invalid messages + // and continue processing the others. 
if let Some(validation_error) = invalid_kind.take() { let message = RawMessage { source: None, // don't bother inform the application @@ -506,13 +524,19 @@ impl Decoder for GossipsubCodec { #[cfg(test)] mod tests { - use super::*; - use crate::config::Config; - use crate::{Behaviour, ConfigBuilder, MessageAuthenticity}; - use crate::{IdentTopic as Topic, Version}; use libp2p_identity::Keypair; use quickcheck::*; + use super::*; + use crate::{ + config::Config, + Behaviour, + ConfigBuilder, + IdentTopic as Topic, + MessageAuthenticity, + Version, + }; + #[derive(Clone, Debug)] struct Message(RawMessage); @@ -520,7 +544,8 @@ mod tests { fn arbitrary(g: &mut Gen) -> Self { let keypair = TestKeypair::arbitrary(g); - // generate an arbitrary GossipsubMessage using the behaviour signing functionality + // generate an arbitrary GossipsubMessage using the behaviour signing + // functionality let config = Config::default(); let mut gs: Behaviour = Behaviour::new(MessageAuthenticity::Signed(keypair.0), config).unwrap(); diff --git a/protocols/gossipsub/src/rpc.rs b/protocols/gossipsub/src/rpc.rs index c90e46a85da..1b3c6e0b6c3 100644 --- a/protocols/gossipsub/src/rpc.rs +++ b/protocols/gossipsub/src/rpc.rs @@ -18,7 +18,6 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use futures::{stream::Peekable, Stream, StreamExt}; use std::{ future::Future, pin::Pin, @@ -29,6 +28,8 @@ use std::{ task::{Context, Poll}, }; +use futures::{stream::Peekable, Stream, StreamExt}; + use crate::types::RpcOut; /// `RpcOut` sender that is priority aware. @@ -46,8 +47,8 @@ pub(crate) struct Sender { impl Sender { /// Create a RpcSender. pub(crate) fn new(cap: usize) -> Sender { - // We intentionally do not bound the channel, as we still need to send control messages - // such as `GRAFT`, `PRUNE`, `SUBSCRIBE`, and `UNSUBSCRIBE`. 
+ // We intentionally do not bound the channel, as we still need to send control + // messages such as `GRAFT`, `PRUNE`, `SUBSCRIBE`, and `UNSUBSCRIBE`. // That's also why we define `cap` and divide it by two, // to ensure there is capacity for both priority and non_priority messages. let (priority_sender, priority_receiver) = async_channel::unbounded(); @@ -119,7 +120,8 @@ pub struct Receiver { impl Receiver { // Peek the next message in the queues and return it if its timeout has elapsed. - // Returns `None` if there aren't any more messages on the stream or none is stale. + // Returns `None` if there aren't any more messages on the stream or none is + // stale. pub(crate) fn poll_stale(&mut self, cx: &mut Context<'_>) -> Poll> { // Peek priority queue. let priority = match self.priority.as_mut().poll_peek_mut(cx) { diff --git a/protocols/gossipsub/src/rpc_proto.rs b/protocols/gossipsub/src/rpc_proto.rs index 94c7aafbc3e..2f6832a01a1 100644 --- a/protocols/gossipsub/src/rpc_proto.rs +++ b/protocols/gossipsub/src/rpc_proto.rs @@ -26,12 +26,12 @@ pub(crate) mod proto { #[cfg(test)] mod test { - use crate::rpc_proto::proto::compat; - use crate::IdentTopic as Topic; use libp2p_identity::PeerId; use quick_protobuf::{BytesReader, MessageRead, MessageWrite, Writer}; use rand::Rng; + use crate::{rpc_proto::proto::compat, IdentTopic as Topic}; + #[test] fn test_multi_topic_message_compatibility() { let topic1 = Topic::new("t1").hash(); diff --git a/protocols/gossipsub/src/subscription_filter.rs b/protocols/gossipsub/src/subscription_filter.rs index 02bb9b4eab6..0be52362fe1 100644 --- a/protocols/gossipsub/src/subscription_filter.rs +++ b/protocols/gossipsub/src/subscription_filter.rs @@ -18,10 +18,10 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::types::Subscription; -use crate::TopicHash; use std::collections::{BTreeSet, HashMap, HashSet}; +use crate::{types::Subscription, TopicHash}; + pub trait TopicSubscriptionFilter { /// Returns true iff the topic is of interest and we can subscribe to it. fn can_subscribe(&mut self, topic_hash: &TopicHash) -> bool; @@ -55,7 +55,8 @@ pub trait TopicSubscriptionFilter { } /// Filters a set of deduplicated subscriptions - /// By default this filters the elements based on [`Self::allow_incoming_subscription`]. + /// By default this filters the elements based on + /// [`Self::allow_incoming_subscription`]. fn filter_incoming_subscription_set<'a>( &mut self, mut subscriptions: HashSet<&'a Subscription>, @@ -73,16 +74,16 @@ pub trait TopicSubscriptionFilter { } /// Returns true iff we allow an incoming subscription. - /// This is used by the default implementation of filter_incoming_subscription_set to decide - /// whether to filter out a subscription or not. - /// By default this uses can_subscribe to decide the same for incoming subscriptions as for - /// outgoing ones. + /// This is used by the default implementation of + /// filter_incoming_subscription_set to decide whether to filter out a + /// subscription or not. By default this uses can_subscribe to decide + /// the same for incoming subscriptions as for outgoing ones. fn allow_incoming_subscription(&mut self, subscription: &Subscription) -> bool { self.can_subscribe(&subscription.topic_hash) } } -//some useful implementers +// some useful implementers /// Allows all subscriptions #[derive(Default, Clone)] @@ -199,7 +200,7 @@ where } } -///A subscription filter that filters topics based on a regular expression. +/// A subscription filter that filters topics based on a regular expression. 
pub struct RegexSubscriptionFilter(pub regex::Regex); impl TopicSubscriptionFilter for RegexSubscriptionFilter { diff --git a/protocols/gossipsub/src/time_cache.rs b/protocols/gossipsub/src/time_cache.rs index a3e5c01ac4c..d6bccc60d3f 100644 --- a/protocols/gossipsub/src/time_cache.rs +++ b/protocols/gossipsub/src/time_cache.rs @@ -18,15 +18,21 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -//! This implements a time-based LRU cache for checking gossipsub message duplicates. +//! This implements a time-based LRU cache for checking gossipsub message +//! duplicates. + +use std::{ + collections::{ + hash_map::{ + self, + Entry::{Occupied, Vacant}, + }, + VecDeque, + }, + time::Duration, +}; use fnv::FnvHashMap; -use std::collections::hash_map::{ - self, - Entry::{Occupied, Vacant}, -}; -use std::collections::VecDeque; -use std::time::Duration; use web_time::Instant; struct ExpiringElement { @@ -37,8 +43,8 @@ struct ExpiringElement { } pub(crate) struct TimeCache { - /// Mapping a key to its value together with its latest expire time (can be updated through - /// reinserts). + /// Mapping a key to its value together with its latest expire time (can be + /// updated through reinserts). map: FnvHashMap>, /// An ordered list of keys by expires time. list: VecDeque>, @@ -167,8 +173,8 @@ where // Inserts new elements and removes any expired elements. // - // If the key was not present this returns `true`. If the value was already present this - // returns `false`. + // If the key was not present this returns `true`. If the value was already + // present this returns `false`. 
pub(crate) fn insert(&mut self, key: Key) -> bool { if let Entry::Vacant(entry) = self.0.entry(key) { entry.insert(()); @@ -206,7 +212,7 @@ mod test { cache.insert("t"); assert!(!cache.insert("t")); cache.insert("e"); - //assert!(!cache.insert("t")); + // assert!(!cache.insert("t")); assert!(!cache.insert("e")); // sleep until cache expiry std::thread::sleep(Duration::from_millis(101)); diff --git a/protocols/gossipsub/src/topic.rs b/protocols/gossipsub/src/topic.rs index a73496b53f2..36136165c20 100644 --- a/protocols/gossipsub/src/topic.rs +++ b/protocols/gossipsub/src/topic.rs @@ -18,12 +18,14 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::rpc_proto::proto; +use std::fmt; + use base64::prelude::*; use prometheus_client::encoding::EncodeLabelSet; use quick_protobuf::Writer; use sha2::{Digest, Sha256}; -use std::fmt; + +use crate::rpc_proto::proto; /// A generic trait that can be extended for various hashing types for a topic. pub trait Hasher { @@ -44,8 +46,8 @@ impl Hasher for IdentityHash { #[derive(Debug, Clone)] pub struct Sha256Hash {} impl Hasher for Sha256Hash { - /// Creates a [`TopicHash`] by SHA256 hashing the topic then base64 encoding the - /// hash. + /// Creates a [`TopicHash`] by SHA256 hashing the topic then base64 encoding + /// the hash. fn hash(topic_string: String) -> TopicHash { use quick_protobuf::MessageWrite; diff --git a/protocols/gossipsub/src/transform.rs b/protocols/gossipsub/src/transform.rs index 4831f9781b0..b91639e9a83 100644 --- a/protocols/gossipsub/src/transform.rs +++ b/protocols/gossipsub/src/transform.rs @@ -18,30 +18,34 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -//! This trait allows of extended user-level decoding that can apply to message-data before a -//! message-id is calculated. +//! This trait allows of extended user-level decoding that can apply to +//! 
message-data before a message-id is calculated. //! -//! This is primarily designed to allow applications to implement their own custom compression -//! algorithms that can be topic-specific. Once the raw data is transformed the message-id is then -//! calculated, allowing for applications to employ message-id functions post compression. +//! This is primarily designed to allow applications to implement their own +//! custom compression algorithms that can be topic-specific. Once the raw data +//! is transformed the message-id is then calculated, allowing for applications +//! to employ message-id functions post compression. use crate::{Message, RawMessage, TopicHash}; /// A general trait of transforming a [`RawMessage`] into a [`Message`]. The /// /// [`RawMessage`] is obtained from the wire and the [`Message`] is used to -/// calculate the [`crate::MessageId`] of the message and is what is sent to the application. +/// calculate the [`crate::MessageId`] of the message and is what is sent to the +/// application. /// -/// The inbound/outbound transforms must be inverses. Applying the inbound transform and then the -/// outbound transform MUST leave the underlying data un-modified. +/// The inbound/outbound transforms must be inverses. Applying the inbound +/// transform and then the outbound transform MUST leave the underlying data +/// un-modified. /// /// By default, this is the identity transform for all fields in [`Message`]. pub trait DataTransform { /// Takes a [`RawMessage`] received and converts it to a [`Message`]. fn inbound_transform(&self, raw_message: RawMessage) -> Result; - /// Takes the data to be published (a topic and associated data) transforms the data. The - /// transformed data will then be used to create a [`crate::RawMessage`] to be sent to peers. + /// Takes the data to be published (a topic and associated data) transforms + /// the data. The transformed data will then be used to create a + /// [`crate::RawMessage`] to be sent to peers. 
fn outbound_transform( &self, topic: &TopicHash, @@ -49,7 +53,8 @@ pub trait DataTransform { ) -> Result, std::io::Error>; } -/// The default transform, the raw data is propagated as is to the application layer gossipsub. +/// The default transform, the raw data is propagated as is to the application +/// layer gossipsub. #[derive(Default, Clone)] pub struct IdentityTransform; diff --git a/protocols/gossipsub/src/types.rs b/protocols/gossipsub/src/types.rs index bb1916fefd0..0101a2e0801 100644 --- a/protocols/gossipsub/src/types.rs +++ b/protocols/gossipsub/src/types.rs @@ -19,30 +19,32 @@ // DEALINGS IN THE SOFTWARE. //! A collection of types using the Gossipsub system. -use crate::rpc::Sender; -use crate::TopicHash; +use std::{collections::BTreeSet, fmt, fmt::Debug}; + use futures_timer::Delay; use libp2p_identity::PeerId; use libp2p_swarm::ConnectionId; use prometheus_client::encoding::EncodeLabelValue; use quick_protobuf::MessageWrite; -use std::fmt::Debug; -use std::{collections::BTreeSet, fmt}; - -use crate::rpc_proto::proto; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; +use crate::{rpc::Sender, rpc_proto::proto, TopicHash}; + /// Messages that have expired while attempting to be sent to a peer. #[derive(Clone, Debug, Default)] pub struct FailedMessages { - /// The number of publish messages that failed to be published in a heartbeat. + /// The number of publish messages that failed to be published in a + /// heartbeat. pub publish: usize, - /// The number of forward messages that failed to be published in a heartbeat. + /// The number of forward messages that failed to be published in a + /// heartbeat. pub forward: usize, - /// The number of messages that were failed to be sent to the priority queue as it was full. + /// The number of messages that were failed to be sent to the priority queue + /// as it was full. pub priority: usize, - /// The number of messages that were failed to be sent to the non-priority queue as it was full. 
+ /// The number of messages that were failed to be sent to the non-priority + /// queue as it was full. pub non_priority: usize, /// The number of messages that timed out and could not be sent. pub timeout: usize, @@ -63,12 +65,14 @@ impl FailedMessages { #[derive(Debug)] /// Validation kinds from the application for received messages. pub enum MessageAcceptance { - /// The message is considered valid, and it should be delivered and forwarded to the network. + /// The message is considered valid, and it should be delivered and + /// forwarded to the network. Accept, - /// The message is considered invalid, and it should be rejected and trigger the P₄ penalty. + /// The message is considered invalid, and it should be rejected and trigger + /// the P₄ penalty. Reject, - /// The message is neither delivered nor forwarded to the network, but the router does not - /// trigger the P₄ penalty. + /// The message is neither delivered nor forwarded to the network, but the + /// router does not trigger the P₄ penalty. Ignore, } @@ -143,15 +147,18 @@ pub struct RawMessage { /// The signature of the message if it's signed. pub signature: Option>, - /// The public key of the message if it is signed and the source [`PeerId`] cannot be inlined. + /// The public key of the message if it is signed and the source [`PeerId`] + /// cannot be inlined. pub key: Option>, - /// Flag indicating if this message has been validated by the application or not. + /// Flag indicating if this message has been validated by the application or + /// not. pub validated: bool, } impl RawMessage { - /// Calculates the encoded length of this message (used for calculating metrics). + /// Calculates the encoded length of this message (used for calculating + /// metrics). 
pub fn raw_protobuf_len(&self) -> usize { let message = proto::Message { from: self.source.map(|m| m.to_bytes()), @@ -178,8 +185,8 @@ impl From for proto::Message { } } -/// The message sent to the user after a [`RawMessage`] has been transformed by a -/// [`crate::DataTransform`]. +/// The message sent to the user after a [`RawMessage`] has been transformed by +/// a [`crate::DataTransform`]. #[derive(Clone, PartialEq, Eq, Hash)] pub struct Message { /// Id of the peer that published this message. @@ -230,9 +237,9 @@ pub enum SubscriptionAction { #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub(crate) struct PeerInfo { pub(crate) peer_id: Option, - //TODO add this when RFC: Signed Address Records got added to the spec (see pull request + // TODO add this when RFC: Signed Address Records got added to the spec (see pull request // https://github.com/libp2p/specs/pull/217) - //pub signed_peer_record: ?, + // pub signed_peer_record: ?, } /// A Control message received by the gossipsub system. @@ -240,7 +247,8 @@ pub(crate) struct PeerInfo { pub enum ControlAction { /// Node broadcasts known messages per topic - IHave control message. IHave(IHave), - /// The node requests specific message ids (peer_id + sequence _number) - IWant control message. + /// The node requests specific message ids (peer_id + sequence _number) - + /// IWant control message. IWant(IWant), /// The node has been added to the mesh - Graft control message. Graft(Graft), @@ -257,7 +265,8 @@ pub struct IHave { pub(crate) message_ids: Vec, } -/// The node requests specific message ids (peer_id + sequence _number) - IWant control message. +/// The node requests specific message ids (peer_id + sequence _number) - IWant +/// control message. #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct IWant { /// A list of known message ids (peer_id + sequence _number) as a string. @@ -285,11 +294,11 @@ pub struct Prune { /// A Gossipsub RPC message sent. 
#[derive(Debug)] pub enum RpcOut { - /// Publish a Gossipsub message on network.`timeout` limits the duration the message - /// can wait to be sent before it is abandoned. + /// Publish a Gossipsub message on network.`timeout` limits the duration the + /// message can wait to be sent before it is abandoned. Publish { message: RawMessage, timeout: Delay }, - /// Forward a Gossipsub message on network. `timeout` limits the duration the message - /// can wait to be sent before it is abandoned. + /// Forward a Gossipsub message on network. `timeout` limits the duration + /// the message can wait to be sent before it is abandoned. Forward { message: RawMessage, timeout: Delay }, /// Subscribe a topic. Subscribe(TopicHash), diff --git a/protocols/gossipsub/tests/smoke.rs b/protocols/gossipsub/tests/smoke.rs index 3b6261afa54..04c8dbca9c5 100644 --- a/protocols/gossipsub/tests/smoke.rs +++ b/protocols/gossipsub/tests/smoke.rs @@ -18,15 +18,18 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use futures::stream::{FuturesUnordered, SelectAll}; -use futures::StreamExt; +use std::{task::Poll, time::Duration}; + +use futures::{ + stream::{FuturesUnordered, SelectAll}, + StreamExt, +}; use libp2p_gossipsub as gossipsub; use libp2p_gossipsub::{MessageAuthenticity, ValidationMode}; use libp2p_swarm::Swarm; use libp2p_swarm_test::SwarmExt as _; use quickcheck::{QuickCheck, TestResult}; use rand::{seq::SliceRandom, SeedableRng}; -use std::{task::Poll, time::Duration}; use tokio::{runtime::Runtime, time}; use tracing_subscriber::EnvFilter; @@ -65,8 +68,8 @@ impl Graph { } } - /// Polls the graph and passes each event into the provided FnMut until the closure returns - /// `true`. + /// Polls the graph and passes each event into the provided FnMut until the + /// closure returns `true`. /// /// Returns [`true`] on success and [`false`] on timeout. 
async fn wait_for bool>(&mut self, mut f: F) -> bool { @@ -91,7 +94,8 @@ impl Graph { } } - /// Polls the graph until Poll::Pending is obtained, completing the underlying polls. + /// Polls the graph until Poll::Pending is obtained, completing the + /// underlying polls. async fn drain_events(&mut self) { let fut = futures::future::poll_fn(|cx| loop { match self.nodes.poll_next_unpin(cx) { @@ -104,10 +108,10 @@ impl Graph { } async fn build_node() -> Swarm { - // NOTE: The graph of created nodes can be disconnected from the mesh point of view as nodes - // can reach their d_lo value and not add other nodes to their mesh. To speed up this test, we - // reduce the default values of the heartbeat, so that all nodes will receive gossip in a - // timely fashion. + // NOTE: The graph of created nodes can be disconnected from the mesh point of + // view as nodes can reach their d_lo value and not add other nodes to their + // mesh. To speed up this test, we reduce the default values of the + // heartbeat, so that all nodes will receive gossip in a timely fashion. let mut swarm = Swarm::new_ephemeral(|identity| { let peer_id = identity.public().to_peer_id(); @@ -170,12 +174,14 @@ fn multi_hop_propagation() { if !all_subscribed { return TestResult::error(format!( - "Timed out waiting for all nodes to subscribe but only have {subscribed:?}/{num_nodes:?}.", + "Timed out waiting for all nodes to subscribe but only have \ + {subscribed:?}/{num_nodes:?}.", )); } - // It can happen that the publish occurs before all grafts have completed causing this test - // to fail. We drain all the poll messages before publishing. + // It can happen that the publish occurs before all grafts have completed + // causing this test to fail. We drain all the poll messages before + // publishing. graph.drain_events().await; // Publish a single message. 
@@ -205,7 +211,8 @@ fn multi_hop_propagation() { if !all_received { return TestResult::error(format!( - "Timed out waiting for all nodes to receive the msg but only have {received_msgs:?}/{num_nodes:?}.", + "Timed out waiting for all nodes to receive the msg but only have \ + {received_msgs:?}/{num_nodes:?}.", )); } diff --git a/protocols/identify/src/behaviour.rs b/protocols/identify/src/behaviour.rs index b69f2014d81..98879d981af 100644 --- a/protocols/identify/src/behaviour.rs +++ b/protocols/identify/src/behaviour.rs @@ -18,28 +18,43 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::handler::{self, Handler, InEvent}; -use crate::protocol::{Info, UpgradeError}; -use libp2p_core::multiaddr::Protocol; -use libp2p_core::transport::PortUse; -use libp2p_core::{multiaddr, ConnectedPoint, Endpoint, Multiaddr}; -use libp2p_identity::PeerId; -use libp2p_identity::PublicKey; -use libp2p_swarm::behaviour::{ConnectionClosed, ConnectionEstablished, DialFailure, FromSwarm}; +use std::{ + collections::{hash_map::Entry, HashMap, HashSet, VecDeque}, + num::NonZeroUsize, + task::{Context, Poll}, + time::Duration, +}; + +use libp2p_core::{ + multiaddr, + multiaddr::Protocol, + transport::PortUse, + ConnectedPoint, + Endpoint, + Multiaddr, +}; +use libp2p_identity::{PeerId, PublicKey}; use libp2p_swarm::{ - ConnectionDenied, DialError, ExternalAddresses, ListenAddresses, NetworkBehaviour, - NotifyHandler, PeerAddresses, StreamUpgradeError, THandlerInEvent, ToSwarm, + behaviour::{ConnectionClosed, ConnectionEstablished, DialFailure, FromSwarm}, + ConnectionDenied, + ConnectionId, + DialError, + ExternalAddresses, + ListenAddresses, + NetworkBehaviour, + NotifyHandler, + PeerAddresses, + StreamUpgradeError, + THandler, + THandlerInEvent, + THandlerOutEvent, + ToSwarm, _address_translation, }; -use libp2p_swarm::{ConnectionId, THandler, THandlerOutEvent}; -use std::collections::hash_map::Entry; -use 
std::num::NonZeroUsize; -use std::{ - collections::{HashMap, HashSet, VecDeque}, - task::Context, - task::Poll, - time::Duration, +use crate::{ + handler::{self, Handler, InEvent}, + protocol::{Info, UpgradeError}, }; /// Whether an [`Multiaddr`] is a valid for the QUIC transport. @@ -86,20 +101,22 @@ fn is_tcp_addr(addr: &Multiaddr) -> bool { matches!(first, Ip4(_) | Ip6(_) | Dns(_) | Dns4(_) | Dns6(_)) && matches!(second, Tcp(_)) } -/// Network behaviour that automatically identifies nodes periodically, returns information -/// about them, and answers identify queries from other nodes. +/// Network behaviour that automatically identifies nodes periodically, returns +/// information about them, and answers identify queries from other nodes. /// /// All external addresses of the local node supposedly observed by remotes /// are reported via [`ToSwarm::NewExternalAddrCandidate`]. pub struct Behaviour { config: Config, - /// For each peer we're connected to, the observed address to send back to it. + /// For each peer we're connected to, the observed address to send back to + /// it. connected: HashMap>, /// The address a remote observed for us. our_observed_addresses: HashMap, - /// The outbound connections established without port reuse (require translation) + /// The outbound connections established without port reuse (require + /// translation) outbound_connections_with_ephemeral_port: HashSet, /// Pending events to be emitted when polled. @@ -191,7 +208,8 @@ impl Config { self } - /// Configures the size of the LRU cache, caching addresses of discovered peers. + /// Configures the size of the LRU cache, caching addresses of discovered + /// peers. pub fn with_cache_size(mut self, cache_size: usize) -> Self { self.cache_size = cache_size; self @@ -259,7 +277,8 @@ impl Behaviour { } } - /// Initiates an active push of the local peer information to the given peers. + /// Initiates an active push of the local peer information to the given + /// peers. 
pub fn push(&mut self, peers: I) where I: IntoIterator, @@ -323,7 +342,8 @@ impl Behaviour { .contains(&connection_id) { // Apply address translation to the candidate address. - // For TCP without port-reuse, the observed address contains an ephemeral port which needs to be replaced by the port of a listen address. + // For TCP without port-reuse, the observed address contains an ephemeral port + // which needs to be replaced by the port of a listen address. let translated_addresses = { let mut addrs: Vec<_> = self .listen_addresses @@ -346,7 +366,8 @@ impl Behaviour { addrs }; - // If address translation yielded nothing, broadcast the original candidate address. + // If address translation yielded nothing, broadcast the original candidate + // address. if translated_addresses.is_empty() { self.events .push_back(ToSwarm::NewExternalAddrCandidate(observed.clone())); @@ -398,7 +419,8 @@ impl NetworkBehaviour for Behaviour { ) -> Result, ConnectionDenied> { // Contrary to inbound events, outbound events are full-p2p qualified // so we remove /p2p/ in order to be homogeneous - // this will avoid Autonatv2 to probe twice the same address (fully-p2p-qualified + not fully-p2p-qualified) + // this will avoid Autonatv2 to probe twice the same address + // (fully-p2p-qualified + not fully-p2p-qualified) let mut addr = addr.clone(); if matches!(addr.iter().last(), Some(multiaddr::Protocol::P2p(_))) { addr.pop(); @@ -415,7 +437,8 @@ impl NetworkBehaviour for Behaviour { self.config.local_public_key.clone(), self.config.protocol_version.clone(), self.config.agent_version.clone(), - addr.clone(), // TODO: This is weird? That is the public address we dialed, shouldn't need to tell the other party? + addr.clone(), /* TODO: This is weird? That is the public address we dialed, + * shouldn't need to tell the other party? 
*/ self.all_addresses(), )) } @@ -638,7 +661,8 @@ impl Event { } /// If there is a given peer_id in the multiaddr, make sure it is the same as -/// the given peer_id. If there is no peer_id for the peer in the mutiaddr, this returns true. +/// the given peer_id. If there is no peer_id for the peer in the mutiaddr, this +/// returns true. fn multiaddr_matches_peer_id(addr: &Multiaddr, peer_id: &PeerId) -> bool { let last_component = addr.iter().last(); if let Some(multiaddr::Protocol::P2p(multi_addr_peer_id)) = last_component { diff --git a/protocols/identify/src/handler.rs b/protocols/identify/src/handler.rs index dd073d50ed6..404064d5d22 100644 --- a/protocols/identify/src/handler.rs +++ b/protocols/identify/src/handler.rs @@ -18,29 +18,46 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::protocol::{Info, PushInfo, UpgradeError}; -use crate::{protocol, PROTOCOL_NAME, PUSH_PROTOCOL_NAME}; +use std::{ + collections::HashSet, + task::{Context, Poll}, + time::Duration, +}; + use either::Either; use futures::prelude::*; use futures_bounded::Timeout; use futures_timer::Delay; -use libp2p_core::upgrade::{ReadyUpgrade, SelectUpgrade}; -use libp2p_core::Multiaddr; -use libp2p_identity::PeerId; -use libp2p_identity::PublicKey; -use libp2p_swarm::handler::{ - ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound, - ProtocolSupport, +use libp2p_core::{ + upgrade::{ReadyUpgrade, SelectUpgrade}, + Multiaddr, }; +use libp2p_identity::{PeerId, PublicKey}; use libp2p_swarm::{ - ConnectionHandler, ConnectionHandlerEvent, StreamProtocol, StreamUpgradeError, - SubstreamProtocol, SupportedProtocols, + handler::{ + ConnectionEvent, + DialUpgradeError, + FullyNegotiatedInbound, + FullyNegotiatedOutbound, + ProtocolSupport, + }, + ConnectionHandler, + ConnectionHandlerEvent, + StreamProtocol, + StreamUpgradeError, + SubstreamProtocol, + SupportedProtocols, }; use smallvec::SmallVec; -use 
std::collections::HashSet; -use std::{task::Context, task::Poll, time::Duration}; use tracing::Level; +use crate::{ + protocol, + protocol::{Info, PushInfo, UpgradeError}, + PROTOCOL_NAME, + PUSH_PROTOCOL_NAME, +}; + const STREAM_TIMEOUT: Duration = Duration::from_secs(60); const MAX_CONCURRENT_STREAMS_PER_CONNECTION: usize = 10; diff --git a/protocols/identify/src/lib.rs b/protocols/identify/src/lib.rs index 7d28e5b5cc7..76daedc830c 100644 --- a/protocols/identify/src/lib.rs +++ b/protocols/identify/src/lib.rs @@ -28,21 +28,24 @@ //! //! # Important Discrepancies //! -//! - **Using Identify with other protocols** Unlike some other libp2p implementations, -//! rust-libp2p does not treat Identify as a core protocol. This means that other protocols cannot -//! rely upon the existence of Identify, and need to be manually hooked up to Identify in order to -//! make use of its capabilities. +//! - **Using Identify with other protocols** Unlike some other libp2p +//! implementations, rust-libp2p does not treat Identify as a core protocol. +//! This means that other protocols cannot rely upon the existence of +//! Identify, and need to be manually hooked up to Identify in order to make +//! use of its capabilities. //! //! # Usage //! -//! The [`Behaviour`] struct implements a [`NetworkBehaviour`](libp2p_swarm::NetworkBehaviour) -//! that negotiates and executes the protocol on every established connection, emitting -//! [`Event`]s. +//! The [`Behaviour`] struct implements a +//! [`NetworkBehaviour`](libp2p_swarm::NetworkBehaviour) that negotiates and +//! executes the protocol on every established connection, emitting [`Event`]s. 
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -pub use self::behaviour::{Behaviour, Config, Event}; -pub use self::protocol::{Info, UpgradeError, PROTOCOL_NAME, PUSH_PROTOCOL_NAME}; +pub use self::{ + behaviour::{Behaviour, Config, Event}, + protocol::{Info, UpgradeError, PROTOCOL_NAME, PUSH_PROTOCOL_NAME}, +}; mod behaviour; mod handler; diff --git a/protocols/identify/src/protocol.rs b/protocols/identify/src/protocol.rs index f4dfd544dd1..b6c986b584c 100644 --- a/protocols/identify/src/protocol.rs +++ b/protocols/identify/src/protocol.rs @@ -18,16 +18,18 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::proto; +use std::io; + use asynchronous_codec::{FramedRead, FramedWrite}; use futures::prelude::*; use libp2p_core::{multiaddr, Multiaddr}; use libp2p_identity as identity; use libp2p_identity::PublicKey; use libp2p_swarm::StreamProtocol; -use std::io; use thiserror::Error; +use crate::proto; + const MAX_MESSAGE_SIZE_BYTES: usize = 4096; pub const PROTOCOL_NAME: StreamProtocol = StreamProtocol::new("/ipfs/id/1.0.0"); @@ -77,7 +79,8 @@ impl Info { } /// Identify push information of a peer sent in protocol messages. -/// Note that missing fields should be ignored, as peers may choose to send partial updates containing only the fields whose values have changed. +/// Note that missing fields should be ignored, as peers may choose to send +/// partial updates containing only the fields whose values have changed. 
#[derive(Debug, Clone)] pub struct PushInfo { pub public_key: Option, @@ -264,9 +267,10 @@ pub enum UpgradeError { #[cfg(test)] mod tests { - use super::*; use libp2p_identity as identity; + use super::*; + #[test] fn skip_invalid_multiaddr() { let valid_multiaddr: Multiaddr = "/ip6/2001:db8::/tcp/1234".parse().unwrap(); diff --git a/protocols/identify/tests/smoke.rs b/protocols/identify/tests/smoke.rs index d624005408e..edbaadbd05d 100644 --- a/protocols/identify/tests/smoke.rs +++ b/protocols/identify/tests/smoke.rs @@ -1,10 +1,13 @@ +use std::{ + collections::HashSet, + iter, + time::{Duration, Instant}, +}; + use futures::StreamExt; use libp2p_identify as identify; use libp2p_swarm::{Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt; -use std::collections::HashSet; -use std::iter; -use std::time::{Duration, Instant}; use tracing_subscriber::EnvFilter; #[async_std::test] @@ -34,8 +37,7 @@ async fn periodic_identify() { let (swarm2_memory_listen, swarm2_tcp_listen_addr) = swarm2.listen().await; swarm2.connect(&mut swarm1).await; - use identify::Event::Received; - use identify::Event::Sent; + use identify::Event::{Received, Sent}; match libp2p_swarm_test::drive(&mut swarm1, &mut swarm2).await { ( @@ -67,8 +69,8 @@ async fn periodic_identify() { assert_eq!(s2_info.agent_version, "b"); assert!(!s2_info.protocols.is_empty()); - // Cannot assert observed address of dialer because memory transport uses ephemeral, outgoing ports. - // assert_eq!( + // Cannot assert observed address of dialer because memory transport uses + // ephemeral, outgoing ports. assert_eq!( // s2_info.observed_addr, // swarm2_memory_listen.with(Protocol::P2p(swarm2_peer_id.into())) // ); diff --git a/protocols/kad/src/addresses.rs b/protocols/kad/src/addresses.rs index 0b3dc71e649..093dc037455 100644 --- a/protocols/kad/src/addresses.rs +++ b/protocols/kad/src/addresses.rs @@ -18,9 +18,10 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
+use std::fmt; + use libp2p_core::Multiaddr; use smallvec::SmallVec; -use std::fmt; /// A non-empty list of (unique) addresses of a peer in the routing table. /// Every address must be a fully-qualified /p2p address. @@ -60,9 +61,9 @@ impl Addresses { /// Removes the given address from the list. /// - /// Returns `Ok(())` if the address is either not in the list or was found and - /// removed. Returns `Err(())` if the address is the last remaining address, - /// which cannot be removed. + /// Returns `Ok(())` if the address is either not in the list or was found + /// and removed. Returns `Err(())` if the address is the last remaining + /// address, which cannot be removed. /// /// An address should only be removed if is determined to be invalid or /// otherwise unreachable. @@ -175,7 +176,8 @@ mod tests { ); } - /// Helper function to easily initialize Addresses struct with multiple addresses. + /// Helper function to easily initialize Addresses struct with multiple + /// addresses. fn make_addresses(addresses: impl IntoIterator) -> Addresses { Addresses { addrs: SmallVec::from_iter(addresses), diff --git a/protocols/kad/src/behaviour.rs b/protocols/kad/src/behaviour.rs index f577971167f..5fc1783d89b 100644 --- a/protocols/kad/src/behaviour.rs +++ b/protocols/kad/src/behaviour.rs @@ -22,41 +22,57 @@ mod test; -use crate::addresses::Addresses; -use crate::handler::{Handler, HandlerEvent, HandlerIn, RequestId}; -use crate::kbucket::{self, Distance, KBucketConfig, KBucketsTable, NodeStatus}; -use crate::protocol::{ConnectionType, KadPeer, ProtocolConfig}; -use crate::query::{Query, QueryConfig, QueryId, QueryPool, QueryPoolState}; -use crate::record::{ - self, - store::{self, RecordStore}, - ProviderRecord, Record, +use std::{ + collections::{BTreeMap, HashMap, HashSet, VecDeque}, + fmt, + num::NonZeroUsize, + task::{Context, Poll, Waker}, + time::Duration, + vec, }; -use crate::{bootstrap, K_VALUE}; -use crate::{jobs::*, protocol}; + use fnv::FnvHashSet; use 
libp2p_core::{transport::PortUse, ConnectedPoint, Endpoint, Multiaddr}; use libp2p_identity::PeerId; -use libp2p_swarm::behaviour::{ - AddressChange, ConnectionClosed, ConnectionEstablished, DialFailure, FromSwarm, -}; use libp2p_swarm::{ + behaviour::{AddressChange, ConnectionClosed, ConnectionEstablished, DialFailure, FromSwarm}, dial_opts::{self, DialOpts}, - ConnectionDenied, ConnectionHandler, ConnectionId, DialError, ExternalAddresses, - ListenAddresses, NetworkBehaviour, NotifyHandler, StreamProtocol, THandler, THandlerInEvent, - THandlerOutEvent, ToSwarm, + ConnectionDenied, + ConnectionHandler, + ConnectionId, + DialError, + ExternalAddresses, + ListenAddresses, + NetworkBehaviour, + NotifyHandler, + StreamProtocol, + THandler, + THandlerInEvent, + THandlerOutEvent, + ToSwarm, }; -use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; -use std::fmt; -use std::num::NonZeroUsize; -use std::task::{Context, Poll, Waker}; -use std::time::Duration; -use std::vec; use thiserror::Error; use tracing::Level; use web_time::Instant; pub use crate::query::QueryStats; +use crate::{ + addresses::Addresses, + bootstrap, + handler::{Handler, HandlerEvent, HandlerIn, RequestId}, + jobs::*, + kbucket::{self, Distance, KBucketConfig, KBucketsTable, NodeStatus}, + protocol, + protocol::{ConnectionType, KadPeer, ProtocolConfig}, + query::{Query, QueryConfig, QueryId, QueryPool, QueryPoolState}, + record::{ + self, + store::{self, RecordStore}, + ProviderRecord, + Record, + }, + K_VALUE, +}; /// `Behaviour` is a `NetworkBehaviour` that implements the libp2p /// Kademlia protocol. @@ -78,7 +94,8 @@ pub struct Behaviour { /// The currently connected peers. /// - /// This is a superset of the connected peers currently in the routing table. + /// This is a superset of the connected peers currently in the routing + /// table. 
connected_peers: FnvHashSet, /// Periodic job for re-publication of provider records for keys @@ -130,8 +147,8 @@ pub enum BucketInserts { /// in the routing table, it is inserted as long as there /// is a free slot in the corresponding k-bucket. If the /// k-bucket is full but still has a free pending slot, - /// it may be inserted into the routing table at a later time if an unresponsive - /// disconnected peer is evicted from the bucket. + /// it may be inserted into the routing table at a later time if an + /// unresponsive disconnected peer is evicted from the bucket. OnConnected, /// New peers and addresses are only added to the routing table via /// explicit calls to [`Behaviour::add_address`]. @@ -157,13 +174,15 @@ pub enum StoreInserts { /// the record is forwarded immediately to the [`RecordStore`]. Unfiltered, /// Whenever a (provider) record is received, an event is emitted. - /// Provider records generate a [`InboundRequest::AddProvider`] under [`Event::InboundRequest`], - /// normal records generate a [`InboundRequest::PutRecord`] under [`Event::InboundRequest`]. + /// Provider records generate a [`InboundRequest::AddProvider`] under + /// [`Event::InboundRequest`], normal records generate a + /// [`InboundRequest::PutRecord`] under [`Event::InboundRequest`]. /// /// When deemed valid, a (provider) record needs to be explicitly stored in - /// the [`RecordStore`] via [`RecordStore::put`] or [`RecordStore::add_provider`], - /// whichever is applicable. A mutable reference to the [`RecordStore`] can - /// be retrieved via [`Behaviour::store_mut`]. + /// the [`RecordStore`] via [`RecordStore::put`] or + /// [`RecordStore::add_provider`], whichever is applicable. A mutable + /// reference to the [`RecordStore`] can be retrieved via + /// [`Behaviour::store_mut`]. FilterBoth, } @@ -204,10 +223,11 @@ pub enum Caching { /// that do not return a record are not tracked, i.e. /// [`GetRecordOk::FinishedWithNoAdditionalRecord`] is always empty. 
Disabled, - /// Up to `max_peers` peers not returning a record that are closest to the key - /// being looked up are tracked and returned in [`GetRecordOk::FinishedWithNoAdditionalRecord`]. - /// The write-back operation must be performed explicitly, if - /// desired and after choosing a record from the results, via [`Behaviour::put_record_to`]. + /// Up to `max_peers` peers not returning a record that are closest to the + /// key being looked up are tracked and returned in + /// [`GetRecordOk::FinishedWithNoAdditionalRecord`]. The write-back + /// operation must be performed explicitly, if desired and after + /// choosing a record from the results, via [`Behaviour::put_record_to`]. Enabled { max_peers: u16 }, } @@ -407,15 +427,17 @@ impl Config { /// Sets the [`Caching`] strategy to use for successful lookups. /// /// The default is [`Caching::Enabled`] with a `max_peers` of 1. - /// Hence, with default settings and a lookup quorum of 1, a successful lookup - /// will result in the record being cached at the closest node to the key that - /// did not return the record, i.e. the standard Kademlia behaviour. + /// Hence, with default settings and a lookup quorum of 1, a successful + /// lookup will result in the record being cached at the closest node to + /// the key that did not return the record, i.e. the standard Kademlia + /// behaviour. pub fn set_caching(&mut self, c: Caching) -> &mut Self { self.caching = c; self } - /// Sets the interval on which [`Behaviour::bootstrap`] is called periodically. + /// Sets the interval on which [`Behaviour::bootstrap`] is called + /// periodically. /// /// * Default to `5` minutes. /// * Set to `None` to disable periodic bootstrap. @@ -442,16 +464,19 @@ impl Config { self } - /// Sets the time to wait before calling [`Behaviour::bootstrap`] after a new peer is inserted in the routing table. - /// This prevent cascading bootstrap requests when multiple peers are inserted into the routing table "at the same time". 
- /// This also allows to wait a little bit for other potential peers to be inserted into the routing table before - /// triggering a bootstrap, giving more context to the future bootstrap request. + /// Sets the time to wait before calling [`Behaviour::bootstrap`] after a + /// new peer is inserted in the routing table. This prevent cascading + /// bootstrap requests when multiple peers are inserted into the routing + /// table "at the same time". This also allows to wait a little bit for + /// other potential peers to be inserted into the routing table before + /// triggering a bootstrap, giving more context to the future bootstrap + /// request. /// /// * Default to `500` ms. - /// * Set to `Some(Duration::ZERO)` to never wait before triggering a bootstrap request when a new peer - /// is inserted in the routing table. - /// * Set to `None` to disable automatic bootstrap (no bootstrap request will be triggered when a new - /// peer is inserted in the routing table). + /// * Set to `Some(Duration::ZERO)` to never wait before triggering a + /// bootstrap request when a new peer is inserted in the routing table. + /// * Set to `None` to disable automatic bootstrap (no bootstrap request + /// will be triggered when a new peer is inserted in the routing table). #[cfg(test)] pub(crate) fn set_automatic_bootstrap_throttle( &mut self, @@ -576,12 +601,11 @@ where /// 1. In order for a node to join the DHT, it must know about at least /// one other node of the DHT. /// - /// 2. When a remote peer initiates a connection and that peer is not - /// yet in the routing table, the `Kademlia` behaviour must be - /// informed of an address on which that peer is listening for - /// connections before it can be added to the routing table - /// from where it can subsequently be discovered by all peers - /// in the DHT. + /// 2. 
When a remote peer initiates a connection and that peer is not yet + /// in the routing table, the `Kademlia` behaviour must be informed of + /// an address on which that peer is listening for connections before + /// it can be added to the routing table from where it can subsequently + /// be discovered by all peers in the DHT. /// /// If the routing table has been updated as a result of this operation, /// a [`Event::RoutingUpdated`] event is emitted. @@ -675,14 +699,16 @@ where match self.kbuckets.entry(&key)? { kbucket::Entry::Present(mut entry, _) => { if entry.value().remove(address).is_err() { - Some(entry.remove()) // it is the last address, thus remove the peer. + Some(entry.remove()) // it is the last address, thus remove + // the peer. } else { None } } kbucket::Entry::Pending(mut entry, _) => { if entry.value().remove(address).is_err() { - Some(entry.remove()) // it is the last address, thus remove the peer. + Some(entry.remove()) // it is the last address, thus remove + // the peer. } else { None } @@ -751,7 +777,8 @@ where // The inner code never expect higher than K_VALUE results to be returned. // And removing such cap will be tricky, // since it would involve forging a new key and additional requests. - // Hence bound to K_VALUE here to set clear expectation and prevent unexpected behaviour. + // Hence bound to K_VALUE here to set clear expectation and prevent unexpected + // behaviour. let capped_num_results = std::cmp::min(num_results, K_VALUE); self.get_closest_peers_inner(key, Some(capped_num_results)) } @@ -771,8 +798,8 @@ where self.queries.add_iter_closest(target, peer_keys, info) } - /// Returns all peers ordered by distance to the given key; takes peers from local routing table - /// only. + /// Returns all peers ordered by distance to the given key; takes peers from + /// local routing table only. 
pub fn get_closest_local_peers<'a, K: Clone>( &'a mut self, key: &'a kbucket::Key, @@ -780,11 +807,12 @@ where self.kbuckets.closest_keys(key) } - /// Finds the closest peers to a `key` in the context of a request by the `source` peer, such - /// that the `source` peer is never included in the result. + /// Finds the closest peers to a `key` in the context of a request by the + /// `source` peer, such that the `source` peer is never included in the + /// result. /// - /// Takes peers from local routing table only. Only returns number of peers equal to configured - /// replication factor. + /// Takes peers from local routing table only. Only returns number of peers + /// equal to configured replication factor. pub fn find_closest_local_peers<'a, K: Clone>( &'a mut self, key: &'a kbucket::Key, @@ -861,15 +889,17 @@ where /// The result of the query is eventually reported as a /// [`Event::OutboundQueryProgressed{QueryResult::PutRecord}`]. /// - /// The record is always stored locally with the given expiration. If the record's - /// expiration is `None`, the common case, it does not expire in local storage - /// but is still replicated with the configured record TTL. To remove the record - /// locally and stop it from being re-published in the DHT, see [`Behaviour::remove_record`]. - /// - /// After the initial publication of the record, it is subject to (re-)replication - /// and (re-)publication as per the configured intervals. Periodic (re-)publication - /// does not update the record's expiration in local storage, thus a given record - /// with an explicit expiration will always expire at that instant and until then + /// The record is always stored locally with the given expiration. If the + /// record's expiration is `None`, the common case, it does not expire + /// in local storage but is still replicated with the configured record + /// TTL. To remove the record locally and stop it from being + /// re-published in the DHT, see [`Behaviour::remove_record`]. 
+ /// + /// After the initial publication of the record, it is subject to + /// (re-)replication and (re-)publication as per the configured + /// intervals. Periodic (re-)publication does not update the record's + /// expiration in local storage, thus a given record with an explicit + /// expiration will always expire at that instant and until then /// is subject to regular (re-)replication and (re-)publication. pub fn put_record( &mut self, @@ -907,7 +937,8 @@ where /// > "version" of a record or to "cache" a record at further peers /// > to increase the lookup success rate on the DHT for other peers. /// > - /// > In particular, there is no automatic storing of records performed, and this + /// > In particular, there is no automatic storing of records performed, and + /// > this /// > method must be used to ensure the standard Kademlia /// > procedure of "caching" (i.e. storing) a found record at the closest /// > node to the key that _did not_ return it. @@ -963,29 +994,36 @@ where /// Bootstraps the local node to join the DHT. /// - /// Bootstrapping is a multi-step operation that starts with a lookup of the local node's - /// own ID in the DHT. This introduces the local node to the other nodes - /// in the DHT and populates its routing table with the closest neighbours. + /// Bootstrapping is a multi-step operation that starts with a lookup of the + /// local node's own ID in the DHT. This introduces the local node to + /// the other nodes in the DHT and populates its routing table with the + /// closest neighbours. /// - /// Subsequently, all buckets farther from the bucket of the closest neighbour are - /// refreshed by initiating an additional bootstrapping query for each such - /// bucket with random keys. + /// Subsequently, all buckets farther from the bucket of the closest + /// neighbour are refreshed by initiating an additional bootstrapping + /// query for each such bucket with random keys. 
/// - /// Returns `Ok` if bootstrapping has been initiated with a self-lookup, providing the - /// `QueryId` for the entire bootstrapping process. The progress of bootstrapping is - /// reported via [`Event::OutboundQueryProgressed{QueryResult::Bootstrap}`] events, + /// Returns `Ok` if bootstrapping has been initiated with a self-lookup, + /// providing the `QueryId` for the entire bootstrapping process. The + /// progress of bootstrapping is reported via + /// [`Event::OutboundQueryProgressed{QueryResult::Bootstrap}`] events, /// with one such event per bootstrapping query. /// /// Returns `Err` if bootstrapping is impossible due an empty routing table. /// - /// > **Note**: Bootstrapping requires at least one node of the DHT to be known. + /// > **Note**: Bootstrapping requires at least one node of the DHT to be + /// > known. /// > See [`Behaviour::add_address`]. /// - /// > **Note**: Bootstrap does not require to be called manually. It is periodically - /// > invoked at regular intervals based on the configured `periodic_bootstrap_interval` (see - /// > [`Config::set_periodic_bootstrap_interval`] for details) and it is also automatically invoked + /// > **Note**: Bootstrap does not require to be called manually. It is + /// > periodically + /// > invoked at regular intervals based on the configured + /// > `periodic_bootstrap_interval` (see + /// > [`Config::set_periodic_bootstrap_interval`] for details) and it is + /// > also automatically invoked /// > when a new peer is inserted in the routing table. - /// > This parameter is used to call [`Behaviour::bootstrap`] periodically and automatically + /// > This parameter is used to call [`Behaviour::bootstrap`] periodically + /// > and automatically /// > to ensure a healthy routing table. pub fn bootstrap(&mut self) -> Result { let local_key = *self.kbuckets.local_key(); @@ -1007,25 +1045,28 @@ where /// Establishes the local node as a provider of a value for the given key. 
/// /// This operation publishes a provider record with the given key and - /// identity of the local node to the peers closest to the key, thus establishing - /// the local node as a provider. + /// identity of the local node to the peers closest to the key, thus + /// establishing the local node as a provider. /// /// Returns `Ok` if a provider record has been stored locally, providing the - /// `QueryId` of the initial query that announces the local node as a provider. - /// - /// The publication of the provider records is periodically repeated as per the - /// configured interval, to renew the expiry and account for changes to the DHT - /// topology. A provider record may be removed from local storage and - /// thus no longer re-published by calling [`Behaviour::stop_providing`]. - /// - /// In contrast to the standard Kademlia push-based model for content distribution - /// implemented by [`Behaviour::put_record`], the provider API implements a - /// pull-based model that may be used in addition or as an alternative. - /// The means by which the actual value is obtained from a provider is out of scope - /// of the libp2p Kademlia provider API. - /// - /// The results of the (repeated) provider announcements sent by this node are - /// reported via [`Event::OutboundQueryProgressed{QueryResult::StartProviding}`]. + /// `QueryId` of the initial query that announces the local node as a + /// provider. + /// + /// The publication of the provider records is periodically repeated as per + /// the configured interval, to renew the expiry and account for changes + /// to the DHT topology. A provider record may be removed from local + /// storage and thus no longer re-published by calling + /// [`Behaviour::stop_providing`]. + /// + /// In contrast to the standard Kademlia push-based model for content + /// distribution implemented by [`Behaviour::put_record`], the provider + /// API implements a pull-based model that may be used in addition or as + /// an alternative. 
The means by which the actual value is obtained from + /// a provider is out of scope of the libp2p Kademlia provider API. + /// + /// The results of the (repeated) provider announcements sent by this node + /// are reported via + /// [`Event::OutboundQueryProgressed{QueryResult::StartProviding}`]. pub fn start_providing(&mut self, key: record::Key) -> Result { // Note: We store our own provider records locally without local addresses // to avoid redundant storage and outdated addresses. Instead these are @@ -1049,7 +1090,8 @@ where Ok(id) } - /// Stops the local node from announcing that it is a provider for the given key. + /// Stops the local node from announcing that it is a provider for the given + /// key. /// /// This is a local operation. The local node will still be considered as a /// provider for the key by other nodes until these provider records expire. @@ -1061,7 +1103,8 @@ where /// Performs a lookup for providers of a value to the given key. /// /// The result of this operation is delivered in a - /// reported via [`Event::OutboundQueryProgressed{QueryResult::GetProviders}`]. + /// reported via + /// [`Event::OutboundQueryProgressed{QueryResult::GetProviders}`]. pub fn get_providers(&mut self, key: record::Key) -> QueryId { let providers: HashSet<_> = self .store @@ -1107,10 +1150,13 @@ where /// Set the [`Mode`] in which we should operate. /// - /// By default, we are in [`Mode::Client`] and will swap into [`Mode::Server`] as soon as we have a confirmed, external address via [`FromSwarm::ExternalAddrConfirmed`]. + /// By default, we are in [`Mode::Client`] and will swap into + /// [`Mode::Server`] as soon as we have a confirmed, external address via + /// [`FromSwarm::ExternalAddrConfirmed`]. /// - /// Setting a mode via this function disables this automatic behaviour and unconditionally operates in the specified mode. - /// To reactivate the automatic configuration, pass [`None`] instead. 
+ /// Setting a mode via this function disables this automatic behaviour and + /// unconditionally operates in the specified mode. To reactivate the + /// automatic configuration, pass [`None`] instead. pub fn set_mode(&mut self, mode: Option) { match mode { Some(mode) => { @@ -1166,7 +1212,10 @@ where self.mode = match (self.external_addresses.as_slice(), self.mode) { ([], Mode::Server) => { - tracing::debug!("Switching to client-mode because we no longer have any confirmed external addresses"); + tracing::debug!( + "Switching to client-mode because we no longer have any confirmed external \ + addresses" + ); Mode::Client } @@ -1180,7 +1229,10 @@ where let confirmed_external_addresses = to_comma_separated_list(confirmed_external_addresses); - tracing::debug!("Switching to server-mode assuming that one of [{confirmed_external_addresses}] is externally reachable"); + tracing::debug!( + "Switching to server-mode assuming that one of \ + [{confirmed_external_addresses}] is externally reachable" + ); } Mode::Server @@ -1191,7 +1243,8 @@ where "Previous match arm handled empty list" ); - // Previously, server-mode, now also server-mode because > 1 external address. Don't log anything to avoid spam. + // Previously, server-mode, now also server-mode because > 1 external address. + // Don't log anything to avoid spam. Mode::Server } @@ -1207,7 +1260,8 @@ where } } - /// Processes discovered peers from a successful request in an iterative `Query`. + /// Processes discovered peers from a successful request in an iterative + /// `Query`. fn discovered<'a, I>(&'a mut self, query_id: &QueryId, source: &PeerId, peers: I) where I: Iterator + Clone, @@ -1230,7 +1284,8 @@ where } } - /// Collects all peers who are known to be providers of the value for a given `Multihash`. + /// Collects all peers who are known to be providers of the value for a + /// given `Multihash`. 
fn provider_peers(&mut self, key: &record::Key, source: &PeerId) -> Vec { let kbuckets = &mut self.kbuckets; let connected = &mut self.connected_peers; @@ -1314,7 +1369,8 @@ where self.queries.add_iter_closest(target.clone(), peers, info); } - /// Updates the routing table with a new connection status and address of a peer. + /// Updates the routing table with a new connection status and address of a + /// peer. fn connection_updated( &mut self, peer: PeerId, @@ -1428,9 +1484,9 @@ where } } - /// A new peer has been inserted in the routing table but we check if the routing - /// table is currently small (less that `K_VALUE` peers are present) and only - /// trigger a bootstrap in that case + /// A new peer has been inserted in the routing table but we check if the + /// routing table is currently small (less that `K_VALUE` peers are + /// present) and only trigger a bootstrap in that case fn bootstrap_on_low_peers(&mut self) { if self .kbuckets() @@ -1999,7 +2055,8 @@ where // if it is the least recently connected peer, currently disconnected // and is unreachable in the context of another peer pending insertion // into the same bucket. This is handled transparently by the - // `KBucketsTable` and takes effect through `KBucketsTable::take_applied_pending` + // `KBucketsTable` and takes effect through + // `KBucketsTable::take_applied_pending` // within `Behaviour::poll`. tracing::debug!( peer=%peer_id, @@ -2128,8 +2185,8 @@ where | dial_opts::PeerCondition::NotDialing | dial_opts::PeerCondition::DisconnectedAndNotDialing, ) => { - // We might (still) be connected, or about to be connected, thus do not report the - // failure to the queries. + // We might (still) be connected, or about to be connected, thus + // do not report the failure to the queries. 
} DialError::DialPeerConditionFalse(dial_opts::PeerCondition::Always) => { unreachable!("DialPeerCondition::Always can not trigger DialPeerConditionFalse."); @@ -2157,7 +2214,8 @@ where } } - /// Preloads a new [`Handler`] with requests that are waiting to be sent to the newly connected peer. + /// Preloads a new [`Handler`] with requests that are waiting to be sent to + /// the newly connected peer. fn preload_new_handler( &mut self, handler: &mut Handler, @@ -2166,7 +2224,8 @@ where ) { self.connections.insert(connection_id, peer); // Queue events for sending pending RPCs to the connected peer. - // There can be only one pending RPC for a particular peer and query per definition. + // There can be only one pending RPC for a particular peer and query per + // definition. for (_peer_id, event) in self.queries.iter_mut().filter_map(|q| { q.pending_rpcs .iter() @@ -2250,8 +2309,8 @@ where Some(peer) => peer, }; - // We should order addresses from decreasing likelihood of connectivity, so start with - // the addresses of that peer in the k-buckets. + // We should order addresses from decreasing likelihood of connectivity, so + // start with the addresses of that peer in the k-buckets. let key = kbucket::Key::from(peer_id); let mut peer_addrs = if let Some(kbucket::Entry::Present(mut entry, _)) = self.kbuckets.entry(&key) { @@ -2705,7 +2764,8 @@ where } } -/// Peer Info combines a Peer ID with a set of multiaddrs that the peer is listening on. +/// Peer Info combines a Peer ID with a set of multiaddrs that the peer is +/// listening on. #[derive(Debug, Clone, PartialEq, Eq)] pub struct PeerInfo { pub peer_id: PeerId, @@ -2755,7 +2815,6 @@ pub struct PeerRecord { #[allow(clippy::large_enum_variant)] pub enum Event { /// An inbound request has been received and handled. - // // Note on the difference between 'request' and 'query': A request is a // single request-response style exchange with a single remote peer. 
A query // is made of multiple requests across multiple remote peers. @@ -2769,7 +2828,8 @@ pub enum Event { result: QueryResult, /// Execution statistics from the query. stats: QueryStats, - /// Indicates which event this is, if therer are multiple responses for a single query. + /// Indicates which event this is, if therer are multiple responses for + /// a single query. step: ProgressStep, }, @@ -2794,7 +2854,8 @@ pub enum Event { /// A peer has connected for whom no listen address is known. /// /// If the peer is to be added to the routing table, a known - /// listen address for the peer must be provided via [`Behaviour::add_address`]. + /// listen address for the peer must be provided via + /// [`Behaviour::add_address`]. UnroutablePeer { peer: PeerId }, /// A connection to a peer has been established for whom a listen address @@ -2873,8 +2934,8 @@ pub enum InboundRequest { num_provider_peers: usize, }, /// A peer sent an add provider request. - /// If filtering [`StoreInserts::FilterBoth`] is enabled, the [`ProviderRecord`] is - /// included. + /// If filtering [`StoreInserts::FilterBoth`] is enabled, the + /// [`ProviderRecord`] is included. /// /// See [`StoreInserts`] and [`Config::set_record_filtering`] for details.. AddProvider { record: Option }, @@ -2884,7 +2945,8 @@ pub enum InboundRequest { present_locally: bool, }, /// A peer sent a put record request. - /// If filtering [`StoreInserts::FilterBoth`] is enabled, the [`Record`] is included. + /// If filtering [`StoreInserts::FilterBoth`] is enabled, the [`Record`] is + /// included. /// /// See [`StoreInserts`] and [`Config::set_record_filtering`]. PutRecord { @@ -2933,12 +2995,13 @@ pub enum GetRecordOk { /// If caching is enabled, these are the peers closest /// _to the record key_ (not the local node) that were queried but /// did not return the record, sorted by distance to the record key - /// from closest to farthest. How many of these are tracked is configured - /// by [`Config::set_caching`]. 
+ /// from closest to farthest. How many of these are tracked is + /// configured by [`Config::set_caching`]. /// /// Writing back the cache at these peers is a manual operation. - /// ie. you may wish to use these candidates with [`Behaviour::put_record_to`] - /// after selecting one of the returned records. + /// ie. you may wish to use these candidates with + /// [`Behaviour::put_record_to`] after selecting one of the + /// returned records. cache_candidates: BTreeMap, }, } @@ -3263,8 +3326,9 @@ pub enum QueryInfo { step: ProgressStep, /// Did we find at least one record? found_a_record: bool, - /// The peers closest to the `key` that were queried but did not return a record, - /// i.e. the peers that are candidates for caching the record. + /// The peers closest to the `key` that were queried but did not return + /// a record, i.e. the peers that are candidates for caching the + /// record. cache_candidates: BTreeMap, }, } @@ -3349,7 +3413,8 @@ pub enum PutRecordPhase { /// The query is replicating the record to the closest nodes to the key. PutRecord { - /// A list of peers the given record has been successfully replicated to. + /// A list of peers the given record has been successfully replicated + /// to. success: Vec, /// Query statistics from the finished `GetClosestPeers` phase. 
get_closest_peers_stats: QueryStats, diff --git a/protocols/kad/src/behaviour/test.rs b/protocols/kad/src/behaviour/test.rs index 7409168ac2a..1be5cd8cbe3 100644 --- a/protocols/kad/src/behaviour/test.rs +++ b/protocols/kad/src/behaviour/test.rs @@ -20,17 +20,14 @@ #![cfg(test)] -use super::*; - -use crate::record::{store::MemoryStore, Key}; -use crate::{K_VALUE, PROTOCOL_NAME, SHA_256_MH}; use futures::{executor::block_on, future::poll_fn, prelude::*}; use futures_timer::Delay; use libp2p_core::{ multiaddr::{multiaddr, Protocol}, multihash::Multihash, transport::MemoryTransport, - upgrade, Transport, + upgrade, + Transport, }; use libp2p_identity as identity; use libp2p_noise as noise; @@ -39,6 +36,14 @@ use libp2p_yamux as yamux; use quickcheck::*; use rand::{random, rngs::StdRng, thread_rng, Rng, SeedableRng}; +use super::*; +use crate::{ + record::{store::MemoryStore, Key}, + K_VALUE, + PROTOCOL_NAME, + SHA_256_MH, +}; + type TestSwarm = Swarm>; fn build_node() -> (Multiaddr, TestSwarm) { @@ -73,12 +78,14 @@ fn build_node_with_config(cfg: Config) -> (Multiaddr, TestSwarm) { (address, swarm) } -/// Builds swarms, each listening on a port. Does *not* connect the nodes together. +/// Builds swarms, each listening on a port. Does *not* connect the nodes +/// together. fn build_nodes(num: usize) -> Vec<(Multiaddr, TestSwarm)> { build_nodes_with_config(num, Default::default()) } -/// Builds swarms, each listening on a port. Does *not* connect the nodes together. +/// Builds swarms, each listening on a port. Does *not* connect the nodes +/// together. fn build_nodes_with_config(num: usize, cfg: Config) -> Vec<(Multiaddr, TestSwarm)> { (0..num) .map(|_| build_node_with_config(cfg.clone())) @@ -164,7 +171,8 @@ fn bootstrap() { let num_group = rng.gen_range(1..(num_total % K_VALUE.get()) + 2); let mut cfg = Config::new(PROTOCOL_NAME); - // Disabling periodic bootstrap and automatic bootstrap to prevent the bootstrap from triggering automatically. 
+ // Disabling periodic bootstrap and automatic bootstrap to prevent the bootstrap + // from triggering automatically. cfg.set_periodic_bootstrap_interval(None); cfg.set_automatic_bootstrap_throttle(None); if rng.gen() { @@ -246,7 +254,8 @@ fn query_iter() { fn run(rng: &mut impl Rng) { let num_total = rng.gen_range(2..20); let mut config = Config::new(PROTOCOL_NAME); - // Disabling periodic bootstrap and automatic bootstrap to prevent the bootstrap from triggering automatically. + // Disabling periodic bootstrap and automatic bootstrap to prevent the bootstrap + // from triggering automatically. config.set_periodic_bootstrap_interval(None); config.set_automatic_bootstrap_throttle(None); let mut swarms = build_connected_nodes_with_config(num_total, 1, config) @@ -323,8 +332,8 @@ fn unresponsive_not_returned_direct() { let _ = tracing_subscriber::fmt() .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) .try_init(); - // Build one node. It contains fake addresses to non-existing nodes. We ask it to find a - // random peer. We make sure that no fake address is returned. + // Build one node. It contains fake addresses to non-existing nodes. We ask it + // to find a random peer. We make sure that no fake address is returned. let mut swarms = build_nodes(1) .into_iter() @@ -368,9 +377,9 @@ fn unresponsive_not_returned_direct() { #[test] fn unresponsive_not_returned_indirect() { - // Build two nodes. Node #2 knows about node #1. Node #1 contains fake addresses to - // non-existing nodes. We ask node #2 to find a random peer. We make sure that no fake address - // is returned. + // Build two nodes. Node #2 knows about node #1. Node #1 contains fake addresses + // to non-existing nodes. We ask node #2 to find a random peer. We make sure + // that no fake address is returned. 
let mut swarms = build_nodes(2); @@ -468,9 +477,19 @@ fn get_closest_with_different_num_results_inner(num_results: usize, replication_ }))) => { assert_eq!(&ok.key[..], search_target.to_bytes().as_slice()); if num_results > k_value { - assert_eq!(ok.peers.len(), k_value, "Failed with replication_factor: {replication_factor}, num_results: {num_results}"); + assert_eq!( + ok.peers.len(), + k_value, + "Failed with replication_factor: {replication_factor}, \ + num_results: {num_results}" + ); } else { - assert_eq!(ok.peers.len(), num_results, "Failed with replication_factor: {replication_factor}, num_results: {num_results}"); + assert_eq!( + ok.peers.len(), + num_results, + "Failed with replication_factor: {replication_factor}, \ + num_results: {num_results}" + ); } return Poll::Ready(()); @@ -561,7 +580,8 @@ fn put_record() { let mut config = Config::new(PROTOCOL_NAME); config.set_replication_factor(replication_factor); - // Disabling periodic bootstrap and automatic bootstrap to prevent the bootstrap from triggering automatically. + // Disabling periodic bootstrap and automatic bootstrap to prevent the bootstrap + // from triggering automatically. config.set_periodic_bootstrap_interval(None); config.set_automatic_bootstrap_throttle(None); if rng.gen() { @@ -933,7 +953,8 @@ fn add_provider() { let mut config = Config::new(PROTOCOL_NAME); config.set_replication_factor(replication_factor); - // Disabling periodic bootstrap and automatic bootstrap to prevent the bootstrap from triggering automatically. + // Disabling periodic bootstrap and automatic bootstrap to prevent the bootstrap + // from triggering automatically. config.set_periodic_bootstrap_interval(None); config.set_automatic_bootstrap_throttle(None); if rng.gen() { @@ -1161,7 +1182,8 @@ fn disjoint_query_does_not_finish_before_all_paths_did() { config.disjoint_query_paths(true); // I.e. setting the amount disjoint paths to be explored to 2. 
config.set_parallelism(NonZeroUsize::new(2).unwrap()); - // Disabling periodic bootstrap and automatic bootstrap to prevent the bootstrap from triggering automatically. + // Disabling periodic bootstrap and automatic bootstrap to prevent the bootstrap + // from triggering automatically. config.set_periodic_bootstrap_interval(None); config.set_automatic_bootstrap_throttle(None); @@ -1218,8 +1240,8 @@ fn disjoint_query_does_not_finish_before_all_paths_did() { } if step.last { panic!( - "Expected query not to finish until all \ - disjoint paths have been explored.", + "Expected query not to finish until all disjoint paths have been \ + explored.", ); } match result { diff --git a/protocols/kad/src/bootstrap.rs b/protocols/kad/src/bootstrap.rs index 40acdfd88ee..afe8b9825a7 100644 --- a/protocols/kad/src/bootstrap.rs +++ b/protocols/kad/src/bootstrap.rs @@ -1,7 +1,9 @@ -use futures::FutureExt; -use std::task::{Context, Poll, Waker}; -use std::time::Duration; +use std::{ + task::{Context, Poll, Waker}, + time::Duration, +}; +use futures::FutureExt; use futures_timer::Delay; /// Default value chosen at ``. @@ -9,22 +11,26 @@ pub(crate) const DEFAULT_AUTOMATIC_THROTTLE: Duration = Duration::from_millis(50 #[derive(Debug)] pub(crate) struct Status { - /// If the user did not disable periodic bootstrap (by providing `None` for `periodic_interval`) - /// this is the periodic interval and the delay of the current period. When `Delay` finishes, - /// a bootstrap will be triggered and the `Delay` will be reset. + /// If the user did not disable periodic bootstrap (by providing `None` for + /// `periodic_interval`) this is the periodic interval and the delay of + /// the current period. When `Delay` finishes, a bootstrap will be + /// triggered and the `Delay` will be reset. interval_and_delay: Option<(Duration, Delay)>, - /// Configured duration to wait before triggering a bootstrap when a new peer - /// is inserted in the routing table. 
`None` if automatic bootstrap is disabled. + /// Configured duration to wait before triggering a bootstrap when a new + /// peer is inserted in the routing table. `None` if automatic bootstrap + /// is disabled. automatic_throttle: Option, - /// Timer that will be set (if automatic bootstrap is not disabled) when a new peer is inserted - /// in the routing table. When it finishes, it will trigger a bootstrap and will be set to `None` - /// again. If an other new peer is inserted in the routing table before this timer finishes, + /// Timer that will be set (if automatic bootstrap is not disabled) when a + /// new peer is inserted in the routing table. When it finishes, it will + /// trigger a bootstrap and will be set to `None` again. If an other new + /// peer is inserted in the routing table before this timer finishes, /// the timer is reset. throttle_timer: Option, - /// Number of bootstrap requests currently in progress. We ensure neither periodic bootstrap - /// or automatic bootstrap trigger new requests when there is still some running. + /// Number of bootstrap requests currently in progress. We ensure neither + /// periodic bootstrap or automatic bootstrap trigger new requests when + /// there is still some running. current_bootstrap_requests: usize, /// Waker to wake up the `poll` method if progress is ready to be made. waker: Option, @@ -44,7 +50,8 @@ impl Status { } } - /// Trigger a bootstrap now or after the configured `automatic_throttle` if configured. + /// Trigger a bootstrap now or after the configured `automatic_throttle` if + /// configured. pub(crate) fn trigger(&mut self) { // Registering `self.throttle_timer` means scheduling a bootstrap. // A bootstrap will be triggered when `self.throttle_timer` finishes. @@ -77,7 +84,8 @@ impl Status { // trigger a bootstrap. self.current_bootstrap_requests += 1; - // Resetting the Status timers since a bootstrap request is being triggered right now. 
+ // Resetting the Status timers since a bootstrap request is being triggered + // right now. self.reset_timers(); } @@ -106,18 +114,21 @@ impl Status { if let Some(throttle_delay) = &mut self.throttle_timer { // A `throttle_timer` has been registered. It means one or more peers have been - // inserted into the routing table and that a bootstrap request should be triggered. - // However, to not risk cascading bootstrap requests, we wait a little time to ensure - // the user will not add more peers in the routing table in the next "throttle_timer" remaining. + // inserted into the routing table and that a bootstrap request should be + // triggered. However, to not risk cascading bootstrap requests, we + // wait a little time to ensure the user will not add more peers in + // the routing table in the next "throttle_timer" remaining. if throttle_delay.poll_unpin(cx).is_ready() { // The `throttle_timer` is finished, triggering bootstrap right now. // The call to `on_started` will reset `throttle_delay`. return Poll::Ready(()); } - // The `throttle_timer` is not finished but the periodic interval for triggering bootstrap might be reached. + // The `throttle_timer` is not finished but the periodic interval + // for triggering bootstrap might be reached. } else { - // No new peer has recently been inserted into the routing table or automatic bootstrap is disabled. + // No new peer has recently been inserted into the routing table or + // automatic bootstrap is disabled. } // Checking if the user has enabled the periodic bootstrap feature. @@ -131,7 +142,8 @@ impl Status { // The user disabled periodic bootstrap. } - // Registering the `waker` so that we can wake up when calling `on_new_peer_in_routing_table`. + // Registering the `waker` so that we can wake up when calling + // `on_new_peer_in_routing_table`. 
self.waker = Some(cx.waker().clone()); Poll::Pending } @@ -175,9 +187,10 @@ impl futures::Future for ThrottleTimer { #[cfg(test)] mod tests { - use super::*; use web_time::Instant; + use super::*; + const MS_5: Duration = Duration::from_millis(5); const MS_100: Duration = Duration::from_millis(100); @@ -212,7 +225,8 @@ mod tests { async_std::future::timeout(Duration::from_millis(500), status.next()) .await .is_ok(), - "bootstrap to be triggered in less then the configured delay because we connected to a new peer" + "bootstrap to be triggered in less then the configured delay because we connected to \ + a new peer" ); } @@ -237,7 +251,8 @@ mod tests { async_std::future::timeout(MS_5 * 2, status.next()) .await .is_ok(), - "bootstrap to be triggered in less then the configured periodic delay because we connected to a new peer" + "bootstrap to be triggered in less then the configured periodic delay because we \ + connected to a new peer" ); } @@ -318,10 +333,11 @@ mod tests { Delay::new(MS_100 - MS_5).await; assert!( - async_std::future::timeout(MS_5*2, status.next()) + async_std::future::timeout(MS_5 * 2, status.next()) .await .is_ok(), - "bootstrap to be triggered in the configured throttle delay because we connected to a new peer" + "bootstrap to be triggered in the configured throttle delay because we connected to a \ + new peer" ); } diff --git a/protocols/kad/src/handler.rs b/protocols/kad/src/handler.rs index 384ebc3f2b1..c8b1829abcf 100644 --- a/protocols/kad/src/handler.rs +++ b/protocols/kad/src/handler.rs @@ -18,35 +18,52 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::behaviour::Mode; -use crate::protocol::{ - KadInStreamSink, KadOutStreamSink, KadPeer, KadRequestMsg, KadResponseMsg, ProtocolConfig, +use std::{ + collections::VecDeque, + error, + fmt, + io, + marker::PhantomData, + pin::Pin, + task::{Context, Poll, Waker}, + time::Duration, }; -use crate::record::{self, Record}; -use crate::QueryId; + use either::Either; -use futures::channel::oneshot; -use futures::prelude::*; -use futures::stream::SelectAll; +use futures::{channel::oneshot, prelude::*, stream::SelectAll}; use libp2p_core::{upgrade, ConnectedPoint}; use libp2p_identity::PeerId; -use libp2p_swarm::handler::{ConnectionEvent, FullyNegotiatedInbound, FullyNegotiatedOutbound}; use libp2p_swarm::{ - ConnectionHandler, ConnectionHandlerEvent, Stream, StreamUpgradeError, SubstreamProtocol, + handler::{ConnectionEvent, FullyNegotiatedInbound, FullyNegotiatedOutbound}, + ConnectionHandler, + ConnectionHandlerEvent, + Stream, + StreamUpgradeError, + SubstreamProtocol, SupportedProtocols, }; -use std::collections::VecDeque; -use std::task::Waker; -use std::time::Duration; -use std::{error, fmt, io, marker::PhantomData, pin::Pin, task::Context, task::Poll}; + +use crate::{ + behaviour::Mode, + protocol::{ + KadInStreamSink, + KadOutStreamSink, + KadPeer, + KadRequestMsg, + KadResponseMsg, + ProtocolConfig, + }, + record::{self, Record}, + QueryId, +}; const MAX_NUM_STREAMS: usize = 32; /// Protocol handler that manages substreams for the Kademlia protocol /// on a single connection with a peer. /// -/// The handler will automatically open a Kademlia substream with the remote for each request we -/// make. +/// The handler will automatically open a Kademlia substream with the remote for +/// each request we make. /// /// It also handles requests made by the remote. 
pub struct Handler { @@ -63,12 +80,14 @@ pub struct Handler { outbound_substreams: futures_bounded::FuturesTupleSet>, QueryId>, - /// Contains one [`oneshot::Sender`] per outbound stream that we have requested. + /// Contains one [`oneshot::Sender`] per outbound stream that we have + /// requested. pending_streams: VecDeque, StreamUpgradeError>>>, /// List of outbound substreams that are waiting to become active next. - /// Contains the request we want to send, and the user data if we expect an answer. + /// Contains the request we want to send, and the user data if we expect an + /// answer. pending_messages: VecDeque<(KadRequestMsg, QueryId)>, /// List of active inbound substreams with the state they are in. @@ -106,7 +125,8 @@ enum InboundSubstreamState { connection_id: UniqueConnecId, substream: KadInStreamSink, }, - /// Waiting for the behaviour to send a [`HandlerIn`] event containing the response. + /// Waiting for the behaviour to send a [`HandlerIn`] event containing the + /// response. WaitingBehaviour(UniqueConnecId, KadInStreamSink, Option), /// Waiting to send an answer back to the remote. PendingSend(UniqueConnecId, KadInStreamSink, KadResponseMsg), @@ -179,14 +199,16 @@ impl InboundSubstreamState { #[derive(Debug)] pub enum HandlerEvent { /// The configured protocol name has been confirmed by the peer through - /// a successfully negotiated substream or by learning the supported protocols of the remote. + /// a successfully negotiated substream or by learning the supported + /// protocols of the remote. ProtocolConfirmed { endpoint: ConnectedPoint }, - /// The configured protocol name(s) are not or no longer supported by the peer on the provided - /// connection and it should be removed from the routing table. + /// The configured protocol name(s) are not or no longer supported by the + /// peer on the provided connection and it should be removed from the + /// routing table. 
ProtocolNotSupported { endpoint: ConnectedPoint }, - /// Request for the list of nodes whose IDs are the closest to `key`. The number of nodes - /// returned is not specified, but should be around 20. + /// Request for the list of nodes whose IDs are the closest to `key`. The + /// number of nodes returned is not specified, but should be around 20. FindNodeReq { /// The key for which to locate the closest nodes. key: Vec, @@ -202,8 +224,8 @@ pub enum HandlerEvent { query_id: QueryId, }, - /// Same as `FindNodeReq`, but should also return the entries of the local providers list for - /// this key. + /// Same as `FindNodeReq`, but should also return the entries of the local + /// providers list for this key. GetProvidersReq { /// The key for which providers are requested. key: record::Key, @@ -322,8 +344,8 @@ pub enum HandlerIn { /// Change the connection to the specified mode. ReconfigureMode { new_mode: Mode }, - /// Request for the list of nodes whose IDs are the closest to `key`. The number of nodes - /// returned is not specified, but should be around 20. + /// Request for the list of nodes whose IDs are the closest to `key`. The + /// number of nodes returned is not specified, but should be around 20. FindNodeReq { /// Identifier of the node. key: Vec, @@ -341,8 +363,8 @@ pub enum HandlerIn { request_id: RequestId, }, - /// Same as `FindNodeReq`, but should also return the entries of the local providers list for - /// this key. + /// Same as `FindNodeReq`, but should also return the entries of the local + /// providers list for this key. GetProvidersReq { /// Identifier being searched. key: record::Key, @@ -364,8 +386,8 @@ pub enum HandlerIn { /// Indicates that this provider is known for this key. /// - /// The API of the handler doesn't expose any event that allows you to know whether this - /// succeeded. + /// The API of the handler doesn't expose any event that allows you to know + /// whether this succeeded. 
AddProvider { /// Key for which we should add providers. key: record::Key, @@ -411,8 +433,8 @@ pub enum HandlerIn { }, } -/// Unique identifier for a request. Must be passed back in order to answer a request from -/// the remote. +/// Unique identifier for a request. Must be passed back in order to answer a +/// request from the remote. #[derive(Debug, PartialEq, Eq, Copy, Clone)] pub struct RequestId { /// Unique identifier for an incoming connection. @@ -497,8 +519,8 @@ impl Handler { ::InboundOpenInfo, >, ) { - // If `self.allow_listening` is false, then we produced a `DeniedUpgrade` and `protocol` - // is a `Infallible`. + // If `self.allow_listening` is false, then we produced a `DeniedUpgrade` and + // `protocol` is a `Infallible`. let protocol = match protocol { future::Either::Left(p) => p, // TODO: remove when Rust 1.82 is MSRV @@ -550,7 +572,8 @@ impl Handler { }); } - /// Takes the given [`KadRequestMsg`] and composes it into an outbound request-response protocol handshake using a [`oneshot::channel`]. + /// Takes the given [`KadRequestMsg`] and composes it into an outbound + /// request-response protocol handshake using a [`oneshot::channel`]. fn queue_new_stream(&mut self, id: QueryId, msg: KadRequestMsg) { let (sender, receiver) = oneshot::channel(); @@ -1019,7 +1042,8 @@ impl futures::Stream for InboundSubstreamState { } } -/// Process a Kademlia message that's supposed to be a response to one of our requests. +/// Process a Kademlia message that's supposed to be a response to one of our +/// requests. 
fn process_kad_response(event: KadResponseMsg, query_id: QueryId) -> HandlerEvent { // TODO: must check that the response corresponds to the request match event { @@ -1060,10 +1084,11 @@ fn process_kad_response(event: KadResponseMsg, query_id: QueryId) -> HandlerEven #[cfg(test)] mod tests { - use super::*; use quickcheck::{Arbitrary, Gen}; use tracing_subscriber::EnvFilter; + use super::*; + impl Arbitrary for ProtocolStatus { fn arbitrary(g: &mut Gen) -> Self { Self { diff --git a/protocols/kad/src/jobs.rs b/protocols/kad/src/jobs.rs index 537f652b7a4..15886f936ee 100644 --- a/protocols/kad/src/jobs.rs +++ b/protocols/kad/src/jobs.rs @@ -25,12 +25,12 @@ //! To ensure persistence of records in the DHT, a Kademlia node //! must periodically (re-)publish and (re-)replicate its records: //! -//! 1. (Re-)publishing: The original publisher or provider of a record -//! must regularly re-publish in order to prolong the expiration. +//! 1. (Re-)publishing: The original publisher or provider of a record must +//! regularly re-publish in order to prolong the expiration. //! //! 2. (Re-)replication: Every node storing a replica of a record must -//! regularly re-replicate it to the closest nodes to the key in -//! order to ensure the record is present at these nodes. +//! regularly re-replicate it to the closest nodes to the key in order to +//! ensure the record is present at these nodes. //! //! Re-publishing primarily ensures persistence of the record beyond its //! initial TTL, for as long as the publisher stores (or provides) the record, @@ -41,11 +41,11 @@ //! //! This module implements two periodic jobs: //! -//! * [`PutRecordJob`]: For (re-)publication and (re-)replication of -//! regular (value-)records. +//! * [`PutRecordJob`]: For (re-)publication and (re-)replication of regular +//! (value-)records. //! -//! * [`AddProviderJob`]: For (re-)publication of provider records. -//! Provider records currently have no separate replication mechanism. +//! 
* [`AddProviderJob`]: For (re-)publication of provider records. Provider +//! records currently have no separate replication mechanism. //! //! A periodic job is driven like a `Future` or `Stream` by `poll`ing it. //! Once a job starts running it emits records to send to the `k` closest @@ -59,19 +59,24 @@ //! > to replicate from the `RecordStore` when it starts and thus, to account //! > for the worst case, it temporarily requires additional memory proportional //! > to the size of all stored records. As a job runs, the records are moved -//! > out of the job to the consumer, where they can be dropped after being sent. +//! > out of the job to the consumer, where they can be dropped after being +//! > sent. + +use std::{ + collections::HashSet, + pin::Pin, + task::{Context, Poll}, + time::Duration, + vec, +}; -use crate::record::{self, store::RecordStore, ProviderRecord, Record}; use futures::prelude::*; use futures_timer::Delay; use libp2p_identity::PeerId; -use std::collections::HashSet; -use std::pin::Pin; -use std::task::{Context, Poll}; -use std::time::Duration; -use std::vec; use web_time::Instant; +use crate::record::{self, store::RecordStore, ProviderRecord, Record}; + /// The maximum number of queries towards which background jobs /// are allowed to start new queries on an invocation of /// `Behaviour::poll`. @@ -335,12 +340,13 @@ impl AddProviderJob { #[cfg(test)] mod tests { - use super::*; - use crate::record::store::MemoryStore; use futures::{executor::block_on, future::poll_fn}; use quickcheck::*; use rand::Rng; + use super::*; + use crate::record::store::MemoryStore; + fn rand_put_record_job() -> PutRecordJob { let mut rng = rand::thread_rng(); let id = PeerId::random(); diff --git a/protocols/kad/src/kbucket.rs b/protocols/kad/src/kbucket.rs index 28d7df03917..a0304f81c92 100644 --- a/protocols/kad/src/kbucket.rs +++ b/protocols/kad/src/kbucket.rs @@ -27,10 +27,11 @@ //! //! When the bucket associated with the `Key` of an inserted entry is full //! 
but contains disconnected nodes, it accepts a [`PendingEntry`]. -//! Pending entries are inserted lazily when their timeout is found to be expired -//! upon querying the `KBucketsTable`. When that happens, the `KBucketsTable` records -//! an [`AppliedPending`] result which must be consumed by calling [`take_applied_pending`] -//! regularly and / or after performing lookup operations like [`entry`] and [`closest`]. +//! Pending entries are inserted lazily when their timeout is found to be +//! expired upon querying the `KBucketsTable`. When that happens, the +//! `KBucketsTable` records an [`AppliedPending`] result which must be consumed +//! by calling [`take_applied_pending`] regularly and / or after performing +//! lookup operations like [`entry`] and [`closest`]. //! //! [`entry`]: KBucketsTable::entry //! [`closest`]: KBucketsTable::closest @@ -45,24 +46,24 @@ // The routing table is currently implemented as a fixed-size "array" of // buckets, ordered by increasing distance relative to a local key // that identifies the local peer. This is an often-used, simplified -// implementation that approximates the properties of the b-tree (or prefix tree) -// implementation described in the full paper [0], whereby buckets are split on-demand. -// This should be treated as an implementation detail, however, so that the -// implementation may change in the future without breaking the API. +// implementation that approximates the properties of the b-tree (or prefix +// tree) implementation described in the full paper [0], whereby buckets are +// split on-demand. This should be treated as an implementation detail, however, +// so that the implementation may change in the future without breaking the API. // // 2. Replacement Cache // // In this implementation, the "replacement cache" for unresponsive peers // consists of a single entry per bucket. 
Furthermore, this implementation is // currently tailored to connection-oriented transports, meaning that the -// "LRU"-based ordering of entries in a bucket is actually based on the last reported -// connection status of the corresponding peers, from least-recently (dis)connected to -// most-recently (dis)connected, and controlled through the `Entry` API. As a result, -// the nodes in the buckets are not reordered as a result of RPC activity, but only as a -// result of nodes being marked as connected or disconnected. In particular, -// if a bucket is full and contains only entries for peers that are considered -// connected, no pending entry is accepted. See the `bucket` submodule for -// further details. +// "LRU"-based ordering of entries in a bucket is actually based on the last +// reported connection status of the corresponding peers, from least-recently +// (dis)connected to most-recently (dis)connected, and controlled through the +// `Entry` API. As a result, the nodes in the buckets are not reordered as a +// result of RPC activity, but only as a result of nodes being marked as +// connected or disconnected. In particular, if a bucket is full and contains +// only entries for peers that are considered connected, no pending entry is +// accepted. See the `bucket` submodule for further details. // // [0]: https://pdos.csail.mit.edu/~petar/papers/maymounkov-kademlia-lncs.pdf @@ -72,13 +73,11 @@ mod entry; #[allow(clippy::assign_op_pattern)] mod key; -pub use bucket::NodeStatus; -pub use entry::*; +use std::{collections::VecDeque, num::NonZeroUsize, time::Duration}; use bucket::KBucket; -use std::collections::VecDeque; -use std::num::NonZeroUsize; -use std::time::Duration; +pub use bucket::NodeStatus; +pub use entry::*; use web_time::Instant; /// Maximum number of k-buckets. @@ -132,8 +131,8 @@ pub(crate) struct KBucketsTable { applied_pending: VecDeque>, } -/// A (type-safe) index into a `KBucketsTable`, i.e. 
a non-negative integer in the -/// interval `[0, NUM_BUCKETS)`. +/// A (type-safe) index into a `KBucketsTable`, i.e. a non-negative integer in +/// the interval `[0, NUM_BUCKETS)`. #[derive(Debug, Copy, Clone, PartialEq, Eq)] struct BucketIndex(usize); @@ -202,8 +201,8 @@ where &self.local_key } - /// Returns an `Entry` for the given key, representing the state of the entry - /// in the routing table. + /// Returns an `Entry` for the given key, representing the state of the + /// entry in the routing table. /// /// Returns `None` in case the key points to the local node. pub(crate) fn entry<'a>(&'a mut self, key: &'a TKey) -> Option> { @@ -254,15 +253,17 @@ where /// Consumes the next applied pending entry, if any. /// - /// When an entry is attempted to be inserted and the respective bucket is full, - /// it may be recorded as pending insertion after a timeout, see [`InsertResult::Pending`]. + /// When an entry is attempted to be inserted and the respective bucket is + /// full, it may be recorded as pending insertion after a timeout, see + /// [`InsertResult::Pending`]. /// - /// If the oldest currently disconnected entry in the respective bucket does not change - /// its status until the timeout of pending entry expires, it is evicted and - /// the pending entry inserted instead. These insertions of pending entries - /// happens lazily, whenever the `KBucketsTable` is accessed, and the corresponding - /// buckets are updated accordingly. The fact that a pending entry was applied is - /// recorded in the `KBucketsTable` in the form of `AppliedPending` results, which must be + /// If the oldest currently disconnected entry in the respective bucket does + /// not change its status until the timeout of pending entry expires, it + /// is evicted and the pending entry inserted instead. These insertions + /// of pending entries happens lazily, whenever the `KBucketsTable` is + /// accessed, and the corresponding buckets are updated accordingly. 
The + /// fact that a pending entry was applied is recorded in the + /// `KBucketsTable` in the form of `AppliedPending` results, which must be /// consumed by calling this function. pub(crate) fn take_applied_pending(&mut self) -> Option> { self.applied_pending.pop_front() @@ -292,8 +293,8 @@ where } } - /// Returns an iterator over the nodes closest to the `target` key, ordered by - /// increasing distance. + /// Returns an iterator over the nodes closest to the `target` key, ordered + /// by increasing distance. pub(crate) fn closest<'a, T>( &'a mut self, target: &'a T, @@ -366,9 +367,10 @@ struct ClosestIter<'a, TTarget, TKey, TVal, TMap, TOut> { fmap: TMap, } -/// An iterator over the bucket indices, in the order determined by the `Distance` of -/// a target from the `local_key`, such that the entries in the buckets are incrementally -/// further away from the target, starting with the bucket covering the target. +/// An iterator over the bucket indices, in the order determined by the +/// `Distance` of a target from the `local_key`, such that the entries in the +/// buckets are incrementally further away from the target, starting with the +/// bucket covering the target. struct ClosestBucketsIter { /// The distance to the `local_key`. distance: Distance, @@ -382,10 +384,10 @@ enum ClosestBucketsIterState { /// then transitions to `ZoomIn`. Start(BucketIndex), /// The iterator "zooms in" to yield the next bucket containing nodes that - /// are incrementally closer to the local node but further from the `target`. - /// These buckets are identified by a `1` in the corresponding bit position - /// of the distance bit string. When bucket `0` is reached, the iterator - /// transitions to `ZoomOut`. + /// are incrementally closer to the local node but further from the + /// `target`. These buckets are identified by a `1` in the corresponding + /// bit position of the distance bit string. When bucket `0` is reached, + /// the iterator transitions to `ZoomOut`. 
ZoomIn(BucketIndex), /// Once bucket `0` has been reached, the iterator starts "zooming out" /// to buckets containing nodes that are incrementally further away from @@ -539,10 +541,11 @@ where /// Generates a random distance that falls into this bucket. /// - /// Together with a known key `a` (e.g. the local key), a random distance `d` for - /// this bucket w.r.t `k` gives rise to the corresponding (random) key `b` s.t. - /// the XOR distance between `a` and `b` is `d`. In other words, it gives - /// rise to a random key falling into this bucket. See [`key::Key::for_distance`]. + /// Together with a known key `a` (e.g. the local key), a random distance + /// `d` for this bucket w.r.t `k` gives rise to the corresponding + /// (random) key `b` s.t. the XOR distance between `a` and `b` is `d`. + /// In other words, it gives rise to a random key falling into this + /// bucket. See [`key::Key::for_distance`]. pub fn rand_distance(&self, rng: &mut impl rand::Rng) -> Distance { self.index.rand_distance(rng) } @@ -561,10 +564,11 @@ where #[cfg(test)] mod tests { - use super::*; use libp2p_identity::PeerId; use quickcheck::*; + use super::*; + type TestTable = KBucketsTable; impl Arbitrary for TestTable { diff --git a/protocols/kad/src/kbucket/bucket.rs b/protocols/kad/src/kbucket/bucket.rs index 1426017aa7a..1ed816446d6 100644 --- a/protocols/kad/src/kbucket/bucket.rs +++ b/protocols/kad/src/kbucket/bucket.rs @@ -36,7 +36,8 @@ pub(crate) struct PendingNode { /// The status of the pending node. status: NodeStatus, - /// The instant at which the pending node is eligible for insertion into a bucket. + /// The instant at which the pending node is eligible for insertion into a + /// bucket. replace: Instant, } @@ -102,12 +103,12 @@ pub(crate) struct KBucket { /// The position (index) in `nodes` that marks the first connected node. 
/// - /// Since the entries in `nodes` are ordered from least-recently connected to - /// most-recently connected, all entries above this index are also considered - /// connected, i.e. the range `[0, first_connected_pos)` marks the sub-list of entries - /// that are considered disconnected and the range - /// `[first_connected_pos, capacity)` marks sub-list of entries that are - /// considered connected. + /// Since the entries in `nodes` are ordered from least-recently connected + /// to most-recently connected, all entries above this index are also + /// considered connected, i.e. the range `[0, first_connected_pos)` + /// marks the sub-list of entries that are considered disconnected and + /// the range `[first_connected_pos, capacity)` marks sub-list of + /// entries that are considered connected. /// /// `None` indicates that there are no connected entries in the bucket, i.e. /// the bucket is either empty, or contains only entries for peers that are @@ -131,14 +132,15 @@ pub(crate) struct KBucket { pub(crate) enum InsertResult { /// The entry has been successfully inserted. Inserted, - /// The entry is pending insertion because the relevant bucket is currently full. - /// The entry is inserted after a timeout elapsed, if the status of the - /// least-recently connected (and currently disconnected) node in the bucket - /// is not updated before the timeout expires. + /// The entry is pending insertion because the relevant bucket is currently + /// full. The entry is inserted after a timeout elapsed, if the status + /// of the least-recently connected (and currently disconnected) node in + /// the bucket is not updated before the timeout expires. Pending { - /// The key of the least-recently connected entry that is currently considered - /// disconnected and whose corresponding peer should be checked for connectivity - /// in order to prevent it from being evicted. 
If connectivity to the peer is + /// The key of the least-recently connected entry that is currently + /// considered disconnected and whose corresponding peer should + /// be checked for connectivity in order to prevent it from + /// being evicted. If connectivity to the peer is /// re-established, the corresponding entry should be updated with /// [`NodeStatus::Connected`]. disconnected: TKey, @@ -191,7 +193,8 @@ where self.pending.as_ref() } - /// Returns a mutable reference to the pending node of the bucket, if there is any. + /// Returns a mutable reference to the pending node of the bucket, if there + /// is any. pub(crate) fn pending_mut(&mut self) -> Option<&mut PendingNode> { self.pending.as_mut() } @@ -203,7 +206,8 @@ where .filter(|p| p.node.key.as_ref() == key.as_ref()) } - /// Returns an iterator over the nodes in the bucket, together with their status. + /// Returns an iterator over the nodes in the bucket, together with their + /// status. pub(crate) fn iter(&self) -> impl Iterator, NodeStatus)> { self.nodes .iter() @@ -311,19 +315,20 @@ where /// /// The status of the node to insert determines the result as follows: /// - /// * `NodeStatus::Connected`: If the bucket is full and either all nodes are connected - /// or there is already a pending node, insertion fails with `InsertResult::Full`. - /// If the bucket is full but at least one node is disconnected and there is no pending - /// node, the new node is inserted as pending, yielding `InsertResult::Pending`. - /// Otherwise the bucket has free slots and the new node is added to the end of the + /// * `NodeStatus::Connected`: If the bucket is full and either all nodes + /// are connected or there is already a pending node, insertion fails + /// with `InsertResult::Full`. If the bucket is full but at least one + /// node is disconnected and there is no pending node, the new node is + /// inserted as pending, yielding `InsertResult::Pending`. 
Otherwise the + /// bucket has free slots and the new node is added to the end of the /// bucket as the most-recently connected node. /// - /// * `NodeStatus::Disconnected`: If the bucket is full, insertion fails with - /// `InsertResult::Full`. Otherwise the bucket has free slots and the new node - /// is inserted at the position preceding the first connected node, - /// i.e. as the most-recently disconnected node. If there are no connected nodes, - /// the new node is added as the last element of the bucket. - /// + /// * `NodeStatus::Disconnected`: If the bucket is full, insertion fails + /// with `InsertResult::Full`. Otherwise the bucket has free slots and + /// the new node is inserted at the position preceding the first + /// connected node, i.e. as the most-recently disconnected node. If + /// there are no connected nodes, the new node is added as the last + /// element of the bucket. pub(crate) fn insert( &mut self, node: Node, @@ -416,7 +421,8 @@ where self.first_connected_pos.map_or(0, |i| self.nodes.len() - i) } - /// Gets the number of entries in the bucket that are considered disconnected. + /// Gets the number of entries in the bucket that are considered + /// disconnected. #[cfg(test)] pub(crate) fn num_disconnected(&self) -> usize { self.nodes.len() - self.num_connected() @@ -443,10 +449,11 @@ where #[cfg(test)] mod tests { - use super::*; use libp2p_identity::PeerId; use quickcheck::*; + use super::*; + impl Arbitrary for KBucket, ()> { fn arbitrary(g: &mut Gen) -> KBucket, ()> { let timeout = Duration::from_secs(g.gen_range(1..g.size()) as u64); @@ -552,7 +559,8 @@ mod tests { x => panic!("{x:?}"), } - // One-by-one fill the bucket with connected nodes, replacing the disconnected ones. + // One-by-one fill the bucket with connected nodes, replacing the disconnected + // ones. 
for i in 0..bucket.capacity { let (first, first_status) = bucket.iter().next().unwrap(); let first_disconnected = first.clone(); diff --git a/protocols/kad/src/kbucket/entry.rs b/protocols/kad/src/kbucket/entry.rs index 808db08d858..bdf8b9b5a18 100644 --- a/protocols/kad/src/kbucket/entry.rs +++ b/protocols/kad/src/kbucket/entry.rs @@ -23,7 +23,6 @@ pub(crate) use super::bucket::{AppliedPending, InsertResult, Node, K_VALUE}; pub use super::key::*; - use super::*; /// An immutable by-reference view of a bucket entry. diff --git a/protocols/kad/src/kbucket/key.rs b/protocols/kad/src/kbucket/key.rs index f35849c6b26..3b59cef37d5 100644 --- a/protocols/kad/src/kbucket/key.rs +++ b/protocols/kad/src/kbucket/key.rs @@ -18,15 +18,22 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::record; +use std::{ + borrow::Borrow, + hash::{Hash, Hasher}, +}; + use libp2p_core::multihash::Multihash; use libp2p_identity::PeerId; -use sha2::digest::generic_array::{typenum::U32, GenericArray}; -use sha2::{Digest, Sha256}; -use std::borrow::Borrow; -use std::hash::{Hash, Hasher}; +use sha2::{ + digest::generic_array::{typenum::U32, GenericArray}, + Digest, + Sha256, +}; use uint::*; +use crate::record; + construct_uint! { /// 256-bit unsigned integer. pub(super) struct U256(4); @@ -37,8 +44,8 @@ construct_uint! { /// Keys in the DHT keyspace identify both the participating nodes, as well as /// the records stored in the DHT. /// -/// `Key`s have an XOR metric as defined in the Kademlia paper, i.e. the bitwise XOR of -/// the hash digests, interpreted as an integer. See [`Key::distance`]. +/// `Key`s have an XOR metric as defined in the Kademlia paper, i.e. the bitwise +/// XOR of the hash digests, interpreted as an integer. See [`Key::distance`]. 
#[derive(Clone, Copy, Debug)] pub struct Key { preimage: T, @@ -200,9 +207,10 @@ impl Distance { #[cfg(test)] mod tests { + use quickcheck::*; + use super::*; use crate::SHA_256_MH; - use quickcheck::*; impl Arbitrary for Key { fn arbitrary(_: &mut Gen) -> Key { diff --git a/protocols/kad/src/lib.rs b/protocols/kad/src/lib.rs index 060bfc518e4..070b02d78b1 100644 --- a/protocols/kad/src/lib.rs +++ b/protocols/kad/src/lib.rs @@ -50,44 +50,81 @@ mod proto { include!("generated/mod.rs"); pub use self::dht::pb::{ mod_Message::{ConnectionType, MessageType, Peer}, - Message, Record, + Message, + Record, }; } +use std::num::NonZeroUsize; + pub use addresses::Addresses; pub use behaviour::{ - AddProviderContext, AddProviderError, AddProviderOk, AddProviderPhase, AddProviderResult, - BootstrapError, BootstrapOk, BootstrapResult, GetClosestPeersError, GetClosestPeersOk, - GetClosestPeersResult, GetProvidersError, GetProvidersOk, GetProvidersResult, GetRecordError, - GetRecordOk, GetRecordResult, InboundRequest, Mode, NoKnownPeers, PeerInfo, PeerRecord, - PutRecordContext, PutRecordError, PutRecordOk, PutRecordPhase, PutRecordResult, QueryInfo, - QueryMut, QueryRef, QueryResult, QueryStats, RoutingUpdate, -}; -pub use behaviour::{ - Behaviour, BucketInserts, Caching, Config, Event, ProgressStep, Quorum, StoreInserts, + AddProviderContext, + AddProviderError, + AddProviderOk, + AddProviderPhase, + AddProviderResult, + Behaviour, + BootstrapError, + BootstrapOk, + BootstrapResult, + BucketInserts, + Caching, + Config, + Event, + GetClosestPeersError, + GetClosestPeersOk, + GetClosestPeersResult, + GetProvidersError, + GetProvidersOk, + GetProvidersResult, + GetRecordError, + GetRecordOk, + GetRecordResult, + InboundRequest, + Mode, + NoKnownPeers, + PeerInfo, + PeerRecord, + ProgressStep, + PutRecordContext, + PutRecordError, + PutRecordOk, + PutRecordPhase, + PutRecordResult, + QueryInfo, + QueryMut, + QueryRef, + QueryResult, + QueryStats, + Quorum, + RoutingUpdate, + 
StoreInserts, }; pub use kbucket::{ - Distance as KBucketDistance, EntryView, KBucketRef, Key as KBucketKey, NodeStatus, + Distance as KBucketDistance, + EntryView, + KBucketRef, + Key as KBucketKey, + NodeStatus, }; +use libp2p_swarm::StreamProtocol; pub use protocol::{ConnectionType, KadPeer}; pub use query::QueryId; pub use record::{store, Key as RecordKey, ProviderRecord, Record}; -use libp2p_swarm::StreamProtocol; -use std::num::NonZeroUsize; - /// The `k` parameter of the Kademlia specification. /// /// This parameter determines: /// /// 1) The (fixed) maximum number of nodes in a bucket. -/// 2) The (default) replication factor, which in turn determines: -/// a) The number of closer peers returned in response to a request. -/// b) The number of closest peers to a key to search for in an iterative query. +/// 2) The (default) replication factor, which in turn determines: a) The +/// number of closer peers returned in response to a request. b) The number +/// of closest peers to a key to search for in an iterative query. /// -/// The choice of (1) is fixed to this constant. The replication factor is configurable -/// but should generally be no greater than `K_VALUE`. All nodes in a Kademlia -/// DHT should agree on the choices made for (1) and (2). +/// The choice of (1) is fixed to this constant. The replication factor is +/// configurable but should generally be no greater than `K_VALUE`. All nodes in +/// a Kademlia DHT should agree on the choices made for (1) and (2). /// /// The current value is `20`. pub const K_VALUE: NonZeroUsize = unsafe { NonZeroUsize::new_unchecked(20) }; @@ -104,6 +141,7 @@ pub const ALPHA_VALUE: NonZeroUsize = unsafe { NonZeroUsize::new_unchecked(3) }; pub const PROTOCOL_NAME: StreamProtocol = protocol::DEFAULT_PROTO_NAME; -/// Constant shared across tests for the [`Multihash`](libp2p_core::multihash::Multihash) type. +/// Constant shared across tests for the +/// [`Multihash`](libp2p_core::multihash::Multihash) type. 
#[cfg(test)] const SHA_256_MH: u64 = 0x12; diff --git a/protocols/kad/src/protocol.rs b/protocols/kad/src/protocol.rs index 9d2ef56f5d8..e131cc220e6 100644 --- a/protocols/kad/src/protocol.rs +++ b/protocols/kad/src/protocol.rs @@ -21,26 +21,30 @@ //! The Kademlia connection protocol upgrade and associated message types. //! //! The connection protocol upgrade is provided by [`ProtocolConfig`], with the -//! request and response types [`KadRequestMsg`] and [`KadResponseMsg`], respectively. -//! The upgrade's output is a `Sink + Stream` of messages. The `Stream` component is used -//! to poll the underlying transport for incoming messages, and the `Sink` component -//! is used to send messages to remote peers. +//! request and response types [`KadRequestMsg`] and [`KadResponseMsg`], +//! respectively. The upgrade's output is a `Sink + Stream` of messages. The +//! `Stream` component is used to poll the underlying transport for incoming +//! messages, and the `Sink` component is used to send messages to remote peers. + +use std::{io, iter, marker::PhantomData, time::Duration}; -use crate::proto; -use crate::record::{self, Record}; use asynchronous_codec::{Decoder, Encoder, Framed}; use bytes::BytesMut; use futures::prelude::*; -use libp2p_core::upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}; -use libp2p_core::Multiaddr; +use libp2p_core::{ + upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}, + Multiaddr, +}; use libp2p_identity::PeerId; use libp2p_swarm::StreamProtocol; -use std::marker::PhantomData; -use std::time::Duration; -use std::{io, iter}; use tracing::debug; use web_time::Instant; +use crate::{ + proto, + record::{self, Record}, +}; + /// The protocol name used for negotiating with multistream-select. pub(crate) const DEFAULT_PROTO_NAME: StreamProtocol = StreamProtocol::new("/ipfs/kad/1.0.0"); /// The default maximum size for a varint length-delimited packet. 
@@ -87,7 +91,8 @@ impl From for proto::ConnectionType { pub struct KadPeer { /// Identifier of the peer. pub node_id: PeerId, - /// The multiaddresses that the sender think can be used in order to reach the peer. + /// The multiaddresses that the sender think can be used in order to reach + /// the peer. pub multiaddrs: Vec, /// How the sender is connected to that remote. pub connection_ty: ConnectionType, @@ -98,8 +103,8 @@ impl TryFrom for KadPeer { type Error = io::Error; fn try_from(peer: proto::Peer) -> Result { - // TODO: this is in fact a CID; not sure if this should be handled in `from_bytes` or - // as a special case here + // TODO: this is in fact a CID; not sure if this should be handled in + // `from_bytes` or as a special case here let node_id = PeerId::from_bytes(&peer.id).map_err(|_| invalid_data("invalid peer id"))?; let mut addrs = Vec::with_capacity(peer.addrs.len()); @@ -131,8 +136,9 @@ impl From for proto::Peer { } } -/// Configuration for a Kademlia connection upgrade. When applied to a connection, turns this -/// connection into a `Stream + Sink` whose items are of type `KadRequestMsg` and `KadResponseMsg`. +/// Configuration for a Kademlia connection upgrade. When applied to a +/// connection, turns this connection into a `Stream + Sink` whose items are of +/// type `KadRequestMsg` and `KadResponseMsg`. // TODO: if, as suspected, we can confirm with Protocol Labs that each open Kademlia substream does // only one request, then we can change the output of the `InboundUpgrade` and // `OutboundUpgrade` to be just a single message @@ -164,8 +170,8 @@ impl ProtocolConfig { &self.protocol_names } - /// Modifies the protocol names used on the wire. Can be used to create incompatibilities - /// between networks on purpose. + /// Modifies the protocol names used on the wire. Can be used to create + /// incompatibilities between networks on purpose. 
#[deprecated(note = "Use `ProtocolConfig::new` instead")] pub fn set_protocol_names(&mut self, names: Vec) { self.protocol_names = names; @@ -270,15 +276,15 @@ pub enum KadRequestMsg { /// Ping request. Ping, - /// Request for the list of nodes whose IDs are the closest to `key`. The number of nodes - /// returned is not specified, but should be around 20. + /// Request for the list of nodes whose IDs are the closest to `key`. The + /// number of nodes returned is not specified, but should be around 20. FindNode { /// The key for which to locate the closest nodes. key: Vec, }, - /// Same as `FindNode`, but should also return the entries of the local providers list for - /// this key. + /// Same as `FindNode`, but should also return the entries of the local + /// providers list for this key. GetProviders { /// Identifier being searched. key: record::Key, @@ -364,7 +370,8 @@ impl TryFrom for KadResponseMsg { } } -/// Converts a `KadRequestMsg` into the corresponding protobuf message for sending. +/// Converts a `KadRequestMsg` into the corresponding protobuf message for +/// sending. fn req_msg_to_proto(kad_msg: KadRequestMsg) -> proto::Message { match kad_msg { KadRequestMsg::Ping => proto::Message { @@ -405,7 +412,8 @@ fn req_msg_to_proto(kad_msg: KadRequestMsg) -> proto::Message { } } -/// Converts a `KadResponseMsg` into the corresponding protobuf message for sending. +/// Converts a `KadResponseMsg` into the corresponding protobuf message for +/// sending. fn resp_msg_to_proto(kad_msg: KadResponseMsg) -> proto::Message { match kad_msg { KadResponseMsg::Pong => proto::Message { @@ -453,7 +461,8 @@ fn resp_msg_to_proto(kad_msg: KadResponseMsg) -> proto::Message { /// Converts a received protobuf message into a corresponding `KadRequestMsg`. /// -/// Fails if the protobuf message is not a valid and supported Kademlia request message. +/// Fails if the protobuf message is not a valid and supported Kademlia request +/// message. 
fn proto_to_req_msg(message: proto::Message) -> Result { match message.type_pb { proto::MessageType::PING => Ok(KadRequestMsg::Ping), @@ -487,9 +496,11 @@ fn proto_to_req_msg(message: proto::Message) -> Result } } -/// Converts a received protobuf message into a corresponding `KadResponseMessage`. +/// Converts a received protobuf message into a corresponding +/// `KadResponseMessage`. /// -/// Fails if the protobuf message is not a valid and supported Kademlia response message. +/// Fails if the protobuf message is not a valid and supported Kademlia response +/// message. fn proto_to_resp_msg(message: proto::Message) -> Result { match message.type_pb { proto::MessageType::PING => Ok(KadResponseMsg::Pong), @@ -667,92 +678,92 @@ mod tests { assert_eq!(peer.multiaddrs, vec![valid_multiaddr]) } - /*// TODO: restore - use self::libp2p_tcp::TcpTransport; - use self::tokio::runtime::current_thread::Runtime; - use futures::{Future, Sink, Stream}; - use libp2p_core::{PeerId, PublicKey, Transport}; - use multihash::{encode, Hash}; - use protocol::{ConnectionType, KadPeer, ProtocolConfig}; - use std::sync::mpsc; - use std::thread; - - #[test] - fn correct_transfer() { - // We open a server and a client, send a message between the two, and check that they were - // successfully received. 
- - test_one(KadMsg::Ping); - test_one(KadMsg::FindNodeReq { - key: PeerId::random(), - }); - test_one(KadMsg::FindNodeRes { - closer_peers: vec![KadPeer { - node_id: PeerId::random(), - multiaddrs: vec!["/ip4/100.101.102.103/tcp/20105".parse().unwrap()], - connection_ty: ConnectionType::Connected, - }], - }); - test_one(KadMsg::GetProvidersReq { - key: encode(Hash::SHA2256, &[9, 12, 0, 245, 245, 201, 28, 95]).unwrap(), - }); - test_one(KadMsg::GetProvidersRes { - closer_peers: vec![KadPeer { - node_id: PeerId::random(), - multiaddrs: vec!["/ip4/100.101.102.103/tcp/20105".parse().unwrap()], - connection_ty: ConnectionType::Connected, - }], - provider_peers: vec![KadPeer { - node_id: PeerId::random(), - multiaddrs: vec!["/ip4/200.201.202.203/tcp/1999".parse().unwrap()], - connection_ty: ConnectionType::NotConnected, - }], - }); - test_one(KadMsg::AddProvider { - key: encode(Hash::SHA2256, &[9, 12, 0, 245, 245, 201, 28, 95]).unwrap(), - provider_peer: KadPeer { - node_id: PeerId::random(), - multiaddrs: vec!["/ip4/9.1.2.3/udp/23".parse().unwrap()], - connection_ty: ConnectionType::Connected, - }, - }); - // TODO: all messages - - fn test_one(msg_server: KadMsg) { - let msg_client = msg_server.clone(); - let (tx, rx) = mpsc::channel(); - - let bg_thread = thread::spawn(move || { - let transport = TcpTransport::default().with_upgrade(ProtocolConfig); - - let (listener, addr) = transport - .listen_on( "/ip4/127.0.0.1/tcp/0".parse().unwrap()) - .unwrap(); - tx.send(addr).unwrap(); - - let future = listener - .into_future() - .map_err(|(err, _)| err) - .and_then(|(client, _)| client.unwrap().0) - .and_then(|proto| proto.into_future().map_err(|(err, _)| err).map(|(v, _)| v)) - .map(|recv_msg| { - assert_eq!(recv_msg.unwrap(), msg_server); - () - }); - let mut rt = Runtime::new().unwrap(); - let _ = rt.block_on(future).unwrap(); - }); - - let transport = TcpTransport::default().with_upgrade(ProtocolConfig); - - let future = transport - .dial(rx.recv().unwrap()) - .unwrap() 
- .and_then(|proto| proto.send(msg_client)) - .map(|_| ()); - let mut rt = Runtime::new().unwrap(); - let _ = rt.block_on(future).unwrap(); - bg_thread.join().unwrap(); - } - }*/ + // // TODO: restore + // use self::libp2p_tcp::TcpTransport; + // use self::tokio::runtime::current_thread::Runtime; + // use futures::{Future, Sink, Stream}; + // use libp2p_core::{PeerId, PublicKey, Transport}; + // use multihash::{encode, Hash}; + // use protocol::{ConnectionType, KadPeer, ProtocolConfig}; + // use std::sync::mpsc; + // use std::thread; + // + // #[test] + // fn correct_transfer() { + // We open a server and a client, send a message between the two, and check + // that they were successfully received. + // + // test_one(KadMsg::Ping); + // test_one(KadMsg::FindNodeReq { + // key: PeerId::random(), + // }); + // test_one(KadMsg::FindNodeRes { + // closer_peers: vec![KadPeer { + // node_id: PeerId::random(), + // multiaddrs: vec!["/ip4/100.101.102.103/tcp/20105".parse().unwrap()], + // connection_ty: ConnectionType::Connected, + // }], + // }); + // test_one(KadMsg::GetProvidersReq { + // key: encode(Hash::SHA2256, &[9, 12, 0, 245, 245, 201, 28, 95]).unwrap(), + // }); + // test_one(KadMsg::GetProvidersRes { + // closer_peers: vec![KadPeer { + // node_id: PeerId::random(), + // multiaddrs: vec!["/ip4/100.101.102.103/tcp/20105".parse().unwrap()], + // connection_ty: ConnectionType::Connected, + // }], + // provider_peers: vec![KadPeer { + // node_id: PeerId::random(), + // multiaddrs: vec!["/ip4/200.201.202.203/tcp/1999".parse().unwrap()], + // connection_ty: ConnectionType::NotConnected, + // }], + // }); + // test_one(KadMsg::AddProvider { + // key: encode(Hash::SHA2256, &[9, 12, 0, 245, 245, 201, 28, 95]).unwrap(), + // provider_peer: KadPeer { + // node_id: PeerId::random(), + // multiaddrs: vec!["/ip4/9.1.2.3/udp/23".parse().unwrap()], + // connection_ty: ConnectionType::Connected, + // }, + // }); + // TODO: all messages + // + // fn test_one(msg_server: KadMsg) { 
+ // let msg_client = msg_server.clone(); + // let (tx, rx) = mpsc::channel(); + // + // let bg_thread = thread::spawn(move || { + // let transport = TcpTransport::default().with_upgrade(ProtocolConfig); + // + // let (listener, addr) = transport + // .listen_on( "/ip4/127.0.0.1/tcp/0".parse().unwrap()) + // .unwrap(); + // tx.send(addr).unwrap(); + // + // let future = listener + // .into_future() + // .map_err(|(err, _)| err) + // .and_then(|(client, _)| client.unwrap().0) + // .and_then(|proto| proto.into_future().map_err(|(err, _)| err).map(|(v, + // _)| v)) .map(|recv_msg| { + // assert_eq!(recv_msg.unwrap(), msg_server); + // () + // }); + // let mut rt = Runtime::new().unwrap(); + // let _ = rt.block_on(future).unwrap(); + // }); + // + // let transport = TcpTransport::default().with_upgrade(ProtocolConfig); + // + // let future = transport + // .dial(rx.recv().unwrap()) + // .unwrap() + // .and_then(|proto| proto.send(msg_client)) + // .map(|_| ()); + // let mut rt = Runtime::new().unwrap(); + // let _ = rt.block_on(future).unwrap(); + // bg_thread.join().unwrap(); + // } + // } } diff --git a/protocols/kad/src/query.rs b/protocols/kad/src/query.rs index 1a895d9627c..bc8a1396921 100644 --- a/protocols/kad/src/query.rs +++ b/protocols/kad/src/query.rs @@ -20,25 +20,31 @@ mod peers; -use libp2p_core::Multiaddr; -use peers::closest::{ - disjoint::ClosestDisjointPeersIter, ClosestPeersIter, ClosestPeersIterConfig, -}; -use peers::fixed::FixedPeersIter; -use peers::PeersIterState; -use smallvec::SmallVec; +use std::{num::NonZeroUsize, time::Duration}; -use crate::behaviour::PeerInfo; -use crate::handler::HandlerIn; -use crate::kbucket::{Key, KeyBytes}; -use crate::{QueryInfo, ALPHA_VALUE, K_VALUE}; use either::Either; use fnv::FnvHashMap; +use libp2p_core::Multiaddr; use libp2p_identity::PeerId; -use std::{num::NonZeroUsize, time::Duration}; +use peers::{ + closest::{disjoint::ClosestDisjointPeersIter, ClosestPeersIter, ClosestPeersIterConfig}, + 
fixed::FixedPeersIter, + PeersIterState, +}; +use smallvec::SmallVec; use web_time::Instant; -/// A `QueryPool` provides an aggregate state machine for driving `Query`s to completion. +use crate::{ + behaviour::PeerInfo, + handler::HandlerIn, + kbucket::{Key, KeyBytes}, + QueryInfo, + ALPHA_VALUE, + K_VALUE, +}; + +/// A `QueryPool` provides an aggregate state machine for driving `Query`s to +/// completion. /// /// Internally, a `Query` is in turn driven by an underlying `QueryPeerIter` /// that determines the peer selection strategy, i.e. the order in which the @@ -116,7 +122,8 @@ impl QueryPool { self.queries.insert(id, query); } - /// Adds a query to the pool that iterates towards the closest peers to the target. + /// Adds a query to the pool that iterates towards the closest peers to the + /// target. pub(crate) fn add_iter_closest(&mut self, target: T, peers: I, info: QueryInfo) -> QueryId where T: Into + Clone, @@ -127,7 +134,8 @@ impl QueryPool { id } - /// Adds a query to the pool that iterates towards the closest peers to the target. + /// Adds a query to the pool that iterates towards the closest peers to the + /// target. pub(crate) fn continue_iter_closest( &mut self, id: QueryId, @@ -175,7 +183,8 @@ impl QueryPool { self.queries.get(id) } - /// Returns a mutablereference to a query with the given ID, if it is in the pool. + /// Returns a mutablereference to a query with the given ID, if it is in the + /// pool. pub(crate) fn get_mut(&mut self, id: &QueryId) -> Option<&mut Query> { self.queries.get_mut(id) } @@ -288,7 +297,8 @@ pub(crate) struct Query { /// A map of pending requests to peers. /// /// A request is pending if the targeted peer is not currently connected - /// and these requests are sent as soon as a connection to the peer is established. + /// and these requests are sent as soon as a connection to the peer is + /// established. 
pub(crate) pending_rpcs: SmallVec<[(PeerId, HandlerIn); K_VALUE.get()]>, } @@ -301,7 +311,8 @@ pub(crate) struct QueryPeers { } impl QueryPeers { - /// Consumes the peers iterator, producing a final `Iterator` over the discovered `PeerId`s. + /// Consumes the peers iterator, producing a final `Iterator` over the + /// discovered `PeerId`s. pub(crate) fn into_peerids_iter(self) -> impl Iterator { match self.peer_iter { QueryPeerIter::Closest(iter) => Either::Left(Either::Left(iter.into_result())), @@ -310,8 +321,8 @@ impl QueryPeers { } } - /// Consumes the peers iterator, producing a final `Iterator` over the discovered `PeerId`s - /// with their matching `Multiaddr`s. + /// Consumes the peers iterator, producing a final `Iterator` over the + /// discovered `PeerId`s with their matching `Multiaddr`s. pub(crate) fn into_peerinfos_iter(mut self) -> impl Iterator { match self.peer_iter { QueryPeerIter::Closest(iter) => Either::Left(Either::Left(iter.into_result())), @@ -437,8 +448,9 @@ impl Query { /// Finishes the query prematurely. /// - /// A finished query immediately stops yielding new peers to contact and will be - /// reported by [`QueryPool::poll`] via [`QueryPoolState::Finished`]. + /// A finished query immediately stops yielding new peers to contact and + /// will be reported by [`QueryPool::poll`] via + /// [`QueryPoolState::Finished`]. pub(crate) fn finish(&mut self) { match &mut self.peers.peer_iter { QueryPeerIter::Closest(iter) => iter.finish(), diff --git a/protocols/kad/src/query/peers.rs b/protocols/kad/src/query/peers.rs index 11b8f974de9..12ee040e88e 100644 --- a/protocols/kad/src/query/peers.rs +++ b/protocols/kad/src/query/peers.rs @@ -18,7 +18,8 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -//! Peer selection strategies for queries in the form of iterator-like state machines. +//! Peer selection strategies for queries in the form of iterator-like state +//! machines. //! //! 
Using a peer iterator in a query involves performing the following steps //! repeatedly and in an alternating fashion: @@ -28,8 +29,8 @@ //! waiting for responses. //! //! 2. When responses are received or requests fail, providing input to the -//! iterator via the `on_success` and `on_failure` callbacks, -//! respectively, followed by repeating step (1). +//! iterator via the `on_success` and `on_failure` callbacks, respectively, +//! followed by repeating step (1). //! //! When a call to `next` returns [`Finished`], no more peers can be obtained //! from the iterator and the results can be obtained from `into_result`. @@ -40,9 +41,10 @@ pub(crate) mod closest; pub(crate) mod fixed; -use libp2p_identity::PeerId; use std::borrow::Cow; +use libp2p_identity::PeerId; + /// The state of a peer iterator. #[derive(Debug, Clone, PartialEq, Eq)] pub enum PeersIterState<'a> { @@ -52,9 +54,9 @@ pub enum PeersIterState<'a> { /// from `peer`, in addition to any other peers for which it is already /// waiting for results. /// - /// `None` indicates that the iterator is waiting for results and there is no - /// new peer to contact, despite the iterator not being at capacity w.r.t. - /// the permitted parallelism. + /// `None` indicates that the iterator is waiting for results and there is + /// no new peer to contact, despite the iterator not being at capacity + /// w.r.t. the permitted parallelism. Waiting(Option>), /// The iterator is waiting for results and is at capacity w.r.t. the diff --git a/protocols/kad/src/query/peers/closest.rs b/protocols/kad/src/query/peers/closest.rs index 2505ee2e9b2..12c4255f8c7 100644 --- a/protocols/kad/src/query/peers/closest.rs +++ b/protocols/kad/src/query/peers/closest.rs @@ -18,17 +18,24 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use super::*; +use std::{ + collections::btree_map::{BTreeMap, Entry}, + num::NonZeroUsize, + time::Duration, +}; -use crate::kbucket::{Distance, Key, KeyBytes}; -use crate::{ALPHA_VALUE, K_VALUE}; -use std::collections::btree_map::{BTreeMap, Entry}; -use std::{num::NonZeroUsize, time::Duration}; use web_time::Instant; +use super::*; +use crate::{ + kbucket::{Distance, Key, KeyBytes}, + ALPHA_VALUE, + K_VALUE, +}; + pub(crate) mod disjoint; -/// A peer iterator for a dynamically changing list of peers, sorted by increasing -/// distance to a chosen target. +/// A peer iterator for a dynamically changing list of peers, sorted by +/// increasing distance to a chosen target. #[derive(Debug, Clone)] pub struct ClosestPeersIter { config: ClosestPeersIterConfig, @@ -43,7 +50,8 @@ pub struct ClosestPeersIter { /// The closest peers to the target, ordered by increasing distance. closest_peers: BTreeMap, - /// The number of peers for which the iterator is currently waiting for results. + /// The number of peers for which the iterator is currently waiting for + /// results. num_waiting: usize, } @@ -52,23 +60,24 @@ pub struct ClosestPeersIter { pub struct ClosestPeersIterConfig { /// Allowed level of parallelism. /// - /// The `α` parameter in the Kademlia paper. The maximum number of peers that - /// the iterator is allowed to wait for in parallel while iterating towards the closest - /// nodes to a target. Defaults to `ALPHA_VALUE`. + /// The `α` parameter in the Kademlia paper. The maximum number of peers + /// that the iterator is allowed to wait for in parallel while iterating + /// towards the closest nodes to a target. Defaults to `ALPHA_VALUE`. pub parallelism: NonZeroUsize, /// Number of results (closest peers) to search for. /// - /// The number of closest peers for which the iterator must obtain successful results - /// in order to finish successfully. Defaults to `K_VALUE`. 
+ /// The number of closest peers for which the iterator must obtain + /// successful results in order to finish successfully. Defaults to + /// `K_VALUE`. pub num_results: NonZeroUsize, /// The timeout for a single peer. /// /// If a successful result is not reported for a peer within this timeout - /// window, the iterator considers the peer unresponsive and will not wait for - /// the peer when evaluating the termination conditions, until and unless a - /// result is delivered. Defaults to `10` seconds. + /// window, the iterator considers the peer unresponsive and will not wait + /// for the peer when evaluating the termination conditions, until and + /// unless a result is delivered. Defaults to `10` seconds. pub peer_timeout: Duration, } @@ -133,11 +142,12 @@ impl ClosestPeersIter { /// Callback for delivering the result of a successful request to a peer. /// - /// Delivering results of requests back to the iterator allows the iterator to make - /// progress. The iterator is said to make progress either when the given - /// `closer_peers` contain a peer closer to the target than any peer seen so far, - /// or when the iterator did not yet accumulate `num_results` closest peers and - /// `closer_peers` contains a new peer, regardless of its distance to the target. + /// Delivering results of requests back to the iterator allows the iterator + /// to make progress. The iterator is said to make progress either when + /// the given `closer_peers` contain a peer closer to the target than + /// any peer seen so far, or when the iterator did not yet accumulate + /// `num_results` closest peers and `closer_peers` contains a new peer, + /// regardless of its distance to the target. /// /// If the iterator is currently waiting for a result from `peer`, /// the iterator state is updated and `true` is returned. In that @@ -289,12 +299,14 @@ impl ClosestPeersIter { self.num_waiting } - /// Returns true if the iterator is waiting for a response from the given peer. 
+ /// Returns true if the iterator is waiting for a response from the given + /// peer. pub fn is_waiting(&self, peer: &PeerId) -> bool { self.waiting().any(|p| peer == p) } - /// Advances the state of the iterator, potentially getting a new peer to contact. + /// Advances the state of the iterator, potentially getting a new peer to + /// contact. pub fn next(&mut self, now: Instant) -> PeersIterState<'_> { if let State::Finished = self.state { return PeersIterState::Finished; @@ -423,9 +435,9 @@ impl ClosestPeersIter { /// Internal state of the iterator. #[derive(Debug, PartialEq, Eq, Copy, Clone)] enum State { - /// The iterator is making progress by iterating towards `num_results` closest - /// peers to the target with a maximum of `parallelism` peers for which the - /// iterator is waiting for results at a time. + /// The iterator is making progress by iterating towards `num_results` + /// closest peers to the target with a maximum of `parallelism` peers + /// for which the iterator is waiting for results at a time. /// /// > **Note**: When the iterator switches back to `Iterating` after being /// > `Stalled`, it may temporarily be waiting for more than `parallelism` @@ -442,18 +454,19 @@ enum State { /// A iterator is stalled when it did not make progress after `parallelism` /// consecutive successful results (see `on_success`). /// - /// While the iterator is stalled, the maximum allowed parallelism for pending - /// results is increased to `num_results` in an attempt to finish the iterator. - /// If the iterator can make progress again upon receiving the remaining - /// results, it switches back to `Iterating`. Otherwise it will be finished. + /// While the iterator is stalled, the maximum allowed parallelism for + /// pending results is increased to `num_results` in an attempt to + /// finish the iterator. If the iterator can make progress again upon + /// receiving the remaining results, it switches back to `Iterating`. + /// Otherwise it will be finished. 
Stalled, /// The iterator is finished. /// /// A iterator finishes either when it has collected `num_results` results - /// from the closest peers (not counting those that failed or are unresponsive) - /// or because the iterator ran out of peers that have not yet delivered - /// results (or failed). + /// from the closest peers (not counting those that failed or are + /// unresponsive) or because the iterator ran out of peers that have not + /// yet delivered results (or failed). Finished, } @@ -494,12 +507,14 @@ enum PeerState { #[cfg(test)] mod tests { - use super::*; - use crate::SHA_256_MH; + use std::iter; + use libp2p_core::multihash::Multihash; use quickcheck::*; use rand::{rngs::StdRng, Rng, SeedableRng}; - use std::iter; + + use super::*; + use crate::SHA_256_MH; fn random_peers(n: usize, g: &mut R) -> Vec { (0..n) @@ -816,8 +831,8 @@ mod tests { iter.num_waiting = i; assert!( !iter.at_capacity(), - "Iterator should not be at capacity if less than \ - `max(parallelism, num_results)` requests are waiting.", + "Iterator should not be at capacity if less than `max(parallelism, \ + num_results)` requests are waiting.", ) } diff --git a/protocols/kad/src/query/peers/closest/disjoint.rs b/protocols/kad/src/query/peers/closest/disjoint.rs index cafe87b6ef4..e741af62860 100644 --- a/protocols/kad/src/query/peers/closest/disjoint.rs +++ b/protocols/kad/src/query/peers/closest/disjoint.rs @@ -18,13 +18,14 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use super::*; use std::{ collections::HashMap, iter::{Cycle, Map, Peekable}, ops::{Index, IndexMut, Range}, }; +use super::*; + /// Wraps around a set of [`ClosestPeersIter`], enforcing a disjoint discovery /// path per configured parallelism according to the S/Kademlia paper. 
pub(crate) struct ClosestDisjointPeersIter { @@ -373,7 +374,6 @@ enum ResponseState { /// Iterator combining the result of multiple [`ClosestPeersIter`] into a single /// deduplicated ordered iterator. -// // Note: This operates under the assumption that `I` is ordered. #[derive(Clone, Debug)] struct ResultIter @@ -433,13 +433,13 @@ impl>> Iterator for ResultIter { #[cfg(test)] mod tests { - use super::*; + use std::{collections::HashSet, iter}; - use crate::SHA_256_MH; use libp2p_core::multihash::Multihash; use quickcheck::*; - use std::collections::HashSet; - use std::iter; + + use super::*; + use crate::SHA_256_MH; impl Arbitrary for ResultIter>> { fn arbitrary(g: &mut Gen) -> Self { @@ -834,7 +834,8 @@ mod tests { } } - /// Ensure [`ClosestPeersIter`] and [`ClosestDisjointPeersIter`] yield same closest peers. + /// Ensure [`ClosestPeersIter`] and [`ClosestDisjointPeersIter`] yield same + /// closest peers. #[test] fn closest_and_disjoint_closest_yield_same_result() { fn prop( @@ -895,21 +896,19 @@ mod tests { assert!( closest.len() == num_results.0.get(), - "Expected `ClosestPeersIter` to find `num_results` closest \ - peers." + "Expected `ClosestPeersIter` to find `num_results` closest peers." ); assert!( disjoint.len() >= num_results.0.get(), - "Expected `ClosestDisjointPeersIter` to find at least \ - `num_results` closest peers." + "Expected `ClosestDisjointPeersIter` to find at least `num_results` closest peers." 
); if closest.len() > disjoint.len() { let closest_only = closest.difference(&disjoint).collect::>(); panic!( - "Expected `ClosestDisjointPeersIter` to find all peers \ - found by `ClosestPeersIter`, but it did not find {closest_only:?}.", + "Expected `ClosestDisjointPeersIter` to find all peers found by \ + `ClosestPeersIter`, but it did not find {closest_only:?}.", ); }; diff --git a/protocols/kad/src/query/peers/fixed.rs b/protocols/kad/src/query/peers/fixed.rs index 2d0b312454d..74d42edc201 100644 --- a/protocols/kad/src/query/peers/fixed.rs +++ b/protocols/kad/src/query/peers/fixed.rs @@ -18,10 +18,11 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use super::*; +use std::{collections::hash_map::Entry, num::NonZeroUsize, vec}; use fnv::FnvHashMap; -use std::{collections::hash_map::Entry, num::NonZeroUsize, vec}; + +use super::*; /// A peer iterator for a fixed set of peers. pub(crate) struct FixedPeersIter { @@ -49,7 +50,8 @@ enum PeerState { /// The iterator is waiting for a result to be reported back for the peer. Waiting, - /// The iterator has been informed that the attempt to contact the peer failed. + /// The iterator has been informed that the attempt to contact the peer + /// failed. Failed, /// The iterator has been informed of a successful result from the peer. 
@@ -189,9 +191,8 @@ mod test { match iter.next() { PeersIterState::Waiting(Some(_)) => {} PeersIterState::WaitingAtCapacity => panic!( - "Expected iterator to return another peer given that the \ - previous `on_failure` call should have allowed another peer \ - to be queried.", + "Expected iterator to return another peer given that the previous `on_failure` \ + call should have allowed another peer to be queried.", ), _ => panic!("Expected iterator to yield peer."), } diff --git a/protocols/kad/src/record.rs b/protocols/kad/src/record.rs index cb7c4b866fc..79b5029cd1c 100644 --- a/protocols/kad/src/record.rs +++ b/protocols/kad/src/record.rs @@ -22,13 +22,16 @@ pub mod store; +use std::{ + borrow::Borrow, + hash::{Hash, Hasher}, +}; + use bytes::Bytes; use libp2p_core::{multihash::Multihash, Multiaddr}; use libp2p_identity::PeerId; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; -use std::borrow::Borrow; -use std::hash::{Hash, Hasher}; use web_time::Instant; /// The (opaque) key of a record. @@ -152,7 +155,8 @@ impl ProviderRecord { } } - /// Checks whether the provider record is expired w.r.t. the given `Instant`. + /// Checks whether the provider record is expired w.r.t. the given + /// `Instant`. 
pub fn is_expired(&self, now: Instant) -> bool { self.expires.map_or(false, |t| now >= t) } @@ -160,10 +164,12 @@ impl ProviderRecord { #[cfg(test)] mod tests { + use std::time::Duration; + + use quickcheck::*; + use super::*; use crate::SHA_256_MH; - use quickcheck::*; - use std::time::Duration; impl Arbitrary for Key { fn arbitrary(g: &mut Gen) -> Key { diff --git a/protocols/kad/src/record/store.rs b/protocols/kad/src/record/store.rs index 5c25bc8b2fa..846a63cc789 100644 --- a/protocols/kad/src/record/store.rs +++ b/protocols/kad/src/record/store.rs @@ -20,12 +20,13 @@ mod memory; +use std::borrow::Cow; + pub use memory::{MemoryStore, MemoryStoreConfig}; use thiserror::Error; use super::*; use crate::K_VALUE; -use std::borrow::Cow; /// The result of an operation on a `RecordStore`. pub type Result = std::result::Result; @@ -37,7 +38,8 @@ pub enum Error { #[error("the store cannot contain any more records")] MaxRecords, - /// The store is at capacity w.r.t. the total number of stored provider records. + /// The store is at capacity w.r.t. the total number of stored provider + /// records. #[error("the store cannot contain any more provider records")] MaxProvidedKeys, @@ -51,19 +53,18 @@ pub enum Error { /// There are two types of records managed by a `RecordStore`: /// /// 1. Regular (value-)records. These records store an arbitrary value -/// associated with a key which is distributed to the closest nodes -/// to the key in the Kademlia DHT as per the standard Kademlia "push-model". -/// These records are subject to re-replication and re-publication as -/// per the standard Kademlia protocol. +/// associated with a key which is distributed to the closest nodes to the +/// key in the Kademlia DHT as per the standard Kademlia "push-model". +/// These records are subject to re-replication and re-publication as per +/// the standard Kademlia protocol. /// /// 2. Provider records. 
These records associate the ID of a peer with a key -/// who can supposedly provide the associated value. These records are -/// mere "pointers" to the data which may be followed by contacting these -/// providers to obtain the value. These records are specific to the -/// libp2p Kademlia specification and realise a "pull-model" for distributed +/// who can supposedly provide the associated value. These records are mere +/// "pointers" to the data which may be followed by contacting these +/// providers to obtain the value. These records are specific to the libp2p +/// Kademlia specification and realise a "pull-model" for distributed /// content. Just like a regular record, a provider record is distributed /// to the closest nodes to the key. -/// pub trait RecordStore { type RecordsIter<'a>: Iterator> where diff --git a/protocols/kad/src/record/store/memory.rs b/protocols/kad/src/record/store/memory.rs index 3fb6d2be3e8..28f6a55044f 100644 --- a/protocols/kad/src/record/store/memory.rs +++ b/protocols/kad/src/record/store/memory.rs @@ -18,12 +18,15 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use super::*; +use std::{ + collections::{hash_map, hash_set, HashMap, HashSet}, + iter, +}; -use crate::kbucket; use smallvec::SmallVec; -use std::collections::{hash_map, hash_set, HashMap, HashSet}; -use std::iter; + +use super::*; +use crate::kbucket; /// In-memory implementation of a `RecordStore`. 
pub struct MemoryStore { @@ -208,11 +211,12 @@ impl RecordStore for MemoryStore { #[cfg(test)] mod tests { - use super::*; - use crate::SHA_256_MH; use quickcheck::*; use rand::Rng; + use super::*; + use crate::SHA_256_MH; + fn random_multihash() -> Multihash<64> { Multihash::wrap(SHA_256_MH, &rand::thread_rng().gen::<[u8; 32]>()).unwrap() } diff --git a/protocols/kad/tests/client_mode.rs b/protocols/kad/tests/client_mode.rs index 2c8d11beac7..7a938ef4df7 100644 --- a/protocols/kad/tests/client_mode.rs +++ b/protocols/kad/tests/client_mode.rs @@ -1,7 +1,6 @@ use libp2p_identify as identify; use libp2p_identity as identity; -use libp2p_kad::store::MemoryStore; -use libp2p_kad::{Behaviour, Config, Event, Mode}; +use libp2p_kad::{store::MemoryStore, Behaviour, Config, Event, Mode}; use libp2p_swarm::{Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt; use tracing_subscriber::EnvFilter; @@ -101,10 +100,13 @@ async fn adding_an_external_addresses_activates_server_mode_on_existing_connecti other => panic!("Unexpected events: {other:?}"), } - // Server learns its external address (this could be through AutoNAT or some other mechanism). + // Server learns its external address (this could be through AutoNAT or some + // other mechanism). server.add_external_address(memory_addr); - // The server reconfigured its connection to the client to be in server mode, pushes that information to client which as a result updates its routing table and triggers a mode change to Mode::Server. + // The server reconfigured its connection to the client to be in server mode, + // pushes that information to client which as a result updates its routing table + // and triggers a mode change to Mode::Server. match libp2p_swarm_test::drive(&mut client, &mut server).await { ( [Identify(identify::Event::Received { .. }), Kad(RoutingUpdated { peer: peer1, .. 
})], diff --git a/protocols/mdns/src/behaviour.rs b/protocols/mdns/src/behaviour.rs index cecd27bf78b..17cde73a390 100644 --- a/protocols/mdns/src/behaviour.rs +++ b/protocols/mdns/src/behaviour.rs @@ -22,25 +22,42 @@ mod iface; mod socket; mod timer; -use self::iface::InterfaceState; -use crate::behaviour::{socket::AsyncSocket, timer::Builder}; -use crate::Config; -use futures::channel::mpsc; -use futures::{Stream, StreamExt}; +use std::{ + cmp, + collections::hash_map::{Entry, HashMap}, + fmt, + future::Future, + io, + net::IpAddr, + pin::Pin, + sync::{Arc, RwLock}, + task::{Context, Poll}, + time::Instant, +}; + +use futures::{channel::mpsc, Stream, StreamExt}; use if_watch::IfEvent; -use libp2p_core::transport::PortUse; -use libp2p_core::{Endpoint, Multiaddr}; +use libp2p_core::{transport::PortUse, Endpoint, Multiaddr}; use libp2p_identity::PeerId; -use libp2p_swarm::behaviour::FromSwarm; use libp2p_swarm::{ - dummy, ConnectionDenied, ConnectionId, ListenAddresses, NetworkBehaviour, THandler, - THandlerInEvent, THandlerOutEvent, ToSwarm, + behaviour::FromSwarm, + dummy, + ConnectionDenied, + ConnectionId, + ListenAddresses, + NetworkBehaviour, + THandler, + THandlerInEvent, + THandlerOutEvent, + ToSwarm, }; use smallvec::SmallVec; -use std::collections::hash_map::{Entry, HashMap}; -use std::future::Future; -use std::sync::{Arc, RwLock}; -use std::{cmp, fmt, io, net::IpAddr, pin::Pin, task::Context, task::Poll, time::Instant}; + +use self::iface::InterfaceState; +use crate::{ + behaviour::{socket::AsyncSocket, timer::Builder}, + Config, +}; /// An abstraction to allow for compatibility with various async runtimes. pub trait Provider: 'static { @@ -68,11 +85,13 @@ pub trait Abort { /// The type of a [`Behaviour`] using the `async-io` implementation. 
#[cfg(feature = "async-io")] pub mod async_io { - use super::Provider; - use crate::behaviour::{socket::asio::AsyncUdpSocket, timer::asio::AsyncTimer, Abort}; + use std::future::Future; + use async_std::task::JoinHandle; use if_watch::smol::IfWatcher; - use std::future::Future; + + use super::Provider; + use crate::behaviour::{socket::asio::AsyncUdpSocket, timer::asio::AsyncTimer, Abort}; #[doc(hidden)] pub enum AsyncIo {} @@ -104,12 +123,14 @@ pub mod async_io { /// The type of a [`Behaviour`] using the `tokio` implementation. #[cfg(feature = "tokio")] pub mod tokio { - use super::Provider; - use crate::behaviour::{socket::tokio::TokioUdpSocket, timer::tokio::TokioTimer, Abort}; - use if_watch::tokio::IfWatcher; use std::future::Future; + + use if_watch::tokio::IfWatcher; use tokio::task::JoinHandle; + use super::Provider; + use crate::behaviour::{socket::tokio::TokioUdpSocket, timer::tokio::TokioTimer, Abort}; + #[doc(hidden)] pub enum Tokio {} @@ -137,8 +158,8 @@ pub mod tokio { pub type Behaviour = super::Behaviour; } -/// A `NetworkBehaviour` for mDNS. Automatically discovers peers on the local network and adds -/// them to the topology. +/// A `NetworkBehaviour` for mDNS. Automatically discovers peers on the local +/// network and adds them to the topology. #[derive(Debug)] pub struct Behaviour

where @@ -156,13 +177,15 @@ where query_response_receiver: mpsc::Receiver<(PeerId, Multiaddr, Instant)>, query_response_sender: mpsc::Sender<(PeerId, Multiaddr, Instant)>, - /// List of nodes that we have discovered, the address, and when their TTL expires. + /// List of nodes that we have discovered, the address, and when their TTL + /// expires. /// - /// Each combination of `PeerId` and `Multiaddr` can only appear once, but the same `PeerId` - /// can appear multiple times. + /// Each combination of `PeerId` and `Multiaddr` can only appear once, but + /// the same `PeerId` can appear multiple times. discovered_nodes: SmallVec<[(PeerId, Multiaddr, Instant); 8]>, - /// Future that fires when the TTL of at least one node in `discovered_nodes` expires. + /// Future that fires when the TTL of at least one node in + /// `discovered_nodes` expires. /// /// `None` if `discovered_nodes` is empty. closest_expiration: Option, @@ -170,7 +193,8 @@ where /// The current set of listen addresses. /// /// This is shared across all interface tasks using an [`RwLock`]. - /// The [`Behaviour`] updates this upon new [`FromSwarm`] events where as [`InterfaceState`]s read from it to answer inbound mDNS queries. + /// The [`Behaviour`] updates this upon new [`FromSwarm`] events whereas + /// [`InterfaceState`]s read from it to answer inbound mDNS queries. listen_addresses: Arc>, local_peer_id: PeerId, @@ -197,13 +221,15 @@ where }) } - /// Returns true if the given `PeerId` is in the list of nodes discovered through mDNS. + /// Returns true if the given `PeerId` is in the list of nodes discovered + /// through mDNS. #[deprecated(note = "Use `discovered_nodes` iterator instead.")] pub fn has_node(&self, peer_id: &PeerId) -> bool { self.discovered_nodes().any(|p| p == peer_id) } - /// Returns the list of nodes that we have discovered through mDNS and that are not expired. + /// Returns the list of nodes that we have discovered through mDNS and that + /// are not expired. 
pub fn discovered_nodes(&self) -> impl ExactSizeIterator { self.discovered_nodes.iter().map(|(p, _, _)| p) } @@ -388,7 +414,8 @@ pub enum Event { /// The given combinations of `PeerId` and `Multiaddr` have expired. /// - /// Each discovered record has a time-to-live. When this TTL expires and the address hasn't - /// been refreshed, we remove it from the list and emit it as an `Expired` event. + /// Each discovered record has a time-to-live. When this TTL expires and the + /// address hasn't been refreshed, we remove it from the list and emit + /// it as an `Expired` event. Expired(Vec<(PeerId, Multiaddr)>), } diff --git a/protocols/mdns/src/behaviour/iface.rs b/protocols/mdns/src/behaviour/iface.rs index 9302065cde2..477ecc8a095 100644 --- a/protocols/mdns/src/behaviour/iface.rs +++ b/protocols/mdns/src/behaviour/iface.rs @@ -21,27 +21,32 @@ mod dns; mod query; -use self::dns::{build_query, build_query_response, build_service_discovery_response}; -use self::query::MdnsPacket; -use crate::behaviour::{socket::AsyncSocket, timer::Builder}; -use crate::Config; -use futures::channel::mpsc; -use futures::{SinkExt, StreamExt}; -use libp2p_core::Multiaddr; -use libp2p_identity::PeerId; -use libp2p_swarm::ListenAddresses; -use socket2::{Domain, Socket, Type}; -use std::future::Future; -use std::sync::{Arc, RwLock}; use std::{ collections::VecDeque, + future::Future, io, net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, UdpSocket}, pin::Pin, + sync::{Arc, RwLock}, task::{Context, Poll}, time::{Duration, Instant}, }; +use futures::{channel::mpsc, SinkExt, StreamExt}; +use libp2p_core::Multiaddr; +use libp2p_identity::PeerId; +use libp2p_swarm::ListenAddresses; +use socket2::{Domain, Socket, Type}; + +use self::{ + dns::{build_query, build_query_response, build_service_discovery_response}, + query::MdnsPacket, +}; +use crate::{ + behaviour::{socket::AsyncSocket, timer::Builder}, + Config, +}; + /// Initial interval for starting probe const INITIAL_TIMEOUT_INTERVAL: Duration = 
Duration::from_millis(500); @@ -66,8 +71,9 @@ impl ProbeState { } } -/// An mDNS instance for a networking interface. To discover all peers when having multiple -/// interfaces an [`InterfaceState`] is required for each interface. +/// An mDNS instance for a networking interface. To discover all peers when +/// having multiple interfaces an [`InterfaceState`] is required for each +/// interface. #[derive(Debug)] pub(crate) struct InterfaceState { /// Address this instance is bound to. @@ -82,11 +88,12 @@ pub(crate) struct InterfaceState { query_response_sender: mpsc::Sender<(PeerId, Multiaddr, Instant)>, /// Buffer used for receiving data from the main socket. - /// RFC6762 discourages packets larger than the interface MTU, but allows sizes of up to 9000 - /// bytes, if it can be ensured that all participating devices can handle such large packets. - /// For computers with several interfaces and IP addresses responses can easily reach sizes in - /// the range of 3000 bytes, so 4096 seems sensible for now. For more information see - /// [rfc6762](https://tools.ietf.org/html/rfc6762#page-46). + /// RFC6762 discourages packets larger than the interface MTU, but allows + /// sizes of up to 9000 bytes, if it can be ensured that all + /// participating devices can handle such large packets. For computers + /// with several interfaces and IP addresses responses can easily reach + /// sizes in the range of 3000 bytes, so 4096 seems sensible for now. + /// For more information see [rfc6762](https://tools.ietf.org/html/rfc6762#page-46). recv_buffer: [u8; 4096], /// Buffers pending to send on the main socket. send_buffer: VecDeque>, diff --git a/protocols/mdns/src/behaviour/iface/dns.rs b/protocols/mdns/src/behaviour/iface/dns.rs index 39dbf08c731..26fb7739c07 100644 --- a/protocols/mdns/src/behaviour/iface/dns.rs +++ b/protocols/mdns/src/behaviour/iface/dns.rs @@ -20,12 +20,13 @@ //! (M)DNS encoding and decoding on top of the `dns_parser` library. 
-use crate::{META_QUERY_SERVICE, SERVICE_NAME}; +use std::{borrow::Cow, cmp, error, fmt, str, time::Duration}; + use libp2p_core::Multiaddr; use libp2p_identity::PeerId; -use rand::distributions::Alphanumeric; -use rand::{thread_rng, Rng}; -use std::{borrow::Cow, cmp, error, fmt, str, time::Duration}; +use rand::{distributions::Alphanumeric, thread_rng, Rng}; + +use crate::{META_QUERY_SERVICE, SERVICE_NAME}; /// DNS TXT records can have up to 255 characters as a single string value. /// @@ -48,7 +49,8 @@ const MAX_RECORDS_PER_PACKET: usize = (MAX_PACKET_SIZE - 100) / MAX_TXT_RECORD_S /// An encoded MDNS packet. pub(crate) type MdnsPacket = Vec; -/// Decodes a `` (as defined by RFC1035) into a `Vec` of ASCII characters. +/// Decodes a `` (as defined by RFC1035) into a `Vec` of ASCII +/// characters. // TODO: better error type? pub(crate) fn decode_character_string(mut from: &[u8]) -> Result, ()> { if from.is_empty() { @@ -290,10 +292,9 @@ fn generate_peer_name() -> Vec { /// /// # Panic /// -/// Panics if `name` has a zero-length component or a component that is too long. -/// This is fine considering that this function is not public and is only called in a controlled -/// environment. -/// +/// Panics if `name` has a zero-length component or a component that is too +/// long. This is fine considering that this function is not public and is only +/// called in a controlled environment. 
fn append_qname(out: &mut Vec, name: &[u8]) { debug_assert!(name.is_ascii()); @@ -394,10 +395,11 @@ impl error::Error for MdnsResponseError {} #[cfg(test)] mod tests { - use super::*; use hickory_proto::op::Message; use libp2p_identity as identity; + use super::*; + #[test] fn build_query_correct() { let query = build_query(); diff --git a/protocols/mdns/src/behaviour/iface/query.rs b/protocols/mdns/src/behaviour/iface/query.rs index 70b84816d0f..d724f944e81 100644 --- a/protocols/mdns/src/behaviour/iface/query.rs +++ b/protocols/mdns/src/behaviour/iface/query.rs @@ -18,18 +18,23 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use super::dns; -use crate::{META_QUERY_SERVICE_FQDN, SERVICE_NAME_FQDN}; +use std::{ + fmt, + net::SocketAddr, + str, + time::{Duration, Instant}, +}; + use hickory_proto::{ op::Message, rr::{Name, RData}, }; use libp2p_core::multiaddr::{Multiaddr, Protocol}; +use libp2p_identity::PeerId; use libp2p_swarm::_address_translation; -use libp2p_identity::PeerId; -use std::time::Instant; -use std::{fmt, net::SocketAddr, str, time::Duration}; +use super::dns; +use crate::{META_QUERY_SERVICE_FQDN, SERVICE_NAME_FQDN}; /// A valid mDNS packet received by the service. #[derive(Debug)] @@ -69,7 +74,8 @@ impl MdnsPacket { .iter() .any(|q| q.name().to_utf8() == META_QUERY_SERVICE_FQDN) { - // TODO: what if multiple questions, one with SERVICE_NAME and one with META_QUERY_SERVICE? + // TODO: what if multiple questions, one with SERVICE_NAME and one with + // META_QUERY_SERVICE? return Ok(Some(MdnsPacket::ServiceDiscovery(MdnsServiceDiscovery { from, query_id: packet.header().id(), @@ -84,7 +90,8 @@ impl MdnsPacket { pub(crate) struct MdnsQuery { /// Sender of the address. from: SocketAddr, - /// Id of the received DNS query. We need to pass this ID back in the results. + /// Id of the received DNS query. We need to pass this ID back in the + /// results. 
query_id: u16, } @@ -113,7 +120,8 @@ impl fmt::Debug for MdnsQuery { pub(crate) struct MdnsServiceDiscovery { /// Sender of the address. from: SocketAddr, - /// Id of the received DNS query. We need to pass this ID back in the results. + /// Id of the received DNS query. We need to pass this ID back in the + /// results. query_id: u16, } @@ -202,7 +210,8 @@ impl MdnsResponse { /// Returns the list of peers that have been reported in this packet. /// - /// > **Note**: Keep in mind that this will also contain the responses we sent ourselves. + /// > **Note**: Keep in mind that this will also contain the responses we + /// > sent ourselves. fn discovered_peers(&self) -> impl Iterator { self.peers.iter() } @@ -307,8 +316,7 @@ impl fmt::Debug for MdnsPeer { #[cfg(test)] mod tests { - use super::super::dns::build_query_response; - use super::*; + use super::{super::dns::build_query_response, *}; #[test] fn test_create_mdns_peer() { diff --git a/protocols/mdns/src/behaviour/socket.rs b/protocols/mdns/src/behaviour/socket.rs index ebaad17e45f..cb17c8ef7a8 100644 --- a/protocols/mdns/src/behaviour/socket.rs +++ b/protocols/mdns/src/behaviour/socket.rs @@ -24,7 +24,8 @@ use std::{ task::{Context, Poll}, }; -/// Interface that must be implemented by the different runtimes to use the [`UdpSocket`] in async mode +/// Interface that must be implemented by the different runtimes to use the +/// [`UdpSocket`] in async mode #[allow(unreachable_pub)] // Users should not depend on this. pub trait AsyncSocket: Unpin + Send + 'static { /// Create the async socket from the [`std::net::UdpSocket`] @@ -32,7 +33,8 @@ pub trait AsyncSocket: Unpin + Send + 'static { where Self: Sized; - /// Attempts to receive a single packet on the socket from the remote address to which it is connected. + /// Attempts to receive a single packet on the socket from the remote + /// address to which it is connected. 
fn poll_read( &mut self, _cx: &mut Context, @@ -50,10 +52,11 @@ pub trait AsyncSocket: Unpin + Send + 'static { #[cfg(feature = "async-io")] pub(crate) mod asio { - use super::*; use async_io::Async; use futures::FutureExt; + use super::*; + /// AsyncIo UdpSocket pub(crate) type AsyncUdpSocket = Async; impl AsyncSocket for AsyncUdpSocket { @@ -92,9 +95,10 @@ pub(crate) mod asio { #[cfg(feature = "tokio")] pub(crate) mod tokio { - use super::*; use ::tokio::{io::ReadBuf, net::UdpSocket as TkUdpSocket}; + use super::*; + /// Tokio ASync Socket` pub(crate) type TokioUdpSocket = TkUdpSocket; impl AsyncSocket for TokioUdpSocket { diff --git a/protocols/mdns/src/behaviour/timer.rs b/protocols/mdns/src/behaviour/timer.rs index 5e284654676..5fdb1beffae 100644 --- a/protocols/mdns/src/behaviour/timer.rs +++ b/protocols/mdns/src/behaviour/timer.rs @@ -42,14 +42,16 @@ pub trait Builder: Send + Unpin + 'static { #[cfg(feature = "async-io")] pub(crate) mod asio { - use super::*; - use async_io::Timer as AsioTimer; - use futures::Stream; use std::{ pin::Pin, task::{Context, Poll}, }; + use async_io::Timer as AsioTimer; + use futures::Stream; + + use super::*; + /// Async Timer pub(crate) type AsyncTimer = Timer; impl Builder for AsyncTimer { @@ -83,14 +85,16 @@ pub(crate) mod asio { #[cfg(feature = "tokio")] pub(crate) mod tokio { - use super::*; - use ::tokio::time::{self, Instant as TokioInstant, Interval, MissedTickBehavior}; - use futures::Stream; use std::{ pin::Pin, task::{Context, Poll}, }; + use ::tokio::time::{self, Instant as TokioInstant, Interval, MissedTickBehavior}; + use futures::Stream; + + use super::*; + /// Tokio wrapper pub(crate) type TokioTimer = Timer; impl Builder for TokioTimer { diff --git a/protocols/mdns/src/lib.rs b/protocols/mdns/src/lib.rs index 4823d740272..272e355e177 100644 --- a/protocols/mdns/src/lib.rs +++ b/protocols/mdns/src/lib.rs @@ -23,29 +23,28 @@ //! 
mDNS is a protocol defined by [RFC 6762](https://tools.ietf.org/html/rfc6762) that allows //! querying nodes that correspond to a certain domain name. //! -//! In the context of libp2p, the mDNS protocol is used to discover other nodes on the local -//! network that support libp2p. +//! In the context of libp2p, the mDNS protocol is used to discover other nodes +//! on the local network that support libp2p. //! //! # Usage //! -//! This crate provides a `Mdns` and `TokioMdns`, depending on the enabled features, which -//! implements the `NetworkBehaviour` trait. This struct will automatically discover other -//! libp2p nodes on the local network. -//! +//! This crate provides a `Mdns` and `TokioMdns`, depending on the enabled +//! features, which implements the `NetworkBehaviour` trait. This struct will +//! automatically discover other libp2p nodes on the local network. #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -use std::net::{Ipv4Addr, Ipv6Addr}; -use std::time::Duration; +use std::{ + net::{Ipv4Addr, Ipv6Addr}, + time::Duration, +}; mod behaviour; -pub use crate::behaviour::{Behaviour, Event}; - #[cfg(feature = "async-io")] pub use crate::behaviour::async_io; - #[cfg(feature = "tokio")] pub use crate::behaviour::tokio; +pub use crate::behaviour::{Behaviour, Event}; /// The DNS service name for all libp2p peers used to query for addresses. 
const SERVICE_NAME: &[u8] = b"_p2p._udp.local"; diff --git a/protocols/mdns/tests/use-async-std.rs b/protocols/mdns/tests/use-async-std.rs index 549f70978af..deb73556fbd 100644 --- a/protocols/mdns/tests/use-async-std.rs +++ b/protocols/mdns/tests/use-async-std.rs @@ -18,12 +18,12 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE.use futures::StreamExt; +use std::time::Duration; + use futures::future::Either; -use libp2p_mdns::Event; -use libp2p_mdns::{async_io::Behaviour, Config}; +use libp2p_mdns::{async_io::Behaviour, Config, Event}; use libp2p_swarm::{Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt as _; -use std::time::Duration; use tracing_subscriber::EnvFilter; #[async_std::test] @@ -158,7 +158,8 @@ async fn create_swarm(config: Config) -> Swarm { let mut swarm = Swarm::new_ephemeral(|key| Behaviour::new(config, key.public().to_peer_id()).unwrap()); - // Manually listen on all interfaces because mDNS only works for non-loopback addresses. + // Manually listen on all interfaces because mDNS only works for non-loopback + // addresses. 
let expected_listener_id = swarm .listen_on("/ip4/0.0.0.0/tcp/0".parse().unwrap()) .unwrap(); diff --git a/protocols/mdns/tests/use-tokio.rs b/protocols/mdns/tests/use-tokio.rs index cf0d9f4bed4..37d2ffcacfd 100644 --- a/protocols/mdns/tests/use-tokio.rs +++ b/protocols/mdns/tests/use-tokio.rs @@ -17,11 +17,12 @@ // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE.use futures::StreamExt; +use std::time::Duration; + use futures::future::Either; use libp2p_mdns::{tokio::Behaviour, Config, Event}; use libp2p_swarm::{Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt as _; -use std::time::Duration; use tracing_subscriber::EnvFilter; #[tokio::test] @@ -112,7 +113,8 @@ async fn create_swarm(config: Config) -> Swarm { let mut swarm = Swarm::new_ephemeral(|key| Behaviour::new(config, key.public().to_peer_id()).unwrap()); - // Manually listen on all interfaces because mDNS only works for non-loopback addresses. + // Manually listen on all interfaces because mDNS only works for non-loopback + // addresses. 
let expected_listener_id = swarm .listen_on("/ip4/0.0.0.0/tcp/0".parse().unwrap()) .unwrap(); diff --git a/protocols/perf/src/bin/perf.rs b/protocols/perf/src/bin/perf.rs index 9a4cfb8bcac..506455f081a 100644 --- a/protocols/perf/src/bin/perf.rs +++ b/protocols/perf/src/bin/perf.rs @@ -23,12 +23,13 @@ use std::{net::SocketAddr, str::FromStr}; use anyhow::{bail, Result}; use clap::Parser; use futures::StreamExt; -use libp2p::core::{multiaddr::Protocol, upgrade, Multiaddr}; -use libp2p::identity::PeerId; -use libp2p::swarm::{NetworkBehaviour, Swarm, SwarmEvent}; -use libp2p::SwarmBuilder; -use libp2p_perf::{client, server}; -use libp2p_perf::{Final, Intermediate, Run, RunParams, RunUpdate}; +use libp2p::{ + core::{multiaddr::Protocol, upgrade, Multiaddr}, + identity::PeerId, + swarm::{NetworkBehaviour, Swarm, SwarmEvent}, + SwarmBuilder, +}; +use libp2p_perf::{client, server, Final, Intermediate, Run, RunParams, RunUpdate}; use serde::{Deserialize, Serialize}; use tracing_subscriber::EnvFilter; use web_time::{Duration, Instant}; diff --git a/protocols/perf/src/client.rs b/protocols/perf/src/client.rs index 9f984a5bba1..7699bc85c17 100644 --- a/protocols/perf/src/client.rs +++ b/protocols/perf/src/client.rs @@ -21,11 +21,13 @@ mod behaviour; mod handler; -use std::sync::atomic::{AtomicUsize, Ordering}; +use std::{ + convert::Infallible, + sync::atomic::{AtomicUsize, Ordering}, +}; pub use behaviour::{Behaviour, Event}; use libp2p_swarm::StreamUpgradeError; -use std::convert::Infallible; static NEXT_RUN_ID: AtomicUsize = AtomicUsize::new(1); diff --git a/protocols/perf/src/client/behaviour.rs b/protocols/perf/src/client/behaviour.rs index 1b181557acc..7a585bb6237 100644 --- a/protocols/perf/src/client/behaviour.rs +++ b/protocols/perf/src/client/behaviour.rs @@ -28,14 +28,19 @@ use std::{ use libp2p_core::{transport::PortUse, Multiaddr}; use libp2p_identity::PeerId; use libp2p_swarm::{ - derive_prelude::ConnectionEstablished, ConnectionClosed, ConnectionId, FromSwarm, 
- NetworkBehaviour, NotifyHandler, THandlerInEvent, THandlerOutEvent, ToSwarm, + derive_prelude::ConnectionEstablished, + ConnectionClosed, + ConnectionId, + FromSwarm, + NetworkBehaviour, + NotifyHandler, + THandlerInEvent, + THandlerOutEvent, + ToSwarm, }; -use crate::RunParams; -use crate::{client::handler::Handler, RunUpdate}; - use super::{RunError, RunId}; +use crate::{client::handler::Handler, RunParams, RunUpdate}; #[derive(Debug)] pub struct Event { diff --git a/protocols/perf/src/client/handler.rs b/protocols/perf/src/client/handler.rs index 85e864949f8..5634a2bf390 100644 --- a/protocols/perf/src/client/handler.rs +++ b/protocols/perf/src/client/handler.rs @@ -30,14 +30,23 @@ use futures::{ use libp2p_core::upgrade::{DeniedUpgrade, ReadyUpgrade}; use libp2p_swarm::{ handler::{ - ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound, + ConnectionEvent, + DialUpgradeError, + FullyNegotiatedInbound, + FullyNegotiatedOutbound, ListenUpgradeError, }, - ConnectionHandler, ConnectionHandlerEvent, StreamProtocol, SubstreamProtocol, + ConnectionHandler, + ConnectionHandlerEvent, + StreamProtocol, + SubstreamProtocol, }; -use crate::client::{RunError, RunId}; -use crate::{RunParams, RunUpdate}; +use crate::{ + client::{RunError, RunId}, + RunParams, + RunUpdate, +}; #[derive(Debug)] pub struct Command { diff --git a/protocols/perf/src/lib.rs b/protocols/perf/src/lib.rs index be950ac87a2..69ecad5c311 100644 --- a/protocols/perf/src/lib.rs +++ b/protocols/perf/src/lib.rs @@ -77,8 +77,8 @@ pub struct Final { /// Parameters for a single run, i.e. one stream, sending and receiving data. /// -/// Property names are from the perspective of the actor. E.g. `to_send` is the amount of data to -/// send, both as the client and the server. +/// Property names are from the perspective of the actor. E.g. `to_send` is the +/// amount of data to send, both as the client and the server. 
#[derive(Debug, Clone, Copy)] pub struct RunParams { pub to_send: usize, diff --git a/protocols/perf/src/protocol.rs b/protocols/perf/src/protocol.rs index f995bbe2d3b..961d7f8255b 100644 --- a/protocols/perf/src/protocol.rs +++ b/protocols/perf/src/protocol.rs @@ -18,14 +18,21 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use futures_timer::Delay; use std::time::Duration; -use web_time::Instant; use futures::{ future::{select, Either}, - AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt, FutureExt, SinkExt, Stream, StreamExt, + AsyncRead, + AsyncReadExt, + AsyncWrite, + AsyncWriteExt, + FutureExt, + SinkExt, + Stream, + StreamExt, }; +use futures_timer::Delay; +use web_time::Instant; use crate::{Final, Intermediate, Run, RunDuration, RunParams, RunUpdate}; @@ -36,8 +43,8 @@ pub(crate) fn send_receive( params: RunParams, stream: S, ) -> impl Stream> { - // Use a channel to simulate a generator. `send_receive_inner` can `yield` events through the - // channel. + // Use a channel to simulate a generator. `send_receive_inner` can `yield` + // events through the channel. 
let (sender, receiver) = futures::channel::mpsc::channel(0); let receiver = receiver.fuse(); let inner = send_receive_inner(params, stream, sender).fuse(); diff --git a/protocols/perf/src/server/behaviour.rs b/protocols/perf/src/server/behaviour.rs index 5408029e85d..75e0aff744a 100644 --- a/protocols/perf/src/server/behaviour.rs +++ b/protocols/perf/src/server/behaviour.rs @@ -28,11 +28,15 @@ use std::{ use libp2p_core::transport::PortUse; use libp2p_identity::PeerId; use libp2p_swarm::{ - ConnectionId, FromSwarm, NetworkBehaviour, THandlerInEvent, THandlerOutEvent, ToSwarm, + ConnectionId, + FromSwarm, + NetworkBehaviour, + THandlerInEvent, + THandlerOutEvent, + ToSwarm, }; -use crate::server::handler::Handler; -use crate::Run; +use crate::{server::handler::Handler, Run}; #[derive(Debug)] pub struct Event { diff --git a/protocols/perf/src/server/handler.rs b/protocols/perf/src/server/handler.rs index c1363ae2380..edc95613ad0 100644 --- a/protocols/perf/src/server/handler.rs +++ b/protocols/perf/src/server/handler.rs @@ -18,18 +18,26 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use std::task::{Context, Poll}; +use std::{ + convert::Infallible, + task::{Context, Poll}, +}; use futures::FutureExt; use libp2p_core::upgrade::{DeniedUpgrade, ReadyUpgrade}; use libp2p_swarm::{ handler::{ - ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound, + ConnectionEvent, + DialUpgradeError, + FullyNegotiatedInbound, + FullyNegotiatedOutbound, ListenUpgradeError, }, - ConnectionHandler, ConnectionHandlerEvent, StreamProtocol, SubstreamProtocol, + ConnectionHandler, + ConnectionHandlerEvent, + StreamProtocol, + SubstreamProtocol, }; -use std::convert::Infallible; use tracing::error; use crate::Run; diff --git a/protocols/perf/tests/lib.rs b/protocols/perf/tests/lib.rs index 017d475befd..a4de522e7a6 100644 --- a/protocols/perf/tests/lib.rs +++ b/protocols/perf/tests/lib.rs @@ -20,7 +20,8 @@ use libp2p_perf::{ client::{self}, - server, RunParams, + server, + RunParams, }; use libp2p_swarm::{Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt; diff --git a/protocols/ping/src/handler.rs b/protocols/ping/src/handler.rs index 961716e934a..757c6690d2a 100644 --- a/protocols/ping/src/handler.rs +++ b/protocols/ping/src/handler.rs @@ -18,27 +18,34 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::{protocol, PROTOCOL_NAME}; -use futures::future::{BoxFuture, Either}; -use futures::prelude::*; -use futures_timer::Delay; -use libp2p_core::upgrade::ReadyUpgrade; -use libp2p_swarm::handler::{ - ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound, -}; -use libp2p_swarm::{ - ConnectionHandler, ConnectionHandlerEvent, Stream, StreamProtocol, StreamUpgradeError, - SubstreamProtocol, -}; -use std::collections::VecDeque; -use std::convert::Infallible; use std::{ + collections::VecDeque, + convert::Infallible, error::Error, - fmt, io, + fmt, + io, task::{Context, Poll}, time::Duration, }; +use futures::{ + future::{BoxFuture, Either}, + prelude::*, +}; +use futures_timer::Delay; +use libp2p_core::upgrade::ReadyUpgrade; +use libp2p_swarm::{ + handler::{ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound}, + ConnectionHandler, + ConnectionHandlerEvent, + Stream, + StreamProtocol, + StreamUpgradeError, + SubstreamProtocol, +}; + +use crate::{protocol, PROTOCOL_NAME}; + /// The configuration for outbound pings. #[derive(Debug, Clone)] pub struct Config { @@ -154,7 +161,8 @@ enum State { Inactive { /// Whether or not we've reported the missing support yet. /// - /// This is used to avoid repeated events being emitted for a specific connection. + /// This is used to avoid repeated events being emitted for a specific + /// connection. reported: bool, }, /// We are actively pinging the other peer. @@ -185,7 +193,8 @@ impl Handler { self.outbound = None; // Request a new substream on the next `poll`. // Timer is already polled and expired before substream request is initiated - // and will be polled again later on in our `poll` because we reset `self.outbound`. + // and will be polled again later on in our `poll` because we reset + // `self.outbound`. // // `futures-timer` allows an expired timer to be polled again and returns // immediately `Poll::Ready`. 
However in its WASM implementation there is diff --git a/protocols/ping/src/lib.rs b/protocols/ping/src/lib.rs index 82f240cab6b..cf8277a4f9e 100644 --- a/protocols/ping/src/lib.rs +++ b/protocols/ping/src/lib.rs @@ -27,9 +27,11 @@ //! # Usage //! //! The [`Behaviour`] struct implements the [`NetworkBehaviour`] trait. -//! It will respond to inbound ping requests and periodically send outbound ping requests on every established connection. +//! It will respond to inbound ping requests and periodically send outbound ping +//! requests on every established connection. //! -//! It is up to the user to implement a health-check / connection management policy based on the ping protocol. +//! It is up to the user to implement a health-check / connection management +//! policy based on the ping protocol. //! //! For example: //! @@ -39,8 +41,10 @@ //! //! Users should inspect emitted [`Event`]s and call APIs on [`Swarm`]: //! -//! - [`Swarm::close_connection`](libp2p_swarm::Swarm::close_connection) to close a specific connection -//! - [`Swarm::disconnect_peer_id`](libp2p_swarm::Swarm::disconnect_peer_id) to close all connections to a peer +//! - [`Swarm::close_connection`](libp2p_swarm::Swarm::close_connection) to +//! close a specific connection +//! - [`Swarm::disconnect_peer_id`](libp2p_swarm::Swarm::disconnect_peer_id) to +//! close all connections to a peer //! //! [`Swarm`]: libp2p_swarm::Swarm //! 
[`Transport`]: libp2p_core::Transport @@ -50,22 +54,28 @@ mod handler; mod protocol; -use handler::Handler; -use libp2p_core::transport::PortUse; -use libp2p_core::{Endpoint, Multiaddr}; -use libp2p_identity::PeerId; -use libp2p_swarm::{ - behaviour::FromSwarm, ConnectionDenied, ConnectionId, NetworkBehaviour, THandler, - THandlerInEvent, THandlerOutEvent, ToSwarm, -}; -use std::time::Duration; use std::{ collections::VecDeque, task::{Context, Poll}, + time::Duration, }; -pub use self::protocol::PROTOCOL_NAME; +use handler::Handler; pub use handler::{Config, Failure}; +use libp2p_core::{transport::PortUse, Endpoint, Multiaddr}; +use libp2p_identity::PeerId; +use libp2p_swarm::{ + behaviour::FromSwarm, + ConnectionDenied, + ConnectionId, + NetworkBehaviour, + THandler, + THandlerInEvent, + THandlerOutEvent, + ToSwarm, +}; + +pub use self::protocol::PROTOCOL_NAME; /// A [`NetworkBehaviour`] that responds to inbound pings and /// periodically sends outbound pings on every established connection. diff --git a/protocols/ping/src/protocol.rs b/protocols/ping/src/protocol.rs index 101c219aac4..5e84f55e090 100644 --- a/protocols/ping/src/protocol.rs +++ b/protocols/ping/src/protocol.rs @@ -18,10 +18,11 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. +use std::{io, time::Duration}; + use futures::prelude::*; use libp2p_swarm::StreamProtocol; use rand::{distributions, prelude::*}; -use std::{io, time::Duration}; use web_time::Instant; pub const PROTOCOL_NAME: StreamProtocol = StreamProtocol::new("/ipfs/ping/1.0.0"); @@ -40,10 +41,10 @@ pub const PROTOCOL_NAME: StreamProtocol = StreamProtocol::new("/ipfs/ping/1.0.0" /// Successful pings report the round-trip time. /// /// > **Note**: The round-trip time of a ping may be subject to delays induced -/// > by the underlying transport, e.g. 
in the case of TCP there is -/// > Nagle's algorithm, delayed acks and similar configuration options -/// > which can affect latencies especially on otherwise low-volume -/// > connections. +/// > by the underlying transport, e.g. in the case of TCP there is +/// > Nagle's algorithm, delayed acks and similar configuration options +/// > which can affect latencies especially on otherwise low-volume +/// > connections. const PING_SIZE: usize = 32; /// Sends a ping and waits for the pong. @@ -81,7 +82,6 @@ where #[cfg(test)] mod tests { - use super::*; use futures::StreamExt; use libp2p_core::{ multiaddr::multiaddr, @@ -89,6 +89,8 @@ mod tests { Endpoint, }; + use super::*; + #[tokio::test] async fn ping_pong() { let mem_addr = multiaddr![Memory(thread_rng().gen::())]; diff --git a/protocols/ping/tests/ping.rs b/protocols/ping/tests/ping.rs index 0752b1fced9..210f9435e4a 100644 --- a/protocols/ping/tests/ping.rs +++ b/protocols/ping/tests/ping.rs @@ -20,12 +20,12 @@ //! Integration tests for the `Ping` network behaviour. 
+use std::{num::NonZeroU8, time::Duration}; + use libp2p_ping as ping; -use libp2p_swarm::dummy; -use libp2p_swarm::{Swarm, SwarmEvent}; +use libp2p_swarm::{dummy, Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt; use quickcheck::*; -use std::{num::NonZeroU8, time::Duration}; #[tokio::test] async fn ping_pong() { diff --git a/protocols/relay/src/behaviour.rs b/protocols/relay/src/behaviour.rs index e854ed2a1ff..ae8e9e2ed6f 100644 --- a/protocols/relay/src/behaviour.rs +++ b/protocols/relay/src/behaviour.rs @@ -22,27 +22,39 @@ pub(crate) mod handler; pub(crate) mod rate_limiter; -use crate::behaviour::handler::Handler; -use crate::multiaddr_ext::MultiaddrExt; -use crate::proto; -use crate::protocol::{inbound_hop, outbound_stop}; +use std::{ + collections::{hash_map, HashMap, HashSet, VecDeque}, + num::NonZeroU32, + ops::Add, + task::{Context, Poll}, + time::Duration, +}; + use either::Either; -use libp2p_core::multiaddr::Protocol; -use libp2p_core::transport::PortUse; -use libp2p_core::{ConnectedPoint, Endpoint, Multiaddr}; +use libp2p_core::{multiaddr::Protocol, transport::PortUse, ConnectedPoint, Endpoint, Multiaddr}; use libp2p_identity::PeerId; -use libp2p_swarm::behaviour::{ConnectionClosed, FromSwarm}; use libp2p_swarm::{ - dummy, ConnectionDenied, ConnectionId, ExternalAddresses, NetworkBehaviour, NotifyHandler, - THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, + behaviour::{ConnectionClosed, FromSwarm}, + dummy, + ConnectionDenied, + ConnectionId, + ExternalAddresses, + NetworkBehaviour, + NotifyHandler, + THandler, + THandlerInEvent, + THandlerOutEvent, + ToSwarm, }; -use std::collections::{hash_map, HashMap, HashSet, VecDeque}; -use std::num::NonZeroU32; -use std::ops::Add; -use std::task::{Context, Poll}; -use std::time::Duration; use web_time::Instant; +use crate::{ + behaviour::handler::Handler, + multiaddr_ext::MultiaddrExt, + proto, + protocol::{inbound_hop, outbound_stop}, +}; + /// Configuration for the relay [`Behaviour`]. 
/// /// # Panics @@ -120,12 +132,14 @@ impl std::fmt::Debug for Config { impl Default for Config { fn default() -> Self { let reservation_rate_limiters = vec![ - // For each peer ID one reservation every 2 minutes with up to 30 reservations per hour. + // For each peer ID one reservation every 2 minutes with up to 30 reservations per + // hour. rate_limiter::new_per_peer(rate_limiter::GenericRateLimiterConfig { limit: NonZeroU32::new(30).expect("30 > 0"), interval: Duration::from_secs(60 * 2), }), - // For each IP address one reservation every minute with up to 60 reservations per hour. + // For each IP address one reservation every minute with up to 60 reservations per + // hour. rate_limiter::new_per_ip(rate_limiter::GenericRateLimiterConfig { limit: NonZeroU32::new(60).expect("60 > 0"), interval: Duration::from_secs(60), @@ -381,12 +395,13 @@ impl NetworkBehaviour for Behaviour { assert!( !endpoint.is_relayed(), - "`dummy::ConnectionHandler` handles relayed connections. It \ - denies all inbound substreams." + "`dummy::ConnectionHandler` handles relayed connections. It denies all \ + inbound substreams." ); let action = if - // Deny if it is a new reservation and exceeds `max_reservations_per_peer`. + // Deny if it is a new reservation and exceeds + // `max_reservations_per_peer`. (!renewed && self .reservations @@ -495,9 +510,9 @@ impl NetworkBehaviour for Behaviour { } hash_map::Entry::Vacant(_) => { unreachable!( - "Expect to track timed out reservation with peer {:?} on connection {:?}", - event_source, - connection, + "Expect to track timed out reservation with peer {:?} on connection \ + {:?}", + event_source, connection, ); } } @@ -515,8 +530,8 @@ impl NetworkBehaviour for Behaviour { assert!( !endpoint.is_relayed(), - "`dummy::ConnectionHandler` handles relayed connections. It \ - denies all inbound substreams." + "`dummy::ConnectionHandler` handles relayed connections. It denies all \ + inbound substreams." 
); let action = if self.circuits.num_circuits_of_peer(event_source) diff --git a/protocols/relay/src/behaviour/handler.rs b/protocols/relay/src/behaviour/handler.rs index 92e45720f3f..d0eb07674e3 100644 --- a/protocols/relay/src/behaviour/handler.rs +++ b/protocols/relay/src/behaviour/handler.rs @@ -18,32 +18,45 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::behaviour::CircuitId; -use crate::copy_future::CopyFuture; -use crate::protocol::{inbound_hop, outbound_stop}; -use crate::{proto, HOP_PROTOCOL_NAME, STOP_PROTOCOL_NAME}; +use std::{ + collections::{HashMap, VecDeque}, + fmt, + io, + task::{Context, Poll}, + time::Duration, +}; + use bytes::Bytes; use either::Either; -use futures::future::{BoxFuture, FutureExt, TryFutureExt}; -use futures::io::AsyncWriteExt; -use futures::stream::{FuturesUnordered, StreamExt}; +use futures::{ + future::{BoxFuture, FutureExt, TryFutureExt}, + io::AsyncWriteExt, + stream::{FuturesUnordered, StreamExt}, +}; use futures_timer::Delay; -use libp2p_core::upgrade::ReadyUpgrade; -use libp2p_core::{ConnectedPoint, Multiaddr}; +use libp2p_core::{upgrade::ReadyUpgrade, ConnectedPoint, Multiaddr}; use libp2p_identity::PeerId; -use libp2p_swarm::handler::{ - ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound, -}; use libp2p_swarm::{ - ConnectionHandler, ConnectionHandlerEvent, ConnectionId, Stream, StreamProtocol, - StreamUpgradeError, SubstreamProtocol, + handler::{ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound}, + ConnectionHandler, + ConnectionHandlerEvent, + ConnectionId, + Stream, + StreamProtocol, + StreamUpgradeError, + SubstreamProtocol, }; -use std::collections::{HashMap, VecDeque}; -use std::task::{Context, Poll}; -use std::time::Duration; -use std::{fmt, io}; use web_time::Instant; +use crate::{ + behaviour::CircuitId, + copy_future::CopyFuture, + proto, + protocol::{inbound_hop, outbound_stop}, + 
HOP_PROTOCOL_NAME, + STOP_PROTOCOL_NAME, +}; + const MAX_CONCURRENT_STREAMS_PER_CONNECTION: usize = 10; const STREAM_TIMEOUT: Duration = Duration::from_secs(60); diff --git a/protocols/relay/src/behaviour/rate_limiter.rs b/protocols/relay/src/behaviour/rate_limiter.rs index 45b701c1b50..01774858d3c 100644 --- a/protocols/relay/src/behaviour/rate_limiter.rs +++ b/protocols/relay/src/behaviour/rate_limiter.rs @@ -18,18 +18,20 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. +use std::{ + collections::{HashMap, VecDeque}, + hash::Hash, + net::IpAddr, + num::NonZeroU32, + time::Duration, +}; + use libp2p_core::multiaddr::{Multiaddr, Protocol}; use libp2p_identity::PeerId; -use std::collections::{HashMap, VecDeque}; -use std::hash::Hash; -use std::net::IpAddr; -use std::num::NonZeroU32; -use std::time::Duration; use web_time::Instant; /// Allows rate limiting access to some resource based on the [`PeerId`] and /// [`Multiaddr`] of a remote peer. -// // See [`new_per_peer`] and [`new_per_ip`] for precast implementations. Use // [`GenericRateLimiter`] to build your own, e.g. based on the autonomous system // number of a peers IP address. @@ -120,8 +122,9 @@ impl GenericRateLimiter { } fn refill(&mut self, now: Instant) { - // Note when used with a high number of buckets: This loop refills all the to-be-refilled - // buckets at once, thus potentially delaying the parent call to `try_next`. + // Note when used with a high number of buckets: This loop refills all the + // to-be-refilled buckets at once, thus potentially delaying the parent + // call to `try_next`. 
loop { match self.refill_schedule.front() { // Only continue if (a) there is a bucket and (b) the bucket has not already been @@ -170,9 +173,10 @@ impl GenericRateLimiter { #[cfg(test)] mod tests { - use super::*; use quickcheck::{QuickCheck, TestResult}; + use super::*; + #[test] fn first() { let id = 1; diff --git a/protocols/relay/src/copy_future.rs b/protocols/relay/src/copy_future.rs index c0039c29534..9d1e2e49336 100644 --- a/protocols/relay/src/copy_future.rs +++ b/protocols/relay/src/copy_future.rs @@ -19,21 +19,24 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -//! Helper to interconnect two substreams, connecting the receiver side of A with the sender side of -//! B and vice versa. +//! Helper to interconnect two substreams, connecting the receiver side of A +//! with the sender side of B and vice versa. //! //! Inspired by [`futures::io::Copy`]. -use futures::future::Future; -use futures::future::FutureExt; -use futures::io::{AsyncBufRead, BufReader}; -use futures::io::{AsyncRead, AsyncWrite}; -use futures::ready; +use std::{ + io, + pin::Pin, + task::{Context, Poll}, + time::Duration, +}; + +use futures::{ + future::{Future, FutureExt}, + io::{AsyncBufRead, AsyncRead, AsyncWrite, BufReader}, + ready, +}; use futures_timer::Delay; -use std::io; -use std::pin::Pin; -use std::task::{Context, Poll}; -use std::time::Duration; pub(crate) struct CopyFuture { src: BufReader, @@ -129,8 +132,8 @@ where /// Forwards data from `source` to `destination`. /// -/// Returns `0` when done, i.e. `source` having reached EOF, returns number of bytes sent otherwise, -/// thus indicating progress. +/// Returns `0` when done, i.e. `source` having reached EOF, returns number of +/// bytes sent otherwise, thus indicating progress. 
fn forward_data( mut src: &mut S, mut dst: &mut D, @@ -161,12 +164,13 @@ fn forward_data( #[cfg(test)] mod tests { - use super::*; - use futures::executor::block_on; - use futures::io::BufWriter; - use quickcheck::QuickCheck; use std::io::ErrorKind; + use futures::{executor::block_on, io::BufWriter}; + use quickcheck::QuickCheck; + + use super::*; + #[test] fn quickcheck() { struct Connection { @@ -356,12 +360,14 @@ mod tests { } } - // The source has two reads available, handing them out on `AsyncRead::poll_read` one by one. + // The source has two reads available, handing them out on + // `AsyncRead::poll_read` one by one. let mut source = BufReader::new(NeverEndingSource { read: vec![1, 2] }); - // The destination is wrapped by a `BufWriter` with a capacity of `3`, i.e. one larger than - // the available reads of the source. Without an explicit `AsyncWrite::poll_flush` the two - // reads would thus never make it to the destination, but instead be stuck in the buffer of + // The destination is wrapped by a `BufWriter` with a capacity of `3`, i.e. one + // larger than the available reads of the source. Without an explicit + // `AsyncWrite::poll_flush` the two reads would thus never make it to + // the destination, but instead be stuck in the buffer of // the `BufWrite`. let mut destination = BufWriter::with_capacity( 3, @@ -380,10 +386,11 @@ mod tests { "Expect `forward_data` to forward one read from the source to the wrapped destination." ); assert_eq!( - destination.get_ref().method_calls.as_slice(), &[], - "Given that destination is wrapped with a `BufWrite`, the write doesn't (yet) make it to \ - the destination. The source might have more data available, thus `forward_data` has not \ - yet flushed.", + destination.get_ref().method_calls.as_slice(), + &[], + "Given that destination is wrapped with a `BufWrite`, the write doesn't (yet) make it \ + to the destination. 
The source might have more data available, thus `forward_data` \ + has not yet flushed.", ); assert!( @@ -394,10 +401,11 @@ mod tests { "Expect `forward_data` to forward one read from the source to the wrapped destination." ); assert_eq!( - destination.get_ref().method_calls.as_slice(), &[], - "Given that destination is wrapped with a `BufWrite`, the write doesn't (yet) make it to \ - the destination. The source might have more data available, thus `forward_data` has not \ - yet flushed.", + destination.get_ref().method_calls.as_slice(), + &[], + "Given that destination is wrapped with a `BufWrite`, the write doesn't (yet) make it \ + to the destination. The source might have more data available, thus `forward_data` \ + has not yet flushed.", ); assert!( @@ -406,14 +414,14 @@ mod tests { Poll::Pending, ), "The source has no more reads available, but does not close i.e. does not return \ - `Poll::Ready(Ok(1))` but instead `Poll::Pending`. Thus `forward_data` returns \ - `Poll::Pending` as well." + `Poll::Ready(Ok(1))` but instead `Poll::Pending`. Thus `forward_data` returns \ + `Poll::Pending` as well." ); assert_eq!( destination.get_ref().method_calls.as_slice(), &[Method::Write(vec![2, 1]), Method::Flush], - "Given that source had no more reads, `forward_data` calls flush, thus instructing the \ - `BufWriter` to flush the two buffered writes down to the destination." + "Given that source had no more reads, `forward_data` calls flush, thus instructing \ + the `BufWriter` to flush the two buffered writes down to the destination." 
); } } diff --git a/protocols/relay/src/lib.rs b/protocols/relay/src/lib.rs index eca3578d599..8125e19acb8 100644 --- a/protocols/relay/src/lib.rs +++ b/protocols/relay/src/lib.rs @@ -32,10 +32,15 @@ mod protocol; mod proto { #![allow(unreachable_pub)] include!("generated/mod.rs"); - pub(crate) use self::message_v2::pb::mod_HopMessage::Type as HopMessageType; pub use self::message_v2::pb::mod_StopMessage::Type as StopMessageType; pub(crate) use self::message_v2::pb::{ - HopMessage, Limit, Peer, Reservation, Status, StopMessage, + mod_HopMessage::Type as HopMessageType, + HopMessage, + Limit, + Peer, + Reservation, + Status, + StopMessage, }; } diff --git a/protocols/relay/src/multiaddr_ext.rs b/protocols/relay/src/multiaddr_ext.rs index 6991a8b9ded..7c06eb7eab0 100644 --- a/protocols/relay/src/multiaddr_ext.rs +++ b/protocols/relay/src/multiaddr_ext.rs @@ -1,5 +1,4 @@ -use libp2p_core::multiaddr::Protocol; -use libp2p_core::Multiaddr; +use libp2p_core::{multiaddr::Protocol, Multiaddr}; pub(crate) trait MultiaddrExt { fn is_relayed(&self) -> bool; diff --git a/protocols/relay/src/priv_client.rs b/protocols/relay/src/priv_client.rs index fc9d28e66ed..9c892ebe04e 100644 --- a/protocols/relay/src/priv_client.rs +++ b/protocols/relay/src/priv_client.rs @@ -23,33 +23,49 @@ pub(crate) mod handler; pub(crate) mod transport; -use crate::multiaddr_ext::MultiaddrExt; -use crate::priv_client::handler::Handler; -use crate::protocol::{self, inbound_stop}; +use std::{ + collections::{hash_map, HashMap, VecDeque}, + convert::Infallible, + io::{Error, ErrorKind, IoSlice}, + pin::Pin, + task::{Context, Poll}, +}; + use bytes::Bytes; use either::Either; -use futures::channel::mpsc::Receiver; -use futures::future::{BoxFuture, FutureExt}; -use futures::io::{AsyncRead, AsyncWrite}; -use futures::ready; -use futures::stream::StreamExt; -use libp2p_core::multiaddr::Protocol; -use libp2p_core::transport::PortUse; -use libp2p_core::{Endpoint, Multiaddr}; +use futures::{ + 
channel::mpsc::Receiver, + future::{BoxFuture, FutureExt}, + io::{AsyncRead, AsyncWrite}, + ready, + stream::StreamExt, +}; +use libp2p_core::{multiaddr::Protocol, transport::PortUse, Endpoint, Multiaddr}; use libp2p_identity::PeerId; -use libp2p_swarm::behaviour::{ConnectionClosed, ConnectionEstablished, FromSwarm}; -use libp2p_swarm::dial_opts::DialOpts; use libp2p_swarm::{ - dummy, ConnectionDenied, ConnectionHandler, ConnectionId, DialFailure, NetworkBehaviour, - NotifyHandler, Stream, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, + behaviour::{ConnectionClosed, ConnectionEstablished, FromSwarm}, + dial_opts::DialOpts, + dummy, + ConnectionDenied, + ConnectionHandler, + ConnectionId, + DialFailure, + NetworkBehaviour, + NotifyHandler, + Stream, + THandler, + THandlerInEvent, + THandlerOutEvent, + ToSwarm, }; -use std::collections::{hash_map, HashMap, VecDeque}; -use std::convert::Infallible; -use std::io::{Error, ErrorKind, IoSlice}; -use std::pin::Pin; -use std::task::{Context, Poll}; use transport::Transport; +use crate::{ + multiaddr_ext::MultiaddrExt, + priv_client::handler::Handler, + protocol::{self, inbound_stop}, +}; + /// The events produced by the client `Behaviour`. #[derive(Debug)] pub enum Event { @@ -89,7 +105,8 @@ pub struct Behaviour { /// Stores the address of a pending or confirmed reservation. /// - /// This is indexed by the [`ConnectionId`] to a relay server and the address is the `/p2p-circuit` address we reserved on it. + /// This is indexed by the [`ConnectionId`] to a relay server and the + /// address is the `/p2p-circuit` address we reserved on it. reservation_addresses: HashMap, /// Queue of actions to return when polled. @@ -98,7 +115,8 @@ pub struct Behaviour { pending_handler_commands: HashMap, } -/// Create a new client relay [`Behaviour`] with it's corresponding [`Transport`]. +/// Create a new client relay [`Behaviour`] with it's corresponding +/// [`Transport`]. 
pub fn new(local_peer_id: PeerId) -> (Transport, Behaviour) { let (transport, from_transport) = Transport::new(); let behaviour = Behaviour { @@ -376,10 +394,9 @@ impl NetworkBehaviour for Behaviour { } } None => unreachable!( - "`relay::Behaviour` polled after channel from \ - `Transport` has been closed. Unreachable under \ - the assumption that the `client::Behaviour` is never polled after \ - `client::Transport` is dropped.", + "`relay::Behaviour` polled after channel from `Transport` has been closed. \ + Unreachable under the assumption that the `client::Behaviour` is never polled \ + after `client::Transport` is dropped.", ), }; diff --git a/protocols/relay/src/priv_client/handler.rs b/protocols/relay/src/priv_client/handler.rs index 77b7f94ae60..9e67d6001ef 100644 --- a/protocols/relay/src/priv_client/handler.rs +++ b/protocols/relay/src/priv_client/handler.rs @@ -18,29 +18,41 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::client::Connection; -use crate::priv_client::transport; -use crate::priv_client::transport::ToListenerMsg; -use crate::protocol::{self, inbound_stop, outbound_hop}; -use crate::{priv_client, proto, HOP_PROTOCOL_NAME, STOP_PROTOCOL_NAME}; -use futures::channel::mpsc::Sender; -use futures::channel::{mpsc, oneshot}; -use futures::future::FutureExt; +use std::{ + collections::VecDeque, + convert::Infallible, + fmt, + io, + task::{Context, Poll}, + time::Duration, +}; + +use futures::{ + channel::{mpsc, mpsc::Sender, oneshot}, + future::FutureExt, +}; use futures_timer::Delay; -use libp2p_core::multiaddr::Protocol; -use libp2p_core::upgrade::ReadyUpgrade; -use libp2p_core::Multiaddr; +use libp2p_core::{multiaddr::Protocol, upgrade::ReadyUpgrade, Multiaddr}; use libp2p_identity::PeerId; -use libp2p_swarm::handler::{ConnectionEvent, FullyNegotiatedInbound}; use libp2p_swarm::{ - ConnectionHandler, ConnectionHandlerEvent, Stream, StreamProtocol, StreamUpgradeError, + 
handler::{ConnectionEvent, FullyNegotiatedInbound}, + ConnectionHandler, + ConnectionHandlerEvent, + Stream, + StreamProtocol, + StreamUpgradeError, SubstreamProtocol, }; -use std::collections::VecDeque; -use std::convert::Infallible; -use std::task::{Context, Poll}; -use std::time::Duration; -use std::{fmt, io}; + +use crate::{ + client::Connection, + priv_client, + priv_client::{transport, transport::ToListenerMsg}, + proto, + protocol::{self, inbound_stop, outbound_hop}, + HOP_PROTOCOL_NAME, + STOP_PROTOCOL_NAME, +}; /// The maximum number of circuits being denied concurrently. /// diff --git a/protocols/relay/src/priv_client/transport.rs b/protocols/relay/src/priv_client/transport.rs index ec1e8ca5fb8..c2d82122b43 100644 --- a/protocols/relay/src/priv_client/transport.rs +++ b/protocols/relay/src/priv_client/transport.rs @@ -19,29 +19,40 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::multiaddr_ext::MultiaddrExt; -use crate::priv_client::Connection; -use crate::protocol::outbound_hop; -use crate::protocol::outbound_hop::{ConnectError, ReserveError}; -use crate::RequestId; -use futures::channel::mpsc; -use futures::channel::oneshot; -use futures::future::{ready, BoxFuture, FutureExt, Ready}; -use futures::sink::SinkExt; -use futures::stream::SelectAll; -use futures::stream::{Stream, StreamExt}; -use libp2p_core::multiaddr::{Multiaddr, Protocol}; -use libp2p_core::transport::{DialOpts, ListenerId, TransportError, TransportEvent}; +use std::{ + collections::VecDeque, + pin::Pin, + task::{Context, Poll, Waker}, +}; + +use futures::{ + channel::{mpsc, oneshot}, + future::{ready, BoxFuture, FutureExt, Ready}, + sink::SinkExt, + stream::{SelectAll, Stream, StreamExt}, +}; +use libp2p_core::{ + multiaddr::{Multiaddr, Protocol}, + transport::{DialOpts, ListenerId, TransportError, TransportEvent}, +}; use libp2p_identity::PeerId; -use std::collections::VecDeque; -use std::pin::Pin; -use 
std::task::{Context, Poll, Waker}; use thiserror::Error; +use crate::{ + multiaddr_ext::MultiaddrExt, + priv_client::Connection, + protocol::{ + outbound_hop, + outbound_hop::{ConnectError, ReserveError}, + }, + RequestId, +}; + /// A [`Transport`] enabling client relay capabilities. /// -/// Note: The transport only handles listening and dialing on relayed [`Multiaddr`], and depends on -/// an other transport to do the actual transmission of data. They should be combined through the +/// Note: The transport only handles listening and dialing on relayed +/// [`Multiaddr`], and depends on an other transport to do the actual +/// transmission of data. They should be combined through the /// [`OrTransport`](libp2p_core::transport::choice::OrTransport). /// /// Allows the local node to: @@ -49,7 +60,8 @@ use thiserror::Error; /// 1. Establish relayed connections by dialing `/p2p-circuit` addresses. /// /// ``` -/// # use libp2p_core::{Multiaddr, multiaddr::{Protocol}, Transport, transport::{DialOpts, PortUse}, connection::Endpoint}; +/// # use libp2p_core::{Multiaddr, multiaddr::{Protocol}, Transport, +/// transport::{DialOpts, PortUse}, connection::Endpoint}; /// # use libp2p_core::transport::memory::MemoryTransport; /// # use libp2p_core::transport::choice::OrTransport; /// # use libp2p_relay as relay; @@ -64,9 +76,9 @@ use thiserror::Error; /// let dst_addr_via_relay = Multiaddr::empty() /// .with(Protocol::Memory(40)) // Relay address. /// .with(Protocol::P2p(relay_id.into())) // Relay peer id. -/// .with(Protocol::P2pCircuit) // Signal to connect via relay and not directly. -/// .with(Protocol::P2p(destination_id.into())); // Destination peer id. -/// transport.dial(dst_addr_via_relay, DialOpts { +/// .with(Protocol::P2pCircuit) // Signal to connect via relay and not +/// directly. .with(Protocol::P2p(destination_id.into())); // Destination +/// peer id. 
transport.dial(dst_addr_via_relay, DialOpts { /// port_use: PortUse::Reuse, /// role: Endpoint::Dialer, /// }).unwrap(); @@ -75,7 +87,8 @@ use thiserror::Error; /// 3. Listen for incoming relayed connections via specific relay. /// /// ``` -/// # use libp2p_core::{Multiaddr, multiaddr::{Protocol}, transport::ListenerId, Transport}; +/// # use libp2p_core::{Multiaddr, multiaddr::{Protocol}, +/// transport::ListenerId, Transport}; /// # use libp2p_core::transport::memory::MemoryTransport; /// # use libp2p_core::transport::choice::OrTransport; /// # use libp2p_relay as relay; @@ -90,8 +103,8 @@ use thiserror::Error; /// let relay_addr = Multiaddr::empty() /// .with(Protocol::Memory(40)) // Relay address. /// .with(Protocol::P2p(relay_id.into())) // Relay peer id. -/// .with(Protocol::P2pCircuit); // Signal to listen via remote relay node. -/// transport.listen_on(ListenerId::next(), relay_addr).unwrap(); +/// .with(Protocol::P2pCircuit); // Signal to listen via remote relay +/// node. transport.listen_on(ListenerId::next(), relay_addr).unwrap(); /// ``` pub struct Transport { to_behaviour: mpsc::Sender, @@ -188,7 +201,8 @@ impl libp2p_core::Transport for Transport { dst_addr, } = parse_relayed_multiaddr(addr)?; - // TODO: In the future we might want to support dialing a relay by its address only. + // TODO: In the future we might want to support dialing a relay by its address + // only. let relay_peer_id = relay_peer_id.ok_or(Error::MissingRelayPeerId)?; let relay_addr = relay_addr.ok_or(Error::MissingRelayAddr)?; let dst_peer_id = dst_peer_id.ok_or(Error::MissingDstPeerId)?; @@ -305,9 +319,11 @@ pub(crate) struct Listener { listener_id: ListenerId, /// Queue of events to report when polled. queued_events: VecDeque<::Item>, - /// Channel for messages from the behaviour [`Handler`][super::handler::Handler]. + /// Channel for messages from the behaviour + /// [`Handler`][super::handler::Handler]. 
from_behaviour: mpsc::Receiver, - /// The listener can be closed either manually with [`Transport::remove_listener`](libp2p_core::Transport) or if + /// The listener can be closed either manually with + /// [`Transport::remove_listener`](libp2p_core::Transport) or if /// the sender side of the `from_behaviour` channel is dropped. is_closed: bool, waker: Option, @@ -344,7 +360,8 @@ impl Stream for Listener { } if self.is_closed { - // Terminate the stream if the listener closed and all remaining events have been reported. + // Terminate the stream if the listener closed and all remaining events have + // been reported. self.waker = None; return Poll::Ready(None); } diff --git a/protocols/relay/src/protocol.rs b/protocols/relay/src/protocol.rs index b94151259cd..b1adeedaaf5 100644 --- a/protocols/relay/src/protocol.rs +++ b/protocols/relay/src/protocol.rs @@ -18,10 +18,12 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::proto; -use libp2p_swarm::StreamProtocol; use std::time::Duration; +use libp2p_swarm::StreamProtocol; + +use crate::proto; + pub(crate) mod inbound_hop; pub(crate) mod inbound_stop; pub(crate) mod outbound_hop; diff --git a/protocols/relay/src/protocol/inbound_hop.rs b/protocols/relay/src/protocol/inbound_hop.rs index 401c6258176..01280d70897 100644 --- a/protocols/relay/src/protocol/inbound_hop.rs +++ b/protocols/relay/src/protocol/inbound_hop.rs @@ -19,21 +19,18 @@ // DEALINGS IN THE SOFTWARE. 
use std::time::Duration; -use web_time::SystemTime; use asynchronous_codec::{Framed, FramedParts}; use bytes::Bytes; use either::Either; use futures::prelude::*; -use thiserror::Error; - use libp2p_core::Multiaddr; use libp2p_identity::PeerId; use libp2p_swarm::Stream; +use thiserror::Error; +use web_time::SystemTime; -use crate::proto; -use crate::proto::message_v2::pb::mod_HopMessage::Type; -use crate::protocol::MAX_MESSAGE_SIZE; +use crate::{proto, proto::message_v2::pb::mod_HopMessage::Type, protocol::MAX_MESSAGE_SIZE}; #[derive(Debug, Error)] pub enum Error { diff --git a/protocols/relay/src/protocol/inbound_stop.rs b/protocols/relay/src/protocol/inbound_stop.rs index b698a5ff769..8994c2cff73 100644 --- a/protocols/relay/src/protocol/inbound_stop.rs +++ b/protocols/relay/src/protocol/inbound_stop.rs @@ -18,16 +18,20 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::proto; -use crate::protocol::{self, MAX_MESSAGE_SIZE}; +use std::io; + use asynchronous_codec::{Framed, FramedParts}; use bytes::Bytes; use futures::prelude::*; use libp2p_identity::PeerId; use libp2p_swarm::Stream; -use std::io; use thiserror::Error; +use crate::{ + proto, + protocol::{self, MAX_MESSAGE_SIZE}, +}; + pub(crate) async fn handle_open_circuit(io: Stream) -> Result { let mut substream = Framed::new(io, quick_protobuf_codec::Codec::new(MAX_MESSAGE_SIZE)); diff --git a/protocols/relay/src/protocol/outbound_hop.rs b/protocols/relay/src/protocol/outbound_hop.rs index b349f8848be..216c6d115bf 100644 --- a/protocols/relay/src/protocol/outbound_hop.rs +++ b/protocols/relay/src/protocol/outbound_hop.rs @@ -18,22 +18,23 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use std::io; -use std::time::Duration; +use std::{io, time::Duration}; use asynchronous_codec::{Framed, FramedParts}; use bytes::Bytes; use futures::prelude::*; use futures_timer::Delay; -use thiserror::Error; -use web_time::SystemTime; - use libp2p_core::Multiaddr; use libp2p_identity::PeerId; use libp2p_swarm::Stream; +use thiserror::Error; +use web_time::SystemTime; -use crate::protocol::{Limit, MAX_MESSAGE_SIZE}; -use crate::{proto, HOP_PROTOCOL_NAME}; +use crate::{ + proto, + protocol::{Limit, MAX_MESSAGE_SIZE}, + HOP_PROTOCOL_NAME, +}; #[derive(Debug, Error)] pub enum ConnectError { diff --git a/protocols/relay/src/protocol/outbound_stop.rs b/protocols/relay/src/protocol/outbound_stop.rs index 525ebc10821..272aa24eef6 100644 --- a/protocols/relay/src/protocol/outbound_stop.rs +++ b/protocols/relay/src/protocol/outbound_stop.rs @@ -18,19 +18,16 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use std::io; -use std::time::Duration; +use std::{io, time::Duration}; use asynchronous_codec::{Framed, FramedParts}; use bytes::Bytes; use futures::prelude::*; -use thiserror::Error; - use libp2p_identity::PeerId; use libp2p_swarm::Stream; +use thiserror::Error; -use crate::protocol::MAX_MESSAGE_SIZE; -use crate::{proto, STOP_PROTOCOL_NAME}; +use crate::{proto, protocol::MAX_MESSAGE_SIZE, STOP_PROTOCOL_NAME}; #[derive(Debug, Error)] pub enum Error { diff --git a/protocols/relay/tests/lib.rs b/protocols/relay/tests/lib.rs index 2b28d5a50cd..81cb77c427a 100644 --- a/protocols/relay/tests/lib.rs +++ b/protocols/relay/tests/lib.rs @@ -18,26 +18,28 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use futures::executor::LocalPool; -use futures::future::FutureExt; -use futures::io::{AsyncRead, AsyncWrite}; -use futures::stream::StreamExt; -use futures::task::Spawn; -use libp2p_core::multiaddr::{Multiaddr, Protocol}; -use libp2p_core::muxing::StreamMuxerBox; -use libp2p_core::transport::choice::OrTransport; -use libp2p_core::transport::{Boxed, MemoryTransport, Transport}; -use libp2p_core::upgrade; +use std::{error::Error, time::Duration}; + +use futures::{ + executor::LocalPool, + future::FutureExt, + io::{AsyncRead, AsyncWrite}, + stream::StreamExt, + task::Spawn, +}; +use libp2p_core::{ + multiaddr::{Multiaddr, Protocol}, + muxing::StreamMuxerBox, + transport::{choice::OrTransport, Boxed, MemoryTransport, Transport}, + upgrade, +}; use libp2p_identity as identity; use libp2p_identity::PeerId; use libp2p_ping as ping; use libp2p_plaintext as plaintext; use libp2p_relay as relay; -use libp2p_swarm::dial_opts::DialOpts; -use libp2p_swarm::{Config, DialError, NetworkBehaviour, Swarm, SwarmEvent}; +use libp2p_swarm::{dial_opts::DialOpts, Config, DialError, NetworkBehaviour, Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt; -use std::error::Error; -use std::time::Duration; use tracing_subscriber::EnvFilter; #[test] @@ -355,7 +357,8 @@ fn propagate_connect_error_to_unknown_peer_to_dialer() { let mut src = build_client(); - let dst_peer_id = PeerId::random(); // We don't have a destination peer in this test, so the CONNECT request will fail. + let dst_peer_id = PeerId::random(); // We don't have a destination peer in this test, so the CONNECT request will + // fail. let dst_addr = relay_addr .with(Protocol::P2p(relay_peer_id)) .with(Protocol::P2pCircuit) @@ -414,7 +417,8 @@ fn reuse_connection() { .with(Protocol::P2p(relay_peer_id)) .with(Protocol::P2pCircuit); - // To reuse the connection, we need to ensure it is not shut down due to being idle. + // To reuse the connection, we need to ensure it is not shut down due to being + // idle. 
let mut client = build_client_with_config( Config::with_async_std_executor().with_idle_connection_timeout(Duration::from_secs(1)), ); diff --git a/protocols/rendezvous/src/client.rs b/protocols/rendezvous/src/client.rs index a794252ff0b..7412d157429 100644 --- a/protocols/rendezvous/src/client.rs +++ b/protocols/rendezvous/src/client.rs @@ -18,24 +18,42 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::codec::Message::*; -use crate::codec::{Cookie, ErrorCode, Message, Namespace, NewRegistration, Registration, Ttl}; -use futures::future::BoxFuture; -use futures::future::FutureExt; -use futures::stream::FuturesUnordered; -use futures::stream::StreamExt; -use libp2p_core::transport::PortUse; -use libp2p_core::{Endpoint, Multiaddr, PeerRecord}; +use std::{ + collections::HashMap, + iter, + task::{Context, Poll}, + time::Duration, +}; + +use futures::{ + future::{BoxFuture, FutureExt}, + stream::{FuturesUnordered, StreamExt}, +}; +use libp2p_core::{transport::PortUse, Endpoint, Multiaddr, PeerRecord}; use libp2p_identity::{Keypair, PeerId, SigningError}; use libp2p_request_response::{OutboundRequestId, ProtocolSupport}; use libp2p_swarm::{ - ConnectionDenied, ConnectionId, ExternalAddresses, FromSwarm, NetworkBehaviour, THandler, - THandlerInEvent, THandlerOutEvent, ToSwarm, + ConnectionDenied, + ConnectionId, + ExternalAddresses, + FromSwarm, + NetworkBehaviour, + THandler, + THandlerInEvent, + THandlerOutEvent, + ToSwarm, +}; + +use crate::codec::{ + Cookie, + ErrorCode, + Message, + Message::*, + Namespace, + NewRegistration, + Registration, + Ttl, }; -use std::collections::HashMap; -use std::iter; -use std::task::{Context, Poll}; -use std::time::Duration; pub struct Behaviour { inner: libp2p_request_response::Behaviour, @@ -47,12 +65,15 @@ pub struct Behaviour { /// Hold addresses of all peers that we have discovered so far. 
/// - /// Storing these internally allows us to assist the [`libp2p_swarm::Swarm`] in dialing by returning addresses from [`NetworkBehaviour::handle_pending_outbound_connection`]. + /// Storing these internally allows us to assist the [`libp2p_swarm::Swarm`] + /// in dialing by returning addresses from + /// [`NetworkBehaviour::handle_pending_outbound_connection`]. discovered_peers: HashMap<(PeerId, Namespace), Vec>, registered_namespaces: HashMap<(PeerId, Namespace), Ttl>, - /// Tracks the expiry of registrations that we have discovered and stored in `discovered_peers` otherwise we have a memory leak. + /// Tracks the expiry of registrations that we have discovered and stored in + /// `discovered_peers` otherwise we have a memory leak. expiring_registrations: FuturesUnordered>, external_addresses: ExternalAddresses, @@ -79,9 +100,11 @@ impl Behaviour { } } - /// Register our external addresses in the given namespace with the given rendezvous peer. + /// Register our external addresses in the given namespace with the given + /// rendezvous peer. /// - /// External addresses are either manually added via [`libp2p_swarm::Swarm::add_external_address`] or reported + /// External addresses are either manually added via + /// [`libp2p_swarm::Swarm::add_external_address`] or reported /// by other [`NetworkBehaviour`]s via [`ToSwarm::ExternalAddrConfirmed`]. pub fn register( &mut self, @@ -105,7 +128,8 @@ impl Behaviour { Ok(()) } - /// Unregister ourselves from the given namespace with the given rendezvous peer. + /// Unregister ourselves from the given namespace with the given rendezvous + /// peer. pub fn unregister(&mut self, namespace: Namespace, rendezvous_node: PeerId) { self.registered_namespaces .retain(|(rz_node, ns), _| rz_node.ne(&rendezvous_node) && ns.ne(&namespace)); @@ -119,8 +143,8 @@ impl Behaviour { /// If desired, the registrations can be filtered by a namespace. /// If no namespace is given, peers from all namespaces will be returned. 
/// A successfully discovery returns a cookie within [`Event::Discovered`]. - /// Such a cookie can be used to only fetch the _delta_ of registrations since - /// the cookie was acquired. + /// Such a cookie can be used to only fetch the _delta_ of registrations + /// since the cookie was acquired. pub fn discover( &mut self, namespace: Option, @@ -153,7 +177,8 @@ pub enum RegisterError { #[derive(Debug)] #[allow(clippy::large_enum_variant)] pub enum Event { - /// We successfully discovered other nodes with using the contained rendezvous node. + /// We successfully discovered other nodes with using the contained + /// rendezvous node. Discovered { rendezvous_node: PeerId, registrations: Vec, diff --git a/protocols/rendezvous/src/codec.rs b/protocols/rendezvous/src/codec.rs index cad3688e00b..0aa4c6c0c8c 100644 --- a/protocols/rendezvous/src/codec.rs +++ b/protocols/rendezvous/src/codec.rs @@ -18,16 +18,17 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::DEFAULT_TTL; +use std::{fmt, io}; + use async_trait::async_trait; -use asynchronous_codec::{BytesMut, Decoder, Encoder}; -use asynchronous_codec::{FramedRead, FramedWrite}; +use asynchronous_codec::{BytesMut, Decoder, Encoder, FramedRead, FramedWrite}; use futures::{AsyncRead, AsyncWrite, SinkExt, StreamExt}; use libp2p_core::{peer_record, signed_envelope, PeerRecord, SignedEnvelope}; use libp2p_swarm::StreamProtocol; use quick_protobuf_codec::Codec as ProtobufCodec; use rand::RngCore; -use std::{fmt, io}; + +use crate::DEFAULT_TTL; pub type Ttl = u64; pub(crate) type Limit = u64; @@ -54,7 +55,10 @@ pub struct Namespace(String); impl Namespace { /// Creates a new [`Namespace`] from a static string. /// - /// This will panic if the namespace is too long. 
We accepting panicking in this case because we are enforcing a `static lifetime which means this value can only be a constant in the program and hence we hope the developer checked that it is of an acceptable length.
+    /// This will panic if the namespace is too long. We accept panicking in
+    /// this case because we are enforcing a `'static` lifetime which means this
+    /// value can only be a constant in the program and hence we hope the
+    /// developer checked that it is of an acceptable length.
     pub fn from_static(value: &'static str) -> Self {
         if value.len() > crate::MAX_NAMESPACE {
             panic!("Namespace '{value}' is too long!")
@@ -109,7 +113,8 @@ pub struct Cookie {
 impl Cookie {
     /// Construct a new [`Cookie`] for a given namespace.
     ///
-    /// This cookie will only be valid for subsequent DISCOVER requests targeting the same namespace.
+    /// This cookie will only be valid for subsequent DISCOVER requests
+    /// targeting the same namespace.
     pub fn for_namespace(namespace: Namespace) -> Self {
         Self {
             id: rand::thread_rng().next_u64(),
@@ -117,7 +122,8 @@ impl Cookie {
         }
     }
 
-    /// Construct a new [`Cookie`] for a DISCOVER request that inquires about all namespaces.
+    /// Construct a new [`Cookie`] for a DISCOVER request that inquires about
+    /// all namespaces.
     pub fn for_all_namespaces() -> Self {
         Self {
             id: rand::random(),
diff --git a/protocols/rendezvous/src/lib.rs b/protocols/rendezvous/src/lib.rs
index 7c607085f20..221178728af 100644
--- a/protocols/rendezvous/src/lib.rs
+++ b/protocols/rendezvous/src/lib.rs
@@ -22,9 +22,10 @@
 
 #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
 
-pub use self::codec::{Cookie, ErrorCode, Namespace, NamespaceTooLong, Registration, Ttl};
 use libp2p_swarm::StreamProtocol;
 
+pub use self::codec::{Cookie, ErrorCode, Namespace, NamespaceTooLong, Registration, Ttl};
+
 mod codec;
 
 /// If unspecified, rendezvous nodes should assume a TTL of 2h.
diff --git a/protocols/rendezvous/src/server.rs b/protocols/rendezvous/src/server.rs index 45a525d9573..86b82c263d6 100644 --- a/protocols/rendezvous/src/server.rs +++ b/protocols/rendezvous/src/server.rs @@ -18,25 +18,34 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::codec::{Cookie, ErrorCode, Message, Namespace, NewRegistration, Registration, Ttl}; -use crate::{MAX_TTL, MIN_TTL}; +use std::{ + collections::{HashMap, HashSet}, + iter, + task::{ready, Context, Poll}, + time::Duration, +}; + use bimap::BiMap; -use futures::future::BoxFuture; -use futures::stream::FuturesUnordered; -use futures::{FutureExt, StreamExt}; -use libp2p_core::transport::PortUse; -use libp2p_core::{Endpoint, Multiaddr}; +use futures::{future::BoxFuture, stream::FuturesUnordered, FutureExt, StreamExt}; +use libp2p_core::{transport::PortUse, Endpoint, Multiaddr}; use libp2p_identity::PeerId; use libp2p_request_response::ProtocolSupport; -use libp2p_swarm::behaviour::FromSwarm; use libp2p_swarm::{ - ConnectionDenied, ConnectionId, NetworkBehaviour, THandler, THandlerInEvent, THandlerOutEvent, + behaviour::FromSwarm, + ConnectionDenied, + ConnectionId, + NetworkBehaviour, + THandler, + THandlerInEvent, + THandlerOutEvent, ToSwarm, }; -use std::collections::{HashMap, HashSet}; -use std::iter; -use std::task::{ready, Context, Poll}; -use std::time::Duration; + +use crate::{ + codec::{Cookie, ErrorCode, Message, Namespace, NewRegistration, Registration, Ttl}, + MAX_TTL, + MIN_TTL, +}; pub struct Behaviour { inner: libp2p_request_response::Behaviour, @@ -534,10 +543,9 @@ pub struct CookieNamespaceMismatch; #[cfg(test)] mod tests { - use web_time::SystemTime; - use libp2p_core::PeerRecord; use libp2p_identity as identity; + use web_time::SystemTime; use super::*; @@ -690,9 +698,10 @@ mod tests { registrations.no_event_for(3).await } - /// FuturesUnordered stop polling for ready futures when poll_next() is called until a None - /// 
value is returned. To prevent the next_expiry future from going to "sleep", next_expiry
-    /// is initialised with a future that always returns pending. This test ensures that
+    /// FuturesUnordered stops polling for ready futures when poll_next() is
+    /// called until a None value is returned. To prevent the next_expiry
+    /// future from going to "sleep", next_expiry is initialised with a
+    /// future that always returns pending. This test ensures that
     /// FuturesUnordered does not stop polling for ready futures.
     #[tokio::test]
     async fn given_all_registrations_expired_then_successfully_handle_new_registration_and_expiry()
@@ -785,14 +794,16 @@ mod tests {
         futures::future::poll_fn(|cx| self.poll(cx)).await
     }
 
-        /// Polls [`Registrations`] for `seconds` and panics if it returns a event during this time.
+        /// Polls [`Registrations`] for `seconds` and panics if it returns an
+        /// event during this time.
         async fn no_event_for(&mut self, seconds: u64) {
             tokio::time::timeout(Duration::from_secs(seconds), self.next_event())
                 .await
                 .unwrap_err();
         }
 
-        /// Polls [`Registrations`] for at most `seconds` and panics if doesn't return an event within that time.
+        /// Polls [`Registrations`] for at most `seconds` and panics if it
+        /// doesn't return an event within that time.
         async fn next_event_in_at_most(&mut self, seconds: u64) -> ExpiredRegistration {
             tokio::time::timeout(Duration::from_secs(seconds), self.next_event())
                 .await
diff --git a/protocols/rendezvous/tests/rendezvous.rs b/protocols/rendezvous/tests/rendezvous.rs
index d9200780ece..ed961933cab 100644
--- a/protocols/rendezvous/tests/rendezvous.rs
+++ b/protocols/rendezvous/tests/rendezvous.rs
@@ -18,16 +18,15 @@
 // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 // DEALINGS IN THE SOFTWARE.
-use futures::stream::FuturesUnordered;
-use futures::StreamExt;
-use libp2p_core::multiaddr::Protocol;
-use libp2p_core::Multiaddr;
+use std::time::Duration;
+
+use futures::{stream::FuturesUnordered, StreamExt};
+use libp2p_core::{multiaddr::Protocol, Multiaddr};
 use libp2p_identity as identity;
 use libp2p_rendezvous as rendezvous;
 use libp2p_rendezvous::client::RegisterError;
 use libp2p_swarm::{DialError, Swarm, SwarmEvent};
 use libp2p_swarm_test::SwarmExt;
-use std::time::Duration;
 use tracing_subscriber::EnvFilter;
 
 #[tokio::test]
@@ -471,9 +470,12 @@ async fn new_combined_node() -> Swarm {
 }
 
 async fn new_impersonating_client() -> Swarm {
-    // In reality, if Eve were to try and fake someones identity, she would obviously only know the public key.
-    // Due to the type-safe API of the `Rendezvous` behaviour and `PeerRecord`, we actually cannot construct a bad `PeerRecord` (i.e. one that is claims to be someone else).
-    // As such, the best we can do is hand eve a completely different keypair from what she is using to authenticate her connection.
+    // In reality, if Eve were to try and fake someone's identity, she would
+    // obviously only know the public key. Due to the type-safe API of the
+    // `Rendezvous` behaviour and `PeerRecord`, we actually cannot construct a bad
+    // `PeerRecord` (i.e. one that claims to be someone else). As such, the
+    // best we can do is hand Eve a completely different keypair from what she is
+    // using to authenticate her connection.
let someone_else = identity::Keypair::generate_ed25519(); let mut eve = Swarm::new_ephemeral(move |_| rendezvous::client::Behaviour::new(someone_else)); eve.listen().with_memory_addr_external().await; diff --git a/protocols/request-response/src/cbor.rs b/protocols/request-response/src/cbor.rs index a27d069e758..744d94cb961 100644 --- a/protocols/request-response/src/cbor.rs +++ b/protocols/request-response/src/cbor.rs @@ -37,19 +37,23 @@ /// } /// /// let behaviour = cbor::Behaviour::::new( -/// [(StreamProtocol::new("/my-cbor-protocol"), ProtocolSupport::Full)], -/// request_response::Config::default() +/// [( +/// StreamProtocol::new("/my-cbor-protocol"), +/// ProtocolSupport::Full, +/// )], +/// request_response::Config::default(), /// ); /// ``` pub type Behaviour = crate::Behaviour>; mod codec { + use std::{collections::TryReserveError, convert::Infallible, io, marker::PhantomData}; + use async_trait::async_trait; use cbor4ii::core::error::DecodeError; use futures::prelude::*; use libp2p_swarm::StreamProtocol; use serde::{de::DeserializeOwned, Serialize}; - use std::{collections::TryReserveError, convert::Infallible, io, marker::PhantomData}; /// Max request size in bytes const REQUEST_SIZE_MAXIMUM: u64 = 1024 * 1024; @@ -168,13 +172,13 @@ mod codec { #[cfg(test)] mod tests { - use crate::cbor::codec::Codec; - use crate::Codec as _; use futures::AsyncWriteExt; use futures_ringbuf::Endpoint; use libp2p_swarm::StreamProtocol; use serde::{Deserialize, Serialize}; + use crate::{cbor::codec::Codec, Codec as _}; + #[async_std::test] async fn test_codec() { let expected_request = TestRequest { diff --git a/protocols/request-response/src/codec.rs b/protocols/request-response/src/codec.rs index d26b729acae..d396a75ad7b 100644 --- a/protocols/request-response/src/codec.rs +++ b/protocols/request-response/src/codec.rs @@ -18,9 +18,10 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
+use std::io; + use async_trait::async_trait; use futures::prelude::*; -use std::io; /// A `Codec` defines the request and response types /// for a request-response [`Behaviour`](crate::Behaviour) protocol or diff --git a/protocols/request-response/src/handler.rs b/protocols/request-response/src/handler.rs index dbd7a0708ce..801db228879 100644 --- a/protocols/request-response/src/handler.rs +++ b/protocols/request-response/src/handler.rs @@ -20,26 +20,10 @@ pub(crate) mod protocol; -pub use protocol::ProtocolSupport; - -use crate::codec::Codec; -use crate::handler::protocol::Protocol; -use crate::{InboundRequestId, OutboundRequestId, EMPTY_QUEUE_SHRINK_THRESHOLD}; - -use futures::channel::mpsc; -use futures::{channel::oneshot, prelude::*}; -use libp2p_swarm::handler::{ - ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound, - ListenUpgradeError, -}; -use libp2p_swarm::{ - handler::{ConnectionHandler, ConnectionHandlerEvent, StreamUpgradeError}, - SubstreamProtocol, -}; -use smallvec::SmallVec; use std::{ collections::VecDeque, - fmt, io, + fmt, + io, sync::{ atomic::{AtomicU64, Ordering}, Arc, @@ -48,7 +32,36 @@ use std::{ time::Duration, }; -/// A connection handler for a request response [`Behaviour`](super::Behaviour) protocol. +use futures::{ + channel::{mpsc, oneshot}, + prelude::*, +}; +use libp2p_swarm::{ + handler::{ + ConnectionEvent, + ConnectionHandler, + ConnectionHandlerEvent, + DialUpgradeError, + FullyNegotiatedInbound, + FullyNegotiatedOutbound, + ListenUpgradeError, + StreamUpgradeError, + }, + SubstreamProtocol, +}; +pub use protocol::ProtocolSupport; +use smallvec::SmallVec; + +use crate::{ + codec::Codec, + handler::protocol::Protocol, + InboundRequestId, + OutboundRequestId, + EMPTY_QUEUE_SHRINK_THRESHOLD, +}; + +/// A connection handler for a request response [`Behaviour`](super::Behaviour) +/// protocol. 
pub struct Handler where TCodec: Codec, @@ -59,7 +72,8 @@ where codec: TCodec, /// Queue of events to emit in `poll()`. pending_events: VecDeque>, - /// Outbound upgrades waiting to be emitted as an `OutboundSubstreamRequest`. + /// Outbound upgrades waiting to be emitted as an + /// `OutboundSubstreamRequest`. pending_outbound: VecDeque>, requested_outbound: VecDeque>, @@ -69,7 +83,8 @@ where TCodec::Request, oneshot::Sender, )>, - /// The [`mpsc::Sender`] for the above receiver. Cloned for each inbound request. + /// The [`mpsc::Sender`] for the above receiver. Cloned for each inbound + /// request. inbound_sender: mpsc::Sender<( InboundRequestId, TCodec::Request, @@ -159,9 +174,10 @@ where } }; - // Inbound connections are reported to the upper layer from within the above task, - // so by failing to schedule it, it means the upper layer will never know about the - // inbound request. Because of that we do not report any inbound failure. + // Inbound connections are reported to the upper layer from within the above + // task, so by failing to schedule it, it means the upper layer will + // never know about the inbound request. Because of that we do not + // report any inbound failure. if self .worker_streams .try_push(RequestId::Inbound(request_id), recv.boxed()) diff --git a/protocols/request-response/src/json.rs b/protocols/request-response/src/json.rs index 85e78e7ddda..ce722b1522b 100644 --- a/protocols/request-response/src/json.rs +++ b/protocols/request-response/src/json.rs @@ -18,7 +18,8 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -/// A request-response behaviour using [`serde_json`] for serializing and deserializing the messages. +/// A request-response behaviour using [`serde_json`] for serializing and +/// deserializing the messages. 
/// /// # Example /// @@ -36,18 +37,22 @@ /// } /// /// let behaviour = json::Behaviour::::new( -/// [(StreamProtocol::new("/my-json-protocol"), ProtocolSupport::Full)], -/// request_response::Config::default() +/// [( +/// StreamProtocol::new("/my-json-protocol"), +/// ProtocolSupport::Full, +/// )], +/// request_response::Config::default(), /// ); /// ``` pub type Behaviour = crate::Behaviour>; mod codec { + use std::{io, marker::PhantomData}; + use async_trait::async_trait; use futures::prelude::*; use libp2p_swarm::StreamProtocol; use serde::{de::DeserializeOwned, Serialize}; - use std::{io, marker::PhantomData}; /// Max request size in bytes const REQUEST_SIZE_MAXIMUM: u64 = 1024 * 1024; @@ -140,12 +145,13 @@ mod codec { #[cfg(test)] mod tests { - use crate::Codec; use futures::AsyncWriteExt; use futures_ringbuf::Endpoint; use libp2p_swarm::StreamProtocol; use serde::{Deserialize, Serialize}; + use crate::Codec; + #[async_std::test] async fn test_codec() { let expected_request = TestRequest { diff --git a/protocols/request-response/src/lib.rs b/protocols/request-response/src/lib.rs index e627f5668ff..ef7a52a68f2 100644 --- a/protocols/request-response/src/lib.rs +++ b/protocols/request-response/src/lib.rs @@ -41,8 +41,8 @@ //! //! ## Predefined codecs //! -//! In case your message types implement [`serde::Serialize`] and [`serde::Deserialize`], -//! you can use two predefined behaviours: +//! In case your message types implement [`serde::Serialize`] and +//! [`serde::Deserialize`], you can use two predefined behaviours: //! //! - [`cbor::Behaviour`] for CBOR-encoded messages //! 
- [`json::Behaviour`] for JSON-encoded messages @@ -73,28 +73,38 @@ mod handler; #[cfg(feature = "json")] pub mod json; -pub use codec::Codec; -pub use handler::ProtocolSupport; +use std::{ + collections::{HashMap, HashSet, VecDeque}, + fmt, + io, + sync::{atomic::AtomicU64, Arc}, + task::{Context, Poll}, + time::Duration, +}; -use crate::handler::OutboundMessage; +pub use codec::Codec; use futures::channel::oneshot; use handler::Handler; +pub use handler::ProtocolSupport; use libp2p_core::{transport::PortUse, ConnectedPoint, Endpoint, Multiaddr}; use libp2p_identity::PeerId; use libp2p_swarm::{ behaviour::{AddressChange, ConnectionClosed, DialFailure, FromSwarm}, dial_opts::DialOpts, - ConnectionDenied, ConnectionHandler, ConnectionId, NetworkBehaviour, NotifyHandler, - PeerAddresses, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, + ConnectionDenied, + ConnectionHandler, + ConnectionId, + NetworkBehaviour, + NotifyHandler, + PeerAddresses, + THandler, + THandlerInEvent, + THandlerOutEvent, + ToSwarm, }; use smallvec::SmallVec; -use std::{ - collections::{HashMap, HashSet, VecDeque}, - fmt, io, - sync::{atomic::AtomicU64, Arc}, - task::{Context, Poll}, - time::Duration, -}; + +use crate::handler::OutboundMessage; /// An inbound request or response. #[derive(Debug)] @@ -326,7 +336,8 @@ impl Config { self } - /// Sets the upper bound for the number of concurrent inbound + outbound streams. + /// Sets the upper bound for the number of concurrent inbound + outbound + /// streams. pub fn with_max_concurrent_streams(mut self, num_streams: usize) -> Self { self.max_concurrent_streams = num_streams; self @@ -353,8 +364,8 @@ where /// Pending events to return from `poll`. pending_events: VecDeque, OutboundMessage>>, - /// The currently connected peers, their pending outbound and inbound responses and their known, - /// reachable addresses, if any. 
+ /// The currently connected peers, their pending outbound and inbound + /// responses and their known, reachable addresses, if any. connected: HashMap>, /// Externally managed addresses via `add_address` and `remove_address`. addresses: PeerAddresses, @@ -367,7 +378,8 @@ impl Behaviour where TCodec: Codec + Default + Clone + Send + 'static, { - /// Creates a new `Behaviour` for the given protocols and configuration, using [`Default`] to construct the codec. + /// Creates a new `Behaviour` for the given protocols and configuration, + /// using [`Default`] to construct the codec. pub fn new(protocols: I, cfg: Config) -> Self where I: IntoIterator, @@ -475,7 +487,8 @@ where self.addresses.add(*peer, address) } - /// Removes an address of a peer previously added via [`Behaviour::add_address`]. + /// Removes an address of a peer previously added via + /// [`Behaviour::add_address`]. #[deprecated(note = "Will be removed with the next breaking release and won't be replaced.")] pub fn remove_address(&mut self, peer: &PeerId, address: &Multiaddr) { self.addresses.remove(peer, address); @@ -562,8 +575,8 @@ where /// Remove pending outbound response for the given peer and connection. /// /// Returns `true` if the provided connection to the given peer is still - /// alive and the [`OutboundRequestId`] was previously present and is now removed. - /// Returns `false` otherwise. + /// alive and the [`OutboundRequestId`] was previously present and is now + /// removed. Returns `false` otherwise. fn remove_pending_outbound_response( &mut self, peer: &PeerId, @@ -578,8 +591,8 @@ where /// Remove pending inbound response for the given peer and connection. /// /// Returns `true` if the provided connection to the given peer is still - /// alive and the [`InboundRequestId`] was previously present and is now removed. - /// Returns `false` otherwise. + /// alive and the [`InboundRequestId`] was previously present and is now + /// removed. Returns `false` otherwise. 
fn remove_pending_inbound_response( &mut self, peer: &PeerId, @@ -693,7 +706,8 @@ where } } - /// Preloads a new [`Handler`] with requests that are waiting to be sent to the newly connected peer. + /// Preloads a new [`Handler`] with requests that are waiting to be sent to + /// the newly connected peer. fn preload_new_handler( &mut self, handler: &mut Handler, @@ -848,7 +862,10 @@ where .push_back(ToSwarm::GenerateEvent(Event::Message { peer, message })); } None => { - tracing::debug!("Connection ({connection}) closed after `Event::Request` ({request_id}) has been emitted."); + tracing::debug!( + "Connection ({connection}) closed after `Event::Request` ({request_id}) \ + has been emitted." + ); } }, handler::Event::ResponseSent(request_id) => { @@ -946,7 +963,10 @@ where })); } else { // This happens when `read_request` fails. - tracing::debug!("Inbound failure is reported for an unknown request_id ({request_id}): {error}"); + tracing::debug!( + "Inbound failure is reported for an unknown request_id ({request_id}): \ + {error}" + ); } } } diff --git a/protocols/request-response/tests/error_reporting.rs b/protocols/request-response/tests/error_reporting.rs index 19f323e169f..5de44e346e7 100644 --- a/protocols/request-response/tests/error_reporting.rs +++ b/protocols/request-response/tests/error_reporting.rs @@ -1,3 +1,5 @@ +use std::{io, iter, pin::pin, time::Duration}; + use anyhow::{bail, Result}; use async_std::task::sleep; use async_trait::async_trait; @@ -8,11 +10,13 @@ use libp2p_request_response::ProtocolSupport; use libp2p_swarm::{StreamProtocol, Swarm}; use libp2p_swarm_test::SwarmExt; use request_response::{ - Codec, InboundFailure, InboundRequestId, OutboundFailure, OutboundRequestId, ResponseChannel, + Codec, + InboundFailure, + InboundRequestId, + OutboundFailure, + OutboundRequestId, + ResponseChannel, }; -use std::pin::pin; -use std::time::Duration; -use std::{io, iter}; use tracing_subscriber::EnvFilter; #[async_std::test] @@ -40,7 +44,8 @@ async fn 
report_outbound_failure_on_read_response() { assert_eq!(peer, peer2_id); assert_eq!(req_id_done, req_id); - // Keep the connection alive, otherwise swarm2 may receive `ConnectionClosed` instead + // Keep the connection alive, otherwise swarm2 may receive `ConnectionClosed` + // instead wait_no_events(&mut swarm1).await; }; @@ -84,7 +89,8 @@ async fn report_outbound_failure_on_write_request() { swarm2.connect(&mut swarm1).await; // Expects no events because `Event::Request` is produced after `read_request`. - // Keep the connection alive, otherwise swarm2 may receive `ConnectionClosed` instead. + // Keep the connection alive, otherwise swarm2 may receive `ConnectionClosed` + // instead. let server_task = wait_no_events(&mut swarm1); // Expects OutboundFailure::Io failure with `FailOnWriteRequest` error. @@ -140,7 +146,8 @@ async fn report_outbound_timeout_on_read_response() { assert_eq!(peer, peer2_id); assert_eq!(req_id_done, req_id); - // Keep the connection alive, otherwise swarm2 may receive `ConnectionClosed` instead + // Keep the connection alive, otherwise swarm2 may receive `ConnectionClosed` + // instead wait_no_events(&mut swarm1).await; }; @@ -183,7 +190,8 @@ async fn report_outbound_failure_on_max_streams() { .behaviour_mut() .send_request(&peer2_id, Action::FailOnMaxStreams); - // Keep the connection alive, otherwise swarm2 may receive `ConnectionClosed` instead. + // Keep the connection alive, otherwise swarm2 may receive `ConnectionClosed` + // instead. wait_no_events(&mut swarm1).await; }; @@ -226,7 +234,8 @@ async fn report_inbound_failure_on_read_request() { swarm2.connect(&mut swarm1).await; // Expects no events because `Event::Request` is produced after `read_request`. - // Keep the connection alive, otherwise swarm2 may receive `ConnectionClosed` instead. + // Keep the connection alive, otherwise swarm2 may receive `ConnectionClosed` + // instead. 
let server_task = wait_no_events(&mut swarm1); // Expects io::ErrorKind::UnexpectedEof @@ -300,8 +309,8 @@ async fn report_inbound_failure_on_write_response() { match error { OutboundFailure::ConnectionClosed => { - // ConnectionClosed is allowed here because we mainly test the behavior - // of `server_task`. + // ConnectionClosed is allowed here because we mainly test the + // behavior of `server_task`. } OutboundFailure::Io(e) if e.kind() == io::ErrorKind::UnexpectedEof => {} e => panic!("Unexpected error: {e:?}"), @@ -357,8 +366,8 @@ async fn report_inbound_timeout_on_write_response() { match error { OutboundFailure::ConnectionClosed => { - // ConnectionClosed is allowed here because we mainly test the behavior - // of `server_task`. + // ConnectionClosed is allowed here because we mainly test the + // behavior of `server_task`. } OutboundFailure::Io(e) if e.kind() == io::ErrorKind::UnexpectedEof => {} e => panic!("Unexpected error: {e:?}"), diff --git a/protocols/request-response/tests/peer_address.rs b/protocols/request-response/tests/peer_address.rs index 0ed7ffe5551..603e2d09dc0 100644 --- a/protocols/request-response/tests/peer_address.rs +++ b/protocols/request-response/tests/peer_address.rs @@ -1,10 +1,11 @@ +use std::iter; + use libp2p_core::ConnectedPoint; use libp2p_request_response as request_response; use libp2p_request_response::ProtocolSupport; use libp2p_swarm::{StreamProtocol, Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt; use serde::{Deserialize, Serialize}; -use std::iter; use tracing_subscriber::EnvFilter; #[async_std::test] diff --git a/protocols/request-response/tests/ping.rs b/protocols/request-response/tests/ping.rs index 827afae249c..b7cd7618b78 100644 --- a/protocols/request-response/tests/ping.rs +++ b/protocols/request-response/tests/ping.rs @@ -20,6 +20,8 @@ //! Integration tests for the `Behaviour`. 
+use std::{io, iter}; + use futures::prelude::*; use libp2p_identity::PeerId; use libp2p_request_response as request_response; @@ -28,7 +30,6 @@ use libp2p_swarm::{StreamProtocol, Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt; use rand::Rng; use serde::{Deserialize, Serialize}; -use std::{io, iter}; use tracing_subscriber::EnvFilter; #[async_std::test] @@ -236,11 +237,12 @@ async fn emits_inbound_connection_closed_failure() { } } -/// We expect the substream to be properly closed when response channel is dropped. -/// Since the ping protocol used here expects a response, the sender considers this -/// early close as a protocol violation which results in the connection being closed. -/// If the substream were not properly closed when dropped, the sender would instead -/// run into a timeout waiting for the response. +/// We expect the substream to be properly closed when response channel is +/// dropped. Since the ping protocol used here expects a response, the sender +/// considers this early close as a protocol violation which results in the +/// connection being closed. If the substream were not properly closed when +/// dropped, the sender would instead run into a timeout waiting for the +/// response. 
#[async_std::test] #[cfg(feature = "cbor")] async fn emits_inbound_connection_closed_if_channel_is_dropped() { diff --git a/protocols/stream/src/behaviour.rs b/protocols/stream/src/behaviour.rs index e72af8fbfce..0f383a9e226 100644 --- a/protocols/stream/src/behaviour.rs +++ b/protocols/stream/src/behaviour.rs @@ -8,11 +8,22 @@ use futures::{channel::mpsc, StreamExt}; use libp2p_core::{transport::PortUse, Endpoint, Multiaddr}; use libp2p_identity::PeerId; use libp2p_swarm::{ - self as swarm, dial_opts::DialOpts, ConnectionDenied, ConnectionId, FromSwarm, - NetworkBehaviour, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, + self as swarm, + dial_opts::DialOpts, + ConnectionDenied, + ConnectionId, + FromSwarm, + NetworkBehaviour, + THandler, + THandlerInEvent, + THandlerOutEvent, + ToSwarm, }; use swarm::{ - behaviour::ConnectionEstablished, dial_opts::PeerCondition, ConnectionClosed, DialError, + behaviour::ConnectionEstablished, + dial_opts::PeerCondition, + ConnectionClosed, + DialError, DialFailure, }; diff --git a/protocols/stream/src/control.rs b/protocols/stream/src/control.rs index 036d285b2a3..7f59d409564 100644 --- a/protocols/stream/src/control.rs +++ b/protocols/stream/src/control.rs @@ -6,17 +6,18 @@ use std::{ task::{Context, Poll}, }; -use crate::AlreadyRegistered; -use crate::{handler::NewStream, shared::Shared}; - use futures::{ channel::{mpsc, oneshot}, - SinkExt as _, StreamExt as _, + SinkExt as _, + StreamExt as _, }; use libp2p_identity::PeerId; use libp2p_swarm::{Stream, StreamProtocol}; -/// A (remote) control for opening new streams and registration of inbound protocols. +use crate::{handler::NewStream, shared::Shared, AlreadyRegistered}; + +/// A (remote) control for opening new streams and registration of inbound +/// protocols. /// /// A [`Control`] can be cloned and thus allows for concurrent access. #[derive(Clone)] @@ -31,15 +32,18 @@ impl Control { /// Attempt to open a new stream for the given protocol and peer. 
/// - /// In case we are currently not connected to the peer, we will attempt to make a new connection. + /// In case we are currently not connected to the peer, we will attempt to + /// make a new connection. /// /// ## Backpressure /// /// [`Control`]s support backpressure similarly to bounded channels: /// Each [`Control`] has a guaranteed slot for internal messages. - /// A single control will always open one stream at a time which is enforced by requiring `&mut self`. + /// A single control will always open one stream at a time which is enforced + /// by requiring `&mut self`. /// - /// This backpressure mechanism breaks if you clone [`Control`]s excessively. + /// This backpressure mechanism breaks if you clone [`Control`]s + /// excessively. pub async fn open_stream( &mut self, peer: PeerId, @@ -65,7 +69,8 @@ impl Control { /// Accept inbound streams for the provided protocol. /// - /// To stop accepting streams, simply drop the returned [`IncomingStreams`] handle. + /// To stop accepting streams, simply drop the returned [`IncomingStreams`] + /// handle. pub fn accept( &mut self, protocol: StreamProtocol, diff --git a/protocols/stream/src/handler.rs b/protocols/stream/src/handler.rs index b7ec516d3b1..c46be0dc0a4 100644 --- a/protocols/stream/src/handler.rs +++ b/protocols/stream/src/handler.rs @@ -13,7 +13,9 @@ use libp2p_identity::PeerId; use libp2p_swarm::{ self as swarm, handler::{ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound}, - ConnectionHandler, Stream, StreamProtocol, + ConnectionHandler, + Stream, + StreamProtocol, }; use crate::{shared::Shared, upgrade::Upgrade, OpenStreamError}; @@ -162,7 +164,8 @@ impl ConnectionHandler for Handler { } } -/// Message from a [`Control`](crate::Control) to a [`ConnectionHandler`] to negotiate a new outbound stream. +/// Message from a [`Control`](crate::Control) to a [`ConnectionHandler`] to +/// negotiate a new outbound stream. 
#[derive(Debug)] pub(crate) struct NewStream { pub(crate) protocol: StreamProtocol, diff --git a/protocols/stream/src/shared.rs b/protocols/stream/src/shared.rs index 48aa6613d83..647b8d42184 100644 --- a/protocols/stream/src/shared.rs +++ b/protocols/stream/src/shared.rs @@ -12,9 +12,11 @@ use rand::seq::IteratorRandom as _; use crate::{handler::NewStream, AlreadyRegistered, IncomingStreams}; pub(crate) struct Shared { - /// Tracks the supported inbound protocols created via [`Control::accept`](crate::Control::accept). + /// Tracks the supported inbound protocols created via + /// [`Control::accept`](crate::Control::accept). /// - /// For each [`StreamProtocol`], we hold the [`mpsc::Sender`] corresponding to the [`mpsc::Receiver`] in [`IncomingStreams`]. + /// For each [`StreamProtocol`], we hold the [`mpsc::Sender`] corresponding + /// to the [`mpsc::Receiver`] in [`IncomingStreams`]. supported_inbound_protocols: HashMap>, connections: HashMap, @@ -25,7 +27,8 @@ pub(crate) struct Shared { /// Sender for peers we want to dial. /// - /// We manage this through a channel to avoid locks as part of [`NetworkBehaviour::poll`](libp2p_swarm::NetworkBehaviour::poll). + /// We manage this through a channel to avoid locks as part of + /// [`NetworkBehaviour::poll`](libp2p_swarm::NetworkBehaviour::poll). dial_sender: mpsc::Sender, } @@ -61,7 +64,8 @@ impl Shared { Ok(IncomingStreams::new(receiver)) } - /// Lists the protocols for which we have an active [`IncomingStreams`] instance. + /// Lists the protocols for which we have an active [`IncomingStreams`] + /// instance. 
pub(crate) fn supported_inbound_protocols(&mut self) -> Vec { self.supported_inbound_protocols .retain(|_, sender| !sender.is_closed()); diff --git a/protocols/upnp/src/behaviour.rs b/protocols/upnp/src/behaviour.rs index ee985042b68..66523fd5269 100644 --- a/protocols/upnp/src/behaviour.rs +++ b/protocols/upnp/src/behaviour.rs @@ -32,24 +32,34 @@ use std::{ time::Duration, }; -use crate::tokio::{is_addr_global, Gateway}; use futures::{channel::oneshot, Future, StreamExt}; use futures_timer::Delay; use igd_next::PortMappingProtocol; use libp2p_core::{ multiaddr, transport::{ListenerId, PortUse}, - Endpoint, Multiaddr, + Endpoint, + Multiaddr, }; use libp2p_swarm::{ - derive_prelude::PeerId, dummy, ConnectionDenied, ConnectionId, ExpiredListenAddr, FromSwarm, - NetworkBehaviour, NewListenAddr, ToSwarm, + derive_prelude::PeerId, + dummy, + ConnectionDenied, + ConnectionId, + ExpiredListenAddr, + FromSwarm, + NetworkBehaviour, + NewListenAddr, + ToSwarm, }; +use crate::tokio::{is_addr_global, Gateway}; + /// The duration in seconds of a port mapping on the gateway. const MAPPING_DURATION: u32 = 3600; -/// Renew the Mapping every half of `MAPPING_DURATION` to avoid the port being unmapped. +/// Renew the Mapping every half of `MAPPING_DURATION` to avoid the port being +/// unmapped. const MAPPING_TIMEOUT: u64 = MAPPING_DURATION as u64 / 2; /// A [`Gateway`] Request. @@ -118,7 +128,8 @@ impl Borrow for Mapping { /// Current state of a [`Mapping`]. #[derive(Debug)] enum MappingState { - /// Port mapping is inactive, will be requested or re-requested on the next iteration. + /// Port mapping is inactive, will be requested or re-requested on the next + /// iteration. Inactive, /// Port mapping/removal has been requested on the gateway. Pending, @@ -168,8 +179,8 @@ impl DerefMut for MappingList { } impl MappingList { - /// Queue for renewal the current mapped ports on the `Gateway` that are expiring, - /// and try to activate the inactive. 
+ /// Queue for renewal the current mapped ports on the `Gateway` that are + /// expiring, and try to activate the inactive. fn renew(&mut self, gateway: &mut Gateway, cx: &mut Context<'_>) { for (mapping, state) in self.iter_mut() { match state { @@ -208,8 +219,9 @@ impl MappingList { } } -/// A [`NetworkBehaviour`] for UPnP port mapping. Automatically tries to map the external port -/// to an internal address on the gateway on a [`FromSwarm::NewListenAddr`]. +/// A [`NetworkBehaviour`] for UPnP port mapping. Automatically tries to map the +/// external port to an internal address on the gateway on a +/// [`FromSwarm::NewListenAddr`]. pub struct Behaviour { /// UPnP interface state. state: GatewayState, @@ -286,8 +298,9 @@ impl NetworkBehaviour for Behaviour { match &mut self.state { GatewayState::Searching(_) => { - // As the gateway is not yet available we add the mapping with `MappingState::Inactive` - // so that when and if it becomes available we map it. + // As the gateway is not yet available we add the mapping with + // `MappingState::Inactive` so that when and if it + // becomes available we map it. self.mappings.insert( Mapping { listener_id, @@ -379,8 +392,8 @@ impl NetworkBehaviour for Behaviour { return Poll::Ready(ToSwarm::GenerateEvent(event)); } - // Loop through the gateway state so that if it changes from `Searching` to `Available` - // we poll the pending mapping requests. + // Loop through the gateway state so that if it changes from `Searching` to + // `Available` we poll the pending mapping requests. loop { match self.state { GatewayState::Searching(ref mut fut) => match Pin::new(fut).poll(cx) { @@ -521,7 +534,8 @@ impl NetworkBehaviour for Behaviour { } } -/// Extracts a [`SocketAddrV4`] and [`PortMappingProtocol`] from a given [`Multiaddr`]. +/// Extracts a [`SocketAddrV4`] and [`PortMappingProtocol`] from a given +/// [`Multiaddr`]. 
/// /// Fails if the given [`Multiaddr`] does not begin with an IP /// protocol encapsulating a TCP or UDP port. diff --git a/protocols/upnp/src/lib.rs b/protocols/upnp/src/lib.rs index 8a74d7e8f63..d7a746f78df 100644 --- a/protocols/upnp/src/lib.rs +++ b/protocols/upnp/src/lib.rs @@ -24,7 +24,6 @@ //! implements the [`libp2p_swarm::NetworkBehaviour`] trait. //! This struct will automatically try to map the ports externally to internal //! addresses on the gateway. -//! #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] diff --git a/protocols/upnp/src/tokio.rs b/protocols/upnp/src/tokio.rs index b2cad6fa5a7..df706618c6c 100644 --- a/protocols/upnp/src/tokio.rs +++ b/protocols/upnp/src/tokio.rs @@ -20,16 +20,17 @@ use std::{error::Error, net::IpAddr}; -use crate::behaviour::{GatewayEvent, GatewayRequest}; use futures::{ channel::{mpsc, oneshot}, - SinkExt, StreamExt, + SinkExt, + StreamExt, }; use igd_next::SearchOptions; pub use crate::behaviour::Behaviour; +use crate::behaviour::{GatewayEvent, GatewayRequest}; -//TODO: remove when `IpAddr::is_global` stabilizes. +// TODO: remove when `IpAddr::is_global` stabilizes. 
pub(crate) fn is_addr_global(addr: IpAddr) -> bool { match addr { IpAddr::V4(ip) => { diff --git a/rustfmt.toml b/rustfmt.toml new file mode 100644 index 00000000000..21248880b41 --- /dev/null +++ b/rustfmt.toml @@ -0,0 +1,11 @@ +# Imports +reorder_imports = true +imports_granularity = "Crate" +group_imports = "StdExternalCrate" +imports_layout = "HorizontalVertical" + +# Docs +wrap_comments = true +format_strings = true +normalize_comments = true +format_code_in_doc_comments = true \ No newline at end of file diff --git a/swarm-derive/src/lib.rs b/swarm-derive/src/lib.rs index 258c0b976c8..c6d18c8ce62 100644 --- a/swarm-derive/src/lib.rs +++ b/swarm-derive/src/lib.rs @@ -23,15 +23,15 @@ mod syn_ext; -use crate::syn_ext::RequireStrLit; use heck::ToUpperCamelCase; use proc_macro::TokenStream; use quote::quote; -use syn::punctuated::Punctuated; -use syn::{parse_macro_input, Data, DataStruct, DeriveInput, Meta, Token}; +use syn::{parse_macro_input, punctuated::Punctuated, Data, DataStruct, DeriveInput, Meta, Token}; + +use crate::syn_ext::RequireStrLit; -/// Generates a delegating `NetworkBehaviour` implementation for the struct this is used for. See -/// the trait documentation for better description. +/// Generates a delegating `NetworkBehaviour` implementation for the struct this +/// is used for. See the trait documentation for better description. #[proc_macro_derive(NetworkBehaviour, attributes(behaviour))] pub fn hello_macro_derive(input: TokenStream) -> TokenStream { let ast = parse_macro_input!(input as DeriveInput); @@ -225,10 +225,12 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> syn::Result, } -/// Parses the `value` of a key=value pair in the `#[behaviour]` attribute into the requested type. +/// Parses the `value` of a key=value pair in the `#[behaviour]` attribute into +/// the requested type. fn parse_attributes(ast: &DeriveInput) -> syn::Result { let mut attributes = BehaviourAttributes { prelude_path: syn::parse_quote! 
{ ::libp2p::swarm::derive_prelude }, diff --git a/swarm-test/src/lib.rs b/swarm-test/src/lib.rs index bcab6e5b700..02c9f450e20 100644 --- a/swarm-test/src/lib.rs +++ b/swarm-test/src/lib.rs @@ -18,58 +18,75 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. +use std::{fmt::Debug, future::IntoFuture, time::Duration}; + use async_trait::async_trait; -use futures::future::{BoxFuture, Either}; -use futures::{FutureExt, StreamExt}; +use futures::{ + future::{BoxFuture, Either}, + FutureExt, + StreamExt, +}; use libp2p_core::{multiaddr::Protocol, Multiaddr}; use libp2p_identity::PeerId; -use libp2p_swarm::dial_opts::PeerCondition; -use libp2p_swarm::{dial_opts::DialOpts, NetworkBehaviour, Swarm, SwarmEvent}; -use std::fmt::Debug; -use std::future::IntoFuture; -use std::time::Duration; - -/// An extension trait for [`Swarm`] that makes it easier to set up a network of [`Swarm`]s for tests. +use libp2p_swarm::{ + dial_opts::{DialOpts, PeerCondition}, + NetworkBehaviour, + Swarm, + SwarmEvent, +}; + +/// An extension trait for [`Swarm`] that makes it easier to set up a network of +/// [`Swarm`]s for tests. #[async_trait] pub trait SwarmExt { type NB: NetworkBehaviour; - /// Create a new [`Swarm`] with an ephemeral identity and the `async-std` runtime. + /// Create a new [`Swarm`] with an ephemeral identity and the `async-std` + /// runtime. /// - /// The swarm will use a [`libp2p_core::transport::MemoryTransport`] together with a [`libp2p_plaintext::Config`] authentication layer and - /// [`libp2p_yamux::Config`] as the multiplexer. However, these details should not be relied upon by the test - /// and may change at any time. + /// The swarm will use a [`libp2p_core::transport::MemoryTransport`] + /// together with a [`libp2p_plaintext::Config`] authentication layer and + /// [`libp2p_yamux::Config`] as the multiplexer. However, these details + /// should not be relied upon by the test and may change at any time. 
#[cfg(feature = "async-std")] fn new_ephemeral(behaviour_fn: impl FnOnce(libp2p_identity::Keypair) -> Self::NB) -> Self where Self: Sized; - /// Create a new [`Swarm`] with an ephemeral identity and the `tokio` runtime. + /// Create a new [`Swarm`] with an ephemeral identity and the `tokio` + /// runtime. /// - /// The swarm will use a [`libp2p_core::transport::MemoryTransport`] together with a [`libp2p_plaintext::Config`] authentication layer and - /// [`libp2p_yamux::Config`] as the multiplexer. However, these details should not be relied upon by the test - /// and may change at any time. + /// The swarm will use a [`libp2p_core::transport::MemoryTransport`] + /// together with a [`libp2p_plaintext::Config`] authentication layer and + /// [`libp2p_yamux::Config`] as the multiplexer. However, these details + /// should not be relied upon by the test and may change at any time. #[cfg(feature = "tokio")] fn new_ephemeral_tokio(behaviour_fn: impl FnOnce(libp2p_identity::Keypair) -> Self::NB) -> Self where Self: Sized; - /// Establishes a connection to the given [`Swarm`], polling both of them until the connection is established. + /// Establishes a connection to the given [`Swarm`], polling both of them + /// until the connection is established. /// - /// This will take addresses from the `other` [`Swarm`] via [`Swarm::external_addresses`]. - /// By default, this iterator will not yield any addresses. - /// To add listen addresses as external addresses, use [`ListenFuture::with_memory_addr_external`] or [`ListenFuture::with_tcp_addr_external`]. + /// This will take addresses from the `other` [`Swarm`] via + /// [`Swarm::external_addresses`]. By default, this iterator will not + /// yield any addresses. To add listen addresses as external addresses, + /// use [`ListenFuture::with_memory_addr_external`] or + /// [`ListenFuture::with_tcp_addr_external`]. 
async fn connect(&mut self, other: &mut Swarm) where T: NetworkBehaviour + Send, ::ToSwarm: Debug; - /// Dial the provided address and wait until a connection has been established. + /// Dial the provided address and wait until a connection has been + /// established. /// - /// In a normal test scenario, you should prefer [`SwarmExt::connect`] but that is not always possible. - /// This function only abstracts away the "dial and wait for `ConnectionEstablished` event" part. + /// In a normal test scenario, you should prefer [`SwarmExt::connect`] but + /// that is not always possible. This function only abstracts away the + /// "dial and wait for `ConnectionEstablished` event" part. /// - /// Because we don't have access to the other [`Swarm`], we can't guarantee that it makes progress. + /// Because we don't have access to the other [`Swarm`], we can't guarantee + /// that it makes progress. async fn dial_and_wait(&mut self, addr: Multiaddr) -> PeerId; /// Wait for specified condition to return `Some`. @@ -78,19 +95,23 @@ pub trait SwarmExt { P: Fn(SwarmEvent<::ToSwarm>) -> Option, P: Send; - /// Listens for incoming connections, polling the [`Swarm`] until the transport is ready to accept connections. + /// Listens for incoming connections, polling the [`Swarm`] until the + /// transport is ready to accept connections. /// - /// The first address is for the memory transport, the second one for the TCP transport. + /// The first address is for the memory transport, the second one for the + /// TCP transport. fn listen(&mut self) -> ListenFuture<&mut Self>; /// Returns the next [`SwarmEvent`] or times out after 10 seconds. /// - /// If the 10s timeout does not fit your usecase, please fall back to `StreamExt::next`. + /// If the 10s timeout does not fit your usecase, please fall back to + /// `StreamExt::next`. async fn next_swarm_event(&mut self) -> SwarmEvent<::ToSwarm>; /// Returns the next behaviour event or times out after 10 seconds. 
/// - /// If the 10s timeout does not fit your usecase, please fall back to `StreamExt::next`. + /// If the 10s timeout does not fit your usecase, please fall back to + /// `StreamExt::next`. async fn next_behaviour_event(&mut self) -> ::ToSwarm; async fn loop_on_next(self); @@ -102,31 +123,41 @@ pub trait SwarmExt { /// /// ## Number of events /// -/// The number of events is configured via const generics based on the array size of the return type. -/// This allows the compiler to infer how many events you are expecting based on how you use this function. -/// For example, if you expect the first [`Swarm`] to emit 2 events, you should assign the first variable of the returned tuple value to an array of size 2. -/// This works especially well if you directly pattern-match on the return value. +/// The number of events is configured via const generics based on the array +/// size of the return type. This allows the compiler to infer how many events +/// you are expecting based on how you use this function. For example, if you +/// expect the first [`Swarm`] to emit 2 events, you should assign the first +/// variable of the returned tuple value to an array of size 2. This works +/// especially well if you directly pattern-match on the return value. /// /// ## Type of event /// /// This function utilizes the [`TryIntoOutput`] trait. -/// Similar as to the number of expected events, the type of event is inferred based on your usage. -/// If you match against a [`SwarmEvent`], the first [`SwarmEvent`] will be returned. -/// If you match against your [`NetworkBehaviour::ToSwarm`] type, [`SwarmEvent`]s which are not [`SwarmEvent::Behaviour`] will be skipped until the [`Swarm`] returns a behaviour event. +/// Similar as to the number of expected events, the type of event is inferred +/// based on your usage. If you match against a [`SwarmEvent`], the first +/// [`SwarmEvent`] will be returned. 
If you match against your
+/// [`NetworkBehaviour::ToSwarm`] type, [`SwarmEvent`]s which are not
+/// [`SwarmEvent::Behaviour`] will be skipped until the [`Swarm`] returns a
+/// behaviour event.
 ///
-/// You can implement the [`TryIntoOutput`] for any other type to further customize this behaviour.
+/// You can implement the [`TryIntoOutput`] for any other type to further
+/// customize this behaviour.
 ///
 /// # Difference to [`futures::future::join`]
 ///
-/// This function is similar to joining two futures with two crucial differences:
+/// This function is similar to joining two futures with two crucial
+/// differences:
 /// 1. As described above, it allows you to obtain more than a single event.
-/// 2. More importantly, it will continue to poll the [`Swarm`]s **even if they already has emitted all expected events**.
+/// 2. More importantly, it will continue to poll the [`Swarm`]s **even if they
+///    already have emitted all expected events**.
 ///
 /// Especially (2) is crucial for our usage of this function.
 /// If a [`Swarm`] is not polled, nothing within it makes progress.
-/// This can "starve" the other swarm which for example may wait for another message to be sent on a connection.
+/// This can "starve" the other swarm which for example may wait for another
+/// message to be sent on a connection.
 ///
-/// Using [`drive`] instead of [`futures::future::join`] ensures that a [`Swarm`] continues to be polled, even after it emitted its events.
+/// Using [`drive`] instead of [`futures::future::join`] ensures that a
+/// [`Swarm`] continues to be polled, even after it emitted its events.
pub async fn drive< TBehaviour1, const NUM_EVENTS_SWARM_1: usize, @@ -231,7 +262,11 @@ where behaviour_fn(identity), peer_id, libp2p_swarm::Config::with_async_std_executor() - .with_idle_connection_timeout(Duration::from_secs(5)), // Some tests need connections to be kept alive beyond what the individual behaviour configures., + .with_idle_connection_timeout(Duration::from_secs(5)), /* Some tests need + * connections to be kept + * alive beyond what the + * individual behaviour + * configures., */ ) } @@ -259,7 +294,11 @@ where behaviour_fn(identity), peer_id, libp2p_swarm::Config::with_tokio_executor() - .with_idle_connection_timeout(Duration::from_secs(5)), // Some tests need connections to be kept alive beyond what the individual behaviour configures., + .with_idle_connection_timeout(Duration::from_secs(5)), /* Some tests need + * connections to be kept + * alive beyond what the + * individual behaviour + * configures., */ ) } @@ -385,20 +424,26 @@ pub struct ListenFuture { } impl ListenFuture { - /// Adds the memory address we are starting to listen on as an external address using [`Swarm::add_external_address`]. + /// Adds the memory address we are starting to listen on as an external + /// address using [`Swarm::add_external_address`]. /// - /// This is typically "safe" for tests because within a process, memory addresses are "globally" reachable. - /// However, some tests depend on which addresses are external and need this to be configurable so it is not a good default. + /// This is typically "safe" for tests because within a process, memory + /// addresses are "globally" reachable. However, some tests depend on + /// which addresses are external and need this to be configurable so it is + /// not a good default. pub fn with_memory_addr_external(mut self) -> Self { self.add_memory_external = true; self } - /// Adds the TCP address we are starting to listen on as an external address using [`Swarm::add_external_address`]. 
+ /// Adds the TCP address we are starting to listen on as an external address + /// using [`Swarm::add_external_address`]. /// - /// This is typically "safe" for tests because on the same machine, 127.0.0.1 is reachable for other [`Swarm`]s. - /// However, some tests depend on which addresses are external and need this to be configurable so it is not a good default. + /// This is typically "safe" for tests because on the same machine, + /// 127.0.0.1 is reachable for other [`Swarm`]s. However, some tests + /// depend on which addresses are external and need this to be configurable + /// so it is not a good default. pub fn with_tcp_addr_external(mut self) -> Self { self.add_tcp_external = true; diff --git a/swarm/benches/connection_handler.rs b/swarm/benches/connection_handler.rs index 09340421f83..e02a4a5e06c 100644 --- a/swarm/benches/connection_handler.rs +++ b/swarm/benches/connection_handler.rs @@ -1,11 +1,17 @@ +use std::{convert::Infallible, sync::atomic::AtomicUsize}; + use async_std::stream::StreamExt; use criterion::{criterion_group, criterion_main, Criterion}; use libp2p_core::{ - transport::MemoryTransport, InboundUpgrade, Multiaddr, OutboundUpgrade, Transport, UpgradeInfo, + transport::MemoryTransport, + InboundUpgrade, + Multiaddr, + OutboundUpgrade, + Transport, + UpgradeInfo, }; use libp2p_identity::PeerId; use libp2p_swarm::{ConnectionHandler, NetworkBehaviour, StreamProtocol}; -use std::{convert::Infallible, sync::atomic::AtomicUsize}; use web_time::Duration; macro_rules! gen_behaviour { @@ -82,7 +88,7 @@ benchmarks! 
{ SpinningBehaviour20::bench().name(m).poll_count(500).protocols_per_behaviour(100), ]; } -//fn main() {} +// fn main() {} trait BigBehaviour: Sized { fn behaviours(&mut self) -> &mut [SpinningBehaviour]; diff --git a/swarm/src/behaviour.rs b/swarm/src/behaviour.rs index 35aed12fba5..9dbc5c17543 100644 --- a/swarm/src/behaviour.rs +++ b/swarm/src/behaviour.rs @@ -24,48 +24,61 @@ mod listen_addresses; mod peer_addresses; pub mod toggle; +use std::task::{Context, Poll}; + pub use external_addresses::ExternalAddresses; +use libp2p_core::{ + transport::{ListenerId, PortUse}, + ConnectedPoint, + Endpoint, + Multiaddr, +}; +use libp2p_identity::PeerId; pub use listen_addresses::ListenAddresses; pub use peer_addresses::PeerAddresses; -use crate::connection::ConnectionId; -use crate::dial_opts::DialOpts; -use crate::listen_opts::ListenOpts; use crate::{ - ConnectionDenied, ConnectionError, ConnectionHandler, DialError, ListenError, THandler, - THandlerInEvent, THandlerOutEvent, + connection::ConnectionId, + dial_opts::DialOpts, + listen_opts::ListenOpts, + ConnectionDenied, + ConnectionError, + ConnectionHandler, + DialError, + ListenError, + THandler, + THandlerInEvent, + THandlerOutEvent, }; -use libp2p_core::{ - transport::{ListenerId, PortUse}, - ConnectedPoint, Endpoint, Multiaddr, -}; -use libp2p_identity::PeerId; -use std::{task::Context, task::Poll}; -/// A [`NetworkBehaviour`] defines the behaviour of the local node on the network. +/// A [`NetworkBehaviour`] defines the behaviour of the local node on the +/// network. /// -/// In contrast to [`Transport`](libp2p_core::Transport) which defines **how** to send bytes on the -/// network, [`NetworkBehaviour`] defines **what** bytes to send and **to whom**. +/// In contrast to [`Transport`](libp2p_core::Transport) which defines **how** +/// to send bytes on the network, [`NetworkBehaviour`] defines **what** bytes to +/// send and **to whom**. /// -/// Each protocol (e.g. 
`libp2p-ping`, `libp2p-identify` or `libp2p-kad`) implements -/// [`NetworkBehaviour`]. Multiple implementations of [`NetworkBehaviour`] can be composed into a -/// hierarchy of [`NetworkBehaviour`]s where parent implementations delegate to child -/// implementations. Finally the root of the [`NetworkBehaviour`] hierarchy is passed to -/// [`Swarm`](crate::Swarm) where it can then control the behaviour of the local node on a libp2p -/// network. +/// Each protocol (e.g. `libp2p-ping`, `libp2p-identify` or `libp2p-kad`) +/// implements [`NetworkBehaviour`]. Multiple implementations of +/// [`NetworkBehaviour`] can be composed into a hierarchy of +/// [`NetworkBehaviour`]s where parent implementations delegate to child +/// implementations. Finally the root of the [`NetworkBehaviour`] hierarchy is +/// passed to [`Swarm`](crate::Swarm) where it can then control the behaviour of +/// the local node on a libp2p network. /// /// # Hierarchy of [`NetworkBehaviour`] /// -/// To compose multiple [`NetworkBehaviour`] implementations into a single [`NetworkBehaviour`] -/// implementation, potentially building a multi-level hierarchy of [`NetworkBehaviour`]s, one can -/// use one of the [`NetworkBehaviour`] combinators, and/or use the [`NetworkBehaviour`] derive +/// To compose multiple [`NetworkBehaviour`] implementations into a single +/// [`NetworkBehaviour`] implementation, potentially building a multi-level +/// hierarchy of [`NetworkBehaviour`]s, one can use one of the +/// [`NetworkBehaviour`] combinators, and/or use the [`NetworkBehaviour`] derive /// macro. /// /// ## Combinators /// -/// [`NetworkBehaviour`] combinators wrap one or more [`NetworkBehaviour`] implementations and -/// implement [`NetworkBehaviour`] themselves. Example is the -/// [`Toggle`](crate::behaviour::toggle::Toggle) [`NetworkBehaviour`]. +/// [`NetworkBehaviour`] combinators wrap one or more [`NetworkBehaviour`] +/// implementations and implement [`NetworkBehaviour`] themselves. 
Example is +/// the [`Toggle`](crate::behaviour::toggle::Toggle) [`NetworkBehaviour`]. /// /// ``` rust /// # use libp2p_swarm::dummy; @@ -76,22 +89,25 @@ use std::{task::Context, task::Poll}; /// /// ## Custom [`NetworkBehaviour`] with the Derive Macro /// -/// One can derive [`NetworkBehaviour`] for a custom `struct` via the `#[derive(NetworkBehaviour)]` -/// proc macro re-exported by the `libp2p` crate. The macro generates a delegating `trait` -/// implementation for the custom `struct`. Each [`NetworkBehaviour`] trait method is simply -/// delegated to each `struct` member in the order the `struct` is defined. For example for -/// [`NetworkBehaviour::poll`] it will first poll the first `struct` member until it returns -/// [`Poll::Pending`] before moving on to later members. +/// One can derive [`NetworkBehaviour`] for a custom `struct` via the +/// `#[derive(NetworkBehaviour)]` proc macro re-exported by the `libp2p` crate. +/// The macro generates a delegating `trait` implementation for the custom +/// `struct`. Each [`NetworkBehaviour`] trait method is simply delegated to each +/// `struct` member in the order the `struct` is defined. For example for +/// [`NetworkBehaviour::poll`] it will first poll the first `struct` member +/// until it returns [`Poll::Pending`] before moving on to later members. /// -/// Events ([`NetworkBehaviour::ToSwarm`]) returned by each `struct` member are wrapped in a new -/// `enum` event, with an `enum` variant for each `struct` member. Users can define this event -/// `enum` themselves and provide the name to the derive macro via `#[behaviour(to_swarm = -/// "MyCustomOutEvent")]`. If the user does not specify an `to_swarm`, the derive macro generates +/// Events ([`NetworkBehaviour::ToSwarm`]) returned by each `struct` member are +/// wrapped in a new `enum` event, with an `enum` variant for each `struct` +/// member. 
Users can define this event `enum` themselves and provide the name +/// to the derive macro via `#[behaviour(to_swarm = "MyCustomOutEvent")]`. If +/// the user does not specify an `to_swarm`, the derive macro generates /// the event definition itself, naming it `Event`. /// -/// The aforementioned conversion of each of the event types generated by the struct members to the -/// custom `to_swarm` is handled by [`From`] implementations which the user needs to define in -/// addition to the event `enum` itself. +/// The aforementioned conversion of each of the event types generated by the +/// struct members to the custom `to_swarm` is handled by [`From`] +/// implementations which the user needs to define in addition to the event +/// `enum` itself. /// /// ``` rust /// # use libp2p_identify as identify; @@ -101,40 +117,43 @@ use std::{task::Context, task::Poll}; /// #[behaviour(to_swarm = "Event")] /// # #[behaviour(prelude = "libp2p_swarm::derive_prelude")] /// struct MyBehaviour { -/// identify: identify::Behaviour, -/// ping: ping::Behaviour, +/// identify: identify::Behaviour, +/// ping: ping::Behaviour, /// } /// /// enum Event { -/// Identify(identify::Event), -/// Ping(ping::Event), +/// Identify(identify::Event), +/// Ping(ping::Event), /// } /// /// impl From for Event { -/// fn from(event: identify::Event) -> Self { -/// Self::Identify(event) -/// } +/// fn from(event: identify::Event) -> Self { +/// Self::Identify(event) +/// } /// } /// /// impl From for Event { -/// fn from(event: ping::Event) -> Self { -/// Self::Ping(event) -/// } +/// fn from(event: ping::Event) -> Self { +/// Self::Ping(event) +/// } /// } /// ``` pub trait NetworkBehaviour: 'static { /// Handler for all the protocols the network behaviour supports. type ConnectionHandler: ConnectionHandler; - /// Event generated by the `NetworkBehaviour` and that the swarm will report back. + /// Event generated by the `NetworkBehaviour` and that the swarm will report + /// back. 
type ToSwarm: Send + 'static; /// Callback that is invoked for every new inbound connection. /// - /// At this point in the connection lifecycle, only the remote's and our local address are known. - /// We have also already allocated a [`ConnectionId`]. + /// At this point in the connection lifecycle, only the remote's and our + /// local address are known. We have also already allocated a + /// [`ConnectionId`]. /// - /// Any error returned from this function will immediately abort the dial attempt. + /// Any error returned from this function will immediately abort the dial + /// attempt. fn handle_pending_inbound_connection( &mut self, _connection_id: ConnectionId, @@ -148,12 +167,13 @@ pub trait NetworkBehaviour: 'static { /// /// This is invoked once another peer has successfully dialed us. /// - /// At this point, we have verified their [`PeerId`] and we know, which particular [`Multiaddr`] succeeded in the dial. - /// In order to actually use this connection, this function must return a [`ConnectionHandler`]. + /// At this point, we have verified their [`PeerId`] and we know, which + /// particular [`Multiaddr`] succeeded in the dial. In order to actually + /// use this connection, this function must return a [`ConnectionHandler`]. /// Returning an error will immediately close the connection. /// - /// Note when any composed behaviour returns an error the connection will be closed and a - /// [`FromSwarm::ListenFailure`] event will be emitted. + /// Note when any composed behaviour returns an error the connection will be + /// closed and a [`FromSwarm::ListenFailure`] event will be emitted. fn handle_established_inbound_connection( &mut self, _connection_id: ConnectionId, @@ -166,14 +186,22 @@ pub trait NetworkBehaviour: 'static { /// /// We have access to: /// - /// - The [`PeerId`], if known. Remember that we can dial without a [`PeerId`]. + /// - The [`PeerId`], if known. Remember that we can dial without a + /// [`PeerId`]. 
/// - All addresses passed to [`DialOpts`] are passed in here too. - /// - The effective [`Role`](Endpoint) of this peer in the dial attempt. Typically, this is set to [`Endpoint::Dialer`] except if we are attempting a hole-punch. - /// - The [`ConnectionId`] identifying the future connection resulting from this dial, if successful. + /// - The effective [`Role`](Endpoint) of this peer in the dial attempt. + /// Typically, this is set to [`Endpoint::Dialer`] except if we are + /// attempting a hole-punch. + /// - The [`ConnectionId`] identifying the future connection resulting from + /// this dial, if successful. /// - /// Note that the addresses returned from this function are only used for dialing if [`WithPeerIdWithAddresses::extend_addresses_through_behaviour`](crate::dial_opts::WithPeerIdWithAddresses::extend_addresses_through_behaviour) is set. + /// Note that the addresses returned from this function are only used for + /// dialing if + /// [`WithPeerIdWithAddresses::extend_addresses_through_behaviour`](crate::dial_opts::WithPeerIdWithAddresses::extend_addresses_through_behaviour) + /// is set. /// - /// Any error returned from this function will immediately abort the dial attempt. + /// Any error returned from this function will immediately abort the dial + /// attempt. fn handle_pending_outbound_connection( &mut self, _connection_id: ConnectionId, @@ -187,12 +215,13 @@ pub trait NetworkBehaviour: 'static { /// Callback that is invoked for every established outbound connection. /// /// This is invoked once we have successfully dialed a peer. - /// At this point, we have verified their [`PeerId`] and we know, which particular [`Multiaddr`] succeeded in the dial. - /// In order to actually use this connection, this function must return a [`ConnectionHandler`]. + /// At this point, we have verified their [`PeerId`] and we know, which + /// particular [`Multiaddr`] succeeded in the dial. 
In order to actually + /// use this connection, this function must return a [`ConnectionHandler`]. /// Returning an error will immediately close the connection. /// - /// Note when any composed behaviour returns an error the connection will be closed and a - /// [`FromSwarm::DialFailure`] event will be emitted. + /// Note when any composed behaviour returns an error the connection will be + /// closed and a [`FromSwarm::DialFailure`] event will be emitted. fn handle_established_outbound_connection( &mut self, _connection_id: ConnectionId, @@ -205,11 +234,13 @@ pub trait NetworkBehaviour: 'static { /// Informs the behaviour about an event from the [`Swarm`](crate::Swarm). fn on_swarm_event(&mut self, event: FromSwarm); - /// Informs the behaviour about an event generated by the [`ConnectionHandler`] - /// dedicated to the peer identified by `peer_id`. for the behaviour. + /// Informs the behaviour about an event generated by the + /// [`ConnectionHandler`] dedicated to the peer identified by `peer_id`. + /// for the behaviour. /// /// The [`PeerId`] is guaranteed to be in a connected state. In other words, - /// [`FromSwarm::ConnectionEstablished`] has previously been received with this [`PeerId`]. + /// [`FromSwarm::ConnectionEstablished`] has previously been received with + /// this [`PeerId`]. fn on_connection_handler_event( &mut self, _peer_id: PeerId, @@ -219,8 +250,8 @@ pub trait NetworkBehaviour: 'static { /// Polls for things that swarm should do. /// - /// This API mimics the API of the `Stream` trait. The method may register the current task in - /// order to wake it up at a later point in time. + /// This API mimics the API of the `Stream` trait. The method may register + /// the current task in order to wake it up at a later point in time. fn poll(&mut self, cx: &mut Context<'_>) -> Poll>>; } @@ -236,12 +267,15 @@ pub enum ToSwarm { /// Instructs the swarm to start a dial. 
/// - /// On success, [`NetworkBehaviour::on_swarm_event`] with `ConnectionEstablished` is invoked. - /// On failure, [`NetworkBehaviour::on_swarm_event`] with `DialFailure` is invoked. + /// On success, [`NetworkBehaviour::on_swarm_event`] with + /// `ConnectionEstablished` is invoked. On failure, + /// [`NetworkBehaviour::on_swarm_event`] with `DialFailure` is invoked. /// - /// [`DialOpts`] provides access to the [`ConnectionId`] via [`DialOpts::connection_id`]. - /// This [`ConnectionId`] will be used throughout the connection's lifecycle to associate events with it. - /// This allows a [`NetworkBehaviour`] to identify a connection that resulted out of its own dial request. + /// [`DialOpts`] provides access to the [`ConnectionId`] via + /// [`DialOpts::connection_id`]. This [`ConnectionId`] will be used + /// throughout the connection's lifecycle to associate events with it. + /// This allows a [`NetworkBehaviour`] to identify a connection that + /// resulted out of its own dial request. Dial { opts: DialOpts }, /// Instructs the [`Swarm`](crate::Swarm) to listen on the provided address. @@ -253,18 +287,21 @@ pub enum ToSwarm { /// Instructs the `Swarm` to send an event to the handler dedicated to a /// connection with a peer. /// - /// If the `Swarm` is connected to the peer, the message is delivered to the [`ConnectionHandler`] - /// instance identified by the peer ID and connection ID. + /// If the `Swarm` is connected to the peer, the message is delivered to the + /// [`ConnectionHandler`] instance identified by the peer ID and + /// connection ID. /// - /// If the specified connection no longer exists, the event is silently dropped. + /// If the specified connection no longer exists, the event is silently + /// dropped. /// /// Typically the connection ID given is the same as the one passed to - /// [`NetworkBehaviour::on_connection_handler_event`], i.e. 
whenever the behaviour wishes to - /// respond to a request on the same connection (and possibly the same - /// substream, as per the implementation of [`ConnectionHandler`]). + /// [`NetworkBehaviour::on_connection_handler_event`], i.e. whenever the + /// behaviour wishes to respond to a request on the same connection (and + /// possibly the same substream, as per the implementation of + /// [`ConnectionHandler`]). /// - /// Note that even if the peer is currently connected, connections can get closed - /// at any time and thus the event may not reach a handler. + /// Note that even if the peer is currently connected, connections can get + /// closed at any time and thus the event may not reach a handler. NotifyHandler { /// The peer for whom a [`ConnectionHandler`] should be notified. peer_id: PeerId, @@ -274,15 +311,18 @@ pub enum ToSwarm { event: TInEvent, }, - /// Reports a **new** candidate for an external address to the [`Swarm`](crate::Swarm). + /// Reports a **new** candidate for an external address to the + /// [`Swarm`](crate::Swarm). /// /// The emphasis on a **new** candidate is important. /// Protocols MUST take care to only emit a candidate once per "source". - /// For example, the observed address of a TCP connection does not change throughout its lifetime. - /// Thus, only one candidate should be emitted per connection. + /// For example, the observed address of a TCP connection does not change + /// throughout its lifetime. Thus, only one candidate should be emitted + /// per connection. /// - /// This makes the report frequency of an address a meaningful data-point for consumers of this event. - /// This address will be shared with all [`NetworkBehaviour`]s via [`FromSwarm::NewExternalAddrCandidate`]. + /// This makes the report frequency of an address a meaningful data-point + /// for consumers of this event. This address will be shared with all + /// [`NetworkBehaviour`]s via [`FromSwarm::NewExternalAddrCandidate`]. 
/// /// This address could come from a variety of sources: /// - A protocol such as identify obtained it from a remote. @@ -290,25 +330,34 @@ pub enum ToSwarm { /// - We made an educated guess based on one of our listen addresses. NewExternalAddrCandidate(Multiaddr), - /// Indicates to the [`Swarm`](crate::Swarm) that the provided address is confirmed to be externally reachable. + /// Indicates to the [`Swarm`](crate::Swarm) that the provided address is + /// confirmed to be externally reachable. /// - /// This is intended to be issued in response to a [`FromSwarm::NewExternalAddrCandidate`] if we are indeed externally reachable on this address. - /// This address will be shared with all [`NetworkBehaviour`]s via [`FromSwarm::ExternalAddrConfirmed`]. + /// This is intended to be issued in response to a + /// [`FromSwarm::NewExternalAddrCandidate`] if we are indeed externally + /// reachable on this address. This address will be shared with all + /// [`NetworkBehaviour`]s via [`FromSwarm::ExternalAddrConfirmed`]. ExternalAddrConfirmed(Multiaddr), - /// Indicates to the [`Swarm`](crate::Swarm) that we are no longer externally reachable under the provided address. + /// Indicates to the [`Swarm`](crate::Swarm) that we are no longer + /// externally reachable under the provided address. /// - /// This expires an address that was earlier confirmed via [`ToSwarm::ExternalAddrConfirmed`]. - /// This address will be shared with all [`NetworkBehaviour`]s via [`FromSwarm::ExternalAddrExpired`]. + /// This expires an address that was earlier confirmed via + /// [`ToSwarm::ExternalAddrConfirmed`]. This address will be shared with + /// all [`NetworkBehaviour`]s via [`FromSwarm::ExternalAddrExpired`]. ExternalAddrExpired(Multiaddr), - /// Instructs the `Swarm` to initiate a graceful close of one or all connections with the given peer. + /// Instructs the `Swarm` to initiate a graceful close of one or all + /// connections with the given peer. 
/// - /// Closing a connection via [`ToSwarm::CloseConnection`] will poll [`ConnectionHandler::poll_close`] to completion. - /// In most cases, stopping to "use" a connection is enough to have it closed. - /// The keep-alive algorithm will close a connection automatically once all [`ConnectionHandler`]s are idle. + /// Closing a connection via [`ToSwarm::CloseConnection`] will poll + /// [`ConnectionHandler::poll_close`] to completion. In most cases, + /// stopping to "use" a connection is enough to have it closed. + /// The keep-alive algorithm will close a connection automatically once all + /// [`ConnectionHandler`]s are idle. /// - /// Use this command if you want to close a connection _despite_ it still being in use by one or more handlers. + /// Use this command if you want to close a connection _despite_ it still + /// being in use by one or more handlers. CloseConnection { /// The peer to disconnect. peer_id: PeerId, @@ -316,7 +365,8 @@ pub enum ToSwarm { connection: CloseConnection, }, - /// Reports external address of a remote peer to the [`Swarm`](crate::Swarm) and through that to other [`NetworkBehaviour`]s. + /// Reports external address of a remote peer to the [`Swarm`](crate::Swarm) + /// and through that to other [`NetworkBehaviour`]s. NewExternalAddrOfPeer { peer_id: PeerId, address: Multiaddr }, } @@ -428,8 +478,8 @@ pub enum FromSwarm<'a> { /// Informs the behaviour about a closed connection to a peer. /// /// This event is always paired with an earlier - /// [`FromSwarm::ConnectionEstablished`] with the same peer ID, connection ID - /// and endpoint. + /// [`FromSwarm::ConnectionEstablished`] with the same peer ID, connection + /// ID and endpoint. ConnectionClosed(ConnectionClosed<'a>), /// Informs the behaviour that the [`ConnectedPoint`] of an existing /// connection has changed. @@ -440,8 +490,8 @@ pub enum FromSwarm<'a> { /// Informs the behaviour that an error /// happened on an incoming connection during its initial handshake. 
/// - /// This can include, for example, an error during the handshake of the encryption layer, or the - /// connection unexpectedly closed. + /// This can include, for example, an error during the handshake of the + /// encryption layer, or the connection unexpectedly closed. ListenFailure(ListenFailure<'a>), /// Informs the behaviour that a new listener was created. NewListener(NewListener), @@ -455,17 +505,22 @@ pub enum FromSwarm<'a> { ListenerError(ListenerError<'a>), /// Informs the behaviour that a listener closed. ListenerClosed(ListenerClosed<'a>), - /// Informs the behaviour that we have discovered a new candidate for an external address for us. + /// Informs the behaviour that we have discovered a new candidate for an + /// external address for us. NewExternalAddrCandidate(NewExternalAddrCandidate<'a>), - /// Informs the behaviour that an external address of the local node was confirmed. + /// Informs the behaviour that an external address of the local node was + /// confirmed. ExternalAddrConfirmed(ExternalAddrConfirmed<'a>), - /// Informs the behaviour that an external address of the local node expired, i.e. is no-longer confirmed. + /// Informs the behaviour that an external address of the local node + /// expired, i.e. is no-longer confirmed. ExternalAddrExpired(ExternalAddrExpired<'a>), - /// Informs the behaviour that we have discovered a new external address for a remote peer. + /// Informs the behaviour that we have discovered a new external address for + /// a remote peer. NewExternalAddrOfPeer(NewExternalAddrOfPeer<'a>), } -/// [`FromSwarm`] variant that informs the behaviour about a newly established connection to a peer. +/// [`FromSwarm`] variant that informs the behaviour about a newly established +/// connection to a peer. 
#[derive(Debug, Clone, Copy)] pub struct ConnectionEstablished<'a> { pub peer_id: PeerId, @@ -475,7 +530,8 @@ pub struct ConnectionEstablished<'a> { pub other_established: usize, } -/// [`FromSwarm`] variant that informs the behaviour about a closed connection to a peer. +/// [`FromSwarm`] variant that informs the behaviour about a closed connection +/// to a peer. /// /// This event is always paired with an earlier /// [`FromSwarm::ConnectionEstablished`] with the same peer ID, connection ID @@ -489,8 +545,8 @@ pub struct ConnectionClosed<'a> { pub remaining_established: usize, } -/// [`FromSwarm`] variant that informs the behaviour that the [`ConnectedPoint`] of an existing -/// connection has changed. +/// [`FromSwarm`] variant that informs the behaviour that the [`ConnectedPoint`] +/// of an existing connection has changed. #[derive(Debug, Clone, Copy)] pub struct AddressChange<'a> { pub peer_id: PeerId, @@ -511,8 +567,8 @@ pub struct DialFailure<'a> { /// [`FromSwarm`] variant that informs the behaviour that an error /// happened on an incoming connection during its initial handshake. /// -/// This can include, for example, an error during the handshake of the encryption layer, or the -/// connection unexpectedly closed. +/// This can include, for example, an error during the handshake of the +/// encryption layer, or the connection unexpectedly closed. #[derive(Debug, Clone, Copy)] pub struct ListenFailure<'a> { pub local_addr: &'a Multiaddr, @@ -522,7 +578,8 @@ pub struct ListenFailure<'a> { pub peer_id: Option, } -/// [`FromSwarm`] variant that informs the behaviour that a new listener was created. +/// [`FromSwarm`] variant that informs the behaviour that a new listener was +/// created. #[derive(Debug, Clone, Copy)] pub struct NewListener { pub listener_id: ListenerId, @@ -545,7 +602,8 @@ pub struct ExpiredListenAddr<'a> { pub addr: &'a Multiaddr, } -/// [`FromSwarm`] variant that informs the behaviour that a listener experienced an error. 
+/// [`FromSwarm`] variant that informs the behaviour that a listener experienced +/// an error. #[derive(Debug, Clone, Copy)] pub struct ListenerError<'a> { pub listener_id: ListenerId, @@ -559,25 +617,29 @@ pub struct ListenerClosed<'a> { pub reason: Result<(), &'a std::io::Error>, } -/// [`FromSwarm`] variant that informs the behaviour about a new candidate for an external address for us. +/// [`FromSwarm`] variant that informs the behaviour about a new candidate for +/// an external address for us. #[derive(Debug, Clone, Copy)] pub struct NewExternalAddrCandidate<'a> { pub addr: &'a Multiaddr, } -/// [`FromSwarm`] variant that informs the behaviour that an external address was confirmed. +/// [`FromSwarm`] variant that informs the behaviour that an external address +/// was confirmed. #[derive(Debug, Clone, Copy)] pub struct ExternalAddrConfirmed<'a> { pub addr: &'a Multiaddr, } -/// [`FromSwarm`] variant that informs the behaviour that an external address was removed. +/// [`FromSwarm`] variant that informs the behaviour that an external address +/// was removed. #[derive(Debug, Clone, Copy)] pub struct ExternalAddrExpired<'a> { pub addr: &'a Multiaddr, } -/// [`FromSwarm`] variant that informs the behaviour that a new external address for a remote peer was detected. +/// [`FromSwarm`] variant that informs the behaviour that a new external address +/// for a remote peer was detected. #[derive(Clone, Copy, Debug)] pub struct NewExternalAddrOfPeer<'a> { pub peer_id: PeerId, diff --git a/swarm/src/behaviour/either.rs b/swarm/src/behaviour/either.rs index 7a51303e74d..0140b48cab1 100644 --- a/swarm/src/behaviour/either.rs +++ b/swarm/src/behaviour/either.rs @@ -18,16 +18,23 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::behaviour::{self, NetworkBehaviour, ToSwarm}; -use crate::connection::ConnectionId; -use crate::{ConnectionDenied, THandler, THandlerInEvent, THandlerOutEvent}; +use std::task::{Context, Poll}; + use either::Either; -use libp2p_core::transport::PortUse; -use libp2p_core::{Endpoint, Multiaddr}; +use libp2p_core::{transport::PortUse, Endpoint, Multiaddr}; use libp2p_identity::PeerId; -use std::{task::Context, task::Poll}; -/// Implementation of [`NetworkBehaviour`] that can be either of two implementations. +use crate::{ + behaviour::{self, NetworkBehaviour, ToSwarm}, + connection::ConnectionId, + ConnectionDenied, + THandler, + THandlerInEvent, + THandlerOutEvent, +}; + +/// Implementation of [`NetworkBehaviour`] that can be either of two +/// implementations. impl NetworkBehaviour for Either where L: NetworkBehaviour, diff --git a/swarm/src/behaviour/external_addresses.rs b/swarm/src/behaviour/external_addresses.rs index 579f46fe486..a4e352d8e7c 100644 --- a/swarm/src/behaviour/external_addresses.rs +++ b/swarm/src/behaviour/external_addresses.rs @@ -1,12 +1,14 @@ -use crate::behaviour::{ExternalAddrConfirmed, ExternalAddrExpired, FromSwarm}; use libp2p_core::Multiaddr; +use crate::behaviour::{ExternalAddrConfirmed, ExternalAddrExpired, FromSwarm}; + /// The maximum number of local external addresses. When reached any /// further externally reported addresses are ignored. The behaviour always /// tracks all its listen addresses. const MAX_LOCAL_EXTERNAL_ADDRS: usize = 20; -/// Utility struct for tracking the external addresses of a [`Swarm`](crate::Swarm). +/// Utility struct for tracking the external addresses of a +/// [`Swarm`](crate::Swarm). #[derive(Debug, Clone, Default)] pub struct ExternalAddresses { addresses: Vec, @@ -78,17 +80,20 @@ impl ExternalAddresses { } fn push_front(&mut self, addr: &Multiaddr) { - self.addresses.insert(0, addr.clone()); // We have at most `MAX_LOCAL_EXTERNAL_ADDRS` so this isn't very expensive. 
+ self.addresses.insert(0, addr.clone()); // We have at most + // `MAX_LOCAL_EXTERNAL_ADDRS` so + // this isn't very expensive. } } #[cfg(test)] mod tests { - use super::*; use libp2p_core::multiaddr::Protocol; use once_cell::sync::Lazy; use rand::Rng; + use super::*; + #[test] fn new_external_addr_returns_correct_changed_value() { let mut addresses = ExternalAddresses::default(); diff --git a/swarm/src/behaviour/listen_addresses.rs b/swarm/src/behaviour/listen_addresses.rs index 6076f5e7923..e42554c17e2 100644 --- a/swarm/src/behaviour/listen_addresses.rs +++ b/swarm/src/behaviour/listen_addresses.rs @@ -1,8 +1,11 @@ -use crate::behaviour::{ExpiredListenAddr, FromSwarm, NewListenAddr}; -use libp2p_core::Multiaddr; use std::collections::HashSet; -/// Utility struct for tracking the addresses a [`Swarm`](crate::Swarm) is listening on. +use libp2p_core::Multiaddr; + +use crate::behaviour::{ExpiredListenAddr, FromSwarm, NewListenAddr}; + +/// Utility struct for tracking the addresses a [`Swarm`](crate::Swarm) is +/// listening on. 
#[derive(Debug, Default, Clone)] pub struct ListenAddresses { addresses: HashSet, @@ -32,10 +35,11 @@ impl ListenAddresses { #[cfg(test)] mod tests { - use super::*; use libp2p_core::{multiaddr::Protocol, transport::ListenerId}; use once_cell::sync::Lazy; + use super::*; + #[test] fn new_listen_addr_returns_correct_changed_value() { let mut addresses = ListenAddresses::default(); diff --git a/swarm/src/behaviour/peer_addresses.rs b/swarm/src/behaviour/peer_addresses.rs index 1eeead56ca1..71c3d940b77 100644 --- a/swarm/src/behaviour/peer_addresses.rs +++ b/swarm/src/behaviour/peer_addresses.rs @@ -1,19 +1,19 @@ -use crate::behaviour::FromSwarm; -use crate::{DialError, DialFailure, NewExternalAddrOfPeer}; +use std::num::NonZeroUsize; use libp2p_core::Multiaddr; use libp2p_identity::PeerId; - use lru::LruCache; -use std::num::NonZeroUsize; +use crate::{behaviour::FromSwarm, DialError, DialFailure, NewExternalAddrOfPeer}; -/// Struct for tracking peers' external addresses of the [`Swarm`](crate::Swarm). +/// Struct for tracking peers' external addresses of the +/// [`Swarm`](crate::Swarm). #[derive(Debug)] pub struct PeerAddresses(LruCache>); impl PeerAddresses { - /// Creates a [`PeerAddresses`] cache with capacity for the given number of peers. + /// Creates a [`PeerAddresses`] cache with capacity for the given number of + /// peers. /// /// For each peer, we will at most store 10 addresses. pub fn new(number_of_peers: NonZeroUsize) -> Self { @@ -46,7 +46,6 @@ impl PeerAddresses { /// Appends address to the existing set if peer addresses already exist. /// Creates a new cache entry for peer_id if no addresses are present. /// Returns true if the newly added address was not previously in the cache. 
- /// pub fn add(&mut self, peer: PeerId, address: Multiaddr) -> bool { match prepare_addr(&peer, &address) { Ok(address) => { @@ -98,17 +97,17 @@ impl Default for PeerAddresses { #[cfg(test)] mod tests { - use super::*; use std::io; - use crate::ConnectionId; use libp2p_core::{ multiaddr::Protocol, transport::{memory::MemoryTransportError, TransportError}, }; - use once_cell::sync::Lazy; + use super::*; + use crate::ConnectionId; + #[test] fn new_peer_addr_returns_correct_changed_value() { let mut cache = PeerAddresses::default(); diff --git a/swarm/src/behaviour/toggle.rs b/swarm/src/behaviour/toggle.rs index 3dde364bf19..b82787d83a7 100644 --- a/swarm/src/behaviour/toggle.rs +++ b/swarm/src/behaviour/toggle.rs @@ -18,24 +18,38 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::behaviour::FromSwarm; -use crate::connection::ConnectionId; -use crate::handler::{ - AddressChange, ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, DialUpgradeError, - FullyNegotiatedInbound, FullyNegotiatedOutbound, ListenUpgradeError, SubstreamProtocol, -}; -use crate::upgrade::SendWrapper; -use crate::{ - ConnectionDenied, NetworkBehaviour, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, -}; +use std::task::{Context, Poll}; + use either::Either; use futures::future; -use libp2p_core::transport::PortUse; -use libp2p_core::{upgrade::DeniedUpgrade, Endpoint, Multiaddr}; +use libp2p_core::{transport::PortUse, upgrade::DeniedUpgrade, Endpoint, Multiaddr}; use libp2p_identity::PeerId; -use std::{task::Context, task::Poll}; -/// Implementation of `NetworkBehaviour` that can be either in the disabled or enabled state. 
+use crate::{ + behaviour::FromSwarm, + connection::ConnectionId, + handler::{ + AddressChange, + ConnectionEvent, + ConnectionHandler, + ConnectionHandlerEvent, + DialUpgradeError, + FullyNegotiatedInbound, + FullyNegotiatedOutbound, + ListenUpgradeError, + SubstreamProtocol, + }, + upgrade::SendWrapper, + ConnectionDenied, + NetworkBehaviour, + THandler, + THandlerInEvent, + THandlerOutEvent, + ToSwarm, +}; + +/// Implementation of `NetworkBehaviour` that can be either in the disabled or +/// enabled state. /// /// The state can only be chosen at initialization. pub struct Toggle { @@ -242,12 +256,12 @@ where // Ignore listen upgrade errors in disabled state. (None, Either::Right(())) => return, (Some(_), Either::Right(())) => panic!( - "Unexpected `Either::Right` inbound info through \ - `on_listen_upgrade_error` in enabled state.", + "Unexpected `Either::Right` inbound info through `on_listen_upgrade_error` in \ + enabled state.", ), (None, Either::Left(_)) => panic!( - "Unexpected `Either::Left` inbound info through \ - `on_listen_upgrade_error` in disabled state.", + "Unexpected `Either::Left` inbound info through `on_listen_upgrade_error` in \ + disabled state.", ), }; diff --git a/swarm/src/connection.rs b/swarm/src/connection.rs index 78c007fd71d..17508c6c4fa 100644 --- a/swarm/src/connection.rs +++ b/swarm/src/connection.rs @@ -23,42 +23,61 @@ mod error; pub(crate) mod pool; mod supported_protocols; +use std::{ + collections::{HashMap, HashSet}, + fmt, + fmt::{Display, Formatter}, + future::Future, + io, + mem, + pin::Pin, + sync::atomic::{AtomicUsize, Ordering}, + task::{Context, Poll, Waker}, + time::Duration, +}; + pub use error::ConnectionError; pub(crate) use error::{ - PendingConnectionError, PendingInboundConnectionError, PendingOutboundConnectionError, + PendingConnectionError, + PendingInboundConnectionError, + PendingOutboundConnectionError, +}; +use futures::{future::BoxFuture, stream, stream::FuturesUnordered, FutureExt, StreamExt}; +use 
futures_timer::Delay; +use libp2p_core::{ + connection::ConnectedPoint, + multiaddr::Multiaddr, + muxing::{StreamMuxerBox, StreamMuxerEvent, StreamMuxerExt, SubstreamBox}, + transport::PortUse, + upgrade, + upgrade::{NegotiationError, ProtocolError}, + Endpoint, }; -use libp2p_core::transport::PortUse; +use libp2p_identity::PeerId; pub use supported_protocols::SupportedProtocols; +use web_time::Instant; -use crate::handler::{ - AddressChange, ConnectionEvent, ConnectionHandler, DialUpgradeError, FullyNegotiatedInbound, - FullyNegotiatedOutbound, ListenUpgradeError, ProtocolSupport, ProtocolsChange, UpgradeInfoSend, -}; -use crate::stream::ActiveStreamCounter; -use crate::upgrade::{InboundUpgradeSend, OutboundUpgradeSend}; use crate::{ - ConnectionHandlerEvent, Stream, StreamProtocol, StreamUpgradeError, SubstreamProtocol, + handler::{ + AddressChange, + ConnectionEvent, + ConnectionHandler, + DialUpgradeError, + FullyNegotiatedInbound, + FullyNegotiatedOutbound, + ListenUpgradeError, + ProtocolSupport, + ProtocolsChange, + UpgradeInfoSend, + }, + stream::ActiveStreamCounter, + upgrade::{InboundUpgradeSend, OutboundUpgradeSend}, + ConnectionHandlerEvent, + Stream, + StreamProtocol, + StreamUpgradeError, + SubstreamProtocol, }; -use futures::future::BoxFuture; -use futures::stream::FuturesUnordered; -use futures::StreamExt; -use futures::{stream, FutureExt}; -use futures_timer::Delay; -use libp2p_core::connection::ConnectedPoint; -use libp2p_core::multiaddr::Multiaddr; -use libp2p_core::muxing::{StreamMuxerBox, StreamMuxerEvent, StreamMuxerExt, SubstreamBox}; -use libp2p_core::upgrade; -use libp2p_core::upgrade::{NegotiationError, ProtocolError}; -use libp2p_core::Endpoint; -use libp2p_identity::PeerId; -use std::collections::{HashMap, HashSet}; -use std::fmt::{Display, Formatter}; -use std::future::Future; -use std::sync::atomic::{AtomicUsize, Ordering}; -use std::task::Waker; -use std::time::Duration; -use std::{fmt, io, mem, pin::Pin, task::Context, task::Poll}; 
-use web_time::Instant; static NEXT_CONNECTION_ID: AtomicUsize = AtomicUsize::new(1); @@ -69,10 +88,11 @@ pub struct ConnectionId(usize); impl ConnectionId { /// Creates an _unchecked_ [`ConnectionId`]. /// - /// [`Swarm`](crate::Swarm) enforces that [`ConnectionId`]s are unique and not reused. - /// This constructor does not, hence the _unchecked_. + /// [`Swarm`](crate::Swarm) enforces that [`ConnectionId`]s are unique and + /// not reused. This constructor does not, hence the _unchecked_. /// - /// It is primarily meant for allowing manual tests of [`NetworkBehaviour`](crate::NetworkBehaviour)s. + /// It is primarily meant for allowing manual tests of + /// [`NetworkBehaviour`](crate::NetworkBehaviour)s. pub fn new_unchecked(id: usize) -> Self { Self(id) } @@ -143,12 +163,13 @@ where /// Note: This only enforces a limit on the number of concurrently /// negotiating inbound streams. The total number of inbound streams on a /// connection is the sum of negotiating and negotiated streams. A limit on - /// the total number of streams can be enforced at the [`StreamMuxerBox`] level. + /// the total number of streams can be enforced at the [`StreamMuxerBox`] + /// level. max_negotiating_inbound_streams: usize, /// Contains all upgrades that are waiting for a new outbound substream. /// - /// The upgrade timeout is already ticking here so this may fail in case the remote is not quick - /// enough in providing us with a new stream. + /// The upgrade timeout is already ticking here so this may fail in case the + /// remote is not quick enough in providing us with a new stream. requested_substreams: FuturesUnordered< SubstreamRequested, >, @@ -223,7 +244,9 @@ where self.handler.on_behaviour_event(event); } - /// Begins an orderly shutdown of the connection, returning a stream of final events and a `Future` that resolves when connection shutdown is complete. 
+ /// Begins an orderly shutdown of the connection, returning a stream of + /// final events and a `Future` that resolves when connection shutdown is + /// complete. pub(crate) fn close( self, ) -> ( @@ -242,8 +265,8 @@ where ) } - /// Polls the handler and the substream, forwarding events from the former to the latter and - /// vice versa. + /// Polls the handler and the substream, forwarding events from the former + /// to the latter and vice versa. #[tracing::instrument(level = "debug", name = "Connection::poll", skip(self, cx))] pub(crate) fn poll( self: Pin<&mut Self>, @@ -320,7 +343,8 @@ where } } - // In case the [`ConnectionHandler`] can not make any more progress, poll the negotiating outbound streams. + // In case the [`ConnectionHandler`] can not make any more progress, poll the + // negotiating outbound streams. match negotiating_out.poll_next_unpin(cx) { Poll::Pending | Poll::Ready(None) => {} Poll::Ready(Some((info, Ok(protocol)))) => { @@ -337,8 +361,9 @@ where } } - // In case both the [`ConnectionHandler`] and the negotiating outbound streams can not - // make any more progress, poll the negotiating inbound streams. + // In case both the [`ConnectionHandler`] and the negotiating outbound streams + // can not make any more progress, poll the negotiating inbound + // streams. match negotiating_in.poll_next_unpin(cx) { Poll::Pending | Poll::Ready(None) => {} Poll::Ready(Some((info, Ok(protocol)))) => { @@ -368,7 +393,8 @@ where } // Check if the connection (and handler) should be shut down. - // As long as we're still negotiating substreams or have any active streams shutdown is always postponed. + // As long as we're still negotiating substreams or have any active streams + // shutdown is always postponed. if negotiating_in.is_empty() && negotiating_out.is_empty() && requested_substreams.is_empty() @@ -419,7 +445,8 @@ where stream_counter.clone(), )); - continue; // Go back to the top, handler can potentially make progress again. 
+ continue; // Go back to the top, handler can potentially + // make progress again. } } } @@ -436,7 +463,8 @@ where stream_counter.clone(), )); - continue; // Go back to the top, handler can potentially make progress again. + continue; // Go back to the top, handler can potentially + // make progress again. } } } @@ -451,10 +479,12 @@ where for change in changes { handler.on_connection_event(ConnectionEvent::LocalProtocolsChange(change)); } - continue; // Go back to the top, handler can potentially make progress again. + continue; // Go back to the top, handler can potentially make + // progress again. } - return Poll::Pending; // Nothing can make progress, return `Pending`. + return Poll::Pending; // Nothing can make progress, return + // `Pending`. } } @@ -482,7 +512,8 @@ fn compute_new_shutdown( ) -> Option { match (current_shutdown, handler_keep_alive) { (_, false) if idle_timeout == Duration::ZERO => Some(Shutdown::Asap), - (Shutdown::Later(_), false) => None, // Do nothing, i.e. let the shutdown timer continue to tick. + (Shutdown::Later(_), false) => None, // Do nothing, i.e. let the shutdown timer continue + // to tick. (_, false) => { let now = Instant::now(); let safe_keep_alive = checked_add_fraction(now, idle_timeout); @@ -493,10 +524,13 @@ fn compute_new_shutdown( } } -/// Repeatedly halves and adds the [`Duration`] to the [`Instant`] until [`Instant::checked_add`] succeeds. +/// Repeatedly halves and adds the [`Duration`] to the [`Instant`] until +/// [`Instant::checked_add`] succeeds. /// -/// [`Instant`] depends on the underlying platform and has a limit of which points in time it can represent. -/// The [`Duration`] computed by the this function may not be the longest possible that we can add to `now` but it will work. +/// [`Instant`] depends on the underlying platform and has a limit of which +/// points in time it can represent. 
The [`Duration`] computed by this +/// function may not be the longest possible that we can add to `now` but it +/// will work. fn checked_add_fraction(start: Instant, mut duration: Duration) -> Duration { while start.checked_add(duration).is_none() { tracing::debug!(start=?start, duration=?duration, "start + duration cannot be presented, halving duration"); @@ -507,7 +541,8 @@ fn checked_add_fraction(start: Instant, mut duration: Duration) -> Duration { duration } -/// Borrowed information about an incoming connection currently being negotiated. +/// Borrowed information about an incoming connection currently being +/// negotiated. #[derive(Debug, Copy, Clone)] pub(crate) struct IncomingInfo<'a> { /// Local connection address. @@ -658,10 +693,12 @@ enum SubstreamRequested { user_data: UserData, timeout: Delay, upgrade: Upgrade, - /// A waker to notify our [`FuturesUnordered`] that we have extracted the data. + /// A waker to notify our [`FuturesUnordered`] that we have extracted + /// the data. /// - /// This will ensure that we will get polled again in the next iteration which allows us to - /// resolve with `Ok(())` and be removed from the [`FuturesUnordered`]. + /// This will ensure that we will get polled again in the next iteration + /// which allows us to resolve with `Ok(())` and be removed from + /// the [`FuturesUnordered`]. extracted_waker: Option, }, Done, @@ -746,8 +783,8 @@ enum Shutdown { Later(Delay), } -// Structure used to avoid allocations when storing the protocols in the `HashMap. -// Instead of allocating a new `String` for the key, +// Structure used to avoid allocations when storing the protocols in the +// `HashMap`. Instead of allocating a new `String` for the key, // we use `T::as_ref()` in `Hash`, `Eq` and `PartialEq` requirements. 
pub(crate) struct AsStrHashEq(pub(crate) T); @@ -767,19 +804,23 @@ impl> std::hash::Hash for AsStrHashEq { #[cfg(test)] mod tests { - use super::*; - use crate::dummy; - use futures::future; - use futures::AsyncRead; - use futures::AsyncWrite; - use libp2p_core::upgrade::{DeniedUpgrade, InboundUpgrade, OutboundUpgrade, UpgradeInfo}; - use libp2p_core::StreamMuxer; + use std::{ + convert::Infallible, + sync::{Arc, Weak}, + time::Instant, + }; + + use futures::{future, AsyncRead, AsyncWrite}; + use libp2p_core::{ + upgrade::{DeniedUpgrade, InboundUpgrade, OutboundUpgrade, UpgradeInfo}, + StreamMuxer, + }; use quickcheck::*; - use std::convert::Infallible; - use std::sync::{Arc, Weak}; - use std::time::Instant; use tracing_subscriber::EnvFilter; + use super::*; + use crate::dummy; + #[test] fn max_negotiating_inbound_streams() { let _ = tracing_subscriber::fmt() @@ -906,7 +947,8 @@ mod tests { ); assert!(connection.handler.remote_removed.is_empty()); - // Third, stop listening on a protocol it never advertised (we can't control what handlers do so this needs to be handled gracefully). + // Third, stop listening on a protocol it never advertised (we can't control + // what handlers do so this needs to be handled gracefully). connection.handler.remote_removes_support_for(&["/baz"]); let _ = connection.poll_noop_waker(); diff --git a/swarm/src/connection/error.rs b/swarm/src/connection/error.rs index 33aa81c19a9..eeff75e5845 100644 --- a/swarm/src/connection/error.rs +++ b/swarm/src/connection/error.rs @@ -18,11 +18,10 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::transport::TransportError; -use crate::Multiaddr; -use crate::{ConnectedPoint, PeerId}; use std::{fmt, io}; +use crate::{transport::TransportError, ConnectedPoint, Multiaddr, PeerId}; + /// Errors that can occur in the context of an established `Connection`. 
#[derive(Debug)] pub enum ConnectionError { @@ -62,9 +61,9 @@ impl From for ConnectionError { /// Errors that can occur in the context of a pending outgoing `Connection`. /// -/// Note: Addresses for an outbound connection are dialed in parallel. Thus, compared to -/// [`PendingInboundConnectionError`], one or more [`TransportError`]s can occur for a single -/// connection. +/// Note: Addresses for an outbound connection are dialed in parallel. Thus, +/// compared to [`PendingInboundConnectionError`], one or more +/// [`TransportError`]s can occur for a single connection. pub(crate) type PendingOutboundConnectionError = PendingConnectionError)>>; @@ -74,7 +73,8 @@ pub(crate) type PendingInboundConnectionError = PendingConnectionError { - /// An error occurred while negotiating the transport protocol(s) on a connection. + /// An error occurred while negotiating the transport protocol(s) on a + /// connection. Transport(TTransErr), /// Pending connection attempt has been aborted. diff --git a/swarm/src/connection/pool.rs b/swarm/src/connection/pool.rs index b2accf745ef..6f7933a0288 100644 --- a/swarm/src/connection/pool.rs +++ b/swarm/src/connection/pool.rs @@ -18,41 +18,52 @@ // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::connection::{Connection, ConnectionId, PendingPoint}; -use crate::{ - connection::{ - Connected, ConnectionError, IncomingInfo, PendingConnectionError, - PendingInboundConnectionError, PendingOutboundConnectionError, - }, - transport::TransportError, - ConnectedPoint, ConnectionHandler, Executor, Multiaddr, PeerId, +use std::{ + collections::HashMap, + convert::Infallible, + fmt, + num::{NonZeroU8, NonZeroUsize}, + pin::Pin, + task::{Context, Poll, Waker}, }; + use concurrent_dial::ConcurrentDial; use fnv::FnvHashMap; -use futures::prelude::*; -use futures::stream::SelectAll; use futures::{ channel::{mpsc, oneshot}, future::{poll_fn, BoxFuture, Either}, + prelude::*, ready, - stream::FuturesUnordered, + stream::{FuturesUnordered, SelectAll}, }; -use libp2p_core::connection::Endpoint; -use libp2p_core::muxing::{StreamMuxerBox, StreamMuxerExt}; -use libp2p_core::transport::PortUse; -use std::convert::Infallible; -use std::task::Waker; -use std::{ - collections::HashMap, - fmt, - num::{NonZeroU8, NonZeroUsize}, - pin::Pin, - task::Context, - task::Poll, +use libp2p_core::{ + connection::Endpoint, + muxing::{StreamMuxerBox, StreamMuxerExt}, + transport::PortUse, }; use tracing::Instrument; use web_time::{Duration, Instant}; +use crate::{ + connection::{ + Connected, + Connection, + ConnectionError, + ConnectionId, + IncomingInfo, + PendingConnectionError, + PendingInboundConnectionError, + PendingOutboundConnectionError, + PendingPoint, + }, + transport::TransportError, + ConnectedPoint, + ConnectionHandler, + Executor, + Multiaddr, + PeerId, +}; + mod concurrent_dial; mod task; @@ -92,7 +103,8 @@ where /// The connection counter(s). counters: ConnectionCounters, - /// The managed connections of each peer that are currently considered established. + /// The managed connections of each peer that are currently considered + /// established. established: FnvHashMap< PeerId, FnvHashMap>, @@ -104,22 +116,25 @@ where /// Size of the task command buffer (per task). 
task_command_buffer_size: usize, - /// Number of addresses concurrently dialed for a single outbound connection attempt. + /// Number of addresses concurrently dialed for a single outbound connection + /// attempt. dial_concurrency_factor: NonZeroU8, /// The configured override for substream protocol upgrades, if any. substream_upgrade_protocol_override: Option, - /// The maximum number of inbound streams concurrently negotiating on a connection. + /// The maximum number of inbound streams concurrently negotiating on a + /// connection. /// /// See [`Connection::max_negotiating_inbound_streams`]. max_negotiating_inbound_streams: usize, - /// How many [`task::EstablishedConnectionEvent`]s can be buffered before the connection is back-pressured. + /// How many [`task::EstablishedConnectionEvent`]s can be buffered before + /// the connection is back-pressured. per_connection_event_buffer_size: usize, - /// The executor to use for running connection tasks. Can either be a global executor - /// or a local queue. + /// The executor to use for running connection tasks. Can either be a global + /// executor or a local queue. executor: ExecSwitch, /// Sender distributed to pending tasks for reporting events back @@ -171,7 +186,8 @@ impl EstablishedConnection { /// Checks if `notify_handler` is ready to accept an event. /// - /// Returns `Ok(())` if the handler is ready to receive an event via `notify_handler`. + /// Returns `Ok(())` if the handler is ready to receive an event via + /// `notify_handler`. /// /// Returns `Err(())` if the background task associated with the connection /// is terminating and the connection is about to close. @@ -201,7 +217,8 @@ struct PendingConnection { endpoint: PendingPoint, /// When dropped, notifies the task which then knows to terminate. abort_notifier: Option>, - /// The moment we became aware of this possible connection, useful for timing metrics. 
+    /// The moment we became aware of this possible connection, useful for +    /// timing metrics. accepted_at: Instant, } @@ -247,13 +264,12 @@ pub(crate) enum PoolEvent { /// /// A connection may close if /// - /// * it encounters an error, which includes the connection being - /// closed by the remote. In this case `error` is `Some`. + /// * it encounters an error, which includes the connection being closed + /// by the remote. In this case `error` is `Some`. /// * it was actively closed by [`EstablishedConnection::start_close`], /// i.e. a successful, orderly close. - /// * it was actively closed by [`Pool::disconnect`], i.e. - /// dropped without an orderly close. - /// + /// * it was actively closed by [`Pool::disconnect`], i.e. dropped without + /// an orderly close. ConnectionClosed { id: ConnectionId, /// Information about the connection that errored. @@ -354,7 +370,8 @@ where /// Returns true if we are connected to the given peer. /// - /// This will return true only after a `NodeReached` event has been produced by `poll()`. + /// This will return true only after a `NodeReached` event has been produced + /// by `poll()`. pub(crate) fn is_connected(&self, id: PeerId) -> bool { self.established.contains_key(&id) } @@ -811,10 +828,12 @@ where /// Opaque type for a new connection. /// -/// This connection has just been established but isn't part of the [`Pool`] yet. -/// It either needs to be spawned via [`Pool::spawn_connection`] or dropped if undesired. +/// This connection has just been established but isn't part of the [`Pool`] +/// yet. It either needs to be spawned via [`Pool::spawn_connection`] or dropped +/// if undesired. /// -/// On drop, this type send the connection back to the [`Pool`] where it will be gracefully closed. +/// On drop, this type sends the connection back to the [`Pool`] where it will be +/// gracefully closed. 
#[derive(Debug)] pub(crate) struct NewConnection { connection: Option, @@ -967,17 +986,19 @@ pub(crate) struct PoolConfig { pub(crate) executor: Option>, /// Size of the task command buffer (per task). pub(crate) task_command_buffer_size: usize, - /// Size of the pending connection task event buffer and the established connection task event - /// buffer. + /// Size of the pending connection task event buffer and the established + /// connection task event buffer. pub(crate) per_connection_event_buffer_size: usize, - /// Number of addresses concurrently dialed for a single outbound connection attempt. + /// Number of addresses concurrently dialed for a single outbound connection + /// attempt. pub(crate) dial_concurrency_factor: NonZeroU8, /// How long a connection should be kept alive once it is idling. pub(crate) idle_connection_timeout: Duration, /// The configured override for substream protocol upgrades, if any. substream_upgrade_protocol_override: Option, - /// The maximum number of inbound streams concurrently negotiating on a connection. + /// The maximum number of inbound streams concurrently negotiating on a + /// connection. /// /// See [`Connection::max_negotiating_inbound_streams`]. max_negotiating_inbound_streams: usize, @@ -997,29 +1018,31 @@ impl PoolConfig { } /// Sets the maximum number of events sent to a connection's background task - /// that may be buffered, if the task cannot keep up with their consumption and - /// delivery to the connection handler. + /// that may be buffered, if the task cannot keep up with their consumption + /// and delivery to the connection handler. /// - /// When the buffer for a particular connection is full, `notify_handler` will no - /// longer be able to deliver events to the associated [`Connection`], - /// thus exerting back-pressure on the connection and peer API. 
+ /// When the buffer for a particular connection is full, `notify_handler` + /// will no longer be able to deliver events to the associated + /// [`Connection`], thus exerting back-pressure on the connection and + /// peer API. pub(crate) fn with_notify_handler_buffer_size(mut self, n: NonZeroUsize) -> Self { self.task_command_buffer_size = n.get() - 1; self } - /// Sets the maximum number of buffered connection events (beyond a guaranteed - /// buffer of 1 event per connection). + /// Sets the maximum number of buffered connection events (beyond a + /// guaranteed buffer of 1 event per connection). /// - /// When the buffer is full, the background tasks of all connections will stall. - /// In this way, the consumers of network events exert back-pressure on - /// the network connection I/O. + /// When the buffer is full, the background tasks of all connections will + /// stall. In this way, the consumers of network events exert + /// back-pressure on the network connection I/O. pub(crate) fn with_per_connection_event_buffer_size(mut self, n: usize) -> Self { self.per_connection_event_buffer_size = n; self } - /// Number of addresses concurrently dialed for a single outbound connection attempt. + /// Number of addresses concurrently dialed for a single outbound connection + /// attempt. pub(crate) fn with_dial_concurrency_factor(mut self, factor: NonZeroU8) -> Self { self.dial_concurrency_factor = factor; self @@ -1034,7 +1057,8 @@ impl PoolConfig { self } - /// The maximum number of inbound streams concurrently negotiating on a connection. + /// The maximum number of inbound streams concurrently negotiating on a + /// connection. /// /// See [`Connection::max_negotiating_inbound_streams`]. 
pub(crate) fn with_max_negotiating_inbound_streams(mut self, v: usize) -> Self { diff --git a/swarm/src/connection/pool/concurrent_dial.rs b/swarm/src/connection/pool/concurrent_dial.rs index 57e4b078098..99f0b385884 100644 --- a/swarm/src/connection/pool/concurrent_dial.rs +++ b/swarm/src/connection/pool/concurrent_dial.rs @@ -18,7 +18,12 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::{transport::TransportError, Multiaddr}; +use std::{ + num::NonZeroU8, + pin::Pin, + task::{Context, Poll}, +}; + use futures::{ future::{BoxFuture, Future}, ready, @@ -26,11 +31,8 @@ use futures::{ }; use libp2p_core::muxing::StreamMuxerBox; use libp2p_identity::PeerId; -use std::{ - num::NonZeroU8, - pin::Pin, - task::{Context, Poll}, -}; + +use crate::{transport::TransportError, Multiaddr}; type Dial = BoxFuture< 'static, diff --git a/swarm/src/connection/pool/task.rs b/swarm/src/connection/pool/task.rs index 3b808a30fd1..b8c28364fa5 100644 --- a/swarm/src/connection/pool/task.rs +++ b/swarm/src/connection/pool/task.rs @@ -19,25 +19,33 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -//! Async functions driving pending and established connections in the form of a task. +//! Async functions driving pending and established connections in the form of a +//! task. 
+ +use std::{convert::Infallible, pin::Pin}; + +use futures::{ + channel::{mpsc, oneshot}, + future::{poll_fn, Either, Future}, + SinkExt, + StreamExt, +}; +use libp2p_core::muxing::StreamMuxerBox; use super::concurrent_dial::ConcurrentDial; use crate::{ connection::{ - self, ConnectionError, ConnectionId, PendingInboundConnectionError, + self, + ConnectionError, + ConnectionId, + PendingInboundConnectionError, PendingOutboundConnectionError, }, transport::TransportError, - ConnectionHandler, Multiaddr, PeerId, -}; -use futures::{ - channel::{mpsc, oneshot}, - future::{poll_fn, Either, Future}, - SinkExt, StreamExt, + ConnectionHandler, + Multiaddr, + PeerId, }; -use libp2p_core::muxing::StreamMuxerBox; -use std::convert::Infallible; -use std::pin::Pin; /// Commands that can be sent to a task driving an established connection. #[derive(Debug)] diff --git a/swarm/src/connection/supported_protocols.rs b/swarm/src/connection/supported_protocols.rs index 124ec93d669..c167bf88649 100644 --- a/swarm/src/connection/supported_protocols.rs +++ b/swarm/src/connection/supported_protocols.rs @@ -1,7 +1,7 @@ -use crate::handler::ProtocolsChange; -use crate::StreamProtocol; use std::collections::HashSet; +use crate::{handler::ProtocolsChange, StreamProtocol}; + #[derive(Default, Clone, Debug)] pub struct SupportedProtocols { protocols: HashSet, diff --git a/swarm/src/dial_opts.rs b/swarm/src/dial_opts.rs index 4f5b621327c..559e41bdecc 100644 --- a/swarm/src/dial_opts.rs +++ b/swarm/src/dial_opts.rs @@ -19,14 +19,13 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::ConnectionId; -use libp2p_core::connection::Endpoint; -use libp2p_core::multiaddr::Protocol; -use libp2p_core::transport::PortUse; -use libp2p_core::Multiaddr; -use libp2p_identity::PeerId; use std::num::NonZeroU8; +use libp2p_core::{connection::Endpoint, multiaddr::Protocol, transport::PortUse, Multiaddr}; +use libp2p_identity::PeerId; + +use crate::ConnectionId; + macro_rules! fn_override_role { () => { /// Override role of local node on connection. I.e. execute the dial _as a @@ -45,8 +44,8 @@ macro_rules! fn_override_role { macro_rules! fn_allocate_new_port { () => { /// Enforce the allocation of a new port. - /// Default behaviour is best effort reuse of existing ports. If there is no existing - /// fitting listener, a new port is allocated. + /// Default behaviour is best effort reuse of existing ports. If there is no + /// existing fitting listener, a new port is allocated. pub fn allocate_new_port(mut self) -> Self { self.port_use = PortUse::New; self @@ -110,8 +109,9 @@ impl DialOpts { WithoutPeerId {} } - /// Retrieves the [`PeerId`] from the [`DialOpts`] if specified or otherwise tries to extract it - /// from the multihash in the `/p2p` part of the address, if present. + /// Retrieves the [`PeerId`] from the [`DialOpts`] if specified or otherwise + /// tries to extract it from the multihash in the `/p2p` part of the + /// address, if present. pub fn get_peer_id(&self) -> Option { if let Some(peer_id) = self.peer_id { return Some(peer_id); @@ -130,7 +130,8 @@ impl DialOpts { /// Get the [`ConnectionId`] of this dial attempt. /// /// All future events of this dial will be associated with this ID. - /// See [`DialFailure`](crate::DialFailure) and [`ConnectionEstablished`](crate::behaviour::ConnectionEstablished). + /// See [`DialFailure`](crate::DialFailure) and + /// [`ConnectionEstablished`](crate::behaviour::ConnectionEstablished). 
pub fn connection_id(&self) -> ConnectionId { self.connection_id } @@ -189,7 +190,8 @@ impl WithPeerId { } /// Override - /// Number of addresses concurrently dialed for a single outbound connection attempt. + /// Number of addresses concurrently dialed for a single outbound connection + /// attempt. pub fn override_dial_concurrency_factor(mut self, factor: NonZeroU8) -> Self { self.dial_concurrency_factor_override = Some(factor); self @@ -255,7 +257,8 @@ impl WithPeerIdWithAddresses { fn_allocate_new_port!(); /// Override - /// Number of addresses concurrently dialed for a single outbound connection attempt. + /// Number of addresses concurrently dialed for a single outbound connection + /// attempt. pub fn override_dial_concurrency_factor(mut self, factor: NonZeroU8) -> Self { self.dial_concurrency_factor_override = Some(factor); self @@ -324,8 +327,8 @@ impl WithoutPeerIdWithAddress { /// # use libp2p_identity::PeerId; /// # /// DialOpts::peer_id(PeerId::random()) -/// .condition(PeerCondition::Disconnected) -/// .build(); +/// .condition(PeerCondition::Disconnected) +/// .build(); /// ``` #[derive(Debug, Copy, Clone, Default)] pub enum PeerCondition { diff --git a/swarm/src/dummy.rs b/swarm/src/dummy.rs index b87ef32c8f7..c5c3cb3c200 100644 --- a/swarm/src/dummy.rs +++ b/swarm/src/dummy.rs @@ -1,19 +1,23 @@ -use crate::behaviour::{FromSwarm, NetworkBehaviour, ToSwarm}; -use crate::connection::ConnectionId; -use crate::handler::{ - ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound, +use std::{ + convert::Infallible, + task::{Context, Poll}, }; + +use libp2p_core::{transport::PortUse, upgrade::DeniedUpgrade, Endpoint, Multiaddr}; +use libp2p_identity::PeerId; + use crate::{ - ConnectionDenied, ConnectionHandlerEvent, StreamUpgradeError, SubstreamProtocol, THandler, - THandlerInEvent, THandlerOutEvent, + behaviour::{FromSwarm, NetworkBehaviour, ToSwarm}, + connection::ConnectionId, + handler::{ConnectionEvent, DialUpgradeError, 
FullyNegotiatedInbound, FullyNegotiatedOutbound}, + ConnectionDenied, + ConnectionHandlerEvent, + StreamUpgradeError, + SubstreamProtocol, + THandler, + THandlerInEvent, + THandlerOutEvent, }; -use libp2p_core::transport::PortUse; -use libp2p_core::upgrade::DeniedUpgrade; -use libp2p_core::Endpoint; -use libp2p_core::Multiaddr; -use libp2p_identity::PeerId; -use std::convert::Infallible; -use std::task::{Context, Poll}; /// Implementation of [`NetworkBehaviour`] that doesn't do anything. pub struct Behaviour; @@ -61,7 +65,8 @@ impl NetworkBehaviour for Behaviour { fn on_swarm_event(&mut self, _event: FromSwarm) {} } -/// An implementation of [`ConnectionHandler`] that neither handles any protocols nor does it keep the connection alive. +/// An implementation of [`ConnectionHandler`] that neither handles any +/// protocols nor does it keep the connection alive. #[derive(Clone)] pub struct ConnectionHandler; diff --git a/swarm/src/executor.rs b/swarm/src/executor.rs index a2abbbde6ef..cc3e2782370 100644 --- a/swarm/src/executor.rs +++ b/swarm/src/executor.rs @@ -1,14 +1,19 @@ //! Provides executors for spawning background tasks. -use futures::executor::ThreadPool; use std::{future::Future, pin::Pin}; +use futures::executor::ThreadPool; + /// Implemented on objects that can run a `Future` in the background. /// -/// > **Note**: While it may be tempting to implement this trait on types such as -/// > [`futures::stream::FuturesUnordered`], please note that passing an `Executor` is -/// > optional, and that `FuturesUnordered` (or a similar struct) will automatically -/// > be used as fallback by libp2p. The `Executor` trait should therefore only be -/// > about running `Future`s on a separate task. 
+/// > **Note**: While it may be tempting to implement this trait on types such +/// > as +/// > [`futures::stream::FuturesUnordered`], please note that passing an +/// > `Executor` is +/// > optional, and that `FuturesUnordered` (or a similar struct) will +/// > automatically +/// > be used as fallback by libp2p. The `Executor` trait should therefore only +/// > be +/// > about running `Future`s on a separate task. pub trait Executor { /// Run the given future in the background until it ends. #[track_caller] diff --git a/swarm/src/handler.rs b/swarm/src/handler.rs index 9e31592d68d..352848d3ded 100644 --- a/swarm/src/handler.rs +++ b/swarm/src/handler.rs @@ -18,25 +18,29 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -//! Once a connection to a remote peer is established, a [`ConnectionHandler`] negotiates -//! and handles one or more specific protocols on the connection. +//! Once a connection to a remote peer is established, a [`ConnectionHandler`] +//! negotiates and handles one or more specific protocols on the connection. //! -//! Protocols are negotiated and used on individual substreams of the connection. Thus a -//! [`ConnectionHandler`] defines the inbound and outbound upgrades to apply when creating a new -//! inbound or outbound substream, respectively, and is notified by a [`Swarm`](crate::Swarm) when -//! these upgrades have been successfully applied, including the final output of the upgrade. A -//! [`ConnectionHandler`] can then continue communicating with the peer over the substream using the -//! negotiated protocol(s). +//! Protocols are negotiated and used on individual substreams of the +//! connection. Thus a [`ConnectionHandler`] defines the inbound and outbound +//! upgrades to apply when creating a new inbound or outbound substream, +//! respectively, and is notified by a [`Swarm`](crate::Swarm) when +//! these upgrades have been successfully applied, including the final output of +//! 
the upgrade. A [`ConnectionHandler`] can then continue communicating with +//! the peer over the substream using the negotiated protocol(s). //! -//! Two [`ConnectionHandler`]s can be composed with [`ConnectionHandler::select()`] -//! in order to build a new handler supporting the combined set of protocols, -//! with methods being dispatched to the appropriate handler according to the -//! used protocol(s) determined by the associated types of the handlers. +//! Two [`ConnectionHandler`]s can be composed with +//! [`ConnectionHandler::select()`] in order to build a new handler supporting +//! the combined set of protocols, with methods being dispatched to the +//! appropriate handler according to the used protocol(s) determined by the +//! associated types of the handlers. //! -//! > **Note**: A [`ConnectionHandler`] handles one or more protocols in the context of a single -//! > connection with a remote. In order to handle a protocol that requires knowledge of -//! > the network as a whole, see the -//! > [`NetworkBehaviour`](crate::behaviour::NetworkBehaviour) trait. +//! > **Note**: A [`ConnectionHandler`] handles one or more protocols in the +//! > context of a single +//! > connection with a remote. In order to handle a protocol that requires +//! > knowledge of +//! > the network as a whole, see the +//! > [`NetworkBehaviour`](crate::behaviour::NetworkBehaviour) trait. 
pub mod either; mod map_in; @@ -46,8 +50,17 @@ mod one_shot; mod pending; mod select; -use crate::connection::AsStrHashEq; -pub use crate::upgrade::{InboundUpgradeSend, OutboundUpgradeSend, SendWrapper, UpgradeInfoSend}; +use core::slice; +use std::{ + collections::{HashMap, HashSet}, + error, + fmt, + io, + task::{Context, Poll}, + time::Duration, +}; + +use libp2p_core::Multiaddr; pub use map_in::MapInEvent; pub use map_out::MapOutEvent; pub use one_shot::{OneShotHandler, OneShotHandlerConfig}; @@ -55,11 +68,8 @@ pub use pending::PendingConnectionHandler; pub use select::ConnectionHandlerSelect; use smallvec::SmallVec; -use crate::StreamProtocol; -use core::slice; -use libp2p_core::Multiaddr; -use std::collections::{HashMap, HashSet}; -use std::{error, fmt, io, task::Context, task::Poll, time::Duration}; +pub use crate::upgrade::{InboundUpgradeSend, OutboundUpgradeSend, SendWrapper, UpgradeInfoSend}; +use crate::{connection::AsStrHashEq, StreamProtocol}; /// A handler for a set of protocols used on a connection with a remote. /// @@ -68,36 +78,49 @@ use std::{error, fmt, io, task::Context, task::Poll, time::Duration}; /// /// # Handling a protocol /// -/// Communication with a remote over a set of protocols is initiated in one of two ways: +/// Communication with a remote over a set of protocols is initiated in one of +/// two ways: /// /// 1. Dialing by initiating a new outbound substream. In order to do so, -/// [`ConnectionHandler::poll()`] must return an [`ConnectionHandlerEvent::OutboundSubstreamRequest`], -/// providing an instance of [`libp2p_core::upgrade::OutboundUpgrade`] that is used to negotiate the -/// protocol(s). Upon success, [`ConnectionHandler::on_connection_event`] is called with -/// [`ConnectionEvent::FullyNegotiatedOutbound`] translating the final output of the upgrade. -/// -/// 2. Listening by accepting a new inbound substream. 
When a new inbound substream -/// is created on a connection, [`ConnectionHandler::listen_protocol`] is called -/// to obtain an instance of [`libp2p_core::upgrade::InboundUpgrade`] that is used to +/// [`ConnectionHandler::poll()`] must return an +/// [`ConnectionHandlerEvent::OutboundSubstreamRequest`], providing an +/// instance of [`libp2p_core::upgrade::OutboundUpgrade`] that is used to /// negotiate the protocol(s). Upon success, -/// [`ConnectionHandler::on_connection_event`] is called with [`ConnectionEvent::FullyNegotiatedInbound`] -/// translating the final output of the upgrade. +/// [`ConnectionHandler::on_connection_event`] is called with +/// [`ConnectionEvent::FullyNegotiatedOutbound`] translating the final +/// output of the upgrade. +/// +/// 2. Listening by accepting a new inbound substream. When a new inbound +/// substream is created on a connection, +/// [`ConnectionHandler::listen_protocol`] is called to obtain an instance +/// of [`libp2p_core::upgrade::InboundUpgrade`] that is used to negotiate +/// the protocol(s). Upon success, +/// [`ConnectionHandler::on_connection_event`] is called with +/// [`ConnectionEvent::FullyNegotiatedInbound`] translating the final +/// output of the upgrade. /// /// /// # Connection Keep-Alive /// -/// A [`ConnectionHandler`] can influence the lifetime of the underlying connection -/// through [`ConnectionHandler::connection_keep_alive`]. That is, the protocol -/// implemented by the handler can include conditions for terminating the connection. -/// The lifetime of successfully negotiated substreams is fully controlled by the handler. +/// A [`ConnectionHandler`] can influence the lifetime of the underlying +/// connection through [`ConnectionHandler::connection_keep_alive`]. That is, +/// the protocol implemented by the handler can include conditions for +/// terminating the connection. The lifetime of successfully negotiated +/// substreams is fully controlled by the handler. 
/// -/// Implementors of this trait should keep in mind that the connection can be closed at any time. -/// When a connection is closed gracefully, the substreams used by the handler may still -/// continue reading data until the remote closes its side of the connection. +/// Implementors of this trait should keep in mind that the connection can be +/// closed at any time. When a connection is closed gracefully, the substreams +/// used by the handler may still continue reading data until the remote closes +/// its side of the connection. pub trait ConnectionHandler: Send + 'static { - /// A type representing the message(s) a [`NetworkBehaviour`](crate::behaviour::NetworkBehaviour) can send to a [`ConnectionHandler`] via [`ToSwarm::NotifyHandler`](crate::behaviour::ToSwarm::NotifyHandler) + /// A type representing the message(s) a + /// [`NetworkBehaviour`](crate::behaviour::NetworkBehaviour) can send to a + /// [`ConnectionHandler`] via + /// [`ToSwarm::NotifyHandler`](crate::behaviour::ToSwarm::NotifyHandler) type FromBehaviour: fmt::Debug + Send + 'static; - /// A type representing message(s) a [`ConnectionHandler`] can send to a [`NetworkBehaviour`](crate::behaviour::NetworkBehaviour) via [`ConnectionHandlerEvent::NotifyBehaviour`]. + /// A type representing message(s) a [`ConnectionHandler`] can send to a + /// [`NetworkBehaviour`](crate::behaviour::NetworkBehaviour) via + /// [`ConnectionHandlerEvent::NotifyBehaviour`]. type ToBehaviour: fmt::Debug + Send + 'static; /// The inbound upgrade for the protocol(s) used by the handler. type InboundProtocol: InboundUpgradeSend; @@ -105,16 +128,20 @@ pub trait ConnectionHandler: Send + 'static { type OutboundProtocol: OutboundUpgradeSend; /// The type of additional information returned from `listen_protocol`. type InboundOpenInfo: Send + 'static; - /// The type of additional information passed to an `OutboundSubstreamRequest`. + /// The type of additional information passed to an + /// `OutboundSubstreamRequest`. 
type OutboundOpenInfo: Send + 'static; - /// The [`InboundUpgrade`](libp2p_core::upgrade::InboundUpgrade) to apply on inbound - /// substreams to negotiate the desired protocols. + /// The [`InboundUpgrade`](libp2p_core::upgrade::InboundUpgrade) to apply on + /// inbound substreams to negotiate the desired protocols. /// - /// > **Note**: The returned `InboundUpgrade` should always accept all the generally - /// > supported protocols, even if in a specific context a particular one is - /// > not supported, (eg. when only allowing one substream at a time for a protocol). - /// > This allows a remote to put the list of supported protocols in a cache. + /// > **Note**: The returned `InboundUpgrade` should always accept all the + /// > generally + /// > supported protocols, even if in a specific context a particular one is + /// > not supported, (eg. when only allowing one substream at a time for a + /// > protocol). + /// > This allows a remote to put the list of supported protocols in a + /// > cache. fn listen_protocol(&self) -> SubstreamProtocol; /// Returns whether the connection should be kept alive. @@ -127,15 +154,21 @@ pub trait ConnectionHandler: Send + 'static { /// - We are negotiating inbound or outbound streams. /// - There are active [`Stream`](crate::Stream)s on the connection. /// - /// The combination of the above means that _most_ protocols will not need to override this method. - /// This method is only invoked when all of the above are `false`, i.e. when the connection is entirely idle. + /// The combination of the above means that _most_ protocols will not need + /// to override this method. This method is only invoked when all of the + /// above are `false`, i.e. when the connection is entirely idle. /// /// ## Exceptions /// - /// - Protocols like [circuit-relay v2](https://github.com/libp2p/specs/blob/master/relay/circuit-v2.md) need to keep a connection alive beyond these circumstances and can thus override this method. 
-    /// - Protocols like [ping](https://github.com/libp2p/specs/blob/master/ping/ping.md) **don't** want to keep a connection alive despite an active streams.
+    /// - Protocols like [circuit-relay v2](https://github.com/libp2p/specs/blob/master/relay/circuit-v2.md)
+    ///   need to keep a connection alive beyond these circumstances and can
+    ///   thus override this method.
+    /// - Protocols like [ping](https://github.com/libp2p/specs/blob/master/ping/ping.md)
+    ///   **don't** want to keep a connection alive despite active streams.
     ///
-    /// In that case, protocol authors can use [`Stream::ignore_for_keep_alive`](crate::Stream::ignore_for_keep_alive) to opt-out a particular stream from the keep-alive algorithm.
+    /// In that case, protocol authors can use
+    /// [`Stream::ignore_for_keep_alive`](crate::Stream::ignore_for_keep_alive)
+    /// to opt out a particular stream from the keep-alive algorithm.
     fn connection_keep_alive(&self) -> bool {
         false
     }
@@ -150,17 +183,20 @@
 
     /// Gracefully close the [`ConnectionHandler`].
     ///
-    /// The contract for this function is equivalent to a [`Stream`](futures::Stream).
-    /// When a connection is being shut down, we will first poll this function to completion.
-    /// Following that, the physical connection will be shut down.
+    /// The contract for this function is equivalent to a
+    /// [`Stream`](futures::Stream). When a connection is being shut down,
+    /// we will first poll this function to completion. Following that, the
+    /// physical connection will be shut down.
     ///
-    /// This is also called when the shutdown was initiated due to an error on the connection.
-    /// We therefore cannot guarantee that performing IO within here will succeed.
+    /// This is also called when the shutdown was initiated due to an error on
+    /// the connection. We therefore cannot guarantee that performing IO
+    /// within here will succeed.
     ///
     /// To signal completion, [`Poll::Ready(None)`] should be returned.
/// - /// Implementations MUST have a [`fuse`](futures::StreamExt::fuse)-like behaviour. - /// That is, [`Poll::Ready(None)`] MUST be returned on repeated calls to [`ConnectionHandler::poll_close`]. + /// Implementations MUST have a [`fuse`](futures::StreamExt::fuse)-like + /// behaviour. That is, [`Poll::Ready(None)`] MUST be returned on + /// repeated calls to [`ConnectionHandler::poll_close`]. fn poll_close(&mut self, _: &mut Context<'_>) -> Poll> { Poll::Ready(None) } @@ -192,7 +228,8 @@ pub trait ConnectionHandler: Send + 'static { ConnectionHandlerSelect::new(self, other) } - /// Informs the handler about an event from the [`NetworkBehaviour`](super::NetworkBehaviour). + /// Informs the handler about an event from the + /// [`NetworkBehaviour`](super::NetworkBehaviour). fn on_behaviour_event(&mut self, _event: Self::FromBehaviour); fn on_connection_event( @@ -210,19 +247,25 @@ pub trait ConnectionHandler: Send + 'static { /// to pass to [`on_connection_event`](ConnectionHandler::on_connection_event). #[non_exhaustive] pub enum ConnectionEvent<'a, IP: InboundUpgradeSend, OP: OutboundUpgradeSend, IOI, OOI> { - /// Informs the handler about the output of a successful upgrade on a new inbound substream. + /// Informs the handler about the output of a successful upgrade on a new + /// inbound substream. FullyNegotiatedInbound(FullyNegotiatedInbound), - /// Informs the handler about the output of a successful upgrade on a new outbound stream. + /// Informs the handler about the output of a successful upgrade on a new + /// outbound stream. FullyNegotiatedOutbound(FullyNegotiatedOutbound), /// Informs the handler about a change in the address of the remote. AddressChange(AddressChange<'a>), - /// Informs the handler that upgrading an outbound substream to the given protocol has failed. + /// Informs the handler that upgrading an outbound substream to the given + /// protocol has failed. 
     DialUpgradeError(DialUpgradeError),
-    /// Informs the handler that upgrading an inbound substream to the given protocol has failed.
+    /// Informs the handler that upgrading an inbound substream to the given
+    /// protocol has failed.
     ListenUpgradeError(ListenUpgradeError),
-    /// The local [`ConnectionHandler`] added or removed support for one or more protocols.
+    /// The local [`ConnectionHandler`] added or removed support for one or more
+    /// protocols.
     LocalProtocolsChange(ProtocolsChange<'a>),
-    /// The remote [`ConnectionHandler`] now supports a different set of protocols.
+    /// The remote [`ConnectionHandler`] now supports a different set of
+    /// protocols.
     RemoteProtocolsChange(ProtocolsChange<'a>),
 }
@@ -297,10 +340,11 @@ impl
 /// [`ConnectionEvent`] variant that informs the handler about
 /// the output of a successful upgrade on a new inbound substream.
 ///
-/// Note that it is up to the [`ConnectionHandler`] implementation to manage the lifetime of the
-/// negotiated inbound substreams. E.g. the implementation has to enforce a limit on the number
-/// of simultaneously open negotiated inbound substreams. In other words it is up to the
-/// [`ConnectionHandler`] implementation to stop a malicious remote node to open and keep alive
+/// Note that it is up to the [`ConnectionHandler`] implementation to manage the
+/// lifetime of the negotiated inbound substreams. E.g. the implementation has
+/// to enforce a limit on the number of simultaneously open negotiated inbound
+/// substreams. In other words it is up to the [`ConnectionHandler`]
+/// implementation to stop a malicious remote node from opening and keeping
 /// an excessive amount of inbound substreams.
 #[derive(Debug)]
 pub struct FullyNegotiatedInbound {
@@ -308,7 +352,8 @@ pub struct FullyNegotiatedInbound {
     pub info: IOI,
 }
 
-/// [`ConnectionEvent`] variant that informs the handler about successful upgrade on a new outbound stream.
+/// [`ConnectionEvent`] variant that informs the handler about successful +/// upgrade on a new outbound stream. /// /// The `protocol` field is the information that was previously passed to /// [`ConnectionHandlerEvent::OutboundSubstreamRequest`]. @@ -318,13 +363,15 @@ pub struct FullyNegotiatedOutbound { pub info: OOI, } -/// [`ConnectionEvent`] variant that informs the handler about a change in the address of the remote. +/// [`ConnectionEvent`] variant that informs the handler about a change in the +/// address of the remote. #[derive(Debug)] pub struct AddressChange<'a> { pub new_address: &'a Multiaddr, } -/// [`ConnectionEvent`] variant that informs the handler about a change in the protocols supported on the connection. +/// [`ConnectionEvent`] variant that informs the handler about a change in the +/// protocols supported on the connection. #[derive(Debug, Clone)] pub enum ProtocolsChange<'a> { Added(ProtocolsAdded<'a>), @@ -349,9 +396,11 @@ impl<'a> ProtocolsChange<'a> { }) } - /// Compute the [`ProtocolsChange`] that results from adding `to_add` to `existing_protocols`. + /// Compute the [`ProtocolsChange`] that results from adding `to_add` to + /// `existing_protocols`. /// - /// Returns `None` if the change is a no-op, i.e. `to_add` is a subset of `existing_protocols`. + /// Returns `None` if the change is a no-op, i.e. `to_add` is a subset of + /// `existing_protocols`. pub(crate) fn add( existing_protocols: &HashSet, to_add: HashSet, @@ -373,9 +422,12 @@ impl<'a> ProtocolsChange<'a> { })) } - /// Compute the [`ProtocolsChange`] that results from removing `to_remove` from `existing_protocols`. Removes the protocols from `existing_protocols`. + /// Compute the [`ProtocolsChange`] that results from removing `to_remove` + /// from `existing_protocols`. Removes the protocols from + /// `existing_protocols`. /// - /// Returns `None` if the change is a no-op, i.e. none of the protocols in `to_remove` are in `existing_protocols`. 
+ /// Returns `None` if the change is a no-op, i.e. none of the protocols in + /// `to_remove` are in `existing_protocols`. pub(crate) fn remove( existing_protocols: &mut HashSet, to_remove: HashSet, @@ -397,7 +449,8 @@ impl<'a> ProtocolsChange<'a> { })) } - /// Compute the [`ProtocolsChange`]s required to go from `existing_protocols` to `new_protocols`. + /// Compute the [`ProtocolsChange`]s required to go from + /// `existing_protocols` to `new_protocols`. pub(crate) fn from_full_sets>( existing_protocols: &mut HashMap, bool>, new_protocols: impl IntoIterator, @@ -405,12 +458,14 @@ impl<'a> ProtocolsChange<'a> { ) -> SmallVec<[Self; 2]> { buffer.clear(); - // Initially, set the boolean for all protocols to `false`, meaning "not visited". + // Initially, set the boolean for all protocols to `false`, meaning "not + // visited". for v in existing_protocols.values_mut() { *v = false; } - let mut new_protocol_count = 0; // We can only iterate `new_protocols` once, so keep track of its length separately. + let mut new_protocol_count = 0; // We can only iterate `new_protocols` once, so keep track of its length + // separately. for new_protocol in new_protocols { existing_protocols .entry(AsStrHashEq(new_protocol)) @@ -429,7 +484,8 @@ impl<'a> ProtocolsChange<'a> { let num_new_protocols = buffer.len(); // Drain all protocols that we haven't visited. - // For existing protocols that are not in `new_protocols`, the boolean will be false, meaning we need to remove it. + // For existing protocols that are not in `new_protocols`, the boolean will be + // false, meaning we need to remove it. existing_protocols.retain(|p, &mut is_supported| { if !is_supported { buffer.extend(StreamProtocol::try_from_owned(p.0.as_ref().to_owned()).ok()); @@ -499,8 +555,9 @@ pub struct ListenUpgradeError { /// Configuration of inbound or outbound substream protocol(s) /// for a [`ConnectionHandler`]. 
/// -/// The inbound substream protocol(s) are defined by [`ConnectionHandler::listen_protocol`] -/// and the outbound substream protocol(s) by [`ConnectionHandlerEvent::OutboundSubstreamRequest`]. +/// The inbound substream protocol(s) are defined by +/// [`ConnectionHandler::listen_protocol`] and the outbound substream +/// protocol(s) by [`ConnectionHandlerEvent::OutboundSubstreamRequest`]. #[derive(Copy, Clone, Debug, PartialEq, Eq)] pub struct SubstreamProtocol { upgrade: TUpgrade, @@ -566,7 +623,8 @@ impl SubstreamProtocol { &self.timeout } - /// Converts the substream protocol configuration into the contained upgrade. + /// Converts the substream protocol configuration into the contained + /// upgrade. pub fn into_upgrade(self) -> (TUpgrade, TInfo) { (self.upgrade, self.info) } @@ -584,7 +642,8 @@ pub enum ConnectionHandlerEvent /// We learned something about the protocols supported by the remote. ReportRemoteProtocols(ProtocolSupport), - /// Event that is sent to a [`NetworkBehaviour`](crate::behaviour::NetworkBehaviour). + /// Event that is sent to a + /// [`NetworkBehaviour`](crate::behaviour::NetworkBehaviour). NotifyBehaviour(TCustom), } @@ -624,8 +683,8 @@ impl } } - /// If this is an `OutboundSubstreamRequest`, maps the protocol (`TConnectionUpgrade`) - /// to something else. + /// If this is an `OutboundSubstreamRequest`, maps the protocol + /// (`TConnectionUpgrade`) to something else. pub fn map_protocol(self, map: F) -> ConnectionHandlerEvent where F: FnOnce(TConnectionUpgrade) -> I, @@ -670,7 +729,8 @@ impl /// Error that can happen on an outbound substream opening attempt. #[derive(Debug)] pub enum StreamUpgradeError { - /// The opening attempt timed out before the negotiation was fully completed. + /// The opening attempt timed out before the negotiation was fully + /// completed. Timeout, /// The upgrade produced an error. 
Apply(TUpgrErr), diff --git a/swarm/src/handler/either.rs b/swarm/src/handler/either.rs index a5aab9b5fee..07878630f9f 100644 --- a/swarm/src/handler/either.rs +++ b/swarm/src/handler/either.rs @@ -18,14 +18,23 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::handler::{ - ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, FullyNegotiatedInbound, - InboundUpgradeSend, ListenUpgradeError, SubstreamProtocol, -}; -use crate::upgrade::SendWrapper; +use std::task::{Context, Poll}; + use either::Either; use futures::future; -use std::task::{Context, Poll}; + +use crate::{ + handler::{ + ConnectionEvent, + ConnectionHandler, + ConnectionHandlerEvent, + FullyNegotiatedInbound, + InboundUpgradeSend, + ListenUpgradeError, + SubstreamProtocol, + }, + upgrade::SendWrapper, +}; impl FullyNegotiatedInbound, SendWrapper>, Either> @@ -71,8 +80,8 @@ where } } -/// Implementation of a [`ConnectionHandler`] that represents either of two [`ConnectionHandler`] -/// implementations. +/// Implementation of a [`ConnectionHandler`] that represents either of two +/// [`ConnectionHandler`] implementations. impl ConnectionHandler for Either where L: ConnectionHandler, diff --git a/swarm/src/handler/map_in.rs b/swarm/src/handler/map_in.rs index 9316ef4d2ce..6941aef341c 100644 --- a/swarm/src/handler/map_in.rs +++ b/swarm/src/handler/map_in.rs @@ -18,12 +18,21 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. +use std::{ + fmt::Debug, + marker::PhantomData, + task::{Context, Poll}, +}; + use crate::handler::{ - ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, SubstreamProtocol, + ConnectionEvent, + ConnectionHandler, + ConnectionHandlerEvent, + SubstreamProtocol, }; -use std::{fmt::Debug, marker::PhantomData, task::Context, task::Poll}; -/// Wrapper around a protocol handler that turns the input event into something else. 
+/// Wrapper around a protocol handler that turns the input event into something +/// else. #[derive(Debug)] pub struct MapInEvent { inner: TConnectionHandler, diff --git a/swarm/src/handler/map_out.rs b/swarm/src/handler/map_out.rs index f877bfa6f64..459d0f54830 100644 --- a/swarm/src/handler/map_out.rs +++ b/swarm/src/handler/map_out.rs @@ -18,14 +18,22 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::handler::{ - ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, SubstreamProtocol, +use std::{ + fmt::Debug, + task::{Context, Poll}, }; + use futures::ready; -use std::fmt::Debug; -use std::task::{Context, Poll}; -/// Wrapper around a protocol handler that turns the output event into something else. +use crate::handler::{ + ConnectionEvent, + ConnectionHandler, + ConnectionHandlerEvent, + SubstreamProtocol, +}; + +/// Wrapper around a protocol handler that turns the output event into something +/// else. #[derive(Debug)] pub struct MapOutEvent { inner: TConnectionHandler, diff --git a/swarm/src/handler/multi.rs b/swarm/src/handler/multi.rs index 5efcde5c2bb..086e2585031 100644 --- a/swarm/src/handler/multi.rs +++ b/swarm/src/handler/multi.rs @@ -18,17 +18,9 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -//! A [`ConnectionHandler`] implementation that combines multiple other [`ConnectionHandler`]s -//! indexed by some key. +//! A [`ConnectionHandler`] implementation that combines multiple other +//! [`ConnectionHandler`]s indexed by some key. 
-use crate::handler::{
-    AddressChange, ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, DialUpgradeError,
-    FullyNegotiatedInbound, FullyNegotiatedOutbound, ListenUpgradeError, SubstreamProtocol,
-};
-use crate::upgrade::{InboundUpgradeSend, OutboundUpgradeSend, UpgradeInfoSend};
-use crate::Stream;
-use futures::{future::BoxFuture, prelude::*, ready};
-use rand::Rng;
 use std::{
     cmp,
     collections::{HashMap, HashSet},
@@ -40,7 +32,27 @@ use std::{
     time::Duration,
 };
 
-/// A [`ConnectionHandler`] for multiple [`ConnectionHandler`]s of the same type.
+use futures::{future::BoxFuture, prelude::*, ready};
+use rand::Rng;
+
+use crate::{
+    handler::{
+        AddressChange,
+        ConnectionEvent,
+        ConnectionHandler,
+        ConnectionHandlerEvent,
+        DialUpgradeError,
+        FullyNegotiatedInbound,
+        FullyNegotiatedOutbound,
+        ListenUpgradeError,
+        SubstreamProtocol,
+    },
+    upgrade::{InboundUpgradeSend, OutboundUpgradeSend, UpgradeInfoSend},
+    Stream,
+};
+
+/// A [`ConnectionHandler`] for multiple [`ConnectionHandler`]s of the same
+/// type.
 #[derive(Clone)]
 pub struct MultiHandler {
     handlers: HashMap,
@@ -65,7 +77,8 @@ where
 {
     /// Create and populate a `MultiHandler` from the given handler iterator.
     ///
-    /// It is an error for any two protocols handlers to share the same protocol name.
+    /// It is an error for any two protocol handlers to share the same protocol
+    /// name.
     pub fn try_from_iter(iter: I) -> Result
     where
         I: IntoIterator,
@@ -242,13 +255,14 @@ where
     ) -> Poll<
         ConnectionHandlerEvent,
     > {
-        // Calling `gen_range(0, 0)` (see below) would panic, so we have return early to avoid
-        // that situation.
+        // Calling `gen_range(0, 0)` (see below) would panic, so we have to return early
+        // to avoid that situation.
         if self.handlers.is_empty() {
             return Poll::Pending;
         }
 
-        // Not always polling handlers in the same order should give anyone the chance to make progress.
+        // Not always polling handlers in the same order should give anyone the chance
+        // to make progress.
         let pos = rand::thread_rng().gen_range(0..self.handlers.len());
 
         for (k, h) in self.handlers.iter_mut().skip(pos) {
diff --git a/swarm/src/handler/one_shot.rs b/swarm/src/handler/one_shot.rs
index 7c84f4bb11a..a7b9651abb5 100644
--- a/swarm/src/handler/one_shot.rs
+++ b/swarm/src/handler/one_shot.rs
@@ -18,14 +18,28 @@
 // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 // DEALINGS IN THE SOFTWARE.
 
-use crate::handler::{
-    ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, DialUpgradeError,
-    FullyNegotiatedInbound, FullyNegotiatedOutbound, SubstreamProtocol,
+use std::{
+    error,
+    fmt::Debug,
+    task::{Context, Poll},
+    time::Duration,
 };
-use crate::upgrade::{InboundUpgradeSend, OutboundUpgradeSend};
-use crate::StreamUpgradeError;
+
 use smallvec::SmallVec;
-use std::{error, fmt::Debug, task::Context, task::Poll, time::Duration};
+
+use crate::{
+    handler::{
+        ConnectionEvent,
+        ConnectionHandler,
+        ConnectionHandlerEvent,
+        DialUpgradeError,
+        FullyNegotiatedInbound,
+        FullyNegotiatedOutbound,
+        SubstreamProtocol,
+    },
+    upgrade::{InboundUpgradeSend, OutboundUpgradeSend},
+    StreamUpgradeError,
+};
 
 /// A [`ConnectionHandler`] that opens a new substream for each request.
 // TODO: Debug
@@ -70,16 +84,18 @@ where
 
     /// Returns a reference to the listen protocol configuration.
     ///
-    /// > **Note**: If you modify the protocol, modifications will only applies to future inbound
-    /// > substreams, not the ones already being negotiated.
+    /// > **Note**: If you modify the protocol, modifications will only apply
+    /// > to future inbound
+    /// > substreams, not the ones already being negotiated.
     pub fn listen_protocol_ref(&self) -> &SubstreamProtocol {
         &self.listen_protocol
     }
 
     /// Returns a mutable reference to the listen protocol configuration.
    ///
-    /// > **Note**: If you modify the protocol, modifications will only applies to future inbound
-    /// > substreams, not the ones already being negotiated.
+    /// > **Note**: If you modify the protocol, modifications will only apply
+    /// > to future inbound
+    /// > substreams, not the ones already being negotiated.
     pub fn listen_protocol_mut(&mut self) -> &mut SubstreamProtocol {
         &mut self.listen_protocol
     }
@@ -212,12 +228,12 @@ impl Default for OneShotHandlerConfig {
 
 #[cfg(test)]
 mod tests {
-    use super::*;
+    use std::convert::Infallible;
 
-    use futures::executor::block_on;
-    use futures::future::poll_fn;
+    use futures::{executor::block_on, future::poll_fn};
     use libp2p_core::upgrade::DeniedUpgrade;
-    use std::convert::Infallible;
+
+    use super::*;
 
     #[test]
     fn do_not_keep_idle_connection_alive() {
diff --git a/swarm/src/handler/pending.rs b/swarm/src/handler/pending.rs
index 656a38849d5..0979a47e94d 100644
--- a/swarm/src/handler/pending.rs
+++ b/swarm/src/handler/pending.rs
@@ -19,13 +21,21 @@
 // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 // DEALINGS IN THE SOFTWARE.
 
-use crate::handler::{
-    ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, FullyNegotiatedInbound,
-    FullyNegotiatedOutbound, SubstreamProtocol,
+use std::{
+    convert::Infallible,
+    task::{Context, Poll},
 };
+
 use libp2p_core::upgrade::PendingUpgrade;
-use std::convert::Infallible;
-use std::task::{Context, Poll};
+
+use crate::handler::{
+    ConnectionEvent,
+    ConnectionHandler,
+    ConnectionHandlerEvent,
+    FullyNegotiatedInbound,
+    FullyNegotiatedOutbound,
+    SubstreamProtocol,
+};
 
 /// Implementation of [`ConnectionHandler`] that returns a pending upgrade.
 #[derive(Clone, Debug)]
diff --git a/swarm/src/handler/select.rs b/swarm/src/handler/select.rs
index e049252d448..b93216546ff 100644
--- a/swarm/src/handler/select.rs
+++ b/swarm/src/handler/select.rs
@@ -18,18 +18,35 @@
 // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 // DEALINGS IN THE SOFTWARE.
-use crate::handler::{ - AddressChange, ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, DialUpgradeError, - FullyNegotiatedInbound, FullyNegotiatedOutbound, InboundUpgradeSend, ListenUpgradeError, - OutboundUpgradeSend, StreamUpgradeError, SubstreamProtocol, +use std::{ + cmp, + task::{Context, Poll}, }; -use crate::upgrade::SendWrapper; + use either::Either; use futures::{future, ready}; use libp2p_core::upgrade::SelectUpgrade; -use std::{cmp, task::Context, task::Poll}; -/// Implementation of [`ConnectionHandler`] that combines two protocols into one. +use crate::{ + handler::{ + AddressChange, + ConnectionEvent, + ConnectionHandler, + ConnectionHandlerEvent, + DialUpgradeError, + FullyNegotiatedInbound, + FullyNegotiatedOutbound, + InboundUpgradeSend, + ListenUpgradeError, + OutboundUpgradeSend, + StreamUpgradeError, + SubstreamProtocol, + }, + upgrade::SendWrapper, +}; + +/// Implementation of [`ConnectionHandler`] that combines two protocols into +/// one. #[derive(Debug, Clone)] pub struct ConnectionHandlerSelect { /// The first protocol. diff --git a/swarm/src/lib.rs b/swarm/src/lib.rs index 12280e99f07..e4d7bef3159 100644 --- a/swarm/src/lib.rs +++ b/swarm/src/lib.rs @@ -31,12 +31,12 @@ //! Creating a `Swarm` requires three things: //! //! 1. A network identity of the local node in form of a [`PeerId`]. -//! 2. An implementation of the [`Transport`] trait. This is the type that -//! will be used in order to reach nodes on the network based on their -//! address. See the `transport` module for more information. +//! 2. An implementation of the [`Transport`] trait. This is the type that will +//! be used in order to reach nodes on the network based on their address. +//! See the `transport` module for more information. //! 3. An implementation of the [`NetworkBehaviour`] trait. This is a state -//! machine that defines how the swarm should behave once it is connected -//! to a node. +//! 
machine that defines how the swarm should behave once it is connected to +//! a node. //! //! # Network Behaviour //! @@ -51,7 +51,6 @@ //! The [`ConnectionHandler`] trait defines how each active connection to a //! remote should behave: how to handle incoming substreams, which protocols //! are supported, when to open a new outbound substream, etc. -//! #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] @@ -70,105 +69,136 @@ pub mod handler; mod listen_opts; mod translation; -/// Bundles all symbols required for the [`libp2p_swarm_derive::NetworkBehaviour`] macro. +/// Bundles all symbols required for the +/// [`libp2p_swarm_derive::NetworkBehaviour`] macro. #[doc(hidden)] pub mod derive_prelude { - pub use crate::behaviour::AddressChange; - pub use crate::behaviour::ConnectionClosed; - pub use crate::behaviour::ConnectionEstablished; - pub use crate::behaviour::DialFailure; - pub use crate::behaviour::ExpiredListenAddr; - pub use crate::behaviour::ExternalAddrConfirmed; - pub use crate::behaviour::ExternalAddrExpired; - pub use crate::behaviour::FromSwarm; - pub use crate::behaviour::ListenFailure; - pub use crate::behaviour::ListenerClosed; - pub use crate::behaviour::ListenerError; - pub use crate::behaviour::NewExternalAddrCandidate; - pub use crate::behaviour::NewExternalAddrOfPeer; - pub use crate::behaviour::NewListenAddr; - pub use crate::behaviour::NewListener; - pub use crate::connection::ConnectionId; - pub use crate::ConnectionDenied; - pub use crate::ConnectionHandler; - pub use crate::ConnectionHandlerSelect; - pub use crate::DialError; - pub use crate::NetworkBehaviour; - pub use crate::THandler; - pub use crate::THandlerInEvent; - pub use crate::THandlerOutEvent; - pub use crate::ToSwarm; pub use either::Either; pub use futures::prelude as futures; - pub use libp2p_core::transport::{ListenerId, PortUse}; - pub use libp2p_core::ConnectedPoint; - pub use libp2p_core::Endpoint; - pub use libp2p_core::Multiaddr; + pub use libp2p_core::{ + 
transport::{ListenerId, PortUse}, + ConnectedPoint, + Endpoint, + Multiaddr, + }; pub use libp2p_identity::PeerId; + + pub use crate::{ + behaviour::{ + AddressChange, + ConnectionClosed, + ConnectionEstablished, + DialFailure, + ExpiredListenAddr, + ExternalAddrConfirmed, + ExternalAddrExpired, + FromSwarm, + ListenFailure, + ListenerClosed, + ListenerError, + NewExternalAddrCandidate, + NewExternalAddrOfPeer, + NewListenAddr, + NewListener, + }, + connection::ConnectionId, + ConnectionDenied, + ConnectionHandler, + ConnectionHandlerSelect, + DialError, + NetworkBehaviour, + THandler, + THandlerInEvent, + THandlerOutEvent, + ToSwarm, + }; } -pub use behaviour::{ - AddressChange, CloseConnection, ConnectionClosed, DialFailure, ExpiredListenAddr, - ExternalAddrExpired, ExternalAddresses, FromSwarm, ListenAddresses, ListenFailure, - ListenerClosed, ListenerError, NetworkBehaviour, NewExternalAddrCandidate, - NewExternalAddrOfPeer, NewListenAddr, NotifyHandler, PeerAddresses, ToSwarm, -}; -pub use connection::pool::ConnectionCounters; -pub use connection::{ConnectionError, ConnectionId, SupportedProtocols}; -pub use executor::Executor; -pub use handler::{ - ConnectionHandler, ConnectionHandlerEvent, ConnectionHandlerSelect, OneShotHandler, - OneShotHandlerConfig, StreamUpgradeError, SubstreamProtocol, +use std::{ + collections::{HashMap, HashSet, VecDeque}, + error, + fmt, + io, + num::{NonZeroU32, NonZeroU8, NonZeroUsize}, + pin::Pin, + task::{Context, Poll}, + time::Duration, }; -#[cfg(feature = "macros")] -pub use libp2p_swarm_derive::NetworkBehaviour; -pub use listen_opts::ListenOpts; -pub use stream::Stream; -pub use stream_protocol::{InvalidProtocol, StreamProtocol}; -use crate::behaviour::ExternalAddrConfirmed; -use crate::handler::UpgradeInfoSend; -use connection::pool::{EstablishedConnection, Pool, PoolConfig, PoolEvent}; -use connection::IncomingInfo; +pub use behaviour::{ + AddressChange, + CloseConnection, + ConnectionClosed, + DialFailure, + 
ExpiredListenAddr, + ExternalAddrExpired, + ExternalAddresses, + FromSwarm, + ListenAddresses, + ListenFailure, + ListenerClosed, + ListenerError, + NetworkBehaviour, + NewExternalAddrCandidate, + NewExternalAddrOfPeer, + NewListenAddr, + NotifyHandler, + PeerAddresses, + ToSwarm, +}; +pub use connection::{pool::ConnectionCounters, ConnectionError, ConnectionId, SupportedProtocols}; use connection::{ - PendingConnectionError, PendingInboundConnectionError, PendingOutboundConnectionError, + pool::{EstablishedConnection, Pool, PoolConfig, PoolEvent}, + IncomingInfo, + PendingConnectionError, + PendingInboundConnectionError, + PendingOutboundConnectionError, }; use dial_opts::{DialOpts, PeerCondition}; +pub use executor::Executor; use futures::{prelude::*, stream::FusedStream}; - +pub use handler::{ + ConnectionHandler, + ConnectionHandlerEvent, + ConnectionHandlerSelect, + OneShotHandler, + OneShotHandlerConfig, + StreamUpgradeError, + SubstreamProtocol, +}; use libp2p_core::{ connection::ConnectedPoint, muxing::StreamMuxerBox, transport::{self, ListenerId, TransportError, TransportEvent}, - Multiaddr, Transport, + Multiaddr, + Transport, }; use libp2p_identity::PeerId; - +#[cfg(feature = "macros")] +pub use libp2p_swarm_derive::NetworkBehaviour; +pub use listen_opts::ListenOpts; use smallvec::SmallVec; -use std::collections::{HashMap, HashSet, VecDeque}; -use std::num::{NonZeroU32, NonZeroU8, NonZeroUsize}; -use std::time::Duration; -use std::{ - error, fmt, io, - pin::Pin, - task::{Context, Poll}, -}; +pub use stream::Stream; +pub use stream_protocol::{InvalidProtocol, StreamProtocol}; use tracing::Instrument; #[doc(hidden)] pub use translation::_address_translation; +use crate::{behaviour::ExternalAddrConfirmed, handler::UpgradeInfoSend}; + /// Event generated by the [`NetworkBehaviour`] that the swarm will report back. 
type TBehaviourOutEvent = ::ToSwarm; -/// [`ConnectionHandler`] of the [`NetworkBehaviour`] for all the protocols the [`NetworkBehaviour`] -/// supports. +/// [`ConnectionHandler`] of the [`NetworkBehaviour`] for all the protocols the +/// [`NetworkBehaviour`] supports. pub type THandler = ::ConnectionHandler; /// Custom event that can be received by the [`ConnectionHandler`] of the /// [`NetworkBehaviour`]. pub type THandlerInEvent = as ConnectionHandler>::FromBehaviour; -/// Custom event that can be produced by the [`ConnectionHandler`] of the [`NetworkBehaviour`]. +/// Custom event that can be produced by the [`ConnectionHandler`] of the +/// [`NetworkBehaviour`]. pub type THandlerOutEvent = as ConnectionHandler>::ToBehaviour; /// Event generated by the `Swarm`. @@ -185,8 +215,8 @@ pub enum SwarmEvent { connection_id: ConnectionId, /// Endpoint of the connection that has been opened. endpoint: ConnectedPoint, - /// Number of established connections to this peer, including the one that has just been - /// opened. + /// Number of established connections to this peer, including the one + /// that has just been opened. num_established: NonZeroU32, /// [`Some`] when the new connection is an outgoing connection. /// Addresses are dialed concurrently. Contains the addresses and errors @@ -210,31 +240,33 @@ pub enum SwarmEvent { /// active close. cause: Option, }, - /// A new connection arrived on a listener and is in the process of protocol negotiation. + /// A new connection arrived on a listener and is in the process of protocol + /// negotiation. /// - /// A corresponding [`ConnectionEstablished`](SwarmEvent::ConnectionEstablished) or - /// [`IncomingConnectionError`](SwarmEvent::IncomingConnectionError) event will later be - /// generated for this connection. 
+ /// A corresponding + /// [`ConnectionEstablished`](SwarmEvent::ConnectionEstablished) or + /// [`IncomingConnectionError`](SwarmEvent::IncomingConnectionError) event + /// will later be generated for this connection. IncomingConnection { /// Identifier of the connection. connection_id: ConnectionId, /// Local connection address. - /// This address has been earlier reported with a [`NewListenAddr`](SwarmEvent::NewListenAddr) - /// event. + /// This address has been earlier reported with a + /// [`NewListenAddr`](SwarmEvent::NewListenAddr) event. local_addr: Multiaddr, /// Address used to send back data to the remote. send_back_addr: Multiaddr, }, /// An error happened on an inbound connection during its initial handshake. /// - /// This can include, for example, an error during the handshake of the encryption layer, or - /// the connection unexpectedly closed. + /// This can include, for example, an error during the handshake of the + /// encryption layer, or the connection unexpectedly closed. IncomingConnectionError { /// Identifier of the connection. connection_id: ConnectionId, /// Local connection address. - /// This address has been earlier reported with a [`NewListenAddr`](SwarmEvent::NewListenAddr) - /// event. + /// This address has been earlier reported with a + /// [`NewListenAddr`](SwarmEvent::NewListenAddr) event. local_addr: Multiaddr, /// Address used to send back data to the remote. send_back_addr: Multiaddr, @@ -268,12 +300,13 @@ pub enum SwarmEvent { ListenerClosed { /// The listener that closed. listener_id: ListenerId, - /// The addresses that the listener was listening on. These addresses are now considered - /// expired, similar to if a [`ExpiredListenAddr`](SwarmEvent::ExpiredListenAddr) event + /// The addresses that the listener was listening on. These addresses + /// are now considered expired, similar to if a + /// [`ExpiredListenAddr`](SwarmEvent::ExpiredListenAddr) event /// has been generated for each of them. 
addresses: Vec, - /// Reason for the closure. Contains `Ok(())` if the stream produced `None`, or `Err` - /// if the stream produced an error. + /// Reason for the closure. Contains `Ok(())` if the stream produced + /// `None`, or `Err` if the stream produced an error. reason: Result<(), io::Error>, }, /// One of the listeners reported a non-fatal error. @@ -301,14 +334,16 @@ pub enum SwarmEvent { NewExternalAddrCandidate { address: Multiaddr }, /// An external address of the local node was confirmed. ExternalAddrConfirmed { address: Multiaddr }, - /// An external address of the local node expired, i.e. is no-longer confirmed. + /// An external address of the local node expired, i.e. is no-longer + /// confirmed. ExternalAddrExpired { address: Multiaddr }, /// We have discovered a new address of a peer. NewExternalAddrOfPeer { peer_id: PeerId, address: Multiaddr }, } impl SwarmEvent { - /// Extract the `TBehaviourOutEvent` from this [`SwarmEvent`] in case it is the `Behaviour` variant, otherwise fail. + /// Extract the `TBehaviourOutEvent` from this [`SwarmEvent`] in case it is + /// the `Behaviour` variant, otherwise fail. #[allow(clippy::result_large_err)] pub fn try_into_behaviour_event(self) -> Result { match self { @@ -326,7 +361,8 @@ pub struct Swarm where TBehaviour: NetworkBehaviour, { - /// [`Transport`] for dialing remote peers and listening for incoming connection. + /// [`Transport`] for dialing remote peers and listening for incoming + /// connection. transport: transport::Boxed<(PeerId, StreamMuxerBox)>, /// The nodes currently active. @@ -335,8 +371,8 @@ where /// The local peer ID. local_peer_id: PeerId, - /// Handles which nodes to connect to and how to handle the events sent back by the protocol - /// handlers. + /// Handles which nodes to connect to and how to handle the events sent back + /// by the protocol handlers. behaviour: TBehaviour, /// List of protocols that the behaviour says it supports. 
@@ -361,8 +397,8 @@ impl Swarm where TBehaviour: NetworkBehaviour, { - /// Creates a new [`Swarm`] from the given [`Transport`], [`NetworkBehaviour`], [`PeerId`] and - /// [`Config`]. + /// Creates a new [`Swarm`] from the given [`Transport`], + /// [`NetworkBehaviour`], [`PeerId`] and [`Config`]. pub fn new( transport: transport::Boxed<(PeerId, StreamMuxerBox)>, behaviour: TBehaviour, @@ -397,8 +433,9 @@ where /// Starts listening on the given address. /// Returns an error if the address is not supported. /// - /// Listeners report their new listening addresses as [`SwarmEvent::NewListenAddr`]. - /// Depending on the underlying transport, one listener may have multiple listening addresses. + /// Listeners report their new listening addresses as + /// [`SwarmEvent::NewListenAddr`]. Depending on the underlying + /// transport, one listener may have multiple listening addresses. pub fn listen_on(&mut self, addr: Multiaddr) -> Result> { let opts = ListenOpts::new(addr); let id = opts.listener_id(); @@ -570,7 +607,8 @@ where Ok(()) } - /// Returns an iterator that produces the list of addresses we're listening on. + /// Returns an iterator that produces the list of addresses we're listening + /// on. pub fn listeners(&self) -> impl Iterator { self.listened_addrs.values().flatten() } @@ -609,8 +647,9 @@ where /// Add a **confirmed** external address for the local node. /// - /// This function should only be called with addresses that are guaranteed to be reachable. - /// The address is broadcast to all [`NetworkBehaviour`]s via [`FromSwarm::ExternalAddrConfirmed`]. + /// This function should only be called with addresses that are guaranteed + /// to be reachable. The address is broadcast to all + /// [`NetworkBehaviour`]s via [`FromSwarm::ExternalAddrConfirmed`]. 
pub fn add_external_address(&mut self, a: Multiaddr) { self.behaviour .on_swarm_event(FromSwarm::ExternalAddrConfirmed(ExternalAddrConfirmed { @@ -621,7 +660,8 @@ where /// Remove an external address for the local node. /// - /// The address is broadcast to all [`NetworkBehaviour`]s via [`FromSwarm::ExternalAddrExpired`]. + /// The address is broadcast to all [`NetworkBehaviour`]s via + /// [`FromSwarm::ExternalAddrExpired`]. pub fn remove_external_address(&mut self, addr: &Multiaddr) { self.behaviour .on_swarm_event(FromSwarm::ExternalAddrExpired(ExternalAddrExpired { addr })); @@ -630,7 +670,8 @@ where /// Add a new external address of a remote peer. /// - /// The address is broadcast to all [`NetworkBehaviour`]s via [`FromSwarm::NewExternalAddrOfPeer`]. + /// The address is broadcast to all [`NetworkBehaviour`]s via + /// [`FromSwarm::NewExternalAddrOfPeer`]. pub fn add_peer_address(&mut self, peer_id: PeerId, addr: Multiaddr) { self.behaviour .on_swarm_event(FromSwarm::NewExternalAddrOfPeer(NewExternalAddrOfPeer { @@ -641,10 +682,13 @@ where /// Disconnects a peer by its peer ID, closing all connections to said peer. /// - /// Returns `Ok(())` if there was one or more established connections to the peer. + /// Returns `Ok(())` if there was one or more established connections to the + /// peer. /// - /// Closing a connection via [`Swarm::disconnect_peer_id`] will poll [`ConnectionHandler::poll_close`] to completion. - /// Use this function if you want to close a connection _despite_ it still being in use by one or more handlers. + /// Closing a connection via [`Swarm::disconnect_peer_id`] will poll + /// [`ConnectionHandler::poll_close`] to completion. Use this function + /// if you want to close a connection _despite_ it still being in use by one + /// or more handlers. 
#[allow(clippy::result_unit_err)] pub fn disconnect_peer_id(&mut self, peer_id: PeerId) -> Result<(), ()> { let was_connected = self.pool.is_connected(peer_id); @@ -659,8 +703,9 @@ where /// Attempt to gracefully close a connection. /// - /// Closing a connection is asynchronous but this function will return immediately. - /// A [`SwarmEvent::ConnectionClosed`] event will be emitted once the connection is actually closed. + /// Closing a connection is asynchronous but this function will return + /// immediately. A [`SwarmEvent::ConnectionClosed`] event will be + /// emitted once the connection is actually closed. /// /// # Returns /// @@ -1192,8 +1237,8 @@ where mut self: Pin<&mut Self>, cx: &mut Context<'_>, ) -> Poll> { - // We use a `this` variable because the compiler can't mutably borrow multiple times - // across a `Deref`. + // We use a `this` variable because the compiler can't mutably borrow multiple + // times across a `Deref`. let this = &mut *self; // This loop polls the components below in a prioritized order. @@ -1202,17 +1247,19 @@ where // 2. Connection [`Pool`] // 3. [`ListenersStream`] // - // (1) is polled before (2) to prioritize local work over work coming from a remote. + // (1) is polled before (2) to prioritize local work over work coming from a + // remote. // - // (2) is polled before (3) to prioritize existing connections over upgrading new incoming connections. + // (2) is polled before (3) to prioritize existing connections over upgrading + // new incoming connections. loop { if let Some(swarm_event) = this.pending_swarm_events.pop_front() { return Poll::Ready(swarm_event); } match this.pending_handler_event.take() { - // Try to deliver the pending event emitted by the [`NetworkBehaviour`] in the previous - // iteration to the connection handler(s). + // Try to deliver the pending event emitted by the [`NetworkBehaviour`] in the + // previous iteration to the connection handler(s). 
Some((peer_id, handler, event)) => match handler { PendingNotifyHandler::One(conn_id) => { match this.pool.get_established(conn_id) { @@ -1373,7 +1420,8 @@ where } } -/// The stream of swarm events never terminates, so we can implement fused for it. +/// The stream of swarm events never terminates, so we can implement fused for +/// it. impl FusedStream for Swarm where TBehaviour: NetworkBehaviour, @@ -1388,8 +1436,8 @@ pub struct Config { } impl Config { - /// Creates a new [`Config`] from the given executor. The [`Swarm`] is obtained via - /// [`Swarm::new`]. + /// Creates a new [`Config`] from the given executor. The [`Swarm`] is + /// obtained via [`Swarm::new`]. pub fn with_executor(executor: impl Executor + Send + 'static) -> Self { Self { pool_config: PoolConfig::new(Some(Box::new(executor))), @@ -1450,23 +1498,24 @@ impl Config { self } - /// Configures the size of the buffer for events sent by a [`ConnectionHandler`] to the - /// [`NetworkBehaviour`]. + /// Configures the size of the buffer for events sent by a + /// [`ConnectionHandler`] to the [`NetworkBehaviour`]. /// /// Each connection has its own buffer. /// - /// The ideal value depends on the executor used, the CPU speed and the volume of events. - /// If this value is too low, then the [`ConnectionHandler`]s will be sleeping more often - /// than necessary. Increasing this value increases the overall memory - /// usage, and more importantly the latency between the moment when an - /// event is emitted and the moment when it is received by the - /// [`NetworkBehaviour`]. + /// The ideal value depends on the executor used, the CPU speed and the + /// volume of events. If this value is too low, then the + /// [`ConnectionHandler`]s will be sleeping more often than necessary. + /// Increasing this value increases the overall memory usage, and more + /// importantly the latency between the moment when an event is emitted + /// and the moment when it is received by the [`NetworkBehaviour`]. 
pub fn with_per_connection_event_buffer_size(mut self, n: usize) -> Self { self.pool_config = self.pool_config.with_per_connection_event_buffer_size(n); self } - /// Number of addresses concurrently dialed for a single outbound connection attempt. + /// Number of addresses concurrently dialed for a single outbound connection + /// attempt. pub fn with_dial_concurrency_factor(mut self, factor: NonZeroU8) -> Self { self.pool_config = self.pool_config.with_dial_concurrency_factor(factor); self @@ -1518,14 +1567,17 @@ impl Config { pub enum DialError { /// The peer identity obtained on the connection matches the local peer. LocalPeerId { endpoint: ConnectedPoint }, - /// No addresses have been provided by [`NetworkBehaviour::handle_pending_outbound_connection`] and [`DialOpts`]. + /// No addresses have been provided by + /// [`NetworkBehaviour::handle_pending_outbound_connection`] and + /// [`DialOpts`]. NoAddresses, /// The provided [`dial_opts::PeerCondition`] evaluated to false and thus /// the dial was aborted. DialPeerConditionFalse(dial_opts::PeerCondition), /// Pending connection attempt has been aborted. Aborted, - /// The peer identity obtained on the connection did not match the one that was expected. + /// The peer identity obtained on the connection did not match the one that + /// was expected. WrongPeerId { obtained: PeerId, endpoint: ConnectedPoint, @@ -1534,7 +1586,8 @@ pub enum DialError { /// via [`NetworkBehaviour::handle_pending_outbound_connection`] or /// [`NetworkBehaviour::handle_established_outbound_connection`]. Denied { cause: ConnectionDenied }, - /// An error occurred while negotiating the transport protocol(s) on a connection. + /// An error occurred while negotiating the transport protocol(s) on a + /// connection. Transport(Vec<(Multiaddr, TransportError)>), } @@ -1559,10 +1612,28 @@ impl fmt::Display for DialError { f, "Dial error: tried to dial local peer id at {endpoint:?}." 
), - DialError::DialPeerConditionFalse(PeerCondition::Disconnected) => write!(f, "Dial error: dial condition was configured to only happen when disconnected (`PeerCondition::Disconnected`), but node is already connected, thus cancelling new dial."), - DialError::DialPeerConditionFalse(PeerCondition::NotDialing) => write!(f, "Dial error: dial condition was configured to only happen if there is currently no ongoing dialing attempt (`PeerCondition::NotDialing`), but a dial is in progress, thus cancelling new dial."), - DialError::DialPeerConditionFalse(PeerCondition::DisconnectedAndNotDialing) => write!(f, "Dial error: dial condition was configured to only happen when both disconnected (`PeerCondition::Disconnected`) and there is currently no ongoing dialing attempt (`PeerCondition::NotDialing`), but node is already connected or dial is in progress, thus cancelling new dial."), - DialError::DialPeerConditionFalse(PeerCondition::Always) => unreachable!("Dial peer condition is by definition true."), + DialError::DialPeerConditionFalse(PeerCondition::Disconnected) => write!( + f, + "Dial error: dial condition was configured to only happen when disconnected \ + (`PeerCondition::Disconnected`), but node is already connected, thus cancelling \ + new dial." + ), + DialError::DialPeerConditionFalse(PeerCondition::NotDialing) => write!( + f, + "Dial error: dial condition was configured to only happen if there is currently \ + no ongoing dialing attempt (`PeerCondition::NotDialing`), but a dial is in \ + progress, thus cancelling new dial." + ), + DialError::DialPeerConditionFalse(PeerCondition::DisconnectedAndNotDialing) => write!( + f, + "Dial error: dial condition was configured to only happen when both disconnected \ + (`PeerCondition::Disconnected`) and there is currently no ongoing dialing \ + attempt (`PeerCondition::NotDialing`), but node is already connected or dial is \ + in progress, thus cancelling new dial." 
+ ), + DialError::DialPeerConditionFalse(PeerCondition::Always) => { + unreachable!("Dial peer condition is by definition true.") + } DialError::Aborted => write!( f, "Dial error: Pending connection attempt has been aborted." @@ -1619,7 +1690,8 @@ impl error::Error for DialError { pub enum ListenError { /// Pending connection attempt has been aborted. Aborted, - /// The peer identity obtained on the connection did not match the one that was expected. + /// The peer identity obtained on the connection did not match the one that + /// was expected. WrongPeerId { obtained: PeerId, endpoint: ConnectedPoint, @@ -1631,7 +1703,8 @@ pub enum ListenError { Denied { cause: ConnectionDenied, }, - /// An error occurred while negotiating the transport protocol(s) on a connection. + /// An error occurred while negotiating the transport protocol(s) on a + /// connection. Transport(TransportError), } @@ -1688,7 +1761,8 @@ impl error::Error for ListenError { /// A connection was denied. /// -/// To figure out which [`NetworkBehaviour`] denied the connection, use [`ConnectionDenied::downcast`]. +/// To figure out which [`NetworkBehaviour`] denied the connection, use +/// [`ConnectionDenied::downcast`]. #[derive(Debug)] pub struct ConnectionDenied { inner: Box, @@ -1701,7 +1775,8 @@ impl ConnectionDenied { } } - /// Attempt to downcast to a particular reason for why the connection was denied. + /// Attempt to downcast to a particular reason for why the connection was + /// denied. pub fn downcast(self) -> Result where E: error::Error + Send + Sync + 'static, @@ -1714,7 +1789,8 @@ impl ConnectionDenied { Ok(*inner) } - /// Attempt to downcast to a particular reason for why the connection was denied. + /// Attempt to downcast to a particular reason for why the connection was + /// denied. 
pub fn downcast_ref(&self) -> Option<&E> where E: error::Error + Send + Sync + 'static, @@ -1759,18 +1835,22 @@ impl NetworkInfo { #[cfg(test)] mod tests { - use super::*; - use crate::test::{CallTraceBehaviour, MockBehaviour}; - use libp2p_core::multiaddr::multiaddr; - use libp2p_core::transport::memory::MemoryTransportError; - use libp2p_core::transport::{PortUse, TransportEvent}; - use libp2p_core::Endpoint; - use libp2p_core::{multiaddr, transport, upgrade}; + use libp2p_core::{ + multiaddr, + multiaddr::multiaddr, + transport, + transport::{memory::MemoryTransportError, PortUse, TransportEvent}, + upgrade, + Endpoint, + }; use libp2p_identity as identity; use libp2p_plaintext as plaintext; use libp2p_yamux as yamux; use quickcheck::*; + use super::*; + use crate::test::{CallTraceBehaviour, MockBehaviour}; + // Test execution state. // Connection => Disconnecting => Connecting. enum State { @@ -1840,10 +1920,12 @@ mod tests { } /// Establishes multiple connections between two peers, - /// after which one peer disconnects the other using [`Swarm::disconnect_peer_id`]. + /// after which one peer disconnects the other using + /// [`Swarm::disconnect_peer_id`]. /// - /// The test expects both behaviours to be notified via calls to [`NetworkBehaviour::on_swarm_event`] - /// with pairs of [`FromSwarm::ConnectionEstablished`] / [`FromSwarm::ConnectionClosed`] + /// The test expects both behaviours to be notified via calls to + /// [`NetworkBehaviour::on_swarm_event`] with pairs of + /// [`FromSwarm::ConnectionEstablished`] / [`FromSwarm::ConnectionClosed`] #[tokio::test] async fn test_swarm_disconnect() { let mut swarm1 = new_test_swarm(Config::with_tokio_executor()); @@ -1905,8 +1987,9 @@ mod tests { /// after which one peer disconnects the other /// using [`ToSwarm::CloseConnection`] returned by a [`NetworkBehaviour`]. 
/// - /// The test expects both behaviours to be notified via calls to [`NetworkBehaviour::on_swarm_event`] - /// with pairs of [`FromSwarm::ConnectionEstablished`] / [`FromSwarm::ConnectionClosed`] + /// The test expects both behaviours to be notified via calls to + /// [`NetworkBehaviour::on_swarm_event`] with pairs of + /// [`FromSwarm::ConnectionEstablished`] / [`FromSwarm::ConnectionClosed`] #[tokio::test] async fn test_behaviour_disconnect_all() { let mut swarm1 = new_test_swarm(Config::with_tokio_executor()); @@ -1972,8 +2055,9 @@ mod tests { /// after which one peer closes a single connection /// using [`ToSwarm::CloseConnection`] returned by a [`NetworkBehaviour`]. /// - /// The test expects both behaviours to be notified via calls to [`NetworkBehaviour::on_swarm_event`] - /// with pairs of [`FromSwarm::ConnectionEstablished`] / [`FromSwarm::ConnectionClosed`] + /// The test expects both behaviours to be notified via calls to + /// [`NetworkBehaviour::on_swarm_event`] with pairs of + /// [`FromSwarm::ConnectionEstablished`] / [`FromSwarm::ConnectionClosed`] #[tokio::test] async fn test_behaviour_disconnect_one() { let mut swarm1 = new_test_swarm(Config::with_tokio_executor()); @@ -2083,8 +2167,8 @@ mod tests { transports.push(transport); } - // Have swarm dial each listener and wait for each listener to receive the incoming - // connections. + // Have swarm dial each listener and wait for each listener to receive the + // incoming connections. swarm .dial( DialOpts::peer_id(PeerId::random()) @@ -2117,8 +2201,8 @@ mod tests { #[tokio::test] async fn invalid_peer_id() { - // Checks whether dialing an address containing the wrong peer id raises an error - // for the expected peer id instead of the obtained peer id. + // Checks whether dialing an address containing the wrong peer id raises an + // error for the expected peer id instead of the obtained peer id. 
let mut swarm1 = new_test_swarm(Config::with_tokio_executor()); let mut swarm2 = new_test_swarm(Config::with_tokio_executor()); @@ -2175,8 +2259,10 @@ mod tests { // Dialing the same address we're listening should result in three events: // // - The incoming connection notification (before we know the incoming peer ID). - // - The connection error for the dialing endpoint (once we've determined that it's our own ID). - // - The connection error for the listening endpoint (once we've determined that it's our own ID). + // - The connection error for the dialing endpoint (once we've determined that + // it's our own ID). + // - The connection error for the listening endpoint (once we've determined that + // it's our own ID). // // The last two can happen in any order. @@ -2190,7 +2276,8 @@ mod tests { }) .await; - swarm.listened_addrs.clear(); // This is a hack to actually execute the dial to ourselves which would otherwise be filtered. + swarm.listened_addrs.clear(); // This is a hack to actually execute the dial to ourselves which would + // otherwise be filtered. swarm.dial(local_address.clone()).unwrap(); @@ -2237,8 +2324,8 @@ mod tests { #[tokio::test] async fn dial_self_by_id() { - // Trying to dial self by passing the same `PeerId` shouldn't even be possible in the first - // place. + // Trying to dial self by passing the same `PeerId` shouldn't even be possible + // in the first place. let swarm = new_test_swarm(Config::with_tokio_executor()); let peer_id = *swarm.local_peer_id(); assert!(!swarm.is_connected(&peer_id)); @@ -2246,7 +2333,8 @@ mod tests { #[tokio::test] async fn multiple_addresses_err() { - // Tries dialing multiple addresses, and makes sure there's one dialing error per address. + // Tries dialing multiple addresses, and makes sure there's one dialing error + // per address. 
let target = PeerId::random(); @@ -2342,7 +2430,12 @@ mod tests { let string = format!("{error}"); - // Unfortunately, we have some "empty" errors that lead to multiple colons without text but that is the best we can do. - assert_eq!("Failed to negotiate transport protocol(s): [(/ip4/127.0.0.1/tcp/80: : No listener on the given port.)]", string) + // Unfortunately, we have some "empty" errors that lead to multiple colons + // without text but that is the best we can do. + assert_eq!( + "Failed to negotiate transport protocol(s): [(/ip4/127.0.0.1/tcp/80: : No listener on \ + the given port.)]", + string + ) } } diff --git a/swarm/src/listen_opts.rs b/swarm/src/listen_opts.rs index 9c4d69a6fa0..1fcb33cd348 100644 --- a/swarm/src/listen_opts.rs +++ b/swarm/src/listen_opts.rs @@ -1,6 +1,7 @@ -use crate::ListenerId; use libp2p_core::Multiaddr; +use crate::ListenerId; + #[derive(Debug)] pub struct ListenOpts { id: ListenerId, diff --git a/swarm/src/stream.rs b/swarm/src/stream.rs index 871352f3c6a..9dfd46ed499 100644 --- a/swarm/src/stream.rs +++ b/swarm/src/stream.rs @@ -1,6 +1,3 @@ -use futures::{AsyncRead, AsyncWrite}; -use libp2p_core::muxing::SubstreamBox; -use libp2p_core::Negotiated; use std::{ io::{IoSlice, IoSliceMut}, pin::Pin, @@ -8,6 +5,9 @@ use std::{ task::{Context, Poll}, }; +use futures::{AsyncRead, AsyncWrite}; +use libp2p_core::{muxing::SubstreamBox, Negotiated}; + /// Counter for the number of active streams on a connection. #[derive(Debug, Clone)] pub(crate) struct ActiveStreamCounter(Arc<()>); @@ -40,14 +40,16 @@ impl Stream { } } - /// Ignore this stream in the [Swarm](crate::Swarm)'s connection-keep-alive algorithm. + /// Ignore this stream in the [Swarm](crate::Swarm)'s connection-keep-alive + /// algorithm. /// - /// By default, any active stream keeps a connection alive. For most protocols, - /// this is a good default as it ensures that the protocol is completed before - /// a connection is shut down. 
+ /// By default, any active stream keeps a connection alive. For most + /// protocols, this is a good default as it ensures that the protocol is + /// completed before a connection is shut down. /// Some protocols like libp2p's [ping](https://github.com/libp2p/specs/blob/master/ping/ping.md) /// for example never complete and are of an auxiliary nature. - /// These protocols should opt-out of the keep alive algorithm using this method. + /// These protocols should opt-out of the keep alive algorithm using this + /// method. pub fn ignore_for_keep_alive(&mut self) { self.counter.take(); } diff --git a/swarm/src/stream_protocol.rs b/swarm/src/stream_protocol.rs index f746429a3d7..a6cb1d41d7f 100644 --- a/swarm/src/stream_protocol.rs +++ b/swarm/src/stream_protocol.rs @@ -1,12 +1,16 @@ +use std::{ + fmt, + hash::{Hash, Hasher}, + sync::Arc, +}; + use either::Either; -use std::fmt; -use std::hash::{Hash, Hasher}; -use std::sync::Arc; /// Identifies a protocol for a stream. /// -/// libp2p nodes use stream protocols to negotiate what to do with a newly opened stream. -/// Stream protocols are string-based and must start with a forward slash: `/`. +/// libp2p nodes use stream protocols to negotiate what to do with a newly +/// opened stream. Stream protocols are string-based and must start with a +/// forward slash: `/`. #[derive(Clone, Eq)] pub struct StreamProtocol { inner: Either<&'static str, Arc>, @@ -17,7 +21,8 @@ impl StreamProtocol { /// /// # Panics /// - /// This function panics if the protocol does not start with a forward slash: `/`. + /// This function panics if the protocol does not start with a forward + /// slash: `/`. pub const fn new(s: &'static str) -> Self { match s.as_bytes() { [b'/', ..] => {} @@ -31,15 +36,17 @@ impl StreamProtocol { /// Attempt to construct a protocol from an owned string. /// - /// This function will fail if the protocol does not start with a forward slash: `/`. 
- /// Where possible, you should use [`StreamProtocol::new`] instead to avoid allocations. + /// This function will fail if the protocol does not start with a forward + /// slash: `/`. Where possible, you should use [`StreamProtocol::new`] + /// instead to avoid allocations. pub fn try_from_owned(protocol: String) -> Result { if !protocol.starts_with('/') { return Err(InvalidProtocol::missing_forward_slash()); } Ok(StreamProtocol { - inner: Either::Right(Arc::from(protocol)), // FIXME: Can we somehow reuse the allocation from the owned string? + inner: Either::Right(Arc::from(protocol)), /* FIXME: Can we somehow reuse the + * allocation from the owned string? */ }) } } diff --git a/swarm/src/test.rs b/swarm/src/test.rs index a6cb7c4d4eb..ddbf372609e 100644 --- a/swarm/src/test.rs +++ b/swarm/src/test.rs @@ -18,19 +18,42 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::behaviour::{ - ConnectionClosed, ConnectionEstablished, DialFailure, ExpiredListenAddr, ExternalAddrExpired, - FromSwarm, ListenerClosed, ListenerError, NewExternalAddrCandidate, NewListenAddr, NewListener, +use std::{ + collections::HashMap, + task::{Context, Poll}, }; -use crate::{ - ConnectionDenied, ConnectionHandler, ConnectionId, NetworkBehaviour, THandler, THandlerInEvent, - THandlerOutEvent, ToSwarm, + +use libp2p_core::{ + multiaddr::Multiaddr, + transport::{ListenerId, PortUse}, + ConnectedPoint, + Endpoint, }; -use libp2p_core::transport::PortUse; -use libp2p_core::{multiaddr::Multiaddr, transport::ListenerId, ConnectedPoint, Endpoint}; use libp2p_identity::PeerId; -use std::collections::HashMap; -use std::task::{Context, Poll}; + +use crate::{ + behaviour::{ + ConnectionClosed, + ConnectionEstablished, + DialFailure, + ExpiredListenAddr, + ExternalAddrExpired, + FromSwarm, + ListenerClosed, + ListenerError, + NewExternalAddrCandidate, + NewListenAddr, + NewListener, + }, + ConnectionDenied, + ConnectionHandler, + 
ConnectionId, + NetworkBehaviour, + THandler, + THandlerInEvent, + THandlerOutEvent, + ToSwarm, +}; /// A `MockBehaviour` is a `NetworkBehaviour` that allows for /// the instrumentation of return values, without keeping @@ -42,9 +65,12 @@ where TOutEvent: Send + 'static, { /// The prototype protocols handler that is cloned for every - /// invocation of [`NetworkBehaviour::handle_established_inbound_connection`] and [`NetworkBehaviour::handle_established_outbound_connection`] + /// invocation of + /// [`NetworkBehaviour::handle_established_inbound_connection`] and + /// [`NetworkBehaviour::handle_established_outbound_connection`] pub(crate) handler_proto: THandler, - /// The addresses to return from [`NetworkBehaviour::handle_established_outbound_connection`]. + /// The addresses to return from + /// [`NetworkBehaviour::handle_established_outbound_connection`]. pub(crate) addresses: HashMap>, /// The next action to return from `poll`. /// @@ -218,8 +244,9 @@ where .count() } - /// Checks that when the expected number of established connection notifications are received, - /// a given number of expected connections have been received as well. + /// Checks that when the expected number of established connection + /// notifications are received, a given number of expected connections + /// have been received as well. /// /// Returns if the first condition is met. pub(crate) fn assert_connected( @@ -266,8 +293,9 @@ where }) .take(other_established); - // We are informed that there are `other_established` additional connections. Ensure that the - // number of previous connections is consistent with this + // We are informed that there are `other_established` additional connections. 
+ // Ensure that the number of previous connections is consistent with + // this if let Some(&prev) = other_peer_connections.next() { if prev < other_established { assert_eq!( @@ -319,8 +347,9 @@ where }) .take(remaining_established); - // We are informed that there are `other_established` additional connections. Ensure that the - // number of previous connections is consistent with this + // We are informed that there are `other_established` additional connections. + // Ensure that the number of previous connections is consistent with + // this if let Some(&prev) = other_closed_connections.next() { if prev < remaining_established { assert_eq!( @@ -338,8 +367,8 @@ where .iter() .any(|(peer, conn_id, endpoint, _)| (peer, conn_id, endpoint) == (&peer_id, &connection_id, endpoint)), - "`on_swarm_event` with `FromSwarm::ConnectionClosed is called only for connections for\ - which `on_swarm_event` with `FromSwarm::ConnectionEstablished` was called first." + "`on_swarm_event` with `FromSwarm::ConnectionClosed is called only for connections \ + for which `on_swarm_event` with `FromSwarm::ConnectionEstablished` was called first." ); self.on_connection_closed.push(( peer_id, diff --git a/swarm/src/translation.rs b/swarm/src/translation.rs index baa80c907b5..d28b8c5b8ae 100644 --- a/swarm/src/translation.rs +++ b/swarm/src/translation.rs @@ -22,17 +22,18 @@ use libp2p_core::{multiaddr::Protocol, Multiaddr}; /// Perform IP address translation. /// -/// Given an `original` [`Multiaddr`] and some `observed` [`Multiaddr`], replace the first protocol -/// of the `original` with the first protocol of the `observed` [`Multiaddr`] and return this -/// translated [`Multiaddr`]. +/// Given an `original` [`Multiaddr`] and some `observed` [`Multiaddr`], replace +/// the first protocol of the `original` with the first protocol of the +/// `observed` [`Multiaddr`] and return this translated [`Multiaddr`]. /// -/// This function can for example be useful when handling tcp connections.
Tcp does not listen and -/// dial on the same port by default. Thus when receiving an observed address on a connection that -/// we initiated, it will contain our dialing port, not our listening port. We need to take the ip -/// address or dns address from the observed address and the port from the original address. +/// This function can for example be useful when handling tcp connections. Tcp +/// does not listen and dial on the same port by default. Thus when receiving an +/// observed address on a connection that we initiated, it will contain our +/// dialing port, not our listening port. We need to take the ip address or dns +/// address from the observed address and the port from the original address. /// -/// This is a mixed-mode translation, i.e. an IPv4 / DNS4 address may be replaced by an IPv6 / DNS6 -/// address and vice versa. +/// This is a mixed-mode translation, i.e. an IPv4 / DNS4 address may be +/// replaced by an IPv6 / DNS6 address and vice versa. /// /// If the first [`Protocol`]s are not IP addresses, `None` is returned instead. #[doc(hidden)] diff --git a/swarm/src/upgrade.rs b/swarm/src/upgrade.rs index f6c6648a373..596cfd64116 100644 --- a/swarm/src/upgrade.rs +++ b/swarm/src/upgrade.rs @@ -18,13 +18,13 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::Stream; - use futures::prelude::*; use libp2p_core::upgrade; -/// Implemented automatically on all types that implement [`UpgradeInfo`](upgrade::UpgradeInfo) -/// and `Send + 'static`. +use crate::Stream; + +/// Implemented automatically on all types that implement +/// [`UpgradeInfo`](upgrade::UpgradeInfo) and `Send + 'static`. /// /// Do not implement this trait yourself. Instead, please implement /// [`UpgradeInfo`](upgrade::UpgradeInfo). @@ -34,7 +34,8 @@ pub trait UpgradeInfoSend: Send + 'static { /// Equivalent to [`UpgradeInfo::InfoIter`](upgrade::UpgradeInfo::InfoIter). 
type InfoIter: Iterator + Send + 'static; - /// Equivalent to [`UpgradeInfo::protocol_info`](upgrade::UpgradeInfo::protocol_info). + /// Equivalent to + /// [`UpgradeInfo::protocol_info`](upgrade::UpgradeInfo::protocol_info). fn protocol_info(&self) -> Self::InfoIter; } @@ -58,14 +59,18 @@ where /// Do not implement this trait yourself. Instead, please implement /// [`OutboundUpgrade`](upgrade::OutboundUpgrade). pub trait OutboundUpgradeSend: UpgradeInfoSend { - /// Equivalent to [`OutboundUpgrade::Output`](upgrade::OutboundUpgrade::Output). + /// Equivalent to + /// [`OutboundUpgrade::Output`](upgrade::OutboundUpgrade::Output). type Output: Send + 'static; - /// Equivalent to [`OutboundUpgrade::Error`](upgrade::OutboundUpgrade::Error). + /// Equivalent to + /// [`OutboundUpgrade::Error`](upgrade::OutboundUpgrade::Error). type Error: Send + 'static; - /// Equivalent to [`OutboundUpgrade::Future`](upgrade::OutboundUpgrade::Future). + /// Equivalent to + /// [`OutboundUpgrade::Future`](upgrade::OutboundUpgrade::Future). type Future: Future> + Send + 'static; - /// Equivalent to [`OutboundUpgrade::upgrade_outbound`](upgrade::OutboundUpgrade::upgrade_outbound). + /// Equivalent to + /// [`OutboundUpgrade::upgrade_outbound`](upgrade::OutboundUpgrade::upgrade_outbound). fn upgrade_outbound(self, socket: Stream, info: Self::Info) -> Self::Future; } @@ -92,14 +97,17 @@ where /// Do not implement this trait yourself. Instead, please implement /// [`InboundUpgrade`](upgrade::InboundUpgrade). pub trait InboundUpgradeSend: UpgradeInfoSend { - /// Equivalent to [`InboundUpgrade::Output`](upgrade::InboundUpgrade::Output). + /// Equivalent to + /// [`InboundUpgrade::Output`](upgrade::InboundUpgrade::Output). type Output: Send + 'static; /// Equivalent to [`InboundUpgrade::Error`](upgrade::InboundUpgrade::Error). type Error: Send + 'static; - /// Equivalent to [`InboundUpgrade::Future`](upgrade::InboundUpgrade::Future). 
+ /// Equivalent to + /// [`InboundUpgrade::Future`](upgrade::InboundUpgrade::Future). type Future: Future> + Send + 'static; - /// Equivalent to [`InboundUpgrade::upgrade_inbound`](upgrade::InboundUpgrade::upgrade_inbound). + /// Equivalent to + /// [`InboundUpgrade::upgrade_inbound`](upgrade::InboundUpgrade::upgrade_inbound). fn upgrade_inbound(self, socket: Stream, info: Self::Info) -> Self::Future; } @@ -120,13 +128,15 @@ where } } -/// Wraps around a type that implements [`OutboundUpgradeSend`], [`InboundUpgradeSend`], or +/// Wraps around a type that implements [`OutboundUpgradeSend`], +/// [`InboundUpgradeSend`], or /// /// both, and implements [`OutboundUpgrade`](upgrade::OutboundUpgrade) and/or /// [`InboundUpgrade`](upgrade::InboundUpgrade). /// -/// > **Note**: This struct is mostly an implementation detail of the library and normally -/// > doesn't need to be used directly. +/// > **Note**: This struct is mostly an implementation detail of the library +/// > and normally +/// > doesn't need to be used directly. 
pub struct SendWrapper(pub T); impl upgrade::UpgradeInfo for SendWrapper { diff --git a/swarm/tests/connection_close.rs b/swarm/tests/connection_close.rs index 1d1a25eb84b..cfdfe70e4ab 100644 --- a/swarm/tests/connection_close.rs +++ b/swarm/tests/connection_close.rs @@ -1,16 +1,27 @@ -use libp2p_core::transport::PortUse; -use libp2p_core::upgrade::DeniedUpgrade; -use libp2p_core::{Endpoint, Multiaddr}; +use std::{ + convert::Infallible, + task::{Context, Poll}, +}; + +use libp2p_core::{transport::PortUse, upgrade::DeniedUpgrade, Endpoint, Multiaddr}; use libp2p_identity::PeerId; -use libp2p_swarm::handler::ConnectionEvent; use libp2p_swarm::{ - ConnectionDenied, ConnectionHandler, ConnectionHandlerEvent, ConnectionId, FromSwarm, - NetworkBehaviour, SubstreamProtocol, Swarm, SwarmEvent, THandler, THandlerInEvent, - THandlerOutEvent, ToSwarm, + handler::ConnectionEvent, + ConnectionDenied, + ConnectionHandler, + ConnectionHandlerEvent, + ConnectionId, + FromSwarm, + NetworkBehaviour, + SubstreamProtocol, + Swarm, + SwarmEvent, + THandler, + THandlerInEvent, + THandlerOutEvent, + ToSwarm, }; use libp2p_swarm_test::SwarmExt; -use std::convert::Infallible; -use std::task::{Context, Poll}; #[async_std::test] async fn sends_remaining_events_to_behaviour_on_connection_close() { diff --git a/swarm/tests/listener.rs b/swarm/tests/listener.rs index 74b23cf3f7f..e5708c78c28 100644 --- a/swarm/tests/listener.rs +++ b/swarm/tests/listener.rs @@ -7,15 +7,28 @@ use std::{ use libp2p_core::{ multiaddr::Protocol, transport::{ListenerId, PortUse}, - Endpoint, Multiaddr, + Endpoint, + Multiaddr, }; use libp2p_identity::PeerId; use libp2p_swarm::{ - derive_prelude::NewListener, dummy, ConnectionDenied, ConnectionId, FromSwarm, ListenOpts, - ListenerClosed, ListenerError, NetworkBehaviour, NewListenAddr, Swarm, SwarmEvent, THandler, - THandlerInEvent, THandlerOutEvent, ToSwarm, + derive_prelude::NewListener, + dummy, + ConnectionDenied, + ConnectionId, + FromSwarm, + ListenOpts, + 
ListenerClosed, + ListenerError, + NetworkBehaviour, + NewListenAddr, + Swarm, + SwarmEvent, + THandler, + THandlerInEvent, + THandlerOutEvent, + ToSwarm, }; - use libp2p_swarm_test::SwarmExt; #[async_std::test] diff --git a/swarm/tests/swarm_derive.rs b/swarm/tests/swarm_derive.rs index 334d1b9d304..c51779d2522 100644 --- a/swarm/tests/swarm_derive.rs +++ b/swarm/tests/swarm_derive.rs @@ -18,27 +18,34 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. +use std::fmt::Debug; + use futures::StreamExt; use libp2p_core::{transport::PortUse, Endpoint, Multiaddr}; use libp2p_identify as identify; use libp2p_ping as ping; use libp2p_swarm::{ - behaviour::FromSwarm, dummy, ConnectionDenied, NetworkBehaviour, SwarmEvent, THandler, - THandlerInEvent, THandlerOutEvent, + behaviour::FromSwarm, + dummy, + ConnectionDenied, + NetworkBehaviour, + SwarmEvent, + THandler, + THandlerInEvent, + THandlerOutEvent, }; -use std::fmt::Debug; /// Small utility to check that a type implements `NetworkBehaviour`. #[allow(dead_code)] fn require_net_behaviour() {} // TODO: doesn't compile -/*#[test] -fn empty() { - #[allow(dead_code)] - #[derive(NetworkBehaviour)] - struct Foo {} -}*/ +// #[test] +// fn empty() { +// #[allow(dead_code)] +// #[derive(NetworkBehaviour)] +// struct Foo {} +// } #[test] fn one_field() { @@ -378,7 +385,8 @@ fn with_generics_constrained() { struct Marked; impl Mark for Marked {} - /// A struct with a generic constraint, for which we manually implement `NetworkBehaviour`. + /// A struct with a generic constraint, for which we manually implement + /// `NetworkBehaviour`. 
#[allow(dead_code)] struct Bar { a: A, @@ -537,10 +545,10 @@ fn multiple_behaviour_attributes() { #[test] fn custom_out_event_no_type_parameters() { + use std::task::{Context, Poll}; + use libp2p_identity::PeerId; use libp2p_swarm::{ConnectionId, ToSwarm}; - use std::task::Context; - use std::task::Poll; pub(crate) struct TemplatedBehaviour { _data: T, diff --git a/transports/dns/src/lib.rs b/transports/dns/src/lib.rs index 7d92cc8ecfc..02a28a32625 100644 --- a/transports/dns/src/lib.rs +++ b/transports/dns/src/lib.rs @@ -21,15 +21,16 @@ //! # [DNS name resolution](https://github.com/libp2p/specs/blob/master/addressing/README.md#ip-and-name-resolution) //! [`Transport`] for libp2p. //! -//! This crate provides the type [`async_std::Transport`] and [`tokio::Transport`] -//! for use with `async-std` and `tokio`, +//! This crate provides the type [`async_std::Transport`] and +//! [`tokio::Transport`] for use with `async-std` and `tokio`, //! respectively. //! -//! A [`Transport`] is an address-rewriting [`libp2p_core::Transport`] wrapper around -//! an inner `Transport`. The composed transport behaves like the inner -//! transport, except that [`libp2p_core::Transport::dial`] resolves `/dns/...`, `/dns4/...`, -//! `/dns6/...` and `/dnsaddr/...` components of the given `Multiaddr` through -//! a DNS, replacing them with the resolved protocols (typically TCP/IP). +//! A [`Transport`] is an address-rewriting [`libp2p_core::Transport`] wrapper +//! around an inner `Transport`. The composed transport behaves like the inner +//! transport, except that [`libp2p_core::Transport::dial`] resolves `/dns/...`, +//! `/dns4/...`, `/dns6/...` and `/dnsaddr/...` components of the given +//! `Multiaddr` through a DNS, replacing them with the resolved protocols +//! (typically TCP/IP). //! //! The `async-std` feature and hence the [`async_std::Transport`] are //! enabled by default. Tokio users can furthermore opt-in @@ -54,12 +55,14 @@ //! 
platform specific APIs to extract the host's DNS configuration (if possible) //! and provide a custom [`ResolverConfig`]. //! -//![trust-dns-resolver]: https://docs.rs/trust-dns-resolver/latest/trust_dns_resolver/#dns-over-tls-and-dns-over-https +//! [trust-dns-resolver]: https://docs.rs/trust-dns-resolver/latest/trust_dns_resolver/#dns-over-tls-and-dns-over-https #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] #[cfg(feature = "async-std")] pub mod async_std { + use std::{io, sync::Arc}; + use async_std_resolver::AsyncStdResolver; use futures::FutureExt; use hickory_resolver::{ @@ -67,20 +70,21 @@ pub mod async_std { system_conf, }; use parking_lot::Mutex; - use std::{io, sync::Arc}; - /// A `Transport` wrapper for performing DNS lookups when dialing `Multiaddr`esses - /// using `async-std` for all async I/O. + /// A `Transport` wrapper for performing DNS lookups when dialing + /// `Multiaddr`esses using `async-std` for all async I/O. pub type Transport = crate::Transport; impl Transport { - /// Creates a new [`Transport`] from the OS's DNS configuration and defaults. + /// Creates a new [`Transport`] from the OS's DNS configuration and + /// defaults. pub async fn system(inner: T) -> Result, io::Error> { let (cfg, opts) = system_conf::read_system_conf()?; Ok(Self::custom(inner, cfg, opts).await) } - /// Creates a [`Transport`] with a custom resolver configuration and options. + /// Creates a [`Transport`] with a custom resolver configuration and + /// options. pub async fn custom(inner: T, cfg: ResolverConfig, opts: ResolverOpts) -> Transport { Transport { inner: Arc::new(Mutex::new(inner)), @@ -116,16 +120,18 @@ pub mod async_std { #[cfg(feature = "tokio")] pub mod tokio { + use std::sync::Arc; + use hickory_resolver::{system_conf, TokioAsyncResolver}; use parking_lot::Mutex; - use std::sync::Arc; - /// A `Transport` wrapper for performing DNS lookups when dialing `Multiaddr`esses - /// using `tokio` for all async I/O. 
+ /// A `Transport` wrapper for performing DNS lookups when dialing + /// `Multiaddr`esses using `tokio` for all async I/O. pub type Transport = crate::Transport; impl Transport { - /// Creates a new [`Transport`] from the OS's DNS configuration and defaults. + /// Creates a new [`Transport`] from the OS's DNS configuration and + /// defaults. pub fn system(inner: T) -> Result, std::io::Error> { let (cfg, opts) = system_conf::read_system_conf()?; Ok(Self::custom(inner, cfg, opts)) @@ -146,18 +152,12 @@ pub mod tokio { } } -use async_trait::async_trait; -use futures::{future::BoxFuture, prelude::*}; -use libp2p_core::{ - multiaddr::{Multiaddr, Protocol}, - transport::{DialOpts, ListenerId, TransportError, TransportEvent}, -}; -use parking_lot::Mutex; -use smallvec::SmallVec; -use std::io; -use std::net::{Ipv4Addr, Ipv6Addr}; use std::{ - error, fmt, iter, + error, + fmt, + io, + iter, + net::{Ipv4Addr, Ipv6Addr}, ops::DerefMut, pin::Pin, str, @@ -165,12 +165,24 @@ use std::{ task::{Context, Poll}, }; -pub use hickory_resolver::config::{ResolverConfig, ResolverOpts}; -pub use hickory_resolver::error::{ResolveError, ResolveErrorKind}; -use hickory_resolver::lookup::{Ipv4Lookup, Ipv6Lookup, TxtLookup}; -use hickory_resolver::lookup_ip::LookupIp; -use hickory_resolver::name_server::ConnectionProvider; -use hickory_resolver::AsyncResolver; +use async_trait::async_trait; +use futures::{future::BoxFuture, prelude::*}; +pub use hickory_resolver::{ + config::{ResolverConfig, ResolverOpts}, + error::{ResolveError, ResolveErrorKind}, +}; +use hickory_resolver::{ + lookup::{Ipv4Lookup, Ipv6Lookup, TxtLookup}, + lookup_ip::LookupIp, + name_server::ConnectionProvider, + AsyncResolver, +}; +use libp2p_core::{ + multiaddr::{Multiaddr, Protocol}, + transport::{DialOpts, ListenerId, TransportError, TransportEvent}, +}; +use parking_lot::Mutex; +use smallvec::SmallVec; /// The prefix for `dnsaddr` protocol TXT record lookups. 
const DNSADDR_PREFIX: &str = "_dnsaddr."; @@ -191,7 +203,8 @@ const MAX_DNS_LOOKUPS: usize = 32; const MAX_TXT_RECORDS: usize = 16; /// A [`Transport`] for performing DNS lookups when dialing `Multiaddr`esses. -/// You shouldn't need to use this type directly. Use [`tokio::Transport`] or [`async_std::Transport`] instead. +/// You shouldn't need to use this type directly. Use [`tokio::Transport`] or +/// [`async_std::Transport`] instead. #[derive(Debug)] pub struct Transport { /// The underlying transport. @@ -404,7 +417,8 @@ pub enum Error { /// DNS resolution failed. #[allow(clippy::enum_variant_names)] ResolveError(ResolveError), - /// DNS resolution was successful, but the underlying transport refused the resolved address. + /// DNS resolution was successful, but the underlying transport refused the + /// resolved address. MultiaddrNotSupported(Multiaddr), /// DNS resolution involved too many lookups. /// @@ -458,9 +472,9 @@ enum Resolved<'a> { Addrs(Vec), } -/// Asynchronously resolves the domain name of a `Dns`, `Dns4`, `Dns6` or `Dnsaddr` protocol -/// component. If the given protocol is of a different type, it is returned unchanged as a -/// [`Resolved::One`]. +/// Asynchronously resolves the domain name of a `Dns`, `Dns4`, `Dns6` or +/// `Dnsaddr` protocol component. If the given protocol is of a different type, +/// it is returned unchanged as a [`Resolved::One`]. 
fn resolve<'a, E: 'a + Send, R: Resolver>( proto: &Protocol<'a>, resolver: &'a R, @@ -613,15 +627,17 @@ where #[cfg(all(test, any(feature = "tokio", feature = "async-std")))] mod tests { - use super::*; use futures::future::BoxFuture; use libp2p_core::{ multiaddr::{Multiaddr, Protocol}, transport::{PortUse, TransportError, TransportEvent}, - Endpoint, Transport, + Endpoint, + Transport, }; use libp2p_identity::PeerId; + use super::*; + #[test] fn basic_resolve() { let _ = tracing_subscriber::fmt() @@ -713,7 +729,13 @@ mod tests { // an entry with suffix `/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN`, // i.e. a bootnode with such a peer ID. let _ = transport - .dial("/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN".parse().unwrap(), dial_opts) + .dial( + "/dnsaddr/bootstrap.libp2p.io/p2p/\ + QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN" + .parse() + .unwrap(), + dial_opts, + ) .unwrap() .await .unwrap(); diff --git a/transports/noise/src/io.rs b/transports/noise/src/io.rs index 9cd4cfed52a..f22d5c1ee56 100644 --- a/transports/noise/src/io.rs +++ b/transports/noise/src/io.rs @@ -22,18 +22,19 @@ mod framed; pub(crate) mod handshake; -use asynchronous_codec::Framed; -use bytes::Bytes; -use framed::{Codec, MAX_FRAME_LEN}; -use futures::prelude::*; -use futures::ready; use std::{ cmp::min, - fmt, io, + fmt, + io, pin::Pin, task::{Context, Poll}, }; +use asynchronous_codec::Framed; +use bytes::Bytes; +use framed::{Codec, MAX_FRAME_LEN}; +use futures::{prelude::*, ready}; + /// A noise session to a remote. /// /// `T` is the type of the underlying I/O resource. diff --git a/transports/noise/src/io/framed.rs b/transports/noise/src/io/framed.rs index 17254efb0a9..9bbb57e4e2c 100644 --- a/transports/noise/src/io/framed.rs +++ b/transports/noise/src/io/framed.rs @@ -18,18 +18,21 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -//! 
Provides a [`Codec`] type implementing the [`Encoder`] and [`Decoder`] traits. +//! Provides a [`Codec`] type implementing the [`Encoder`] and [`Decoder`] +//! traits. //! -//! Alongside a [`asynchronous_codec::Framed`] this provides a [Sink](futures::Sink) -//! and [Stream](futures::Stream) for length-delimited Noise protocol messages. +//! Alongside a [`asynchronous_codec::Framed`] this provides a +//! [Sink](futures::Sink) and [Stream](futures::Stream) for length-delimited +//! Noise protocol messages. + +use std::{io, mem::size_of}; -use super::handshake::proto; -use crate::{protocol::PublicKey, Error}; use asynchronous_codec::{Decoder, Encoder}; use bytes::{Buf, Bytes, BytesMut}; use quick_protobuf::{BytesReader, MessageRead, MessageWrite, Writer}; -use std::io; -use std::mem::size_of; + +use super::handshake::proto; +use crate::{protocol::PublicKey, Error}; /// Max. size of a noise message. const MAX_NOISE_MSG_LEN: usize = 65535; @@ -73,16 +76,18 @@ impl Codec { !self.session.is_initiator() } - /// Converts the underlying Noise session from the [`snow::HandshakeState`] to a - /// [`snow::TransportState`] once the handshake is complete, including the static - /// DH [`PublicKey`] of the remote if received. + /// Converts the underlying Noise session from the [`snow::HandshakeState`] + /// to a [`snow::TransportState`] once the handshake is complete, + /// including the static DH [`PublicKey`] of the remote if received. /// /// If the Noise protocol session state does not permit transitioning to - /// transport mode because the handshake is incomplete, an error is returned. + /// transport mode because the handshake is incomplete, an error is + /// returned. /// - /// An error is also returned if the remote's static DH key is not present or - /// cannot be parsed, as that indicates a fatal handshake error for the noise - /// `XX` pattern, which is the only handshake protocol libp2p currently supports. 
+ /// An error is also returned if the remote's static DH key is not present + /// or cannot be parsed, as that indicates a fatal handshake error for + /// the noise `XX` pattern, which is the only handshake protocol libp2p + /// currently supports. pub(crate) fn into_transport(self) -> Result<(PublicKey, Codec), Error> { let dh_remote_pubkey = self.session.get_remote_static().ok_or_else(|| { Error::Io(io::Error::new( @@ -170,7 +175,8 @@ impl Decoder for Codec { /// Encrypts the given cleartext to `dst`. /// -/// This is a standalone function to allow us reusing the `encrypt_buffer` and to use to across different session states of the noise protocol. +/// This is a standalone function to allow us reusing the `encrypt_buffer` and +/// to use it across different session states of the noise protocol. fn encrypt( cleartext: &[u8], dst: &mut BytesMut, @@ -191,8 +197,8 @@ fn encrypt( /// Encrypts the given ciphertext. /// -/// This is a standalone function so we can use it across different session states of the noise protocol. -/// In case `ciphertext` does not contain enough bytes to decrypt the entire frame, `Ok(None)` is returned. +/// This is a standalone function so we can use it across different session +/// states of the noise protocol. In case `ciphertext` does not contain enough +/// bytes to decrypt the entire frame, `Ok(None)` is returned.
fn decrypt( ciphertext: &mut BytesMut, decrypt_fn: impl FnOnce(&[u8], &mut [u8]) -> Result, diff --git a/transports/noise/src/io/handshake.rs b/transports/noise/src/io/handshake.rs index 8993a5795b6..af4861d6b1d 100644 --- a/transports/noise/src/io/handshake.rs +++ b/transports/noise/src/io/handshake.rs @@ -23,21 +23,23 @@ pub(super) mod proto { #![allow(unreachable_pub)] include!("../generated/mod.rs"); - pub use self::payload::proto::NoiseExtensions; - pub use self::payload::proto::NoiseHandshakePayload; + pub use self::payload::proto::{NoiseExtensions, NoiseHandshakePayload}; } -use super::framed::Codec; -use crate::io::Output; -use crate::protocol::{KeypairIdentity, PublicKey, STATIC_KEY_DOMAIN}; -use crate::Error; +use std::{collections::HashSet, io, mem}; + use asynchronous_codec::Framed; use futures::prelude::*; use libp2p_identity as identity; use multihash::Multihash; use quick_protobuf::MessageWrite; -use std::collections::HashSet; -use std::{io, mem}; + +use super::framed::Codec; +use crate::{ + io::Output, + protocol::{KeypairIdentity, PublicKey, STATIC_KEY_DOMAIN}, + Error, +}; ////////////////////////////////////////////////////////////////////////////// // Internal @@ -116,7 +118,8 @@ where // Check WebTransport certhashes that responder reported back to us. if is_initiator { - // We check only if we care (i.e. Config::with_webtransport_certhashes was used). + // We check only if we care (i.e. Config::with_webtransport_certhashes was + // used). if let Some(expected_certhashes) = self.responder_webtransport_certhashes { let ext = self.remote_extensions.ok_or_else(|| { Error::UnknownWebTransportCerthashes( @@ -142,12 +145,16 @@ where } } -/// Maps the provided [`Framed`] from the [`snow::HandshakeState`] into the [`snow::TransportState`]. +/// Maps the provided [`Framed`] from the [`snow::HandshakeState`] into the +/// [`snow::TransportState`]. 
/// -/// This is a bit tricky because [`Framed`] cannot just be de-composed but only into its [`FramedParts`](asynchronous_codec::FramedParts). -/// However, we need to retain the original [`FramedParts`](asynchronous_codec::FramedParts) because they contain the active read & write buffers. +/// This is a bit tricky because [`Framed`] cannot just be de-composed but only +/// into its [`FramedParts`](asynchronous_codec::FramedParts). However, we need +/// to retain the original [`FramedParts`](asynchronous_codec::FramedParts) +/// because they contain the active read & write buffers. /// -/// Those are likely **not** empty because the remote may directly write to the stream again after the noise handshake finishes. +/// Those are likely **not** empty because the remote may directly write to the +/// stream again after the noise handshake finishes. fn map_into_transport( framed: Framed>, ) -> Result<(PublicKey, Framed>), Error> @@ -218,7 +225,8 @@ where Ok(()) } -/// A future for receiving a Noise handshake message with a payload identifying the remote. +/// A future for receiving a Noise handshake message with a payload identifying +/// the remote. pub(crate) async fn recv_identity(state: &mut State) -> Result<(), Error> where T: AsyncRead + Unpin, @@ -237,7 +245,8 @@ where Ok(()) } -/// Send a Noise handshake message with a payload identifying the local node to the remote. +/// Send a Noise handshake message with a payload identifying the local node to +/// the remote. pub(crate) async fn send_identity(state: &mut State) -> Result<(), Error> where T: AsyncRead + AsyncWrite + Unpin, @@ -249,7 +258,8 @@ where pb.identity_sig.clone_from(&state.identity.signature); - // If this is the responder then send WebTransport certhashes to initiator, if any. + // If this is the responder then send WebTransport certhashes to initiator, if + // any. 
if state.io.codec().is_responder() { if let Some(ref certhashes) = state.responder_webtransport_certhashes { let ext = pb diff --git a/transports/noise/src/lib.rs b/transports/noise/src/lib.rs index 2557e76e276..0c3b0fe32e5 100644 --- a/transports/noise/src/lib.rs +++ b/transports/noise/src/lib.rs @@ -20,33 +20,39 @@ //! [Noise protocol framework][noise] support for libp2p. //! -//! > **Note**: This crate is still experimental and subject to major breaking changes -//! > both on the API and the wire protocol. +//! > **Note**: This crate is still experimental and subject to major breaking +//! > changes +//! > both on the API and the wire protocol. //! -//! This crate provides `libp2p_core::InboundUpgrade` and `libp2p_core::OutboundUpgrade` -//! implementations for various noise handshake patterns (currently `IK`, `IX`, and `XX`) -//! over a particular choice of Diffie–Hellman key agreement (currently only X25519). +//! This crate provides `libp2p_core::InboundUpgrade` and +//! `libp2p_core::OutboundUpgrade` implementations for various noise handshake +//! patterns (currently `IK`, `IX`, and `XX`) over a particular choice of +//! Diffie–Hellman key agreement (currently only X25519). //! -//! > **Note**: Only the `XX` handshake pattern is currently guaranteed to provide -//! > interoperability with other libp2p implementations. +//! > **Note**: Only the `XX` handshake pattern is currently guaranteed to +//! > provide +//! > interoperability with other libp2p implementations. //! -//! All upgrades produce as output a pair, consisting of the remote's static public key -//! and a `NoiseOutput` which represents the established cryptographic session with the -//! remote, implementing `futures::io::AsyncRead` and `futures::io::AsyncWrite`. +//! All upgrades produce as output a pair, consisting of the remote's static +//! public key and a `NoiseOutput` which represents the established +//! cryptographic session with the remote, implementing `futures::io::AsyncRead` +//! 
and `futures::io::AsyncWrite`. //! //! # Usage //! //! Example: //! //! ``` -//! use libp2p_core::{Transport, upgrade, transport::MemoryTransport}; -//! use libp2p_noise as noise; +//! use libp2p_core::{transport::MemoryTransport, upgrade, Transport}; //! use libp2p_identity as identity; +//! use libp2p_noise as noise; //! //! # fn main() { //! let id_keys = identity::Keypair::generate_ed25519(); //! let noise = noise::Config::new(&id_keys).unwrap(); -//! let builder = MemoryTransport::default().upgrade(upgrade::Version::V1).authenticate(noise); +//! let builder = MemoryTransport::default() +//! .upgrade(upgrade::Version::V1) +//! .authenticate(noise); //! // let transport = builder.multiplex(...); //! # } //! ``` @@ -58,22 +64,25 @@ mod io; mod protocol; -pub use io::Output; +use std::{collections::HashSet, fmt::Write, pin::Pin}; -use crate::handshake::State; -use crate::io::handshake; -use crate::protocol::{noise_params_into_builder, AuthenticKeypair, Keypair, PARAMS_XX}; use futures::prelude::*; -use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}; -use libp2p_core::UpgradeInfo; +pub use io::Output; +use libp2p_core::{ + upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}, + UpgradeInfo, +}; use libp2p_identity as identity; use libp2p_identity::PeerId; use multiaddr::Protocol; use multihash::Multihash; use snow::params::NoiseParams; -use std::collections::HashSet; -use std::fmt::Write; -use std::pin::Pin; + +use crate::{ + handshake::State, + io::handshake, + protocol::{noise_params_into_builder, AuthenticKeypair, Keypair, PARAMS_XX}, +}; /// The configuration for the noise handshake. #[derive(Clone)] @@ -84,15 +93,17 @@ pub struct Config { /// Prologue to use in the noise handshake. /// - /// The prologue can contain arbitrary data that will be hashed into the noise handshake. - /// For the handshake to succeed, both parties must set the same prologue. 
+ /// The prologue can contain arbitrary data that will be hashed into the + /// noise handshake. For the handshake to succeed, both parties must set + /// the same prologue. /// /// For further information, see . prologue: Vec, } impl Config { - /// Construct a new configuration for the noise handshake using the XX handshake pattern. + /// Construct a new configuration for the noise handshake using the XX + /// handshake pattern. pub fn new(identity: &identity::Keypair) -> Result { let noise_keys = Keypair::new().into_authentic(identity)?; @@ -112,8 +123,8 @@ impl Config { /// Set WebTransport certhashes extension. /// - /// In case of initiator, these certhashes will be used to validate the ones reported by - /// responder. + /// In case of initiator, these certhashes will be used to validate the ones + /// reported by responder. /// /// In case of responder, these certhashes will be reported to initiator. pub fn with_webtransport_certhashes(mut self, certhashes: HashSet>) -> Self { diff --git a/transports/noise/src/protocol.rs b/transports/noise/src/protocol.rs index 29d0c81e2e4..5202a8c011d 100644 --- a/transports/noise/src/protocol.rs +++ b/transports/noise/src/protocol.rs @@ -20,7 +20,6 @@ //! Components of a Noise protocol. -use crate::Error; use libp2p_identity as identity; use once_cell::sync::Lazy; use rand::{Rng as _, SeedableRng}; @@ -28,6 +27,8 @@ use snow::params::NoiseParams; use x25519_dalek::{x25519, X25519_BASEPOINT_BYTES}; use zeroize::Zeroize; +use crate::Error; + /// Prefix of static key signatures for domain separation. pub(crate) const STATIC_KEY_DOMAIN: &str = "noise-libp2p-static-key:"; @@ -84,7 +85,8 @@ impl Keypair { } /// Turn this DH keypair into a [`AuthenticKeypair`], i.e. a DH keypair that - /// is authentic w.r.t. the given identity keypair, by signing the DH public key. + /// is authentic w.r.t. the given identity keypair, by signing the DH public + /// key. 
pub(crate) fn into_authentic( self, id_keys: &identity::Keypair, diff --git a/transports/noise/tests/smoke.rs b/transports/noise/tests/smoke.rs index 62b5d41d6b9..abc5a038f93 100644 --- a/transports/noise/tests/smoke.rs +++ b/transports/noise/tests/smoke.rs @@ -18,14 +18,17 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. +use std::io; + use futures::prelude::*; -use libp2p_core::transport::{MemoryTransport, Transport}; -use libp2p_core::upgrade; -use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}; +use libp2p_core::{ + transport::{MemoryTransport, Transport}, + upgrade, + upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}, +}; use libp2p_identity as identity; use libp2p_noise as noise; use quickcheck::*; -use std::io; use tracing_subscriber::EnvFilter; #[allow(dead_code)] diff --git a/transports/noise/tests/webtransport_certhashes.rs b/transports/noise/tests/webtransport_certhashes.rs index b3c924f8188..7fa28da0ebe 100644 --- a/transports/noise/tests/webtransport_certhashes.rs +++ b/transports/noise/tests/webtransport_certhashes.rs @@ -1,8 +1,9 @@ +use std::collections::HashSet; + use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}; use libp2p_identity as identity; use libp2p_noise as noise; use multihash::Multihash; -use std::collections::HashSet; const SHA_256_MH: u64 = 0x12; diff --git a/transports/plaintext/src/error.rs b/transports/plaintext/src/error.rs index 7480874a85e..ecb03d3ffe6 100644 --- a/transports/plaintext/src/error.rs +++ b/transports/plaintext/src/error.rs @@ -18,9 +18,7 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use std::error; -use std::fmt; -use std::io::Error as IoError; +use std::{error, fmt, io::Error as IoError}; #[derive(Debug)] pub enum Error { @@ -33,7 +31,8 @@ pub enum Error { /// Failed to parse public key from bytes in protobuf message. 
InvalidPublicKey(libp2p_identity::DecodingError), - /// Failed to parse the [`PeerId`](libp2p_identity::PeerId) from bytes in the protobuf message. + /// Failed to parse the [`PeerId`](libp2p_identity::PeerId) from bytes in + /// the protobuf message. InvalidPeerId(libp2p_identity::ParseError), /// The peer id of the exchange isn't consistent with the remote public key. diff --git a/transports/plaintext/src/handshake.rs b/transports/plaintext/src/handshake.rs index ddd5f7f8a9b..851a5d771a9 100644 --- a/transports/plaintext/src/handshake.rs +++ b/transports/plaintext/src/handshake.rs @@ -18,20 +18,25 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::error::{DecodeError, Error}; -use crate::proto::Exchange; -use crate::Config; +use std::io::{Error as IoError, ErrorKind as IoErrorKind}; + use asynchronous_codec::{Framed, FramedParts}; use bytes::Bytes; use futures::prelude::*; use libp2p_identity::{PeerId, PublicKey}; -use std::io::{Error as IoError, ErrorKind as IoErrorKind}; + +use crate::{ + error::{DecodeError, Error}, + proto::Exchange, + Config, +}; pub(crate) async fn handshake(socket: S, config: Config) -> Result<(S, PublicKey, Bytes), Error> where S: AsyncRead + AsyncWrite + Send + Unpin, { - // The handshake messages all start with a variable-length integer indicating the size. + // The handshake messages all start with a variable-length integer indicating + // the size. 
let mut framed_socket = Framed::new(socket, quick_protobuf_codec::Codec::::new(100)); tracing::trace!("sending exchange to remote"); diff --git a/transports/plaintext/src/lib.rs b/transports/plaintext/src/lib.rs index 4a322d63fab..28d0d522c7f 100644 --- a/transports/plaintext/src/lib.rs +++ b/transports/plaintext/src/lib.rs @@ -22,22 +22,24 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -use crate::error::Error; - -use bytes::Bytes; -use futures::future::BoxFuture; -use futures::prelude::*; -use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}; -use libp2p_core::UpgradeInfo; -use libp2p_identity as identity; -use libp2p_identity::PeerId; -use libp2p_identity::PublicKey; use std::{ - io, iter, + io, + iter, pin::Pin, task::{Context, Poll}, }; +use bytes::Bytes; +use futures::{future::BoxFuture, prelude::*}; +use libp2p_core::{ + upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}, + UpgradeInfo, +}; +use libp2p_identity as identity; +use libp2p_identity::{PeerId, PublicKey}; + +use crate::error::Error; + mod error; mod handshake; mod proto { diff --git a/transports/pnet/src/crypt_writer.rs b/transports/pnet/src/crypt_writer.rs index 06f932fbe71..5562716dfbb 100644 --- a/transports/pnet/src/crypt_writer.rs +++ b/transports/pnet/src/crypt_writer.rs @@ -18,6 +18,8 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. +use std::{fmt, pin::Pin}; + use futures::{ io::{self, AsyncWrite}, ready, @@ -25,7 +27,6 @@ use futures::{ }; use pin_project::pin_project; use salsa20::{cipher::StreamCipher, XSalsa20}; -use std::{fmt, pin::Pin}; /// A writer that encrypts and forwards to an inner writer #[pin_project] @@ -56,9 +57,11 @@ impl CryptWriter { /// Write the contents of a [`Vec`] into an [`AsyncWrite`]. /// -/// The handling 0 byte progress and the Interrupted error was taken from BufWriter in async_std. 
+/// The handling 0 byte progress and the Interrupted error was taken from +/// BufWriter in async_std. /// -/// If this fn returns Ready(Ok(())), the buffer has been completely flushed and is empty. +/// If this fn returns Ready(Ok(())), the buffer has been completely flushed and +/// is empty. fn poll_flush_buf( inner: &mut Pin<&mut W>, buf: &mut Vec, @@ -74,7 +77,8 @@ fn poll_flush_buf( // we made progress, so try again written += n; } else { - // we got Ok but got no progress whatsoever, so bail out so we don't spin writing 0 bytes. + // we got Ok but got no progress whatsoever, so bail out so we don't spin + // writing 0 bytes. ret = Poll::Ready(Err(io::Error::new( io::ErrorKind::WriteZero, "Failed to write buffered data", @@ -83,7 +87,8 @@ fn poll_flush_buf( } } Poll::Ready(Err(e)) => { - // Interrupted is the only error that we consider to be recoverable by trying again + // Interrupted is the only error that we consider to be recoverable by trying + // again if e.kind() != io::ErrorKind::Interrupted { // for any other error, don't try again ret = Poll::Ready(Err(e)); diff --git a/transports/pnet/src/lib.rs b/transports/pnet/src/lib.rs index 083ffff36a3..d1ce35887bd 100644 --- a/transports/pnet/src/lib.rs +++ b/transports/pnet/src/lib.rs @@ -19,23 +19,13 @@ // DEALINGS IN THE SOFTWARE. //! Implementation of the [pnet](https://github.com/libp2p/specs/blob/master/pnet/Private-Networks-PSK-V1.md) protocol. -//! //| The `pnet` protocol implements *Pre-shared Key Based Private Networks in libp2p*. -//! Libp2p nodes configured with a pre-shared key can only communicate with other nodes with -//! the same key. +//! Libp2p nodes configured with a pre-shared key can only communicate with +//! other nodes with the same key. 
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] mod crypt_writer; -use crypt_writer::CryptWriter; -use futures::prelude::*; -use pin_project::pin_project; -use rand::RngCore; -use salsa20::{ - cipher::{KeyIvInit, StreamCipher}, - Salsa20, XSalsa20, -}; -use sha3::{digest::ExtendableOutput, Shake128}; use std::{ error, fmt::{self, Write}, @@ -47,6 +37,17 @@ use std::{ task::{Context, Poll}, }; +use crypt_writer::CryptWriter; +use futures::prelude::*; +use pin_project::pin_project; +use rand::RngCore; +use salsa20::{ + cipher::{KeyIvInit, StreamCipher}, + Salsa20, + XSalsa20, +}; +use sha3::{digest::ExtendableOutput, Shake128}; + const KEY_SIZE: usize = 32; const NONCE_SIZE: usize = 24; const WRITE_BUFFER_SIZE: usize = 1024; @@ -168,7 +169,8 @@ pub enum KeyParseError { InvalidKeyEncoding, /// Key is of the wrong length InvalidKeyLength, - /// key string contains a char that is not consistent with the specified encoding + /// key string contains a char that is not consistent with the specified + /// encoding InvalidKeyChar(ParseIntError), } @@ -200,8 +202,8 @@ impl PnetConfig { /// upgrade a connection to use pre shared key encryption. /// - /// the upgrade works by both sides exchanging 24 byte nonces and then encrypting - /// subsequent traffic with XSalsa20 + /// the upgrade works by both sides exchanging 24 byte nonces and then + /// encrypting subsequent traffic with XSalsa20 pub async fn handshake( self, mut socket: TSocket, @@ -229,8 +231,8 @@ impl PnetConfig { } } -/// The result of a handshake. This implements AsyncRead and AsyncWrite and can therefore -/// be used as base for additional upgrades. +/// The result of a handshake. This implements AsyncRead and AsyncWrite and can +/// therefore be used as base for additional upgrades. 
#[pin_project] pub struct PnetOutput { #[pin] @@ -319,9 +321,10 @@ impl fmt::Display for PnetError { #[cfg(test)] mod tests { - use super::*; use quickcheck::*; + use super::*; + impl Arbitrary for PreSharedKey { fn arbitrary(g: &mut Gen) -> PreSharedKey { let key = core::array::from_fn(|_| u8::arbitrary(g)); @@ -367,7 +370,10 @@ mod tests { #[test] fn fingerprint() { // checked against go-ipfs output - let key = "/key/swarm/psk/1.0.0/\n/base16/\n6189c5cf0b87fb800c1a9feeda73c6ab5e998db48fb9e6a978575c770ceef683".parse::().unwrap(); + let key = "/key/swarm/psk/1.0.0/\n/base16/\n\ + 6189c5cf0b87fb800c1a9feeda73c6ab5e998db48fb9e6a978575c770ceef683" + .parse::() + .unwrap(); let expected = "45fc986bbc9388a11d939df26f730f0c"; let actual = key.fingerprint().to_string(); assert_eq!(expected, actual); diff --git a/transports/pnet/tests/smoke.rs b/transports/pnet/tests/smoke.rs index 79ffaeab447..563e949ec7b 100644 --- a/transports/pnet/tests/smoke.rs +++ b/transports/pnet/tests/smoke.rs @@ -1,10 +1,13 @@ use std::time::Duration; use futures::{future, AsyncRead, AsyncWrite, StreamExt}; -use libp2p_core::transport::MemoryTransport; -use libp2p_core::upgrade::Version; -use libp2p_core::Transport; -use libp2p_core::{multiaddr::Protocol, Multiaddr}; +use libp2p_core::{ + multiaddr::Protocol, + transport::MemoryTransport, + upgrade::Version, + Multiaddr, + Transport, +}; use libp2p_pnet::{PnetConfig, PreSharedKey}; use libp2p_swarm::{dummy, Config, NetworkBehaviour, Swarm, SwarmEvent}; diff --git a/transports/quic/src/config.rs b/transports/quic/src/config.rs index 2456ed3e36f..57fa876dd2e 100644 --- a/transports/quic/src/config.rs +++ b/transports/quic/src/config.rs @@ -18,19 +18,23 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE.
+use std::{sync::Arc, time::Duration}; + use quinn::{ crypto::rustls::{QuicClientConfig, QuicServerConfig}, - MtuDiscoveryConfig, VarInt, + MtuDiscoveryConfig, + VarInt, }; -use std::{sync::Arc, time::Duration}; /// Config for the transport. #[derive(Clone)] pub struct Config { /// Timeout for the initial handshake when establishing a connection. - /// The actual timeout is the minimum of this and the [`Config::max_idle_timeout`]. + /// The actual timeout is the minimum of this and the + /// [`Config::max_idle_timeout`]. pub handshake_timeout: Duration, - /// Maximum duration of inactivity in ms to accept before timing out the connection. + /// Maximum duration of inactivity in ms to accept before timing out the + /// connection. pub max_idle_timeout: u32, /// Period of inactivity before sending a keep-alive packet. /// Must be set lower than the idle_timeout of both @@ -46,17 +50,17 @@ pub struct Config { /// Max unacknowledged data in bytes that may be sent on a single stream. pub max_stream_data: u32, - /// Max unacknowledged data in bytes that may be sent in total on all streams - /// of a connection. + /// Max unacknowledged data in bytes that may be sent in total on all + /// streams of a connection. pub max_connection_data: u32, /// Support QUIC version draft-29 for dialing and listening. /// - /// Per default only QUIC Version 1 / [`libp2p_core::multiaddr::Protocol::QuicV1`] - /// is supported. + /// Per default only QUIC Version 1 / + /// [`libp2p_core::multiaddr::Protocol::QuicV1`] is supported. /// - /// If support for draft-29 is enabled servers support draft-29 and version 1 on all - /// QUIC listening addresses. + /// If support for draft-29 is enabled servers support draft-29 and version + /// 1 on all QUIC listening addresses. /// As client the version is chosen based on the remote's address. pub support_draft_29: bool, @@ -67,7 +71,8 @@ pub struct Config { /// Libp2p identity of the node. 
keypair: libp2p_identity::Keypair, - /// Parameters governing MTU discovery. See [`MtuDiscoveryConfig`] for details. + /// Parameters governing MTU discovery. See [`MtuDiscoveryConfig`] for + /// details. mtu_discovery_config: Option, } @@ -98,7 +103,8 @@ impl Config { } } - /// Set the upper bound to the max UDP payload size that MTU discovery will search for. + /// Set the upper bound to the max UDP payload size that MTU discovery will + /// search for. pub fn mtu_upper_bound(mut self, value: u16) -> Self { self.mtu_discovery_config .get_or_insert_with(Default::default) @@ -153,8 +159,8 @@ impl From for QuinnConfig { let mut server_config = quinn::ServerConfig::with_crypto(server_tls_config); server_config.transport = Arc::clone(&transport); // Disables connection migration. - // Long-term this should be enabled, however we then need to handle address change - // on connections in the `Connection`. + // Long-term this should be enabled, however we then need to handle address + // change on connections in the `Connection`. server_config.migration(false); let mut client_config = quinn::ClientConfig::new(client_tls_config); diff --git a/transports/quic/src/connection.rs b/transports/quic/src/connection.rs index 783258a0130..3fdbd7c55e0 100644 --- a/transports/quic/src/connection.rs +++ b/transports/quic/src/connection.rs @@ -21,18 +21,18 @@ mod connecting; mod stream; -pub use connecting::Connecting; -pub use stream::Stream; - -use crate::{ConnectionError, Error}; - -use futures::{future::BoxFuture, FutureExt}; -use libp2p_core::muxing::{StreamMuxer, StreamMuxerEvent}; use std::{ pin::Pin, task::{Context, Poll}, }; +pub use connecting::Connecting; +use futures::{future::BoxFuture, FutureExt}; +use libp2p_core::muxing::{StreamMuxer, StreamMuxerEvent}; +pub use stream::Stream; + +use crate::{ConnectionError, Error}; + /// State for a single opened QUIC connection. pub struct Connection { /// Underlying connection. 
@@ -52,8 +52,9 @@ pub struct Connection { impl Connection { /// Build a [`Connection`] from raw components. /// - /// This function assumes that the [`quinn::Connection`] is completely fresh and none of - /// its methods has ever been called. Failure to comply might lead to logic errors and panics. + /// This function assumes that the [`quinn::Connection`] is completely fresh + /// and none of its methods has ever been called. Failure to comply + /// might lead to logic errors and panics. fn new(connection: quinn::Connection) -> Self { Self { connection, diff --git a/transports/quic/src/connection/connecting.rs b/transports/quic/src/connection/connecting.rs index f6e397b4d1e..7ca0aa5e58c 100644 --- a/transports/quic/src/connection/connecting.rs +++ b/transports/quic/src/connection/connecting.rs @@ -18,9 +18,14 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -//! Future that drives a QUIC connection until is has performed its TLS handshake. +//! Future that drives a QUIC connection until it has performed its TLS +//! handshake. -use crate::{Connection, ConnectionError, Error}; +use std::{ pin::Pin, task::{Context, Poll}, time::Duration, }; use futures::{ future::{select, Either, FutureExt, Select}, @@ -29,11 +34,8 @@ use futures::{ futures_timer::Delay; use libp2p_identity::PeerId; use quinn::rustls::pki_types::CertificateDer; -use std::{ - pin::Pin, - task::{Context, Poll}, - time::Duration, -}; + +use crate::{Connection, ConnectionError, Error}; /// A QUIC connection currently being negotiated.
#[derive(Debug)] diff --git a/transports/quic/src/hole_punching.rs b/transports/quic/src/hole_punching.rs index a38d123a6a4..6f1961081d2 100644 --- a/transports/quic/src/hole_punching.rs +++ b/transports/quic/src/hole_punching.rs @@ -1,15 +1,14 @@ -use crate::{provider::Provider, Error}; - -use futures::future::Either; - -use rand::{distributions, Rng}; - -use std::convert::Infallible; use std::{ + convert::Infallible, net::{SocketAddr, UdpSocket}, time::Duration, }; +use futures::future::Either; +use rand::{distributions, Rng}; + +use crate::{provider::Provider, Error}; + pub(crate) async fn hole_puncher( socket: UdpSocket, remote_addr: SocketAddr, diff --git a/transports/quic/src/lib.rs b/transports/quic/src/lib.rs index 7ae649b6914..9f22903adc1 100644 --- a/transports/quic/src/lib.rs +++ b/transports/quic/src/lib.rs @@ -31,29 +31,33 @@ //! # #[cfg(feature = "async-std")] //! # fn main() -> std::io::Result<()> { //! # +//! use libp2p_core::{transport::ListenerId, Multiaddr, Transport}; //! use libp2p_quic as quic; -//! use libp2p_core::{Multiaddr, Transport, transport::ListenerId}; //! //! let keypair = libp2p_identity::Keypair::generate_ed25519(); //! let quic_config = quic::Config::new(&keypair); //! //! let mut quic_transport = quic::async_std::Transport::new(quic_config); //! -//! let addr = "/ip4/127.0.0.1/udp/12345/quic-v1".parse().expect("address should be valid"); -//! quic_transport.listen_on(ListenerId::next(), addr).expect("listen error."); +//! let addr = "/ip4/127.0.0.1/udp/12345/quic-v1" +//! .parse() +//! .expect("address should be valid"); +//! quic_transport +//! .listen_on(ListenerId::next(), addr) +//! .expect("listen error."); //! # //! # Ok(()) //! # } //! ``` //! -//! The [`GenTransport`] struct implements the [`libp2p_core::Transport`]. See the -//! documentation of [`libp2p_core`] and of libp2p in general to learn how to use the -//! [`Transport`][libp2p_core::Transport] trait. -//! -//! 
Note that QUIC provides transport, security, and multiplexing in a single protocol. Therefore, -//! QUIC connections do not need to be upgraded. You will get a compile-time error if you try. -//! Instead, you must pass all needed configuration into the constructor. +//! The [`GenTransport`] struct implements the [`libp2p_core::Transport`]. See +//! the documentation of [`libp2p_core`] and of libp2p in general to learn how +//! to use the [`Transport`][libp2p_core::Transport] trait. //! +//! Note that QUIC provides transport, security, and multiplexing in a single +//! protocol. Therefore, QUIC connections do not need to be upgraded. You will +//! get a compile-time error if you try. Instead, you must pass all needed +//! configuration into the constructor. #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] @@ -67,7 +71,6 @@ use std::net::SocketAddr; pub use config::Config; pub use connection::{Connecting, Connection, Stream}; - #[cfg(feature = "async-std")] pub use provider::async_std; #[cfg(feature = "tokio")] @@ -94,7 +97,8 @@ pub enum Error { #[error("Handshake with the remote timed out.")] HandshakeTimedOut, - /// Error when `Transport::dial_as_listener` is called without an active listener. + /// Error when `Transport::dial_as_listener` is called without an active + /// listener. #[error("Tried to dial as listener without an active listener.")] NoActiveListenerForDialAsListener, diff --git a/transports/quic/src/provider.rs b/transports/quic/src/provider.rs index 6f1122ee55f..b262dd18e8b 100644 --- a/transports/quic/src/provider.rs +++ b/transports/quic/src/provider.rs @@ -18,8 +18,6 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use futures::future::BoxFuture; -use if_watch::IfEvent; use std::{ io, net::{SocketAddr, UdpSocket}, @@ -27,6 +25,9 @@ use std::{ time::Duration, }; +use futures::future::BoxFuture; +use if_watch::IfEvent; + #[cfg(feature = "async-std")] pub mod async_std; #[cfg(feature = "tokio")] @@ -47,7 +48,8 @@ pub trait Provider: Unpin + Send + Sized + 'static { /// Run the corresponding runtime fn runtime() -> Runtime; - /// Create a new [`if_watch`] watcher that reports [`IfEvent`]s for network interface changes. + /// Create a new [`if_watch`] watcher that reports [`IfEvent`]s for network + /// interface changes. fn new_if_watcher() -> io::Result; /// Poll for an address change event. @@ -59,7 +61,8 @@ pub trait Provider: Unpin + Send + Sized + 'static { /// Sleep for specified amount of time. fn sleep(duration: Duration) -> BoxFuture<'static, ()>; - /// Sends data on the socket to the given address. On success, returns the number of bytes written. + /// Sends data on the socket to the given address. On success, returns the + /// number of bytes written. fn send_to<'a>( udp_socket: &'a UdpSocket, buf: &'a [u8], diff --git a/transports/quic/src/provider/async_std.rs b/transports/quic/src/provider/async_std.rs index a110058108c..b5c3ac917dc 100644 --- a/transports/quic/src/provider/async_std.rs +++ b/transports/quic/src/provider/async_std.rs @@ -18,7 +18,6 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use futures::{future::BoxFuture, FutureExt}; use std::{ io, net::UdpSocket, @@ -26,6 +25,8 @@ use std::{ time::Duration, }; +use futures::{future::BoxFuture, FutureExt}; + use crate::GenTransport; /// Transport with [`async-std`] runtime. 
diff --git a/transports/quic/src/provider/tokio.rs b/transports/quic/src/provider/tokio.rs index 9cb148d6ef2..83753faac01 100644 --- a/transports/quic/src/provider/tokio.rs +++ b/transports/quic/src/provider/tokio.rs @@ -18,7 +18,6 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use futures::{future::BoxFuture, FutureExt}; use std::{ io, net::{SocketAddr, UdpSocket}, @@ -26,6 +25,8 @@ use std::{ time::Duration, }; +use futures::{future::BoxFuture, FutureExt}; + use crate::GenTransport; /// Transport with [`tokio`] runtime. diff --git a/transports/quic/src/transport.rs b/transports/quic/src/transport.rs index 057d0f978d7..50fce588e88 100644 --- a/transports/quic/src/transport.rs +++ b/transports/quic/src/transport.rs @@ -18,51 +18,60 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::config::{Config, QuinnConfig}; -use crate::hole_punching::hole_puncher; -use crate::provider::Provider; -use crate::{ConnectError, Connecting, Connection, Error}; - -use futures::channel::oneshot; -use futures::future::{BoxFuture, Either}; -use futures::ready; -use futures::stream::StreamExt; -use futures::{prelude::*, stream::SelectAll}; +use std::{ + collections::{ + hash_map::{DefaultHasher, Entry}, + HashMap, + HashSet, + }, + fmt, + hash::{Hash, Hasher}, + io, + net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, UdpSocket}, + pin::Pin, + task::{Context, Poll, Waker}, + time::Duration, +}; +use futures::{ + channel::oneshot, + future::{BoxFuture, Either}, + prelude::*, + ready, + stream::{SelectAll, StreamExt}, +}; use if_watch::IfEvent; - -use libp2p_core::transport::{DialOpts, PortUse}; -use libp2p_core::Endpoint; use libp2p_core::{ multiaddr::{Multiaddr, Protocol}, - transport::{ListenerId, TransportError, TransportEvent}, + transport::{DialOpts, ListenerId, PortUse, TransportError, TransportEvent}, + Endpoint, Transport, }; use libp2p_identity::PeerId; use 
socket2::{Domain, Socket, Type}; -use std::collections::hash_map::{DefaultHasher, Entry}; -use std::collections::{HashMap, HashSet}; -use std::hash::{Hash, Hasher}; -use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, UdpSocket}; -use std::time::Duration; -use std::{fmt, io}; -use std::{ - net::SocketAddr, - pin::Pin, - task::{Context, Poll, Waker}, + +use crate::{ + config::{Config, QuinnConfig}, + hole_punching::hole_puncher, + provider::Provider, + ConnectError, + Connecting, + Connection, + Error, }; /// Implementation of the [`Transport`] trait for QUIC. /// -/// By default only QUIC Version 1 (RFC 9000) is supported. In the [`Multiaddr`] this maps to -/// [`libp2p_core::multiaddr::Protocol::QuicV1`]. -/// The [`libp2p_core::multiaddr::Protocol::Quic`] codepoint is interpreted as QUIC version -/// draft-29 and only supported if [`Config::support_draft_29`] is set to `true`. -/// Note that in that case servers support both version an all QUIC listening addresses. +/// By default only QUIC Version 1 (RFC 9000) is supported. In the [`Multiaddr`] +/// this maps to [`libp2p_core::multiaddr::Protocol::QuicV1`]. +/// The [`libp2p_core::multiaddr::Protocol::Quic`] codepoint is interpreted as +/// QUIC version draft-29 and only supported if [`Config::support_draft_29`] is +/// set to `true`. Note that in that case servers support both version an all +/// QUIC listening addresses. /// -/// Version draft-29 should only be used to connect to nodes from other libp2p implementations -/// that do not support `QuicV1` yet. Support for it will be removed long-term. -/// See . +/// Version draft-29 should only be used to connect to nodes from other libp2p +/// implementations that do not support `QuicV1` yet. Support for it will be +/// removed long-term. See . #[derive(Debug)] pub struct GenTransport { /// Config for the inner [`quinn`] structs. @@ -75,7 +84,8 @@ pub struct GenTransport { listeners: SelectAll>, /// Dialer for each socket family if no matching listener exists. 
dialer: HashMap, - /// Waker to poll the transport again when a new dialer or listener is added. + /// Waker to poll the transport again when a new dialer or listener is + /// added. waker: Option, /// Holepunching attempts hole_punch_attempts: HashMap>, @@ -448,7 +458,8 @@ struct Listener { /// Pending event to reported. pending_event: Option<::Item>, - /// The stream must be awaken after it has been closed to deliver the last event. + /// The stream must be awoken after it has been closed to deliver the last + /// event. close_listener_waker: Option, listening_addresses: HashSet, @@ -497,8 +508,8 @@ impl Listener

{ }) } - /// Report the listener as closed in a [`TransportEvent::ListenerClosed`] and - /// terminate the stream. + /// Report the listener as closed in a [`TransportEvent::ListenerClosed`] + /// and terminate the stream. fn close(&mut self, reason: Result<(), Error>) { if self.is_closed { return; @@ -693,8 +704,8 @@ fn ip_to_listenaddr( Some(socketaddr_to_multiaddr(&socket_addr, version)) } -/// Tries to turn a QUIC multiaddress into a UDP [`SocketAddr`]. Returns None if the format -/// of the multiaddr is wrong. +/// Tries to turn a QUIC multiaddress into a UDP [`SocketAddr`]. Returns None if +/// the format of the multiaddr is wrong. fn multiaddr_to_socketaddr( addr: &Multiaddr, support_draft_29: bool, @@ -745,9 +756,10 @@ fn socketaddr_to_multiaddr(socket_addr: &SocketAddr, version: ProtocolVersion) - #[cfg(test)] #[cfg(any(feature = "async-std", feature = "tokio"))] mod tests { - use super::*; use futures::future::poll_fn; + use super::*; + #[test] fn multiaddr_to_udp_conversion() { assert!(multiaddr_to_socketaddr( @@ -784,14 +796,21 @@ mod tests { ); assert_eq!( multiaddr_to_socketaddr( - &"/ip4/127.0.0.1/udp/55148/quic-v1/p2p/12D3KooW9xk7Zp1gejwfwNpfm6L9zH5NL4Bx5rm94LRYJJHJuARZ" + &"/ip4/127.0.0.1/udp/55148/quic-v1/p2p/\ + 12D3KooW9xk7Zp1gejwfwNpfm6L9zH5NL4Bx5rm94LRYJJHJuARZ" .parse::() - .unwrap(), false + .unwrap(), + false ), - Some((SocketAddr::new( - IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), - 55148, - ), ProtocolVersion::V1, Some("12D3KooW9xk7Zp1gejwfwNpfm6L9zH5NL4Bx5rm94LRYJJHJuARZ".parse().unwrap()))) + Some(( + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 55148,), + ProtocolVersion::V1, + Some( + "12D3KooW9xk7Zp1gejwfwNpfm6L9zH5NL4Bx5rm94LRYJJHJuARZ" + .parse() + .unwrap() + ) + )) ); assert_eq!( multiaddr_to_socketaddr( @@ -851,8 +870,8 @@ mod tests { .now_or_never() .is_none()); - // Run test twice to check that there is no unexpected behaviour if `Transport.listener` - // is temporarily empty. 
+ // Run test twice to check that there is no unexpected behaviour if + // `Transport.listener` is temporarily empty. for _ in 0..2 { let id = ListenerId::next(); transport @@ -885,8 +904,8 @@ mod tests { } e => panic!("Unexpected event: {e:?}"), } - // Poll once again so that the listener has the chance to return `Poll::Ready(None)` and - // be removed from the list of listeners. + // Poll once again so that the listener has the chance to return + // `Poll::Ready(None)` and be removed from the list of listeners. assert!(poll_fn(|cx| Pin::new(&mut transport).as_mut().poll(cx)) .now_or_never() .is_none()); diff --git a/transports/quic/tests/smoke.rs b/transports/quic/tests/smoke.rs index 6a760f9997c..e231a3a3ae3 100644 --- a/transports/quic/tests/smoke.rs +++ b/transports/quic/tests/smoke.rs @@ -1,16 +1,43 @@ #![cfg(any(feature = "async-std", feature = "tokio"))] -use futures::channel::{mpsc, oneshot}; -use futures::future::BoxFuture; -use futures::future::{poll_fn, Either}; -use futures::stream::StreamExt; -use futures::{future, AsyncReadExt, AsyncWriteExt, FutureExt, SinkExt}; +use std::{ + future::Future, + io, + num::NonZeroU8, + pin::Pin, + sync::{Arc, Mutex}, + task::Poll, + time::Duration, +}; + +use futures::{ + channel::{mpsc, oneshot}, + future, + future::{poll_fn, BoxFuture, Either}, + stream::StreamExt, + AsyncReadExt, + AsyncWriteExt, + FutureExt, + SinkExt, +}; use futures_timer::Delay; -use libp2p_core::muxing::{StreamMuxerBox, StreamMuxerExt, SubstreamBox}; -use libp2p_core::transport::{Boxed, DialOpts, OrTransport, PortUse, TransportEvent}; -use libp2p_core::transport::{ListenerId, TransportError}; -use libp2p_core::Endpoint; -use libp2p_core::{multiaddr::Protocol, upgrade, Multiaddr, Transport}; +use libp2p_core::{ + multiaddr::Protocol, + muxing::{StreamMuxerBox, StreamMuxerExt, SubstreamBox}, + transport::{ + Boxed, + DialOpts, + ListenerId, + OrTransport, + PortUse, + TransportError, + TransportEvent, + }, + upgrade, + Endpoint, + Multiaddr, + 
Transport, +}; use libp2p_identity::PeerId; use libp2p_noise as noise; use libp2p_quic as quic; @@ -18,15 +45,6 @@ use libp2p_tcp as tcp; use libp2p_yamux as yamux; use quic::Provider; use rand::RngCore; -use std::future::Future; -use std::io; -use std::num::NonZeroU8; -use std::task::Poll; -use std::time::Duration; -use std::{ - pin::Pin, - sync::{Arc, Mutex}, -}; use tracing_subscriber::EnvFilter; #[cfg(feature = "tokio")] @@ -85,7 +103,8 @@ async fn ipv4_dial_ipv6() { assert_eq!(b_connected, a_peer_id); } -/// Tests that a [`Transport::dial`] wakes up the task previously polling [`Transport::poll`]. +/// Tests that a [`Transport::dial`] wakes up the task previously polling +/// [`Transport::poll`]. /// /// See https://github.com/libp2p/rust-libp2p/pull/3306 for context. #[cfg(feature = "async-std")] @@ -117,8 +136,9 @@ async fn wrapped_with_delay() { self.0.lock().unwrap().remove_listener(id) } - /// Delayed dial, i.e. calling [`Transport::dial`] on the inner [`Transport`] not within the - /// synchronous [`Transport::dial`] method, but within the [`Future`] returned by the outer + /// Delayed dial, i.e. calling [`Transport::dial`] on the inner + /// [`Transport`] not within the synchronous [`Transport::dial`] + /// method, but within the [`Future`] returned by the outer /// [`Transport::dial`]. fn dial( &mut self, @@ -128,8 +148,8 @@ async fn wrapped_with_delay() { let t = self.0.clone(); Ok(async move { // Simulate DNS lookup. Giving the `Transport::poll` the chance to return - // `Poll::Pending` and thus suspending its task, waiting for a wakeup from the dial - // on the inner transport below. + // `Poll::Pending` and thus suspending its task, waiting for a wakeup from the + // dial on the inner transport below. 
Delay::new(Duration::from_millis(100)).await; let dial = t @@ -176,8 +196,9 @@ async fn wrapped_with_delay() { // Spawn B // - // Note that the dial is spawned on a different task than the transport allowing the transport - // task to poll the transport once and then suspend, waiting for the wakeup from the dial. + // Note that the dial is spawned on a different task than the transport allowing + // the transport task to poll the transport once and then suspend, waiting + // for the wakeup from the dial. let dial = async_std::task::spawn({ let dial = b_transport .dial( @@ -200,7 +221,8 @@ async fn wrapped_with_delay() { #[cfg(feature = "async-std")] #[async_std::test] -#[ignore] // Transport currently does not validate PeerId. Enable once we make use of PeerId validation in rustls. +#[ignore] // Transport currently does not validate PeerId. Enable once we make use of + // PeerId validation in rustls. async fn wrong_peerid() { use libp2p_identity::PeerId; @@ -305,7 +327,8 @@ async fn draft_29_support() { let (_, mut b_transport) = create_transport::(|cfg| cfg.support_draft_29 = true); - // If a server supports draft-29 all its QUIC addresses can be dialed on draft-29 or version-1 + // If a server supports draft-29 all its QUIC addresses can be dialed on + // draft-29 or version-1 let a_quic_addr = start_listening(&mut a_transport, "/ip4/127.0.0.1/udp/0/quic").await; let a_quic_mapped_addr = swap_protocol!(a_quic_addr, Quic => QuicV1); let a_quic_v1_addr = start_listening(&mut a_transport, "/ip4/127.0.0.1/udp/0/quic-v1").await; @@ -462,8 +485,8 @@ async fn test_local_listener_reuse() { } } }; - // If we do not poll until the end, `NewAddress` events may be `Ready` and `connect` function - // below will panic due to an unexpected event. + // If we do not poll until the end, `NewAddress` events may be `Ready` and + // `connect` function below will panic due to an unexpected event. 
poll_fn(|cx| { let mut pinned = Pin::new(&mut a_transport); while pinned.as_mut().poll(cx).is_ready() {} @@ -755,7 +778,8 @@ async fn open_outbound_streams( {} } -/// Helper function for driving two transports until they established a connection. +/// Helper function for driving two transports until they established a +/// connection. #[allow(unknown_lints, clippy::needless_pass_by_ref_mut)] // False positive. async fn connect( listener: &mut Boxed<(PeerId, StreamMuxerBox)>, diff --git a/transports/quic/tests/stream_compliance.rs b/transports/quic/tests/stream_compliance.rs index b0536473215..9d2656b8b4d 100644 --- a/transports/quic/tests/stream_compliance.rs +++ b/transports/quic/tests/stream_compliance.rs @@ -1,10 +1,13 @@ -use futures::channel::oneshot; -use futures::StreamExt; -use libp2p_core::transport::{DialOpts, ListenerId, PortUse}; -use libp2p_core::{Endpoint, Transport}; -use libp2p_quic as quic; use std::time::Duration; +use futures::{channel::oneshot, StreamExt}; +use libp2p_core::{ + transport::{DialOpts, ListenerId, PortUse}, + Endpoint, + Transport, +}; +use libp2p_quic as quic; + #[async_std::test] async fn close_implies_flush() { let (alice, bob) = connected_peers().await; diff --git a/transports/tcp/src/lib.rs b/transports/tcp/src/lib.rs index 4c4fa7c6b84..949204f2142 100644 --- a/transports/tcp/src/lib.rs +++ b/transports/tcp/src/lib.rs @@ -22,19 +22,24 @@ //! //! # Usage //! -//! This crate provides a [`async_io::Transport`] and [`tokio::Transport`], depending on -//! the enabled features, which implement the [`libp2p_core::Transport`] trait for use as a -//! transport with `libp2p-core` or `libp2p-swarm`. +//! This crate provides a [`async_io::Transport`] and [`tokio::Transport`], +//! depending on the enabled features, which implement the +//! [`libp2p_core::Transport`] trait for use as a transport with `libp2p-core` +//! or `libp2p-swarm`. 
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] mod provider; -#[cfg(feature = "async-io")] -pub use provider::async_io; - -#[cfg(feature = "tokio")] -pub use provider::tokio; +use std::{ + collections::{HashSet, VecDeque}, + io, + net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, TcpListener}, + pin::Pin, + sync::{Arc, RwLock}, + task::{Context, Poll, Waker}, + time::Duration, +}; use futures::{future::Ready, prelude::*, stream::SelectAll}; use futures_timer::Delay; @@ -43,17 +48,12 @@ use libp2p_core::{ multiaddr::{Multiaddr, Protocol}, transport::{DialOpts, ListenerId, PortUse, TransportError, TransportEvent}, }; +#[cfg(feature = "async-io")] +pub use provider::async_io; +#[cfg(feature = "tokio")] +pub use provider::tokio; use provider::{Incoming, Provider}; use socket2::{Domain, Socket, Type}; -use std::{ - collections::{HashSet, VecDeque}, - io, - net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, TcpListener}, - pin::Pin, - sync::{Arc, RwLock}, - task::{Context, Poll, Waker}, - time::Duration, -}; /// The configuration for a TCP/IP transport capability for libp2p. #[derive(Clone, Debug)] @@ -131,10 +131,9 @@ impl PortReuse { impl Config { /// Creates a new configuration for a TCP/IP transport: /// - /// * Nagle's algorithm, i.e. `TCP_NODELAY`, is _enabled_. - /// See [`Config::nodelay`]. - /// * Reuse of listening ports is _disabled_. - /// See [`Config::port_reuse`]. + /// * Nagle's algorithm, i.e. `TCP_NODELAY`, is _enabled_. See + /// [`Config::nodelay`]. + /// * Reuse of listening ports is _disabled_. See [`Config::port_reuse`]. /// * No custom `IP_TTL` is set. The default of the OS TCP stack applies. /// See [`Config::ttl`]. /// * The size of the listen backlog for new listening sockets is `1024`. @@ -171,20 +170,25 @@ impl Config { /// /// # Deprecation Notice /// - /// The new implementation works on a per-connaction basis, defined by the behaviour. 
This - /// removes the necessaity to configure the transport for port reuse, instead the behaviour - /// requiring this behaviour can decide whether to use port reuse or not. + /// The new implementation works on a per-connection basis, defined by the + /// behaviour. This removes the necessity to configure the transport + /// for port reuse, instead the behaviour requiring this behaviour can + /// decide whether to use port reuse or not. /// - /// The API to configure port reuse is part of [`Transport`] and the option can be found in - /// [`libp2p_core::transport::DialOpts`]. + /// The API to configure port reuse is part of [`Transport`] and the option + /// can be found in [`libp2p_core::transport::DialOpts`]. /// - /// If [`PortUse::Reuse`] is enabled, the transport will try to reuse the local port of the - /// listener. If that's not possible, i.e. there is no listener or the transport doesn't allow - /// a direct control over ports, a new port (or the default behaviour) is used. If port reuse - /// is enabled for a connection, this option will be treated on a best-effor basis. + /// If [`PortUse::Reuse`] is enabled, the transport will try to reuse the + /// local port of the listener. If that's not possible, i.e. there is no + /// listener or the transport doesn't allow a direct control over ports, + /// a new port (or the default behaviour) is used. If port reuse + /// is enabled for a connection, this option will be treated on a best-effort + /// basis. #[deprecated( since = "0.42.0", - note = "This option does nothing now, since the port reuse policy is now decided on a per-connection basis by the behaviour. The function will be removed in a future release." + note = "This option does nothing now, since the port reuse policy is now decided on a \ + per-connection basis by the behaviour. The function will be removed in a future \ + release."
)] pub fn port_reuse(self, _port_reuse: bool) -> Self { self @@ -228,7 +232,8 @@ impl Default for Config { /// An abstract [`libp2p_core::Transport`] implementation. /// -/// You shouldn't need to use this type directly. Use one of the following instead: +/// You shouldn't need to use this type directly. Use one of the following +/// instead: /// /// - [`tokio::Transport`] /// - [`async_io::Transport`] @@ -241,10 +246,12 @@ where /// The configuration of port reuse when dialing. port_reuse: PortReuse, /// All the active listeners. - /// The [`ListenStream`] struct contains a stream that we want to be pinned. Since the `VecDeque` - /// can be resized, the only way is to use a `Pin>`. + /// The [`ListenStream`] struct contains a stream that we want to be pinned. + /// Since the `VecDeque` can be resized, the only way is to use a + /// `Pin>`. listeners: SelectAll>, - /// Pending transport events to return from [`libp2p_core::Transport::poll`]. + /// Pending transport events to return from + /// [`libp2p_core::Transport::poll`]. pending_events: VecDeque::ListenerUpgrade, io::Error>>, } @@ -257,7 +264,8 @@ where /// /// If you don't want to specify a [`Config`], use [`Transport::default`]. /// - /// It is best to call this function through one of the type-aliases of this type: + /// It is best to call this function through one of the type-aliases of this + /// type: /// /// - [`tokio::Transport::new`] /// - [`async_io::Transport::new`] @@ -465,9 +473,11 @@ where pause: Option, /// Pending event to reported. pending_event: Option<::Item>, - /// The listener can be manually closed with [`Transport::remove_listener`](libp2p_core::Transport::remove_listener). + /// The listener can be manually closed with + /// [`Transport::remove_listener`](libp2p_core::Transport::remove_listener). is_closed: bool, - /// The stream must be awaken after it has been closed to deliver the last event. + /// The stream must be awaken after it has been closed to deliver the last + /// event. 
close_listener_waker: Option, } @@ -621,7 +631,8 @@ where } if self.is_closed { - // Terminate the stream if the listener closed and all remaining events have been reported. + // Terminate the stream if the listener closed and all remaining events have + // been reported. return Poll::Ready(None); } @@ -705,13 +716,13 @@ fn ip_to_multiaddr(ip: IpAddr, port: u16) -> Multiaddr { #[cfg(test)] mod tests { - use super::*; use futures::{ channel::{mpsc, oneshot}, future::poll_fn, }; - use libp2p_core::Endpoint; - use libp2p_core::Transport as _; + use libp2p_core::{Endpoint, Transport as _}; + + use super::*; #[test] fn multiaddr_to_tcp_conversion() { diff --git a/transports/tcp/src/provider.rs b/transports/tcp/src/provider.rs index d94da7a6fc3..6b5c8e5638d 100644 --- a/transports/tcp/src/provider.rs +++ b/transports/tcp/src/provider.rs @@ -26,13 +26,19 @@ pub mod async_io; #[cfg(feature = "tokio")] pub mod tokio; -use futures::future::BoxFuture; -use futures::io::{AsyncRead, AsyncWrite}; -use futures::Stream; +use std::{ + fmt, + io, + net::{SocketAddr, TcpListener, TcpStream}, + task::{Context, Poll}, +}; + +use futures::{ + future::BoxFuture, + io::{AsyncRead, AsyncWrite}, + Stream, +}; use if_watch::{IfEvent, IpNet}; -use std::net::{SocketAddr, TcpListener, TcpStream}; -use std::task::{Context, Poll}; -use std::{fmt, io}; /// An incoming connection returned from [`Provider::poll_accept()`]. pub struct Incoming { @@ -67,8 +73,8 @@ pub trait Provider: Clone + Send + 'static { /// setup to complete, i.e. for the stream to be writable. fn new_stream(_: TcpStream) -> BoxFuture<'static, io::Result>; - /// Polls a [`Self::Listener`] for an incoming connection, ensuring a task wakeup, - /// if necessary. + /// Polls a [`Self::Listener`] for an incoming connection, ensuring a task + /// wakeup, if necessary. 
fn poll_accept( _: &mut Self::Listener, _: &mut Context<'_>, diff --git a/transports/tcp/src/provider/async_io.rs b/transports/tcp/src/provider/async_io.rs index fe0abe42d54..5469d5ee874 100644 --- a/transports/tcp/src/provider/async_io.rs +++ b/transports/tcp/src/provider/async_io.rs @@ -18,15 +18,19 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use super::{Incoming, Provider}; +use std::{ + io, + net, + task::{Context, Poll}, +}; use async_io::Async; use futures::future::{BoxFuture, FutureExt}; -use std::io; -use std::net; -use std::task::{Context, Poll}; -/// A TCP [`Transport`](libp2p_core::Transport) that works with the `async-std` ecosystem. +use super::{Incoming, Provider}; + +/// A TCP [`Transport`](libp2p_core::Transport) that works with the `async-std` +/// ecosystem. /// /// # Example /// @@ -40,9 +44,14 @@ use std::task::{Context, Poll}; /// # async fn main() { /// let mut transport = tcp::async_io::Transport::new(tcp::Config::default()); /// let id = ListenerId::next(); -/// transport.listen_on(id, "/ip4/127.0.0.1/tcp/0".parse().unwrap()).unwrap(); +/// transport +/// .listen_on(id, "/ip4/127.0.0.1/tcp/0".parse().unwrap()) +/// .unwrap(); /// -/// let addr = future::poll_fn(|cx| Pin::new(&mut transport).poll(cx)).await.into_new_address().unwrap(); +/// let addr = future::poll_fn(|cx| Pin::new(&mut transport).poll(cx)) +/// .await +/// .into_new_address() +/// .unwrap(); /// /// println!("Listening on {addr}"); /// # } @@ -100,8 +109,9 @@ impl Provider for Tcp { Some(Err(e)) => return Poll::Ready(Err(e)), Some(Ok(res)) => break res, None => { - // Since it doesn't do any harm, account for false positives of - // `poll_readable` just in case, i.e. try again. + // Since it doesn't do any harm, account for false + // positives of `poll_readable` + // just in case, i.e. try again. 
} }, } diff --git a/transports/tcp/src/provider/tokio.rs b/transports/tcp/src/provider/tokio.rs index ec2d098e3fb..72c96e9bff6 100644 --- a/transports/tcp/src/provider/tokio.rs +++ b/transports/tcp/src/provider/tokio.rs @@ -18,18 +18,22 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use super::{Incoming, Provider}; +use std::{ + io, + net, + pin::Pin, + task::{Context, Poll}, +}; use futures::{ future::{BoxFuture, FutureExt}, prelude::*, }; -use std::io; -use std::net; -use std::pin::Pin; -use std::task::{Context, Poll}; -/// A TCP [`Transport`](libp2p_core::Transport) that works with the `tokio` ecosystem. +use super::{Incoming, Provider}; + +/// A TCP [`Transport`](libp2p_core::Transport) that works with the `tokio` +/// ecosystem. /// /// # Example /// @@ -42,9 +46,14 @@ use std::task::{Context, Poll}; /// # #[tokio::main] /// # async fn main() { /// let mut transport = tcp::tokio::Transport::new(tcp::Config::default()); -/// let id = transport.listen_on(ListenerId::next(), "/ip4/127.0.0.1/tcp/0".parse().unwrap()).unwrap(); +/// let id = transport +/// .listen_on(ListenerId::next(), "/ip4/127.0.0.1/tcp/0".parse().unwrap()) +/// .unwrap(); /// -/// let addr = future::poll_fn(|cx| Pin::new(&mut transport).poll(cx)).await.into_new_address().unwrap(); +/// let addr = future::poll_fn(|cx| Pin::new(&mut transport).poll(cx)) +/// .await +/// .into_new_address() +/// .unwrap(); /// /// println!("Listening on {addr}"); /// # } @@ -116,7 +125,8 @@ impl Provider for Tcp { } } -/// A [`tokio::net::TcpStream`] that implements [`AsyncRead`] and [`AsyncWrite`]. +/// A [`tokio::net::TcpStream`] that implements [`AsyncRead`] and +/// [`AsyncWrite`]. 
#[derive(Debug)] pub struct TcpStream(pub tokio::net::TcpStream); diff --git a/transports/tls/src/certificate.rs b/transports/tls/src/certificate.rs index 65b373bcf9b..c51bdb52329 100644 --- a/transports/tls/src/certificate.rs +++ b/transports/tls/src/certificate.rs @@ -22,12 +22,12 @@ //! //! This module handles generation, signing, and verification of certificates. +use std::sync::Arc; + use libp2p_identity as identity; use libp2p_identity::PeerId; use x509_parser::{prelude::*, signature_algorithm::SignatureAlgorithm}; -use std::sync::Arc; - /// The libp2p Public Key Extension is a X.509 extension /// with the Object Identifier 1.3.6.1.4.1.53594.1.1, /// allocated by IANA to the libp2p project at Protocol Labs. @@ -37,11 +37,13 @@ const P2P_EXT_OID: [u64; 9] = [1, 3, 6, 1, 4, 1, 53594, 1, 1]; /// and the public key that it used to generate the certificate carrying /// the libp2p Public Key Extension, using its private host key. /// This signature provides cryptographic proof that the peer was -/// in possession of the private host key at the time the certificate was signed. +/// in possession of the private host key at the time the certificate was +/// signed. const P2P_SIGNING_PREFIX: [u8; 21] = *b"libp2p-tls-handshake:"; // Certificates MUST use the NamedCurve encoding for elliptic curve parameters. -// Similarly, hash functions with an output length less than 256 bits MUST NOT be used. +// Similarly, hash functions with an output length less than 256 bits MUST NOT +// be used. static P2P_SIGNATURE_ALGORITHM: &rcgen::SignatureAlgorithm = &rcgen::PKCS_ECDSA_P256_SHA256; #[derive(Debug)] @@ -123,8 +125,8 @@ pub fn generate( /// Attempts to parse the provided bytes as a [`P2pCertificate`]. /// -/// For this to succeed, the certificate must contain the specified extension and the signature must -/// match the embedded public key. +/// For this to succeed, the certificate must contain the specified extension +/// and the signature must match the embedded public key. 
pub fn parse<'a>( certificate: &'a rustls::pki_types::CertificateDer<'a>, ) -> Result, ParseError> { @@ -146,13 +148,14 @@ pub struct P2pCertificate<'a> { extension: P2pExtension, } -/// The contents of the specific libp2p extension, containing the public host key -/// and a signature performed using the private host key. +/// The contents of the specific libp2p extension, containing the public host +/// key and a signature performed using the private host key. #[derive(Debug)] pub struct P2pExtension { public_key: identity::PublicKey, /// This signature provides cryptographic proof that the peer was - /// in possession of the private host key at the time the certificate was signed. + /// in possession of the private host key at the time the certificate was + /// signed. signature: Vec, } @@ -226,7 +229,8 @@ fn parse_unverified(der_input: &[u8]) -> Result { return Err(webpki::Error::UnsupportedCriticalExtension); } - // Implementations MUST ignore non-critical extensions with unknown OIDs. + // Implementations MUST ignore non-critical extensions with unknown + // OIDs. } // The certificate MUST contain the libp2p Public Key Extension. @@ -283,8 +287,8 @@ impl P2pCertificate<'_> { self.extension.public_key.to_peer_id() } - /// Verify the `signature` of the `message` signed by the private key corresponding to the public key stored - /// in the certificate. + /// Verify the `signature` of the `message` signed by the private key + /// corresponding to the public key stored in the certificate. pub fn verify_signature( &self, signature_scheme: rustls::SignatureScheme, @@ -298,9 +302,10 @@ impl P2pCertificate<'_> { Ok(()) } - /// Get a [`ring::signature::UnparsedPublicKey`] for this `signature_scheme`. - /// Return `Error` if the `signature_scheme` does not match the public key signature - /// and hashing algorithm or if the `signature_scheme` is not supported. + /// Get a [`ring::signature::UnparsedPublicKey`] for this + /// `signature_scheme`. 
Return `Error` if the `signature_scheme` does + /// not match the public key signature and hashing algorithm or if the + /// `signature_scheme` is not supported. fn public_key( &self, signature_scheme: rustls::SignatureScheme, @@ -492,9 +497,10 @@ impl P2pCertificate<'_> { #[cfg(test)] mod tests { - use super::*; use hex_literal::hex; + use super::*; + #[test] fn sanity_check() { let keypair = identity::Keypair::generate_ed25519(); diff --git a/transports/tls/src/lib.rs b/transports/tls/src/lib.rs index 3aa66db12b3..57d7d69d4bd 100644 --- a/transports/tls/src/lib.rs +++ b/transports/tls/src/lib.rs @@ -29,14 +29,12 @@ pub mod certificate; mod upgrade; mod verifier; -use certificate::AlwaysResolvesCert; -use libp2p_identity::Keypair; -use libp2p_identity::PeerId; use std::sync::Arc; +use certificate::AlwaysResolvesCert; pub use futures_rustls::TlsStream; -pub use upgrade::Config; -pub use upgrade::UpgradeError; +use libp2p_identity::{Keypair, PeerId}; +pub use upgrade::{Config, UpgradeError}; const P2P_ALPN: [u8; 6] = *b"libp2p"; diff --git a/transports/tls/src/upgrade.rs b/transports/tls/src/upgrade.rs index 1c61d265ea6..2326c382dbe 100644 --- a/transports/tls/src/upgrade.rs +++ b/transports/tls/src/upgrade.rs @@ -18,20 +18,22 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::certificate; -use crate::certificate::P2pCertificate; -use futures::future::BoxFuture; -use futures::AsyncWrite; -use futures::{AsyncRead, FutureExt}; +use std::{ + net::{IpAddr, Ipv4Addr}, + sync::Arc, +}; + +use futures::{future::BoxFuture, AsyncRead, AsyncWrite, FutureExt}; use futures_rustls::TlsStream; -use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}; -use libp2p_core::UpgradeInfo; +use libp2p_core::{ + upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}, + UpgradeInfo, +}; use libp2p_identity as identity; use libp2p_identity::PeerId; use rustls::{pki_types::ServerName, CommonState}; -use std::net::{IpAddr, Ipv4Addr}; -use std::sync::Arc; +use crate::{certificate, certificate::P2pCertificate}; #[derive(thiserror::Error, Debug)] pub enum UpgradeError { @@ -102,8 +104,11 @@ where fn upgrade_outbound(self, socket: C, _: Self::Info) -> Self::Future { async move { - // Spec: In order to keep this flexibility for future versions, clients that only support the version of the handshake defined in this document MUST NOT send any value in the Server Name Indication. - // Setting `ServerName` to unspecified will disable the use of the SNI extension. + // Spec: In order to keep this flexibility for future versions, clients that + // only support the version of the handshake defined in this document MUST NOT + // send any value in the Server Name Indication. + // Setting `ServerName` to unspecified will disable the use of the SNI + // extension. let name = ServerName::IpAddress(rustls::pki_types::IpAddr::from(IpAddr::V4( Ipv4Addr::UNSPECIFIED, ))); diff --git a/transports/tls/src/verifier.rs b/transports/tls/src/verifier.rs index 65636cbe708..769c8a1b7a2 100644 --- a/transports/tls/src/verifier.rs +++ b/transports/tls/src/verifier.rs @@ -23,19 +23,28 @@ //! This module handles a verification of a client/server certificate chain //! and signatures allegedly by the given certificates. 
-use crate::certificate; +use std::sync::Arc; + use libp2p_identity::PeerId; use rustls::{ client::danger::{HandshakeSignatureValid, ServerCertVerified, ServerCertVerifier}, crypto::ring::cipher_suite::{ - TLS13_AES_128_GCM_SHA256, TLS13_AES_256_GCM_SHA384, TLS13_CHACHA20_POLY1305_SHA256, + TLS13_AES_128_GCM_SHA256, + TLS13_AES_256_GCM_SHA384, + TLS13_CHACHA20_POLY1305_SHA256, }, pki_types::CertificateDer, server::danger::{ClientCertVerified, ClientCertVerifier}, - CertificateError, DigitallySignedStruct, DistinguishedName, OtherError, SignatureScheme, - SupportedCipherSuite, SupportedProtocolVersion, + CertificateError, + DigitallySignedStruct, + DistinguishedName, + OtherError, + SignatureScheme, + SupportedCipherSuite, + SupportedProtocolVersion, }; -use std::sync::Arc; + +use crate::certificate; /// The protocol versions supported by this verifier. /// @@ -56,7 +65,8 @@ pub(crate) static CIPHERSUITES: &[SupportedCipherSuite] = &[ /// Implementation of the `rustls` certificate verification traits for libp2p. /// -/// Only TLS 1.3 is supported. TLS 1.2 should be disabled in the configuration of `rustls`. +/// Only TLS 1.3 is supported. TLS 1.2 should be disabled in the configuration +/// of `rustls`. #[derive(Debug)] pub(crate) struct Libp2pCertificateVerifier { /// The peer ID we intend to connect to @@ -205,8 +215,8 @@ impl ClientCertVerifier for Libp2pCertificateVerifier { /// MUST check these conditions and abort the connection attempt if /// (a) the presented certificate is not yet valid, OR /// (b) if it is expired. -/// Endpoints MUST abort the connection attempt if more than one certificate is received, -/// or if the certificate’s self-signature is not valid. +/// Endpoints MUST abort the connection attempt if more than one certificate is +/// received, or if the certificate’s self-signature is not valid. 
fn verify_presented_certs( end_entity: &CertificateDer, intermediates: &[CertificateDer], diff --git a/transports/tls/tests/smoke.rs b/transports/tls/tests/smoke.rs index d488ae7846a..cf11f4c0b1d 100644 --- a/transports/tls/tests/smoke.rs +++ b/transports/tls/tests/smoke.rs @@ -1,10 +1,8 @@ +use std::time::Duration; + use futures::{future, StreamExt}; -use libp2p_core::multiaddr::Protocol; -use libp2p_core::transport::MemoryTransport; -use libp2p_core::upgrade::Version; -use libp2p_core::Transport; +use libp2p_core::{multiaddr::Protocol, transport::MemoryTransport, upgrade::Version, Transport}; use libp2p_swarm::{dummy, Config, Swarm, SwarmEvent}; -use std::time::Duration; #[tokio::test] async fn can_establish_connection() { diff --git a/transports/uds/src/lib.rs b/transports/uds/src/lib.rs index 5c57e255b4d..ef0025d2f06 100644 --- a/transports/uds/src/lib.rs +++ b/transports/uds/src/lib.rs @@ -26,10 +26,12 @@ //! //! # Usage //! -//! The `UdsConfig` transport supports multiaddresses of the form `/unix//tmp/foo`. +//! The `UdsConfig` transport supports multiaddresses of the form +//! `/unix//tmp/foo`. //! -//! The `UdsConfig` structs implements the `Transport` trait of the `core` library. See the -//! documentation of `core` and of libp2p in general to learn how to use the `Transport` trait. +//! The `UdsConfig` structs implements the `Transport` trait of the `core` +//! library. See the documentation of `core` and of libp2p in general to learn +//! how to use the `Transport` trait. 
#![cfg(all( unix, @@ -38,21 +40,24 @@ ))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -use futures::stream::BoxStream; +use std::{ + collections::VecDeque, + io, + path::PathBuf, + pin::Pin, + task::{Context, Poll}, +}; + use futures::{ future::{BoxFuture, Ready}, prelude::*, + stream::BoxStream, }; -use libp2p_core::transport::ListenerId; use libp2p_core::{ multiaddr::{Multiaddr, Protocol}, - transport::{DialOpts, TransportError, TransportEvent}, + transport::{DialOpts, ListenerId, TransportError, TransportEvent}, Transport, }; -use std::collections::VecDeque; -use std::pin::Pin; -use std::task::{Context, Poll}; -use std::{io, path::PathBuf}; pub type Listener = BoxStream< 'static, @@ -219,8 +224,8 @@ codegen!( /// Turns a `Multiaddr` containing a single `Unix` component into a path. /// -/// Also returns an error if the path is not absolute, as we don't want to dial/listen on relative -/// paths. +/// Also returns an error if the path is not absolute, as we don't want to +/// dial/listen on relative paths. // This type of logic should probably be moved into the multiaddr package fn multiaddr_to_path(addr: &Multiaddr) -> Result { let mut protocols = addr.iter(); @@ -241,14 +246,17 @@ fn multiaddr_to_path(addr: &Multiaddr) -> Result { #[cfg(all(test, feature = "async-std"))] mod tests { - use super::{multiaddr_to_path, UdsConfig}; + use std::{borrow::Cow, path::Path}; + use futures::{channel::oneshot, prelude::*}; use libp2p_core::{ multiaddr::{Multiaddr, Protocol}, transport::{DialOpts, ListenerId, PortUse}, - Endpoint, Transport, + Endpoint, + Transport, }; - use std::{borrow::Cow, path::Path}; + + use super::{multiaddr_to_path, UdsConfig}; #[test] fn multiaddr_to_path_conversion() { diff --git a/transports/webrtc-websys/src/connection.rs b/transports/webrtc-websys/src/connection.rs index d0c6ccd2238..40dc1eb2fda 100644 --- a/transports/webrtc-websys/src/connection.rs +++ b/transports/webrtc-websys/src/connection.rs @@ -1,28 +1,33 @@ //! 
A libp2p connection backed by an [RtcPeerConnection](https://developer.mozilla.org/en-US/docs/Web/API/RTCPeerConnection). -use super::{Error, Stream}; -use crate::stream::DropListener; -use futures::channel::mpsc; -use futures::stream::FuturesUnordered; -use futures::StreamExt; +use std::{ + pin::Pin, + task::{ready, Context, Poll, Waker}, +}; + +use futures::{channel::mpsc, stream::FuturesUnordered, StreamExt}; use js_sys::{Object, Reflect}; use libp2p_core::muxing::{StreamMuxer, StreamMuxerEvent}; use libp2p_webrtc_utils::Fingerprint; use send_wrapper::SendWrapper; -use std::pin::Pin; -use std::task::Waker; -use std::task::{ready, Context, Poll}; use wasm_bindgen::prelude::*; use wasm_bindgen_futures::JsFuture; use web_sys::{ - RtcConfiguration, RtcDataChannel, RtcDataChannelEvent, RtcDataChannelInit, RtcDataChannelType, + RtcConfiguration, + RtcDataChannel, + RtcDataChannelEvent, + RtcDataChannelInit, + RtcDataChannelType, RtcSessionDescriptionInit, }; +use super::{Error, Stream}; +use crate::stream::DropListener; + /// A WebRTC Connection. /// -/// All connections need to be [`Send`] which is why some fields are wrapped in [`SendWrapper`]. -/// This is safe because WASM is single-threaded. +/// All connections need to be [`Send`] which is why some fields are wrapped in +/// [`SendWrapper`]. This is safe because WASM is single-threaded. pub struct Connection { /// The [RtcPeerConnection] that is used for the WebRTC Connection inner: SendWrapper, @@ -31,9 +36,11 @@ pub struct Connection { closed: bool, /// An [`mpsc::channel`] for all inbound data channels. /// - /// Because the browser's WebRTC API is event-based, we need to use a channel to obtain all inbound data channels. + /// Because the browser's WebRTC API is event-based, we need to use a + /// channel to obtain all inbound data channels. inbound_data_channels: SendWrapper>, - /// A list of futures, which, once completed, signal that a [`Stream`] has been dropped. 
+ /// A list of futures, which, once completed, signal that a [`Stream`] has + /// been dropped. drop_listeners: FuturesUnordered, no_drop_listeners_waker: Option, @@ -43,7 +50,8 @@ pub struct Connection { impl Connection { /// Create a new inner WebRTC Connection pub(crate) fn new(peer_connection: RtcPeerConnection) -> Self { - // An ondatachannel Future enables us to poll for incoming data channel events in poll_incoming + // An ondatachannel Future enables us to poll for incoming data channel events + // in poll_incoming let (mut tx_ondatachannel, rx_ondatachannel) = mpsc::channel(4); // we may get more than one data channel opened on a single peer connection let ondatachannel_closure = Closure::new(move |ev: RtcDataChannelEvent| { @@ -120,7 +128,8 @@ impl StreamMuxer for Connection { Poll::Ready(Ok(stream)) } None => { - // This only happens if the [`RtcPeerConnection::ondatachannel`] closure gets freed which means we are most likely shutting down the connection. + // This only happens if the [`RtcPeerConnection::ondatachannel`] closure gets + // freed which means we are most likely shutting down the connection. tracing::debug!("`Sender` for inbound data channels has been dropped"); Poll::Ready(Err(Error::Connection("connection closed".to_owned()))) } @@ -199,12 +208,14 @@ impl RtcPeerConnection { /// Creates the stream for the initial noise handshake. /// - /// The underlying data channel MUST have `negotiated` set to `true` and carry the ID 0. + /// The underlying data channel MUST have `negotiated` set to `true` and + /// carry the ID 0. pub(crate) fn new_handshake_stream(&self) -> (Stream, DropListener) { Stream::new(self.new_data_channel(true)) } - /// Creates a regular data channel for when the connection is already established. + /// Creates a regular data channel for when the connection is already + /// established. 
     pub(crate) fn new_regular_data_channel(&self) -> RtcDataChannel {
         self.new_data_channel(false)
     }
@@ -298,11 +309,22 @@ mod sdp_tests {
     use super::*;

     #[test]
     fn test_fingerprint() {
-        let sdp = "v=0\r\no=- 0 0 IN IP6 ::1\r\ns=-\r\nc=IN IP6 ::1\r\nt=0 0\r\na=ice-lite\r\nm=application 61885 UDP/DTLS/SCTP webrtc-datachannel\r\na=mid:0\r\na=setup:passive\r\na=ice-ufrag:libp2p+webrtc+v1/YwapWySn6fE6L9i47PhlB6X4gzNXcgFs\r\na=ice-pwd:libp2p+webrtc+v1/YwapWySn6fE6L9i47PhlB6X4gzNXcgFs\r\na=fingerprint:sha-256 A8:17:77:1E:02:7E:D1:2B:53:92:70:A6:8E:F9:02:CC:21:72:3A:92:5D:F4:97:5F:27:C4:5E:75:D4:F4:31:89\r\na=sctp-port:5000\r\na=max-message-size:16384\r\na=candidate:1467250027 1 UDP 1467250027 ::1 61885 typ host\r\n";
+        let sdp = "v=0\r\no=- 0 0 IN IP6 ::1\r\ns=-\r\nc=IN IP6 ::1\r\nt=0 \
+                   0\r\na=ice-lite\r\nm=application 61885 UDP/DTLS/SCTP \
+                   webrtc-datachannel\r\na=mid:0\r\na=setup:passive\r\na=ice-ufrag:\
+                   libp2p+webrtc+v1/YwapWySn6fE6L9i47PhlB6X4gzNXcgFs\r\na=ice-pwd:\
+                   libp2p+webrtc+v1/YwapWySn6fE6L9i47PhlB6X4gzNXcgFs\r\na=fingerprint:sha-256 \
+                   A8:17:77:1E:02:7E:D1:2B:53:92:70:A6:8E:F9:02:CC:21:72:3A:92:5D:F4:97:5F:27:C4:\
+                   5E:75:D4:F4:31:89\r\na=sctp-port:5000\r\na=max-message-size:16384\r\
+                   \na=candidate:1467250027 1 UDP 1467250027 ::1 61885 typ host\r\n";
         let fingerprint = parse_fingerprint(sdp).unwrap();

         assert_eq!(fingerprint.algorithm(), "sha-256");
-        assert_eq!(fingerprint.to_sdp_format(), "A8:17:77:1E:02:7E:D1:2B:53:92:70:A6:8E:F9:02:CC:21:72:3A:92:5D:F4:97:5F:27:C4:5E:75:D4:F4:31:89");
+        assert_eq!(
+            fingerprint.to_sdp_format(),
+            "A8:17:77:1E:02:7E:D1:2B:53:92:70:A6:8E:F9:02:CC:21:72:3A:92:5D:F4:97:5F:27:C4:5E:75:\
+             D4:F4:31:89"
+        );
     }
 }
diff --git a/transports/webrtc-websys/src/lib.rs b/transports/webrtc-websys/src/lib.rs
index 04fced4111b..07207eb0ae8 100644
--- a/transports/webrtc-websys/src/lib.rs
+++ b/transports/webrtc-websys/src/lib.rs
@@ -7,7 +7,9 @@ mod stream;
 mod transport;
 mod upgrade;

-pub use self::connection::Connection;
-pub use self::error::Error;
-pub use self::stream::Stream; -pub use self::transport::{Config, Transport}; +pub use self::{ + connection::Connection, + error::Error, + stream::Stream, + transport::{Config, Transport}, +}; diff --git a/transports/webrtc-websys/src/sdp.rs b/transports/webrtc-websys/src/sdp.rs index 9e63fd92462..34b722b021b 100644 --- a/transports/webrtc-websys/src/sdp.rs +++ b/transports/webrtc-websys/src/sdp.rs @@ -1,5 +1,6 @@ -use libp2p_webrtc_utils::Fingerprint; use std::net::SocketAddr; + +use libp2p_webrtc_utils::Fingerprint; use web_sys::{RtcSdpType, RtcSessionDescriptionInit}; /// Creates the SDP answer used by the client. @@ -19,7 +20,8 @@ pub(crate) fn answer( /// Creates the munged SDP offer from the Browser's given SDP offer /// -/// Certificate verification is disabled which is why we hardcode a dummy fingerprint here. +/// Certificate verification is disabled which is why we hardcode a dummy +/// fingerprint here. pub(crate) fn offer(offer: String, client_ufrag: &str) -> RtcSessionDescriptionInit { // find line and replace a=ice-ufrag: with "\r\na=ice-ufrag:{client_ufrag}\r\n" // find line and replace a=ice-pwd: with "\r\na=ice-ufrag:{client_ufrag}\r\n" diff --git a/transports/webrtc-websys/src/stream.rs b/transports/webrtc-websys/src/stream.rs index 812aa5afbbf..ee0183b07f0 100644 --- a/transports/webrtc-websys/src/stream.rs +++ b/transports/webrtc-websys/src/stream.rs @@ -1,11 +1,15 @@ //! The WebRTC [Stream] over the Connection -use self::poll_data_channel::PollDataChannel; +use std::{ + pin::Pin, + task::{Context, Poll}, +}; + use futures::{AsyncRead, AsyncWrite}; use send_wrapper::SendWrapper; -use std::pin::Pin; -use std::task::{Context, Poll}; use web_sys::RtcDataChannel; +use self::poll_data_channel::PollDataChannel; + mod poll_data_channel; /// A stream over a WebRTC connection. 
diff --git a/transports/webrtc-websys/src/stream/poll_data_channel.rs b/transports/webrtc-websys/src/stream/poll_data_channel.rs index 3ec744342eb..a3f0552be61 100644 --- a/transports/webrtc-websys/src/stream/poll_data_channel.rs +++ b/transports/webrtc-websys/src/stream/poll_data_channel.rs @@ -1,19 +1,23 @@ -use std::cmp::min; -use std::io; -use std::pin::Pin; -use std::rc::Rc; -use std::sync::atomic::{AtomicBool, Ordering}; -use std::sync::Mutex; -use std::task::{Context, Poll}; +use std::{ + cmp::min, + io, + pin::Pin, + rc::Rc, + sync::{ + atomic::{AtomicBool, Ordering}, + Mutex, + }, + task::{Context, Poll}, +}; use bytes::BytesMut; -use futures::task::AtomicWaker; -use futures::{AsyncRead, AsyncWrite}; +use futures::{task::AtomicWaker, AsyncRead, AsyncWrite}; use libp2p_webrtc_utils::MAX_MSG_LEN; use wasm_bindgen::prelude::*; use web_sys::{Event, MessageEvent, RtcDataChannel, RtcDataChannelEvent, RtcDataChannelState}; -/// [`PollDataChannel`] is a wrapper around [`RtcDataChannel`] which implements [`AsyncRead`] and [`AsyncWrite`]. +/// [`PollDataChannel`] is a wrapper around [`RtcDataChannel`] which implements +/// [`AsyncRead`] and [`AsyncWrite`]. #[derive(Debug, Clone)] pub(crate) struct PollDataChannel { /// The [`RtcDataChannel`] being wrapped. @@ -25,7 +29,8 @@ pub(crate) struct PollDataChannel { /// Waker for when we are waiting for the DC to be opened. open_waker: Rc, - /// Waker for when we are waiting to write (again) to the DC because we previously exceeded the [`MAX_MSG_LEN`] threshold. + /// Waker for when we are waiting to write (again) to the DC because we + /// previously exceeded the [`MAX_MSG_LEN`] threshold. write_waker: Rc, /// Waker for when we are waiting for the DC to be closed. @@ -33,9 +38,12 @@ pub(crate) struct PollDataChannel { /// Whether we've been overloaded with data by the remote. /// - /// This is set to `true` in case `read_buffer` overflows, i.e. the remote is sending us messages faster than we can read them. 
- /// In that case, we return an [`std::io::Error`] from [`AsyncRead`] or [`AsyncWrite`], depending which one gets called earlier. - /// Failing these will (very likely), cause the application developer to drop the stream which resets it. + /// This is set to `true` in case `read_buffer` overflows, i.e. the remote + /// is sending us messages faster than we can read them. In that case, + /// we return an [`std::io::Error`] from [`AsyncRead`] or [`AsyncWrite`], + /// depending which one gets called earlier. Failing these will (very + /// likely), cause the application developer to drop the stream which resets + /// it. overloaded: Rc, // Store the closures for proper garbage collection. @@ -83,7 +91,8 @@ impl PollDataChannel { inner.set_onclose(Some(on_close_closure.as_ref().unchecked_ref())); let new_data_waker = Rc::new(AtomicWaker::new()); - let read_buffer = Rc::new(Mutex::new(BytesMut::new())); // We purposely don't use `with_capacity` so we don't eagerly allocate `MAX_READ_BUFFER` per stream. + let read_buffer = Rc::new(Mutex::new(BytesMut::new())); // We purposely don't use `with_capacity` so we don't eagerly allocate + // `MAX_READ_BUFFER` per stream. 
let overloaded = Rc::new(AtomicBool::new(false)); let on_message_closure = Closure::::new({ diff --git a/transports/webrtc-websys/src/transport.rs b/transports/webrtc-websys/src/transport.rs index 836acb0b9f6..af1bd74d433 100644 --- a/transports/webrtc-websys/src/transport.rs +++ b/transports/webrtc-websys/src/transport.rs @@ -1,15 +1,18 @@ -use super::upgrade; -use super::Connection; -use super::Error; +use std::{ + future::Future, + pin::Pin, + task::{Context, Poll}, +}; + use futures::future::FutureExt; -use libp2p_core::multiaddr::Multiaddr; -use libp2p_core::muxing::StreamMuxerBox; -use libp2p_core::transport::DialOpts; -use libp2p_core::transport::{Boxed, ListenerId, Transport as _, TransportError, TransportEvent}; +use libp2p_core::{ + multiaddr::Multiaddr, + muxing::StreamMuxerBox, + transport::{Boxed, DialOpts, ListenerId, Transport as _, TransportError, TransportEvent}, +}; use libp2p_identity::{Keypair, PeerId}; -use std::future::Future; -use std::pin::Pin; -use std::task::{Context, Poll}; + +use super::{upgrade, Connection, Error}; /// Config for the [`Transport`]. #[derive(Clone)] @@ -17,7 +20,8 @@ pub struct Config { keypair: Keypair, } -/// A WebTransport [`Transport`](libp2p_core::Transport) that works with `web-sys`. +/// A WebTransport [`Transport`](libp2p_core::Transport) that works with +/// `web-sys`. 
 pub struct Transport {
     config: Config,
 }
diff --git a/transports/webrtc-websys/src/upgrade.rs b/transports/webrtc-websys/src/upgrade.rs
index d42f2e3ae18..c4db575a4ce 100644
--- a/transports/webrtc-websys/src/upgrade.rs
+++ b/transports/webrtc-websys/src/upgrade.rs
@@ -1,13 +1,11 @@
-use super::Error;
-use crate::connection::RtcPeerConnection;
-use crate::error::AuthenticationError;
-use crate::sdp;
-use crate::Connection;
+use std::net::SocketAddr;
+
 use libp2p_identity::{Keypair, PeerId};
-use libp2p_webrtc_utils::noise;
-use libp2p_webrtc_utils::Fingerprint;
+use libp2p_webrtc_utils::{noise, Fingerprint};
 use send_wrapper::SendWrapper;
-use std::net::SocketAddr;
+
+use super::Error;
+use crate::{connection::RtcPeerConnection, error::AuthenticationError, sdp, Connection};

 /// Upgrades an outbound WebRTC connection by creating the data channel
 /// and conducting a Noise handshake
@@ -29,7 +27,8 @@ async fn outbound_inner(
     let rtc_peer_connection = RtcPeerConnection::new(remote_fingerprint.algorithm()).await?;

     // Create stream for Noise handshake
-    // Must create data channel before Offer is created for it to be included in the SDP
+    // Must create data channel before Offer is created for it to be included in the
+    // SDP
     let (channel, listener) = rtc_peer_connection.new_handshake_stream();
     drop(listener);

diff --git a/transports/webrtc/src/lib.rs b/transports/webrtc/src/lib.rs
index ea1e6a4d646..e3f9db888fa 100644
--- a/transports/webrtc/src/lib.rs
+++ b/transports/webrtc/src/lib.rs
@@ -18,67 +18,71 @@
 // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 // DEALINGS IN THE SOFTWARE.

-//! Implementation of the [`libp2p_core::Transport`] trait for WebRTC protocol without a signaling
-//! server.
+//! Implementation of the [`libp2p_core::Transport`] trait for WebRTC protocol
+//! without a signaling server.
 //!
 //! # Overview
 //!
-//! ## ICE
+//! ## ICE
 //!
 //! RFCs: 8839, 8445 See also:
 //! <https://tools.ietf.org/id/draft-ietf-rtcweb-sdp-08.html#rfc.section.5.2.3>
 //!
 //! 
The WebRTC protocol uses ICE in order to establish a connection. //! -//! In a typical ICE setup, there are two endpoints, called agents, that want to communicate. One -//! of these two agents can be the local browser, while the other agent is the target of the -//! connection. +//! In a typical ICE setup, there are two endpoints, called agents, that want to +//! communicate. One of these two agents can be the local browser, while the +//! other agent is the target of the connection. //! -//! Even though in this specific context all we want is a simple client-server communication, it is -//! helpful to keep in mind that ICE was designed to solve the problem of NAT traversal. +//! Even though in this specific context all we want is a simple client-server +//! communication, it is helpful to keep in mind that ICE was designed to solve +//! the problem of NAT traversal. //! //! The ICE workflow works as follows: //! -//! - An "offerer" determines ways in which it could be accessible (either an -//! IP address or through a relay using a TURN server), which are called "candidates". It then -//! generates a small text payload in a format called SDP, that describes the request for a -//! connection. -//! - The offerer sends this SDP-encoded message to the answerer. The medium through which this -//! exchange is done is out of scope of the ICE protocol. -//! - The answerer then finds its own candidates, and generates an answer, again in the SDP format. -//! This answer is sent back to the offerer. +//! - An "offerer" determines ways in which it could be accessible (either an IP +//! address or through a relay using a TURN server), which are called +//! "candidates". It then generates a small text payload in a format called +//! SDP, that describes the request for a connection. +//! - The offerer sends this SDP-encoded message to the answerer. The medium +//! through which this exchange is done is out of scope of the ICE protocol. +//! 
- The answerer then finds its own candidates, and generates an answer, again +//! in the SDP format. This answer is sent back to the offerer. //! - Each agent then tries to connect to the remote's candidates. //! -//! We pretend to send the offer to the remote agent (the target of the connection), then pretend -//! that it has found a valid IP address for itself (i.e. a candidate), then pretend that the SDP -//! answer containing this candidate has been sent back. This will cause the offerer to execute -//! step 4: try to connect to the remote's candidate. +//! We pretend to send the offer to the remote agent (the target of the +//! connection), then pretend that it has found a valid IP address for itself +//! (i.e. a candidate), then pretend that the SDP answer containing this +//! candidate has been sent back. This will cause the offerer to execute step 4: +//! try to connect to the remote's candidate. //! //! ## TCP or UDP //! -//! WebRTC by itself doesn't hardcode any specific protocol for media streams. Instead, it is the -//! SDP message of the offerer that specifies which protocol to use. In our use case (one or more -//! data channels), we know that the offerer will always request either TCP+DTLS+SCTP, or -//! UDP+DTLS+SCTP. +//! WebRTC by itself doesn't hardcode any specific protocol for media streams. +//! Instead, it is the SDP message of the offerer that specifies which protocol +//! to use. In our use case (one or more data channels), we know that the +//! offerer will always request either TCP+DTLS+SCTP, or UDP+DTLS+SCTP. //! -//! The implementation only supports UDP at the moment, so if the offerer requests TCP+DTLS+SCTP, it -//! will not respond. Support for TCP may be added in the future (see -//! ). +//! The implementation only supports UDP at the moment, so if the offerer +//! requests TCP+DTLS+SCTP, it will not respond. Support for TCP may be added in +//! the future (see ). //! //! ## DTLS+SCTP //! //! RFCs: 8841, 8832 //! -//! 
In both cases (TCP or UDP), the next layer is DTLS. DTLS is similar to the well-known TLS
-//! protocol, except that it doesn't guarantee ordering of delivery (as this is instead provided by
-//! the SCTP layer on top of DTLS). In other words, once the TCP or UDP connection is established,
-//! the browser will try to perform a DTLS handshake.
-//!
-//! During the ICE negotiation, each agent must include in its SDP packet a hash of the self-signed
-//! certificate that it will use during the DTLS handshake. In our use-case, where we try to
-//! hand-crate the SDP answer generated by the remote, this is problematic. A way to solve this
-//! is to make the hash a part of the remote's multiaddr. On the server side, we turn
-//! certificate verification off.
+//! In both cases (TCP or UDP), the next layer is DTLS. DTLS is similar to the
+//! well-known TLS protocol, except that it doesn't guarantee ordering of
+//! delivery (as this is instead provided by the SCTP layer on top of DTLS). In
+//! other words, once the TCP or UDP connection is established, the browser will
+//! try to perform a DTLS handshake.
+//!
+//! During the ICE negotiation, each agent must include in its SDP packet a hash
+//! of the self-signed certificate that it will use during the DTLS handshake.
+//! In our use-case, where we try to hand-craft the SDP answer generated by the
+//! remote, this is problematic. A way to solve this is to make the hash a part
+//! of the remote's multiaddr. On the server side, we turn certificate
+//! verification off.
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] diff --git a/transports/webrtc/src/tokio/certificate.rs b/transports/webrtc/src/tokio/certificate.rs index 81197af4132..c75dcea6268 100644 --- a/transports/webrtc/src/tokio/certificate.rs +++ b/transports/webrtc/src/tokio/certificate.rs @@ -50,8 +50,8 @@ impl Certificate { /// /// # Panics /// - /// This function will panic if there's no fingerprint with the SHA-256 algorithm (see - /// [`RTCCertificate::get_fingerprints`]). + /// This function will panic if there's no fingerprint with the SHA-256 + /// algorithm (see [`RTCCertificate::get_fingerprints`]). pub fn fingerprint(&self) -> Fingerprint { let fingerprints = self.inner.get_fingerprints(); let sha256_fingerprint = fingerprints @@ -72,7 +72,8 @@ impl Certificate { }) } - /// Serializes the certificate (including the private key) in PKCS#8 format in PEM. + /// Serializes the certificate (including the private key) in PKCS#8 format + /// in PEM. /// /// See [`RTCCertificate::serialize_pem`] #[cfg(feature = "pem")] @@ -82,7 +83,8 @@ impl Certificate { /// Extract the [`RTCCertificate`] from this wrapper. /// - /// This function is `pub(crate)` to avoid leaking the `webrtc` dependency to our users. + /// This function is `pub(crate)` to avoid leaking the `webrtc` dependency + /// to our users. 
pub(crate) fn to_rtc_certificate(&self) -> RTCCertificate { self.inner.clone() } @@ -100,9 +102,10 @@ enum Kind { #[cfg(all(test, feature = "pem"))] mod test { - use super::*; use rand::thread_rng; + use super::*; + #[test] fn test_certificate_serialize_pem_and_from_pem() { let cert = Certificate::generate(&mut thread_rng()).unwrap(); diff --git a/transports/webrtc/src/tokio/connection.rs b/transports/webrtc/src/tokio/connection.rs index 3bcc4c3193e..d57b7923b77 100644 --- a/transports/webrtc/src/tokio/connection.rs +++ b/transports/webrtc/src/tokio/connection.rs @@ -18,26 +18,28 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use futures::stream::FuturesUnordered; +use std::{ + pin::Pin, + sync::Arc, + task::{Context, Poll, Waker}, +}; + use futures::{ channel::{ mpsc, oneshot::{self, Sender}, }, + future::BoxFuture, lock::Mutex as FutMutex, + ready, + stream::FuturesUnordered, StreamExt, - {future::BoxFuture, ready}, }; use libp2p_core::muxing::{StreamMuxer, StreamMuxerEvent}; -use webrtc::data::data_channel::DataChannel as DetachedDataChannel; -use webrtc::data_channel::RTCDataChannel; -use webrtc::peer_connection::RTCPeerConnection; - -use std::task::Waker; -use std::{ - pin::Pin, - sync::Arc, - task::{Context, Poll}, +use webrtc::{ + data::data_channel::DataChannel as DetachedDataChannel, + data_channel::RTCDataChannel, + peer_connection::RTCPeerConnection, }; use crate::tokio::{error::Error, stream, stream::Stream}; @@ -46,11 +48,13 @@ use crate::tokio::{error::Error, stream, stream::Stream}; /// See [`Connection::poll_inbound`]. const MAX_DATA_CHANNELS_IN_FLIGHT: usize = 10; -/// A WebRTC connection, wrapping [`RTCPeerConnection`] and implementing [`StreamMuxer`] trait. +/// A WebRTC connection, wrapping [`RTCPeerConnection`] and implementing +/// [`StreamMuxer`] trait. pub struct Connection { /// [`RTCPeerConnection`] to the remote peer. 
/// - /// Uses futures mutex because used in async code (see poll_outbound and poll_close). + /// Uses futures mutex because used in async code (see poll_outbound and + /// poll_close). peer_conn: Arc>, /// Channel onto which incoming data channels are put. @@ -59,10 +63,12 @@ pub struct Connection { /// Future, which, once polled, will result in an outbound stream. outbound_fut: Option, Error>>>, - /// Future, which, once polled, will result in closing the entire connection. + /// Future, which, once polled, will result in closing the entire + /// connection. close_fut: Option>>, - /// A list of futures, which, once completed, signal that a [`Stream`] has been dropped. + /// A list of futures, which, once completed, signal that a [`Stream`] has + /// been dropped. drop_listeners: FuturesUnordered, no_drop_listeners_waker: Option, } @@ -92,10 +98,10 @@ impl Connection { /// Registers a handler for incoming data channels. /// - /// NOTE: `mpsc::Sender` is wrapped in `Arc` because cloning a raw sender would make the channel - /// unbounded. "The channel’s capacity is equal to buffer + num-senders. In other words, each - /// sender gets a guaranteed slot in the channel capacity..." - /// See + /// NOTE: `mpsc::Sender` is wrapped in `Arc` because cloning a raw sender + /// would make the channel unbounded. "The channel’s capacity is equal + /// to buffer + num-senders. In other words, each sender gets a + /// guaranteed slot in the channel capacity..." See async fn register_incoming_data_channels_handler( rtc_conn: &RTCPeerConnection, tx: Arc>>>, @@ -172,7 +178,9 @@ impl StreamMuxer for Connection { "Sender-end of channel should be owned by `RTCPeerConnection`" ); - Poll::Pending // Return `Pending` without registering a waker: If the channel is closed, we don't need to be called anymore. + Poll::Pending // Return `Pending` without registering a waker: + // If the channel is closed, we don't need to be + // called anymore. 
} } } diff --git a/transports/webrtc/src/tokio/fingerprint.rs b/transports/webrtc/src/tokio/fingerprint.rs index c075e486232..c737d0c8ce6 100644 --- a/transports/webrtc/src/tokio/fingerprint.rs +++ b/transports/webrtc/src/tokio/fingerprint.rs @@ -24,7 +24,8 @@ const SHA256: &str = "sha-256"; type Multihash = multihash::Multihash<64>; -/// A certificate fingerprint that is assumed to be created using the SHA256 hash algorithm. +/// A certificate fingerprint that is assumed to be created using the SHA256 +/// hash algorithm. #[derive(Debug, Eq, PartialEq, Copy, Clone)] pub struct Fingerprint(libp2p_webrtc_utils::Fingerprint); diff --git a/transports/webrtc/src/tokio/req_res_chan.rs b/transports/webrtc/src/tokio/req_res_chan.rs index fb29e16db27..5604761c09e 100644 --- a/transports/webrtc/src/tokio/req_res_chan.rs +++ b/transports/webrtc/src/tokio/req_res_chan.rs @@ -18,16 +18,17 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use futures::{ - channel::{mpsc, oneshot}, - SinkExt, StreamExt, -}; - use std::{ io, task::{Context, Poll}, }; +use futures::{ + channel::{mpsc, oneshot}, + SinkExt, + StreamExt, +}; + pub(crate) fn new(capacity: usize) -> (Sender, Receiver) { let (sender, receiver) = mpsc::channel(capacity); diff --git a/transports/webrtc/src/tokio/sdp.rs b/transports/webrtc/src/tokio/sdp.rs index 4be4c19f188..dbdb2fec616 100644 --- a/transports/webrtc/src/tokio/sdp.rs +++ b/transports/webrtc/src/tokio/sdp.rs @@ -18,10 +18,10 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-pub(crate) use libp2p_webrtc_utils::sdp::random_ufrag; -use libp2p_webrtc_utils::sdp::render_description; -use libp2p_webrtc_utils::Fingerprint; use std::net::SocketAddr; + +pub(crate) use libp2p_webrtc_utils::sdp::random_ufrag; +use libp2p_webrtc_utils::{sdp::render_description, Fingerprint}; use webrtc::peer_connection::sdp::session_description::RTCSessionDescription; /// Creates the SDP answer used by the client. @@ -40,7 +40,8 @@ pub(crate) fn answer( /// Creates the SDP offer used by the server. /// -/// Certificate verification is disabled which is why we hardcode a dummy fingerprint here. +/// Certificate verification is disabled which is why we hardcode a dummy +/// fingerprint here. pub(crate) fn offer(addr: SocketAddr, client_ufrag: &str) -> RTCSessionDescription { let offer = render_description( CLIENT_SESSION_DESCRIPTION, @@ -67,9 +68,9 @@ pub(crate) fn offer(addr: SocketAddr, client_ufrag: &str) -> RTCSessionDescripti // v= -> always 0 // o= // -// identifies the creator of the SDP document. We are allowed to use dummy values -// (`-` and `0.0.0.0` as ) to remain anonymous, which we do. Note that "IN" means -// "Internet". +// identifies the creator of the SDP document. We are allowed to +// use dummy values (`-` and `0.0.0.0` as ) to remain anonymous, +// which we do. Note that "IN" means "Internet". // // s= // @@ -82,15 +83,16 @@ pub(crate) fn offer(addr: SocketAddr, client_ufrag: &str) -> RTCSessionDescripti // // t= // -// Start and end of the validity of the session. `0 0` means that the session never expires. +// Start and end of the validity of the session. `0 0` means that the +// session never expires. // // m= ... // -// A `m=` line describes a request to establish a certain protocol. The protocol in this line -// (i.e. `TCP/DTLS/SCTP` or `UDP/DTLS/SCTP`) must always be the same as the one in the offer. -// We know that this is true because we tweak the offer to match the protocol. 
The `` -// component must always be `webrtc-datachannel` for WebRTC. -// RFCs: 8839, 8866, 8841 +// A `m=` line describes a request to establish a certain protocol. The +// protocol in this line (i.e. `TCP/DTLS/SCTP` or `UDP/DTLS/SCTP`) must +// always be the same as the one in the offer. We know that this is true +// because we tweak the offer to match the protocol. The `` component +// must always be `webrtc-datachannel` for WebRTC. RFCs: 8839, 8866, 8841 // // a=mid: // @@ -98,7 +100,8 @@ pub(crate) fn offer(addr: SocketAddr, client_ufrag: &str) -> RTCSessionDescripti // // a=ice-options:ice2 // -// Indicates that we are complying with RFC8839 (as opposed to the legacy RFC5245). +// Indicates that we are complying with RFC8839 (as opposed to the legacy +// RFC5245). // // a=ice-ufrag: // a=ice-pwd: @@ -114,14 +117,15 @@ pub(crate) fn offer(addr: SocketAddr, client_ufrag: &str) -> RTCSessionDescripti // // a=setup:actpass // -// The endpoint that is the offerer MUST use the setup attribute value of setup:actpass and be -// prepared to receive a client_hello before it receives the answer. +// The endpoint that is the offerer MUST use the setup attribute value of +// setup:actpass and be prepared to receive a client_hello before it +// receives the answer. // // a=sctp-port: // // The SCTP port (RFC8841) -// Note it's different from the "m=" line port value, which indicates the port of the -// underlying transport-layer protocol (UDP or TCP). +// Note it's different from the "m=" line port value, which indicates the +// port of the underlying transport-layer protocol (UDP or TCP). // // a=max-message-size: // diff --git a/transports/webrtc/src/tokio/stream.rs b/transports/webrtc/src/tokio/stream.rs index 4278a751e27..6dc4e091a9f 100644 --- a/transports/webrtc/src/tokio/stream.rs +++ b/transports/webrtc/src/tokio/stream.rs @@ -31,8 +31,9 @@ use webrtc::data::data_channel::{DataChannel, PollDataChannel}; /// A substream on top of a WebRTC data channel. 
/// -/// To be a proper libp2p substream, we need to implement [`AsyncRead`] and [`AsyncWrite`] as well -/// as support a half-closed state which we do by framing messages in a protobuf envelope. +/// To be a proper libp2p substream, we need to implement [`AsyncRead`] and +/// [`AsyncWrite`] as well as support a half-closed state which we do by framing +/// messages in a protobuf envelope. pub struct Stream { inner: libp2p_webrtc_utils::Stream>, } @@ -40,8 +41,8 @@ pub struct Stream { pub(crate) type DropListener = libp2p_webrtc_utils::DropListener>; impl Stream { - /// Returns a new `Substream` and a listener, which will notify the receiver when/if the substream - /// is dropped. + /// Returns a new `Substream` and a listener, which will notify the receiver + /// when/if the substream is dropped. pub(crate) fn new(data_channel: Arc) -> (Self, DropListener) { let mut data_channel = PollDataChannel::new(data_channel).compat(); data_channel.get_mut().set_read_buf_capacity(MAX_MSG_LEN); diff --git a/transports/webrtc/src/tokio/transport.rs b/transports/webrtc/src/tokio/transport.rs index 62049c8f59b..bc1723f2aad 100644 --- a/transports/webrtc/src/tokio/transport.rs +++ b/transports/webrtc/src/tokio/transport.rs @@ -18,6 +18,13 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
+use std::{ + io, + net::{IpAddr, SocketAddr}, + pin::Pin, + task::{Context, Poll, Waker}, +}; + use futures::{future::BoxFuture, prelude::*, stream::SelectAll}; use if_watch::{tokio::IfWatcher, IfEvent}; use libp2p_core::{ @@ -28,14 +35,6 @@ use libp2p_identity as identity; use libp2p_identity::PeerId; use webrtc::peer_connection::configuration::RTCConfiguration; -use std::net::IpAddr; -use std::{ - io, - net::SocketAddr, - pin::Pin, - task::{Context, Poll, Waker}, -}; - use crate::tokio::{ certificate::Certificate, connection::Connection, @@ -60,8 +59,8 @@ impl Transport { /// /// ``` /// use libp2p_identity as identity; + /// use libp2p_webrtc::tokio::{Certificate, Transport}; /// use rand::thread_rng; - /// use libp2p_webrtc::tokio::{Transport, Certificate}; /// /// let id_keys = identity::Keypair::generate_ed25519(); /// let transport = Transport::new(id_keys, Certificate::generate(&mut thread_rng()).unwrap()); @@ -124,8 +123,8 @@ impl libp2p_core::Transport for Transport { dial_opts: DialOpts, ) -> Result> { if dial_opts.role.is_listener() { - // TODO: As the listener of a WebRTC hole punch, we need to send a random UDP packet to the - // `addr`. See DCUtR specification below. + // TODO: As the listener of a WebRTC hole punch, we need to send a random UDP + // packet to the `addr`. See DCUtR specification below. // // https://github.com/libp2p/specs/blob/master/relay/DCUtR.md#the-protocol tracing::warn!("WebRTC hole punch is not yet supported"); @@ -196,7 +195,8 @@ struct ListenStream { /// Pending event to reported. pending_event: Option<::Item>, - /// The stream must be awaken after it has been closed to deliver the last event. + /// The stream must be awaken after it has been closed to deliver the last + /// event. close_listener_waker: Option, } @@ -231,8 +231,8 @@ impl ListenStream { }) } - /// Report the listener as closed in a [`TransportEvent::ListenerClosed`] and - /// terminate the stream. 
+ /// Report the listener as closed in a [`TransportEvent::ListenerClosed`] + /// and terminate the stream. fn close(&mut self, reason: Result<(), Error>) { match self.report_closed { Some(_) => tracing::debug!("Listener was already closed"), @@ -294,7 +294,8 @@ impl ListenStream { Poll::Pending } - /// Constructs a [`Multiaddr`] for the given IP address that represents our listen address. + /// Constructs a [`Multiaddr`] for the given IP address that represents our + /// listen address. fn listen_multiaddress(&self, ip: IpAddr) -> Multiaddr { let socket_addr = SocketAddr::new(ip, self.listen_addr.port()); @@ -360,7 +361,8 @@ impl Stream for ListenStream { } } -/// A config which holds peer's keys and a x509Cert used to authenticate WebRTC communications. +/// A config which holds peer's keys and a x509Cert used to authenticate WebRTC +/// communications. #[derive(Clone)] struct Config { inner: RTCConfiguration, @@ -422,15 +424,18 @@ fn parse_webrtc_listen_addr(addr: &Multiaddr) -> Option { Some(SocketAddr::new(ip, port)) } -// Tests ////////////////////////////////////////////////////////////////////////////////////////// +// Tests /////////////////////////////////////////////////////////////////////// +// /////////////////// #[cfg(test)] mod tests { - use super::*; + use std::net::Ipv6Addr; + use futures::future::poll_fn; use libp2p_core::Transport as _; use rand::thread_rng; - use std::net::Ipv6Addr; + + use super::*; #[test] fn missing_webrtc_protocol() { @@ -443,7 +448,8 @@ mod tests { #[test] fn tcp_is_invalid_protocol() { - let addr = "/ip4/127.0.0.1/tcp/12345/webrtc-direct/certhash/uEiDikp5KVUgkLta1EjUN-IKbHk-dUBg8VzKgf5nXxLK46w" + let addr = "/ip4/127.0.0.1/tcp/12345/webrtc-direct/certhash/\ + uEiDikp5KVUgkLta1EjUN-IKbHk-dUBg8VzKgf5nXxLK46w" .parse() .unwrap(); @@ -454,7 +460,8 @@ mod tests { #[test] fn cannot_follow_other_protocols_after_certhash() { - let addr = 
"/ip4/127.0.0.1/udp/12345/webrtc-direct/certhash/uEiDikp5KVUgkLta1EjUN-IKbHk-dUBg8VzKgf5nXxLK46w/tcp/12345" + let addr = "/ip4/127.0.0.1/udp/12345/webrtc-direct/certhash/\ + uEiDikp5KVUgkLta1EjUN-IKbHk-dUBg8VzKgf5nXxLK46w/tcp/12345" .parse() .unwrap(); @@ -477,11 +484,12 @@ mod tests { #[test] fn fails_to_parse_if_certhash_present_but_wrong_hash_function() { - // We only support SHA2-256 for now but this certhash has been encoded with SHA3-256. - let addr = - "/ip6/::1/udp/12345/webrtc-direct/certhash/uFiCH_tkkzpAwkoIDbE4I7QtQksFMYs5nQ4MyYrkgCJYi4A" - .parse() - .unwrap(); + // We only support SHA2-256 for now but this certhash has been encoded with + // SHA3-256. + let addr = "/ip6/::1/udp/12345/webrtc-direct/certhash/\ + uFiCH_tkkzpAwkoIDbE4I7QtQksFMYs5nQ4MyYrkgCJYi4A" + .parse() + .unwrap(); let maybe_addr = parse_webrtc_listen_addr(&addr); @@ -498,8 +506,8 @@ mod tests { .now_or_never() .is_none()); - // Run test twice to check that there is no unexpected behaviour if `QuicTransport.listener` - // is temporarily empty. + // Run test twice to check that there is no unexpected behaviour if + // `QuicTransport.listener` is temporarily empty. for _ in 0..2 { let listener = ListenerId::next(); transport @@ -540,8 +548,8 @@ mod tests { } e => panic!("Unexpected event: {e:?}"), } - // Poll once again so that the listener has the chance to return `Poll::Ready(None)` and - // be removed from the list of listeners. + // Poll once again so that the listener has the chance to return + // `Poll::Ready(None)` and be removed from the list of listeners. 
assert!(poll_fn(|cx| Pin::new(&mut transport).as_mut().poll(cx)) .now_or_never() .is_none()); diff --git a/transports/webrtc/src/tokio/udp_mux.rs b/transports/webrtc/src/tokio/udp_mux.rs index 7a8d960826d..64c40675aac 100644 --- a/transports/webrtc/src/tokio/udp_mux.rs +++ b/transports/webrtc/src/tokio/udp_mux.rs @@ -18,6 +18,15 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. +use std::{ + collections::{HashMap, HashSet}, + io, + io::ErrorKind, + net::SocketAddr, + sync::Arc, + task::{Context, Poll}, +}; + use async_trait::async_trait; use futures::{ channel::oneshot, @@ -31,23 +40,17 @@ use stun::{ }; use thiserror::Error; use tokio::{io::ReadBuf, net::UdpSocket}; -use webrtc::ice::udp_mux::{UDPMux, UDPMuxConn, UDPMuxConnParams, UDPMuxWriter}; -use webrtc::util::{Conn, Error}; - -use std::{ - collections::{HashMap, HashSet}, - io, - io::ErrorKind, - net::SocketAddr, - sync::Arc, - task::{Context, Poll}, +use webrtc::{ + ice::udp_mux::{UDPMux, UDPMuxConn, UDPMuxConnParams, UDPMuxWriter}, + util::{Conn, Error}, }; use crate::tokio::req_res_chan; const RECEIVE_MTU: usize = 8192; -/// A previously unseen address of a remote which has sent us an ICE binding request. +/// A previously unseen address of a remote which has sent us an ICE binding +/// request. #[derive(Debug)] pub(crate) struct NewAddr { pub(crate) addr: SocketAddr, @@ -78,7 +81,8 @@ pub(crate) struct UDPMuxNewAddr { /// Maps from socket address to the underlying connection. address_map: HashMap, - /// Set of the new addresses to avoid sending the same address multiple times. + /// Set of the new addresses to avoid sending the same address multiple + /// times. new_addrs: HashSet, /// `true` when UDP mux is closed. @@ -154,8 +158,8 @@ impl UDPMuxNewAddr { Ok(UDPMuxConn::new(params)) } - /// Returns a muxed connection if the `ufrag` from the given STUN message matches an existing - /// connection. 
+ /// Returns a muxed connection if the `ufrag` from the given STUN message + /// matches an existing connection. fn conn_from_stun_message( &self, buffer: &[u8], @@ -181,8 +185,8 @@ impl UDPMuxNewAddr { } } - /// Reads from the underlying UDP socket and either reports a new address or proxies data to the - /// muxed connection. + /// Reads from the underlying UDP socket and either reports a new address or + /// proxies data to the muxed connection. pub(crate) fn poll(&mut self, cx: &mut Context) -> Poll { let mut recv_buf = [0u8; RECEIVE_MTU]; @@ -303,8 +307,9 @@ impl UDPMuxNewAddr { if let Poll::Ready(Some((ufrag, response))) = self.remove_conn_command.poll_next_unpin(cx) { - // Pion's ice implementation has both `RemoveConnByFrag` and `RemoveConn`, but since `conns` - // is keyed on `ufrag` their implementation is equivalent. + // Pion's ice implementation has both `RemoveConnByFrag` and `RemoveConn`, but + // since `conns` is keyed on `ufrag` their implementation is + // equivalent. if let Some(removed_conn) = self.conns.remove(&ufrag) { for address in removed_conn.get_addresses() { @@ -336,8 +341,9 @@ impl UDPMuxNewAddr { let conn = self.address_map.get(&addr); let conn = match conn { - // If we couldn't find the connection based on source address, see if - // this is a STUN message and if so if we can find the connection based on ufrag. + // If we couldn't find the connection based on source address, see + // if this is a STUN message and if + // so if we can find the connection based on ufrag. None if is_stun_message(read.filled()) => { match self.conn_from_stun_message(read.filled(), &addr) { Some(Ok(s)) => Some(s), @@ -417,8 +423,8 @@ impl UDPMuxNewAddr { } } -/// Handle which utilizes [`req_res_chan`] to transmit commands (e.g. remove connection) from the -/// WebRTC ICE agent to [`UDPMuxNewAddr::poll`]. +/// Handle which utilizes [`req_res_chan`] to transmit commands (e.g. remove +/// connection) from the WebRTC ICE agent to [`UDPMuxNewAddr::poll`]. 
pub(crate) struct UdpMuxHandle { close_sender: req_res_chan::Sender<(), Result<(), Error>>, get_conn_sender: req_res_chan::Sender, Error>>, @@ -426,7 +432,8 @@ pub(crate) struct UdpMuxHandle { } impl UdpMuxHandle { - /// Returns a new `UdpMuxHandle` and `close`, `get_conn` and `remove` receivers. + /// Returns a new `UdpMuxHandle` and `close`, `get_conn` and `remove` + /// receivers. pub(crate) fn new() -> ( Self, req_res_chan::Receiver<(), Result<(), Error>>, @@ -475,8 +482,8 @@ impl UDPMux for UdpMuxHandle { } } -/// Handle which utilizes [`req_res_chan`] to transmit commands from [`UDPMuxConn`] connections to -/// [`UDPMuxNewAddr::poll`]. +/// Handle which utilizes [`req_res_chan`] to transmit commands from +/// [`UDPMuxConn`] connections to [`UDPMuxNewAddr::poll`]. pub(crate) struct UdpMuxWriterHandle { registration_channel: req_res_chan::Sender<(UDPMuxConn, SocketAddr), ()>, send_channel: req_res_chan::Sender<(Vec, SocketAddr), Result>, @@ -530,8 +537,8 @@ impl UDPMuxWriter for UdpMuxWriterHandle { } } -/// Gets the ufrag from the given STUN message or returns an error, if failed to decode or the -/// username attribute is not present. +/// Gets the ufrag from the given STUN message or returns an error, if failed to +/// decode or the username attribute is not present. fn ufrag_from_stun_message(buffer: &[u8], local_ufrag: bool) -> Result { let (result, message) = { let mut m = STUNMessage::new(); diff --git a/transports/webrtc/src/tokio/upgrade.rs b/transports/webrtc/src/tokio/upgrade.rs index 4145a5e7510..756f272b80f 100644 --- a/transports/webrtc/src/tokio/upgrade.rs +++ b/transports/webrtc/src/tokio/upgrade.rs @@ -18,27 +18,23 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use libp2p_webrtc_utils::{noise, Fingerprint}; +use std::{net::SocketAddr, sync::Arc, time::Duration}; -use futures::channel::oneshot; -use futures::future::Either; +use futures::{channel::oneshot, future::Either}; use futures_timer::Delay; use libp2p_identity as identity; use libp2p_identity::PeerId; -use std::{net::SocketAddr, sync::Arc, time::Duration}; -use webrtc::api::setting_engine::SettingEngine; -use webrtc::api::APIBuilder; -use webrtc::data::data_channel::DataChannel; -use webrtc::data_channel::data_channel_init::RTCDataChannelInit; -use webrtc::dtls_transport::dtls_role::DTLSRole; -use webrtc::ice::network_type::NetworkType; -use webrtc::ice::udp_mux::UDPMux; -use webrtc::ice::udp_network::UDPNetwork; -use webrtc::peer_connection::configuration::RTCConfiguration; -use webrtc::peer_connection::RTCPeerConnection; - -use crate::tokio::sdp::random_ufrag; -use crate::tokio::{error::Error, sdp, stream::Stream, Connection}; +use libp2p_webrtc_utils::{noise, Fingerprint}; +use webrtc::{ + api::{setting_engine::SettingEngine, APIBuilder}, + data::data_channel::DataChannel, + data_channel::data_channel_init::RTCDataChannelInit, + dtls_transport::dtls_role::DTLSRole, + ice::{network_type::NetworkType, udp_mux::UDPMux, udp_network::UDPNetwork}, + peer_connection::{configuration::RTCConfiguration, RTCPeerConnection}, +}; + +use crate::tokio::{error::Error, sdp, sdp::random_ufrag, stream::Stream, Connection}; /// Creates a new outbound WebRTC connection. pub(crate) async fn outbound( @@ -136,8 +132,8 @@ async fn new_inbound_connection( se.disable_certificate_fingerprint_verification(true); // Act as a DTLS server (one which waits for a connection). // - // NOTE: removing this seems to break DTLS setup (both sides send `ClientHello` messages, - // but none end up responding). + // NOTE: removing this seems to break DTLS setup (both sides send `ClientHello` + // messages, but none end up responding). 
se.set_answering_dtls_role(DTLSRole::Server)?; } @@ -157,8 +153,8 @@ fn setting_engine( ) -> SettingEngine { let mut se = SettingEngine::default(); - // Set both ICE user and password to our fingerprint because that's what the client is - // expecting.. + // Set both ICE user and password to our fingerprint because that's what the + // client is expecting.. se.set_ice_credentials(ufrag.to_owned(), ufrag.to_owned()); se.set_udp_network(UDPNetwork::Muxed(udp_mux.clone())); @@ -168,8 +164,8 @@ fn setting_engine( // Set the desired network type. // - // NOTE: if not set, a [`webrtc_ice::agent::Agent`] might pick a wrong local candidate - // (e.g. IPv6 `[::1]` while dialing an IPv4 `10.11.12.13`). + // NOTE: if not set, a [`webrtc_ice::agent::Agent`] might pick a wrong local + // candidate (e.g. IPv6 `[::1]` while dialing an IPv4 `10.11.12.13`). let network_type = match addr { SocketAddr::V4(_) => NetworkType::Udp4, SocketAddr::V6(_) => NetworkType::Udp6, @@ -187,7 +183,8 @@ async fn get_remote_fingerprint(conn: &RTCPeerConnection) -> Fingerprint { } async fn create_substream_for_noise_handshake(conn: &RTCPeerConnection) -> Result { - // NOTE: the data channel w/ `negotiated` flag set to `true` MUST be created on both ends. + // NOTE: the data channel w/ `negotiated` flag set to `true` MUST be created on + // both ends. let data_channel = conn .create_data_channel( "", diff --git a/transports/webrtc/tests/smoke.rs b/transports/webrtc/tests/smoke.rs index d606d66c41f..354db9b6e3c 100644 --- a/transports/webrtc/tests/smoke.rs +++ b/transports/webrtc/tests/smoke.rs @@ -18,21 +18,35 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use futures::channel::mpsc; -use futures::future::{BoxFuture, Either}; -use futures::stream::StreamExt; -use futures::{future, ready, AsyncReadExt, AsyncWriteExt, FutureExt, SinkExt}; -use libp2p_core::muxing::{StreamMuxerBox, StreamMuxerExt}; -use libp2p_core::transport::{Boxed, DialOpts, ListenerId, PortUse, TransportEvent}; -use libp2p_core::{Endpoint, Multiaddr, Transport}; +use std::{ + future::Future, + num::NonZeroU8, + pin::Pin, + task::{Context, Poll}, + time::Duration, +}; + +use futures::{ + channel::mpsc, + future, + future::{BoxFuture, Either}, + ready, + stream::StreamExt, + AsyncReadExt, + AsyncWriteExt, + FutureExt, + SinkExt, +}; +use libp2p_core::{ + muxing::{StreamMuxerBox, StreamMuxerExt}, + transport::{Boxed, DialOpts, ListenerId, PortUse, TransportEvent}, + Endpoint, + Multiaddr, + Transport, +}; use libp2p_identity::PeerId; use libp2p_webrtc as webrtc; use rand::{thread_rng, RngCore}; -use std::future::Future; -use std::num::NonZeroU8; -use std::pin::Pin; -use std::task::{Context, Poll}; -use std::time::Duration; use tracing_subscriber::EnvFilter; #[tokio::test] diff --git a/transports/websocket-websys/src/lib.rs b/transports/websocket-websys/src/lib.rs index 17b07c71c0a..ae76a58ea7d 100644 --- a/transports/websocket-websys/src/lib.rs +++ b/transports/websocket-websys/src/lib.rs @@ -7,8 +7,8 @@ // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
// // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, @@ -22,21 +22,25 @@ mod web_context; +use std::{ + cmp::min, + pin::Pin, + rc::Rc, + sync::{ + atomic::{AtomicBool, Ordering}, + Mutex, + }, + task::{Context, Poll}, +}; + use bytes::BytesMut; -use futures::task::AtomicWaker; -use futures::{future::Ready, io, prelude::*}; +use futures::{future::Ready, io, prelude::*, task::AtomicWaker}; use js_sys::Array; -use libp2p_core::transport::DialOpts; use libp2p_core::{ multiaddr::{Multiaddr, Protocol}, - transport::{ListenerId, TransportError, TransportEvent}, + transport::{DialOpts, ListenerId, TransportError, TransportEvent}, }; use send_wrapper::SendWrapper; -use std::cmp::min; -use std::rc::Rc; -use std::sync::atomic::{AtomicBool, Ordering}; -use std::sync::Mutex; -use std::{pin::Pin, task::Context, task::Poll}; use wasm_bindgen::prelude::*; use web_sys::{CloseEvent, Event, MessageEvent, WebSocket}; @@ -60,13 +64,13 @@ use crate::web_context::WebContext; /// .multiplex(yamux::Config::default()) /// .boxed(); /// ``` -/// #[derive(Default)] pub struct Transport { _private: (), } -/// Arbitrary, maximum amount we are willing to buffer before we throttle our user. +/// Arbitrary, maximum amount we are willing to buffer before we throttle our +/// user. const MAX_BUFFER: usize = 1024 * 1024; impl libp2p_core::Transport for Transport { @@ -174,7 +178,8 @@ struct Inner { /// Waker for when we are waiting for the WebSocket to be opened. open_waker: Rc, - /// Waker for when we are waiting to write (again) to the WebSocket because we previously exceeded the [`MAX_BUFFER`] threshold. + /// Waker for when we are waiting to write (again) to the WebSocket because + /// we previously exceeded the [`MAX_BUFFER`] threshold. write_waker: Rc, /// Waker for when we are waiting for the WebSocket to be closed. 
@@ -306,7 +311,8 @@ impl Connection { .expect("to have a window or worker context") .set_interval_with_callback_and_timeout_and_arguments( on_buffered_amount_low_closure.as_ref().unchecked_ref(), - 100, // Chosen arbitrarily and likely worth tuning. Due to low impact of the /ws transport, no further effort was invested at the time. + 100, /* Chosen arbitrarily and likely worth tuning. Due to low impact of the /ws + * transport, no further effort was invested at the time. */ &Array::new(), ) .expect("to be able to set an interval"); @@ -432,7 +438,8 @@ impl AsyncWrite for Connection { impl Drop for Connection { fn drop(&mut self) { - // Unset event listeners, as otherwise they will be called by JS after the handlers have already been dropped. + // Unset event listeners, as otherwise they will be called by JS after the + // handlers have already been dropped. self.inner.socket.set_onclose(None); self.inner.socket.set_onerror(None); self.inner.socket.set_onopen(None); @@ -456,9 +463,10 @@ impl Drop for Connection { #[cfg(test)] mod tests { - use super::*; use libp2p_identity::PeerId; + use super::*; + #[test] fn extract_url() { let peer_id = PeerId::random(); diff --git a/transports/websocket/src/error.rs b/transports/websocket/src/error.rs index 7dc22331bcd..efab95a7621 100644 --- a/transports/websocket/src/error.rs +++ b/transports/websocket/src/error.rs @@ -18,10 +18,12 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::tls; -use libp2p_core::Multiaddr; use std::{error, fmt}; +use libp2p_core::Multiaddr; + +use crate::tls; + /// Error in WebSockets. #[derive(Debug)] pub enum Error { diff --git a/transports/websocket/src/framed.rs b/transports/websocket/src/framed.rs index 259be6a68f8..827a1864818 100644 --- a/transports/websocket/src/framed.rs +++ b/transports/websocket/src/framed.rs @@ -18,11 +18,22 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::{error::Error, quicksink, tls}; +use std::{ + borrow::Cow, + collections::HashMap, + fmt, + io, + mem, + net::IpAddr, + ops::DerefMut, + pin::Pin, + sync::Arc, + task::{Context, Poll}, +}; + use either::Either; use futures::{future::BoxFuture, prelude::*, ready, stream::BoxStream}; -use futures_rustls::rustls::pki_types::ServerName; -use futures_rustls::{client, server}; +use futures_rustls::{client, rustls::pki_types::ServerName, server}; use libp2p_core::{ multiaddr::{Multiaddr, Protocol}, transport::{DialOpts, ListenerId, TransportError, TransportEvent}, @@ -33,12 +44,10 @@ use soketto::{ connection::{self, CloseReason}, handshake, }; -use std::borrow::Cow; -use std::net::IpAddr; -use std::{collections::HashMap, ops::DerefMut, sync::Arc}; -use std::{fmt, io, mem, pin::Pin, task::Context, task::Poll}; use url::Url; +use crate::{error::Error, quicksink, tls}; + /// Max. number of payload bytes of a single frame. const MAX_DATA_SIZE: usize = 256 * 1024 * 1024; @@ -809,10 +818,12 @@ where #[cfg(test)] mod tests { - use super::*; - use libp2p_identity::PeerId; use std::io; + use libp2p_identity::PeerId; + + use super::*; + #[test] fn listen_addr() { let tcp_addr = "/ip4/0.0.0.0/tcp/2222".parse::().unwrap(); diff --git a/transports/websocket/src/lib.rs b/transports/websocket/src/lib.rs index cbc923613dd..9a48a477624 100644 --- a/transports/websocket/src/lib.rs +++ b/transports/websocket/src/lib.rs @@ -27,6 +27,12 @@ pub mod framed; mod quicksink; pub mod tls; +use std::{ + io, + pin::Pin, + task::{Context, Poll}, +}; + use error::Error; use framed::{Connection, Incoming}; use futures::{future::BoxFuture, prelude::*, ready}; @@ -37,25 +43,23 @@ use libp2p_core::{ Transport, }; use rw_stream_sink::RwStreamSink; -use std::{ - io, - pin::Pin, - task::{Context, Poll}, -}; /// A Websocket transport. /// -/// DO NOT wrap this transport with a DNS transport if you want Secure Websockets to work. 
+/// DO NOT wrap this transport with a DNS transport if you want Secure +/// Websockets to work. /// -/// A Secure Websocket transport needs to wrap DNS transport to resolve domain names after -/// they are checked against the remote certificates. Use a combination of DNS and TCP transports -/// to build a Secure Websocket transport. +/// A Secure Websocket transport needs to wrap DNS transport to resolve domain +/// names after they are checked against the remote certificates. Use a +/// combination of DNS and TCP transports to build a Secure Websocket transport. /// -/// If you don't need Secure Websocket's support, use a plain TCP transport as an inner transport. +/// If you don't need Secure Websocket's support, use a plain TCP transport as +/// an inner transport. /// /// # Dependencies /// -/// This transport requires the `zlib` shared library to be installed on the system. +/// This transport requires the `zlib` shared library to be installed on the +/// system. /// /// Future releases might lift this requirement, see . 
/// @@ -75,18 +79,28 @@ use std::{ /// # #[async_std::main] /// # async fn main() { /// -/// let mut transport = websocket::WsConfig::new(dns::async_std::Transport::system( -/// tcp::async_io::Transport::new(tcp::Config::default()), -/// ).await.unwrap()); +/// let mut transport = websocket::WsConfig::new( +/// dns::async_std::Transport::system(tcp::async_io::Transport::new(tcp::Config::default())) +/// .await +/// .unwrap(), +/// ); /// /// let rcgen_cert = generate_simple_self_signed(vec!["localhost".to_string()]).unwrap(); /// let priv_key = websocket::tls::PrivateKey::new(rcgen_cert.serialize_private_key_der()); /// let cert = websocket::tls::Certificate::new(rcgen_cert.serialize_der().unwrap()); /// transport.set_tls_config(websocket::tls::Config::new(priv_key, vec![cert]).unwrap()); /// -/// let id = transport.listen_on(ListenerId::next(), "/ip4/127.0.0.1/tcp/0/tls/ws".parse().unwrap()).unwrap(); +/// let id = transport +/// .listen_on( +/// ListenerId::next(), +/// "/ip4/127.0.0.1/tcp/0/tls/ws".parse().unwrap(), +/// ) +/// .unwrap(); /// -/// let addr = future::poll_fn(|cx| Pin::new(&mut transport).poll(cx)).await.into_new_address().unwrap(); +/// let addr = future::poll_fn(|cx| Pin::new(&mut transport).poll(cx)) +/// .await +/// .into_new_address() +/// .unwrap(); /// println!("Listening on {addr}"); /// /// # } @@ -105,13 +119,20 @@ use std::{ /// # #[async_std::main] /// # async fn main() { /// -/// let mut transport = websocket::WsConfig::new( -/// tcp::async_io::Transport::new(tcp::Config::default()), -/// ); +/// let mut transport = +/// websocket::WsConfig::new(tcp::async_io::Transport::new(tcp::Config::default())); /// -/// let id = transport.listen_on(ListenerId::next(), "/ip4/127.0.0.1/tcp/0/ws".parse().unwrap()).unwrap(); +/// let id = transport +/// .listen_on( +/// ListenerId::next(), +/// "/ip4/127.0.0.1/tcp/0/ws".parse().unwrap(), +/// ) +/// .unwrap(); /// -/// let addr = future::poll_fn(|cx| Pin::new(&mut 
transport).poll(cx)).await.into_new_address().unwrap(); +/// let addr = future::poll_fn(|cx| Pin::new(&mut transport).poll(cx)) +/// .await +/// .into_new_address() +/// .unwrap(); /// println!("Listening on {addr}"); /// /// # } @@ -218,7 +239,8 @@ where } } -/// Type alias corresponding to `framed::WsConfig::Dial` and `framed::WsConfig::ListenerUpgrade`. +/// Type alias corresponding to `framed::WsConfig::Dial` and +/// `framed::WsConfig::ListenerUpgrade`. pub type InnerFuture = BoxFuture<'static, Result, Error>>; /// Function type that wraps a websocket connection (see. `wrap_connection`). @@ -279,20 +301,24 @@ where } } -// Tests ////////////////////////////////////////////////////////////////////////////////////////// +// Tests /////////////////////////////////////////////////////////////////////// +// /////////////////// #[cfg(test)] mod tests { - use super::WsConfig; use futures::prelude::*; use libp2p_core::{ multiaddr::Protocol, transport::{DialOpts, ListenerId, PortUse}, - Endpoint, Multiaddr, Transport, + Endpoint, + Multiaddr, + Transport, }; use libp2p_identity::PeerId; use libp2p_tcp as tcp; + use super::WsConfig; + #[test] fn dialer_connects_to_listener_ipv4() { let a = "/ip4/127.0.0.1/tcp/0/ws".parse().unwrap(); diff --git a/transports/websocket/src/quicksink.rs b/transports/websocket/src/quicksink.rs index 4f620536ea1..a0e2fb8b0f6 100644 --- a/transports/websocket/src/quicksink.rs +++ b/transports/websocket/src/quicksink.rs @@ -19,26 +19,28 @@ // ```no_run // use async_std::io; // use futures::prelude::*; +// // use crate::quicksink::Action; // // crate::quicksink::make_sink(io::stdout(), |mut stdout, action| async move { // match action { // Action::Send(x) => stdout.write_all(x).await?, // Action::Flush => stdout.flush().await?, -// Action::Close => stdout.close().await? 
+// Action::Close => stdout.close().await?, // } // Ok::<_, io::Error>(stdout) // }); // ``` -use futures::{ready, sink::Sink}; -use pin_project_lite::pin_project; use std::{ future::Future, pin::Pin, task::{Context, Poll}, }; +use futures::{ready, sink::Sink}; +use pin_project_lite::pin_project; + /// Returns a `Sink` impl based on the initial value and the given closure. /// /// The closure will be applied to the initial value and an [`Action`] that @@ -291,10 +293,11 @@ where #[cfg(test)] mod tests { - use crate::quicksink::{make_sink, Action}; use async_std::{io, task}; use futures::{channel::mpsc, prelude::*}; + use crate::quicksink::{make_sink, Action}; + #[test] fn smoke_test() { task::block_on(async { diff --git a/transports/websocket/src/tls.rs b/transports/websocket/src/tls.rs index 77090e21675..c5f6e9f5c2d 100644 --- a/transports/websocket/src/tls.rs +++ b/transports/websocket/src/tls.rs @@ -18,9 +18,10 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use futures_rustls::{rustls, TlsAcceptor, TlsConnector}; use std::{fmt, io, sync::Arc}; +use futures_rustls::{rustls, TlsAcceptor, TlsConnector}; + /// TLS configuration. #[derive(Clone)] pub struct Config { @@ -38,7 +39,8 @@ impl fmt::Debug for Config { pub struct PrivateKey(rustls::pki_types::PrivateKeyDer<'static>); impl PrivateKey { - /// Assert the given bytes are DER-encoded ASN.1 in either PKCS#8 or PKCS#1 format. + /// Assert the given bytes are DER-encoded ASN.1 in either PKCS#8 or PKCS#1 + /// format. pub fn new(bytes: Vec) -> Self { PrivateKey( rustls::pki_types::PrivateKeyDer::try_from(bytes) @@ -65,7 +67,8 @@ impl Certificate { } impl Config { - /// Create a new TLS configuration with the given server key and certificate chain. + /// Create a new TLS configuration with the given server key and certificate + /// chain. 
pub fn new(key: PrivateKey, certs: I) -> Result where I: IntoIterator, @@ -164,7 +167,8 @@ pub(crate) fn dns_name_ref(name: &str) -> Result Multihash<64> { let (_base, bytes) = multibase::decode(s).unwrap(); Multihash::from_bytes(&bytes).unwrap() @@ -159,7 +163,13 @@ mod tests { #[test] fn valid_webtransport_multiaddr() { - let addr = Multiaddr::from_str("/ip4/127.0.0.1/udp/44874/quic-v1/webtransport/certhash/uEiCaDd1Ca1A8IVJ3hsIxIyi11cwxaDKqzVrBkGJbKZU5ng/certhash/uEiDv-VGW8oXxui_G_Kqp-87YjvET-Hr2qYAMYPePJDcsjQ/p2p/12D3KooWR7EfNv5SLtgjMRjUwR8AvNu3hP4fLrtSa9fmHHXKYWNG").unwrap(); + let addr = Multiaddr::from_str( + "/ip4/127.0.0.1/udp/44874/quic-v1/webtransport/certhash/\ + uEiCaDd1Ca1A8IVJ3hsIxIyi11cwxaDKqzVrBkGJbKZU5ng/certhash/\ + uEiDv-VGW8oXxui_G_Kqp-87YjvET-Hr2qYAMYPePJDcsjQ/p2p/\ + 12D3KooWR7EfNv5SLtgjMRjUwR8AvNu3hP4fLrtSa9fmHHXKYWNG", + ) + .unwrap(); let endpoint = Endpoint::from_multiaddr(&addr).unwrap(); assert_eq!(endpoint.host, "127.0.0.1"); @@ -187,7 +197,11 @@ mod tests { #[test] fn valid_webtransport_multiaddr_without_certhashes() { - let addr = Multiaddr::from_str("/ip4/127.0.0.1/udp/44874/quic-v1/webtransport/p2p/12D3KooWR7EfNv5SLtgjMRjUwR8AvNu3hP4fLrtSa9fmHHXKYWNG").unwrap(); + let addr = Multiaddr::from_str( + "/ip4/127.0.0.1/udp/44874/quic-v1/webtransport/p2p/\ + 12D3KooWR7EfNv5SLtgjMRjUwR8AvNu3hP4fLrtSa9fmHHXKYWNG", + ) + .unwrap(); let endpoint = Endpoint::from_multiaddr(&addr).unwrap(); assert_eq!(endpoint.host, "127.0.0.1"); @@ -201,7 +215,13 @@ mod tests { #[test] fn ipv6_webtransport() { - let addr = Multiaddr::from_str("/ip6/::1/udp/44874/quic-v1/webtransport/certhash/uEiCaDd1Ca1A8IVJ3hsIxIyi11cwxaDKqzVrBkGJbKZU5ng/certhash/uEiDv-VGW8oXxui_G_Kqp-87YjvET-Hr2qYAMYPePJDcsjQ/p2p/12D3KooWR7EfNv5SLtgjMRjUwR8AvNu3hP4fLrtSa9fmHHXKYWNG").unwrap(); + let addr = Multiaddr::from_str( + "/ip6/::1/udp/44874/quic-v1/webtransport/certhash/\ + uEiCaDd1Ca1A8IVJ3hsIxIyi11cwxaDKqzVrBkGJbKZU5ng/certhash/\ + 
uEiDv-VGW8oXxui_G_Kqp-87YjvET-Hr2qYAMYPePJDcsjQ/p2p/\ + 12D3KooWR7EfNv5SLtgjMRjUwR8AvNu3hP4fLrtSa9fmHHXKYWNG", + ) + .unwrap(); let endpoint = Endpoint::from_multiaddr(&addr).unwrap(); assert_eq!(endpoint.host, "::1"); @@ -214,7 +234,13 @@ mod tests { #[test] fn dns_webtransport() { - let addr = Multiaddr::from_str("/dns/libp2p.io/udp/44874/quic-v1/webtransport/certhash/uEiCaDd1Ca1A8IVJ3hsIxIyi11cwxaDKqzVrBkGJbKZU5ng/certhash/uEiDv-VGW8oXxui_G_Kqp-87YjvET-Hr2qYAMYPePJDcsjQ/p2p/12D3KooWR7EfNv5SLtgjMRjUwR8AvNu3hP4fLrtSa9fmHHXKYWNG").unwrap(); + let addr = Multiaddr::from_str( + "/dns/libp2p.io/udp/44874/quic-v1/webtransport/certhash/\ + uEiCaDd1Ca1A8IVJ3hsIxIyi11cwxaDKqzVrBkGJbKZU5ng/certhash/\ + uEiDv-VGW8oXxui_G_Kqp-87YjvET-Hr2qYAMYPePJDcsjQ/p2p/\ + 12D3KooWR7EfNv5SLtgjMRjUwR8AvNu3hP4fLrtSa9fmHHXKYWNG", + ) + .unwrap(); let endpoint = Endpoint::from_multiaddr(&addr).unwrap(); assert_eq!(endpoint.host, "libp2p.io"); diff --git a/transports/webtransport-websys/src/fused_js_promise.rs b/transports/webtransport-websys/src/fused_js_promise.rs index 0ba846501c2..d3d3858a553 100644 --- a/transports/webtransport-websys/src/fused_js_promise.rs +++ b/transports/webtransport-websys/src/fused_js_promise.rs @@ -1,8 +1,11 @@ +use std::{ + future::Future, + pin::Pin, + task::{ready, Context, Poll}, +}; + use futures::FutureExt; use js_sys::Promise; -use std::future::Future; -use std::pin::Pin; -use std::task::{ready, Context, Poll}; use wasm_bindgen::JsValue; use wasm_bindgen_futures::JsFuture; diff --git a/transports/webtransport-websys/src/lib.rs b/transports/webtransport-websys/src/lib.rs index f9c59694fa3..94c2ef54020 100644 --- a/transports/webtransport-websys/src/lib.rs +++ b/transports/webtransport-websys/src/lib.rs @@ -9,7 +9,9 @@ mod stream; mod transport; mod utils; -pub use self::connection::Connection; -pub use self::error::Error; -pub use self::stream::Stream; -pub use self::transport::{Config, Transport}; +pub use self::{ + connection::Connection, + error::Error, + 
stream::Stream, + transport::{Config, Transport}, +}; diff --git a/transports/webtransport-websys/src/stream.rs b/transports/webtransport-websys/src/stream.rs index ba4238ac814..b9d1669b6dc 100644 --- a/transports/webtransport-websys/src/stream.rs +++ b/transports/webtransport-websys/src/stream.rs @@ -1,16 +1,20 @@ +use std::{ + io, + pin::Pin, + task::{ready, Context, Poll}, +}; + use futures::{AsyncRead, AsyncWrite, FutureExt}; use js_sys::Uint8Array; use send_wrapper::SendWrapper; -use std::io; -use std::pin::Pin; -use std::task::ready; -use std::task::{Context, Poll}; use web_sys::{ReadableStreamDefaultReader, WritableStreamDefaultWriter}; -use crate::bindings::WebTransportBidirectionalStream; -use crate::fused_js_promise::FusedJsPromise; -use crate::utils::{detach_promise, parse_reader_response, to_io_error, to_js_type}; -use crate::Error; +use crate::{ + bindings::WebTransportBidirectionalStream, + fused_js_promise::FusedJsPromise, + utils::{detach_promise, parse_reader_response, to_io_error, to_js_type}, + Error, +}; /// A stream on a connection. 
#[derive(Debug)] diff --git a/transports/webtransport-websys/src/transport.rs b/transports/webtransport-websys/src/transport.rs index 6a9a9dad954..ecdd0bd179d 100644 --- a/transports/webtransport-websys/src/transport.rs +++ b/transports/webtransport-websys/src/transport.rs @@ -1,24 +1,26 @@ +use std::{ + future::Future, + pin::Pin, + task::{Context, Poll}, +}; + use futures::future::FutureExt; -use libp2p_core::muxing::StreamMuxerBox; -use libp2p_core::transport::{ - Boxed, DialOpts, ListenerId, Transport as _, TransportError, TransportEvent, +use libp2p_core::{ + muxing::StreamMuxerBox, + transport::{Boxed, DialOpts, ListenerId, Transport as _, TransportError, TransportEvent}, }; use libp2p_identity::{Keypair, PeerId}; use multiaddr::Multiaddr; -use std::future::Future; -use std::pin::Pin; -use std::task::{Context, Poll}; -use crate::endpoint::Endpoint; -use crate::Connection; -use crate::Error; +use crate::{endpoint::Endpoint, Connection, Error}; /// Config for the [`Transport`]. pub struct Config { keypair: Keypair, } -/// A WebTransport [`Transport`](libp2p_core::Transport) that works with `web-sys`. +/// A WebTransport [`Transport`](libp2p_core::Transport) that works with +/// `web-sys`. pub struct Transport { config: Config, } diff --git a/transports/webtransport-websys/src/utils.rs b/transports/webtransport-websys/src/utils.rs index 0b3550e5b5b..df59ee15161 100644 --- a/transports/webtransport-websys/src/utils.rs +++ b/transports/webtransport-websys/src/utils.rs @@ -1,7 +1,8 @@ +use std::io; + use js_sys::{Promise, Reflect}; use once_cell::sync::Lazy; use send_wrapper::SendWrapper; -use std::io; use wasm_bindgen::{JsCast, JsValue}; use crate::Error; @@ -17,7 +18,6 @@ static DO_NOTHING: Lazy> = Lazy::new(|| { /// A promise always runs in the background, however if you don't await it, /// or specify a `catch` handler before you drop it, it might cause some side /// effects. This function avoids any side effects. 
-// // Ref: https://github.com/typescript-eslint/typescript-eslint/blob/391a6702c0a9b5b3874a7a27047f2a721f090fb6/packages/eslint-plugin/docs/rules/no-floating-promises.md pub(crate) fn detach_promise(promise: Promise) { // Avoid having "floating" promise and ignore any errors. @@ -50,7 +50,6 @@ where } /// Parse response from `ReadableStreamDefaultReader::read`. -// // Ref: https://streams.spec.whatwg.org/#default-reader-prototype pub(crate) fn parse_reader_response(resp: &JsValue) -> Result, JsValue> { let value = Reflect::get(resp, &JsValue::from_str("value"))?; diff --git a/wasm-tests/webtransport-tests/src/lib.rs b/wasm-tests/webtransport-tests/src/lib.rs index 938cdf0b3e1..c0caf16a909 100644 --- a/wasm-tests/webtransport-tests/src/lib.rs +++ b/wasm-tests/webtransport-tests/src/lib.rs @@ -1,15 +1,18 @@ -use futures::channel::oneshot; -use futures::{AsyncReadExt, AsyncWriteExt}; +use std::{future::poll_fn, pin::Pin}; + +use futures::{channel::oneshot, AsyncReadExt, AsyncWriteExt}; use getrandom::getrandom; -use libp2p_core::transport::{DialOpts, PortUse}; -use libp2p_core::{Endpoint, StreamMuxer, Transport as _}; +use libp2p_core::{ + transport::{DialOpts, PortUse}, + Endpoint, + StreamMuxer, + Transport as _, +}; use libp2p_identity::{Keypair, PeerId}; use libp2p_noise as noise; use libp2p_webtransport_websys::{Config, Connection, Error, Stream, Transport}; use multiaddr::{Multiaddr, Protocol}; use multihash::Multihash; -use std::future::poll_fn; -use std::pin::Pin; use wasm_bindgen::JsCast; use wasm_bindgen_futures::{spawn_local, JsFuture}; use wasm_bindgen_test::{wasm_bindgen_test, wasm_bindgen_test_configure};