From 7d5e6068681c35c925af4681255ec835a7f306ae Mon Sep 17 00:00:00 2001 From: drHuangMHT Date: Mon, 13 Mar 2023 19:06:40 +0800 Subject: [PATCH 01/83] feat: make mdns::Event clone-able --- protocols/mdns/src/behaviour.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/protocols/mdns/src/behaviour.rs b/protocols/mdns/src/behaviour.rs index b34a1a73629..f2709e90bdf 100644 --- a/protocols/mdns/src/behaviour.rs +++ b/protocols/mdns/src/behaviour.rs @@ -339,7 +339,7 @@ where } /// Event that can be produced by the `Mdns` behaviour. -#[derive(Debug)] +#[derive(Debug, Clone)] pub enum Event { /// Discovered nodes through mDNS. Discovered(DiscoveredAddrsIter), @@ -352,6 +352,7 @@ pub enum Event { } /// Iterator that produces the list of addresses that have been discovered. +#[derive(Clone)] pub struct DiscoveredAddrsIter { inner: smallvec::IntoIter<[(PeerId, Multiaddr); 4]>, } @@ -379,6 +380,7 @@ impl fmt::Debug for DiscoveredAddrsIter { } /// Iterator that produces the list of addresses that have expired. +#[derive(Clone)] pub struct ExpiredAddrsIter { inner: smallvec::IntoIter<[(PeerId, Multiaddr); 4]>, } From a72e5e2f1edf92324268b837a63fd5b9f00e5373 Mon Sep 17 00:00:00 2001 From: drHuangMHT Date: Mon, 13 Mar 2023 20:23:28 +0800 Subject: [PATCH 02/83] docs: filling CHANGELOG for PR 3606 Unreleased patch version 0.43.1 bumped. Added entry for PR 3606: Deriving 'Clone' for 'mdns::Event' --- protocols/mdns/CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/protocols/mdns/CHANGELOG.md b/protocols/mdns/CHANGELOG.md index c2b548e3389..62a361a0608 100644 --- a/protocols/mdns/CHANGELOG.md +++ b/protocols/mdns/CHANGELOG.md @@ -1,3 +1,7 @@ +# 0.43.1 [unreleased] + +- Deriving `Clone` for `mdns::Event`. See [PR 3606]. + # 0.43.0 - Update to `libp2p-core` `v0.39.0`. 
From 926c7ca99cd5c2de45d6610da45a41ab418ebe3b Mon Sep 17 00:00:00 2001 From: Thomas Eizinger Date: Tue, 14 Mar 2023 03:17:21 +1100 Subject: [PATCH 03/83] Update protocols/mdns/CHANGELOG.md --- protocols/mdns/CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/protocols/mdns/CHANGELOG.md b/protocols/mdns/CHANGELOG.md index 62a361a0608..c877ca48ccb 100644 --- a/protocols/mdns/CHANGELOG.md +++ b/protocols/mdns/CHANGELOG.md @@ -2,6 +2,7 @@ - Deriving `Clone` for `mdns::Event`. See [PR 3606]. +[PR 3606]: https://github.com/libp2p/rust-libp2p/pull/3606 # 0.43.0 - Update to `libp2p-core` `v0.39.0`. From 49d26c4795a45471fc67af3e6bca6477faf86244 Mon Sep 17 00:00:00 2001 From: Thomas Eizinger Date: Tue, 14 Mar 2023 03:17:36 +1100 Subject: [PATCH 04/83] Update protocols/mdns/CHANGELOG.md --- protocols/mdns/CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/protocols/mdns/CHANGELOG.md b/protocols/mdns/CHANGELOG.md index c877ca48ccb..0c3b88c018c 100644 --- a/protocols/mdns/CHANGELOG.md +++ b/protocols/mdns/CHANGELOG.md @@ -1,6 +1,6 @@ # 0.43.1 [unreleased] -- Deriving `Clone` for `mdns::Event`. See [PR 3606]. +- Derive `Clone` for `mdns::Event`. See [PR 3606]. 
[PR 3606]: https://github.com/libp2p/rust-libp2p/pull/3606 # 0.43.0 From a8ff1eb46a336807fb06805a88ccbbde335591f3 Mon Sep 17 00:00:00 2001 From: drHuangMHT Date: Thu, 16 Mar 2023 10:46:39 +0800 Subject: [PATCH 05/83] feat: change 'mdns::Event' to hold 'SmallVec' --- protocols/mdns/src/behaviour.rs | 68 ++------------------------------- 1 file changed, 4 insertions(+), 64 deletions(-) diff --git a/protocols/mdns/src/behaviour.rs b/protocols/mdns/src/behaviour.rs index f2709e90bdf..6f590c49882 100644 --- a/protocols/mdns/src/behaviour.rs +++ b/protocols/mdns/src/behaviour.rs @@ -304,9 +304,7 @@ where } } if !discovered.is_empty() { - let event = Event::Discovered(DiscoveredAddrsIter { - inner: discovered.into_iter(), - }); + let event = Event::Discovered(discovered); return Poll::Ready(NetworkBehaviourAction::GenerateEvent(event)); } // Emit expired event. @@ -323,9 +321,7 @@ where true }); if !expired.is_empty() { - let event = Event::Expired(ExpiredAddrsIter { - inner: expired.into_iter(), - }); + let event = Event::Expired(expired); return Poll::Ready(NetworkBehaviourAction::GenerateEvent(event)); } if let Some(closest_expiration) = closest_expiration { @@ -342,67 +338,11 @@ where #[derive(Debug, Clone)] pub enum Event { /// Discovered nodes through mDNS. - Discovered(DiscoveredAddrsIter), + Discovered(SmallVec<[(PeerId, Multiaddr); 4]>), /// The given combinations of `PeerId` and `Multiaddr` have expired. /// /// Each discovered record has a time-to-live. When this TTL expires and the address hasn't /// been refreshed, we remove it from the list and emit it as an `Expired` event. - Expired(ExpiredAddrsIter), -} - -/// Iterator that produces the list of addresses that have been discovered. 
-#[derive(Clone)] -pub struct DiscoveredAddrsIter { - inner: smallvec::IntoIter<[(PeerId, Multiaddr); 4]>, -} - -impl Iterator for DiscoveredAddrsIter { - type Item = (PeerId, Multiaddr); - - #[inline] - fn next(&mut self) -> Option { - self.inner.next() - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - self.inner.size_hint() - } -} - -impl ExactSizeIterator for DiscoveredAddrsIter {} - -impl fmt::Debug for DiscoveredAddrsIter { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("DiscoveredAddrsIter").finish() - } -} - -/// Iterator that produces the list of addresses that have expired. -#[derive(Clone)] -pub struct ExpiredAddrsIter { - inner: smallvec::IntoIter<[(PeerId, Multiaddr); 4]>, -} - -impl Iterator for ExpiredAddrsIter { - type Item = (PeerId, Multiaddr); - - #[inline] - fn next(&mut self) -> Option { - self.inner.next() - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - self.inner.size_hint() - } -} - -impl ExactSizeIterator for ExpiredAddrsIter {} - -impl fmt::Debug for ExpiredAddrsIter { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("ExpiredAddrsIter").finish() - } + Expired(SmallVec<[(PeerId, Multiaddr); 4]>), } From 4cec5fac84941c95cb50a35a3dc7395e2f71bf3c Mon Sep 17 00:00:00 2001 From: drHuangMHT Date: Thu, 16 Mar 2023 10:48:05 +0800 Subject: [PATCH 06/83] test: adapting test to 'mdns::Event' change --- protocols/mdns/tests/use-async-std.rs | 20 ++++++++++---------- protocols/mdns/tests/use-tokio.rs | 16 ++++++++-------- 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/protocols/mdns/tests/use-async-std.rs b/protocols/mdns/tests/use-async-std.rs index 139fcca1d50..bfc3cd1201d 100644 --- a/protocols/mdns/tests/use-async-std.rs +++ b/protocols/mdns/tests/use-async-std.rs @@ -61,13 +61,13 @@ async fn test_expired_async_std() { loop { match futures::future::select(a.next_behaviour_event(), b.next_behaviour_event()).await { - 
Either::Left((Event::Expired(mut peers), _)) => { - if peers.any(|(p, _)| p == b_peer_id) { + Either::Left((Event::Expired(peers), _)) => { + if peers.into_iter().any(|(p, _)| p == b_peer_id) { return; } } - Either::Right((Event::Expired(mut peers), _)) => { - if peers.any(|(p, _)| p == a_peer_id) { + Either::Right((Event::Expired(peers), _)) => { + if peers.into_iter().any(|(p, _)| p == a_peer_id) { return; } } @@ -93,8 +93,8 @@ async fn test_no_expiration_on_close_async_std() { // 1. Connect via address from mDNS event loop { - if let Event::Discovered(mut peers) = a.next_behaviour_event().await { - if let Some((_, addr)) = peers.find(|(p, _)| p == &b_peer_id) { + if let Event::Discovered(peers) = a.next_behaviour_event().await { + if let Some((_, addr)) = peers.into_iter().find(|(p, _)| p == &b_peer_id) { a.dial_and_wait(addr).await; break; } @@ -130,13 +130,13 @@ async fn run_discovery_test(config: Config) { while !discovered_a && !discovered_b { match futures::future::select(a.next_behaviour_event(), b.next_behaviour_event()).await { - Either::Left((Event::Discovered(mut peers), _)) => { - if peers.any(|(p, _)| p == b_peer_id) { + Either::Left((Event::Discovered(peers), _)) => { + if peers.into_iter().any(|(p, _)| p == b_peer_id) { discovered_b = true; } } - Either::Right((Event::Discovered(mut peers), _)) => { - if peers.any(|(p, _)| p == a_peer_id) { + Either::Right((Event::Discovered(peers), _)) => { + if peers.into_iter().any(|(p, _)| p == a_peer_id) { discovered_a = true; } } diff --git a/protocols/mdns/tests/use-tokio.rs b/protocols/mdns/tests/use-tokio.rs index e18ae28fee7..229418437f4 100644 --- a/protocols/mdns/tests/use-tokio.rs +++ b/protocols/mdns/tests/use-tokio.rs @@ -59,13 +59,13 @@ async fn test_expired_tokio() { loop { match futures::future::select(a.next_behaviour_event(), b.next_behaviour_event()).await { - Either::Left((Event::Expired(mut peers), _)) => { - if peers.any(|(p, _)| p == b_peer_id) { + Either::Left((Event::Expired(peers), _)) 
=> { + if peers.into_iter().any(|(p, _)| p == b_peer_id) { return; } } - Either::Right((Event::Expired(mut peers), _)) => { - if peers.any(|(p, _)| p == a_peer_id) { + Either::Right((Event::Expired(peers), _)) => { + if peers.into_iter().any(|(p, _)| p == a_peer_id) { return; } } @@ -86,13 +86,13 @@ async fn run_discovery_test(config: Config) { while !discovered_a && !discovered_b { match futures::future::select(a.next_behaviour_event(), b.next_behaviour_event()).await { - Either::Left((Event::Discovered(mut peers), _)) => { - if peers.any(|(p, _)| p == b_peer_id) { + Either::Left((Event::Discovered(peers), _)) => { + if peers.into_iter().any(|(p, _)| p == b_peer_id) { discovered_b = true; } } - Either::Right((Event::Discovered(mut peers), _)) => { - if peers.any(|(p, _)| p == a_peer_id) { + Either::Right((Event::Discovered(peers), _)) => { + if peers.into_iter().any(|(p, _)| p == a_peer_id) { discovered_a = true; } } From 18811d24ae748ca703f51ef66bdcb3961402965c Mon Sep 17 00:00:00 2001 From: drHuangMHT Date: Thu, 16 Mar 2023 17:30:41 +0800 Subject: [PATCH 07/83] feat(mdns): change 'mdns::Event' to hold 'Vec' --- protocols/mdns/src/behaviour.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/protocols/mdns/src/behaviour.rs b/protocols/mdns/src/behaviour.rs index 6f590c49882..f175a94aa06 100644 --- a/protocols/mdns/src/behaviour.rs +++ b/protocols/mdns/src/behaviour.rs @@ -285,7 +285,7 @@ where } } // Emit discovered event. - let mut discovered = SmallVec::<[(PeerId, Multiaddr); 4]>::new(); + let mut discovered = Vec::new(); for iface_state in self.iface_states.values_mut() { while let Poll::Ready((peer, addr, expiration)) = iface_state.poll(cx, &self.listen_addresses) @@ -310,7 +310,7 @@ where // Emit expired event. 
let now = Instant::now(); let mut closest_expiration = None; - let mut expired = SmallVec::<[(PeerId, Multiaddr); 4]>::new(); + let mut expired = Vec::new(); self.discovered_nodes.retain(|(peer, addr, expiration)| { if *expiration <= now { log::info!("expired: {} {}", peer, addr); @@ -338,11 +338,11 @@ where #[derive(Debug, Clone)] pub enum Event { /// Discovered nodes through mDNS. - Discovered(SmallVec<[(PeerId, Multiaddr); 4]>), + Discovered(Vec<(PeerId, Multiaddr)>), /// The given combinations of `PeerId` and `Multiaddr` have expired. /// /// Each discovered record has a time-to-live. When this TTL expires and the address hasn't /// been refreshed, we remove it from the list and emit it as an `Expired` event. - Expired(SmallVec<[(PeerId, Multiaddr); 4]>), + Expired(Vec<(PeerId, Multiaddr)>), } From abdce7ecbb16256bcad5118fd0d05ce644f01d5f Mon Sep 17 00:00:00 2001 From: drHuangMHT Date: Thu, 16 Mar 2023 17:39:50 +0800 Subject: [PATCH 08/83] doc: add entry for #3621 New unreleased minor version 0.44.0. --- protocols/mdns/CHANGELOG.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/protocols/mdns/CHANGELOG.md b/protocols/mdns/CHANGELOG.md index 0c3b88c018c..6225dc376e5 100644 --- a/protocols/mdns/CHANGELOG.md +++ b/protocols/mdns/CHANGELOG.md @@ -1,3 +1,9 @@ +# 0.44.0 [unreleased] + +- Change `mdns::Event` to hold `Vec`. See [PR 3621] + +[PR 3621]: https://github.com/libp2p/rust-libp2p/pull/3621 + # 0.43.1 [unreleased] - Derive `Clone` for `mdns::Event`. See [PR 3606]. 
From d5c246cc265e703d4f8af76f8c1de370a3f8c821 Mon Sep 17 00:00:00 2001 From: DrHuangMHT Date: Thu, 16 Mar 2023 22:05:01 +0800 Subject: [PATCH 09/83] Update protocols/mdns/CHANGELOG.md Co-authored-by: Thomas Eizinger --- protocols/mdns/CHANGELOG.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/protocols/mdns/CHANGELOG.md b/protocols/mdns/CHANGELOG.md index 6225dc376e5..04a8162e065 100644 --- a/protocols/mdns/CHANGELOG.md +++ b/protocols/mdns/CHANGELOG.md @@ -1,6 +1,7 @@ # 0.44.0 [unreleased] -- Change `mdns::Event` to hold `Vec`. See [PR 3621] +- Change `mdns::Event` to hold `Vec` and remove `DiscoveredAddrsIter` and `ExpiredAddrsIter`. + See [PR 3621]. [PR 3621]: https://github.com/libp2p/rust-libp2p/pull/3621 From 9d45cf3d8c91221333f91bee67b675e0b043c623 Mon Sep 17 00:00:00 2001 From: drHuangMHT Date: Sun, 26 Mar 2023 19:36:50 +0800 Subject: [PATCH 10/83] Remove conflict marker to resolve conflicts --- protocols/mdns/src/behaviour.rs | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/protocols/mdns/src/behaviour.rs b/protocols/mdns/src/behaviour.rs index 56b9f9af144..5186ce91cb7 100644 --- a/protocols/mdns/src/behaviour.rs +++ b/protocols/mdns/src/behaviour.rs @@ -304,15 +304,8 @@ where } } if !discovered.is_empty() { -<<<<<<< HEAD let event = Event::Discovered(discovered); - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(event)); -======= - let event = Event::Discovered(DiscoveredAddrsIter { - inner: discovered.into_iter(), - }); return Poll::Ready(ToSwarm::GenerateEvent(event)); ->>>>>>> upstream/master } // Emit expired event. 
let now = Instant::now(); @@ -328,15 +321,8 @@ where true }); if !expired.is_empty() { -<<<<<<< HEAD let event = Event::Expired(expired); - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(event)); -======= - let event = Event::Expired(ExpiredAddrsIter { - inner: expired.into_iter(), - }); return Poll::Ready(ToSwarm::GenerateEvent(event)); ->>>>>>> upstream/master } if let Some(closest_expiration) = closest_expiration { let mut timer = P::Timer::at(closest_expiration); From 947b368a5c8bbf6e7f68e383f32254a441316bdb Mon Sep 17 00:00:00 2001 From: drHuangMHT Date: Mon, 27 Mar 2023 11:26:31 +0800 Subject: [PATCH 11/83] add 'try' to method returning 'Result' --- identity/src/ecdsa.rs | 26 +++- identity/src/ed25519.rs | 16 +- identity/src/error.rs | 26 +--- identity/src/keypair.rs | 308 ++++++++++++++++++++++---------------- identity/src/lib.rs | 2 + identity/src/rsa.rs | 92 +++++++++--- identity/src/secp256k1.rs | 19 ++- 7 files changed, 293 insertions(+), 196 deletions(-) diff --git a/identity/src/ecdsa.rs b/identity/src/ecdsa.rs index 27063a2c57f..a3fecd12fce 100644 --- a/identity/src/ecdsa.rs +++ b/identity/src/ecdsa.rs @@ -34,7 +34,7 @@ use p256::{ use void::Void; /// An ECDSA keypair. -#[derive(Clone)] +#[derive(Clone, PartialEq, Eq)] pub struct Keypair { secret: SecretKey, public: PublicKey, @@ -60,6 +60,11 @@ impl Keypair { pub fn secret(&self) -> &SecretKey { &self.secret } + + pub fn try_from_bytes(pk:impl AsRef<[u8]>) -> Result{ + let secret_key = SecretKey::try_from_bytes(pk)?; + Ok(secret_key.into()) + } } impl fmt::Debug for Keypair { @@ -86,7 +91,7 @@ impl From for SecretKey { } /// An ECDSA secret key. -#[derive(Clone)] +#[derive(Clone, PartialEq, Eq)] pub struct SecretKey(SigningKey); impl SecretKey { @@ -108,8 +113,8 @@ impl SecretKey { } /// Decode a secret key from a byte buffer. 
- pub fn from_bytes(buf: &[u8]) -> Result { - SigningKey::from_bytes(buf) + pub fn try_from_bytes(buf: impl AsRef<[u8]>) -> Result { + SigningKey::from_bytes(buf.as_ref()) .map_err(|err| DecodingError::failed_to_parse("ecdsa p256 secret key", err)) .map(SecretKey) } @@ -136,7 +141,7 @@ impl PublicKey { } /// Decode a public key from a byte buffer without compression. - pub fn from_bytes(k: &[u8]) -> Result { + pub fn try_from_bytes(k: &[u8]) -> Result { let enc_pt = EncodedPoint::from_bytes(k) .map_err(|e| DecodingError::failed_to_parse("ecdsa p256 encoded point", e))?; @@ -157,11 +162,11 @@ impl PublicKey { } /// Decode a public key into a DER encoded byte buffer as defined by SEC1 standard. - pub fn decode_der(k: &[u8]) -> Result { + pub fn try_decode_der(k: &[u8]) -> Result { let buf = Self::del_asn1_header(k).ok_or_else(|| { DecodingError::failed_to_parse::("ASN.1-encoded ecdsa p256 public key", None) })?; - Self::from_bytes(buf) + Self::try_from_bytes(buf) } // ecPublicKey (ANSI X9.62 public key type) OID: 1.2.840.10045.2.1 @@ -258,4 +263,11 @@ mod tests { let invalid_msg = "h3ll0 w0rld".as_bytes(); assert!(!pk.verify(invalid_msg, &sig)); } + + #[test] + fn serialize_deserialize(){ + let pair = Keypair::generate(); + let bytes_secret = pair.secret().to_bytes(); + assert_eq!(Keypair::try_from_bytes(bytes_secret).unwrap(),pair) + } } diff --git a/identity/src/ed25519.rs b/identity/src/ed25519.rs index 64c54a2f9f8..bff31a61c1a 100644 --- a/identity/src/ed25519.rs +++ b/identity/src/ed25519.rs @@ -49,7 +49,7 @@ impl Keypair { /// produced by [`Keypair::encode`], zeroing the input on success. /// /// Note that this binary format is the same as `ed25519_dalek`'s and `ed25519_zebra`'s. - pub fn decode(kp: &mut [u8]) -> Result { + pub fn try_decode(kp: &mut [u8]) -> Result { ed25519::Keypair::from_bytes(kp) .map(|k| { kp.zeroize(); @@ -70,7 +70,7 @@ impl Keypair { /// Get the secret key of this keypair. 
pub fn secret(&self) -> SecretKey { - SecretKey::from_bytes(&mut self.0.secret.to_bytes()) + SecretKey::try_from_bytes(&mut self.0.secret.to_bytes()) .expect("ed25519::SecretKey::from_bytes(to_bytes(k)) != k") } } @@ -86,7 +86,7 @@ impl fmt::Debug for Keypair { impl Clone for Keypair { fn clone(&self) -> Keypair { let mut sk_bytes = self.0.secret.to_bytes(); - let secret = SecretKey::from_bytes(&mut sk_bytes) + let secret = SecretKey::try_from_bytes(&mut sk_bytes) .expect("ed25519::SecretKey::from_bytes(to_bytes(k)) != k") .0; let public = ed25519::PublicKey::from_bytes(&self.0.public.to_bytes()) @@ -167,7 +167,7 @@ impl PublicKey { } /// Decode a public key from a byte array as produced by `encode`. - pub fn decode(k: &[u8]) -> Result { + pub fn try_decode(k: &[u8]) -> Result { ed25519::PublicKey::from_bytes(k) .map_err(|e| DecodingError::failed_to_parse("Ed25519 public key", e)) .map(PublicKey) @@ -187,7 +187,7 @@ impl AsRef<[u8]> for SecretKey { impl Clone for SecretKey { fn clone(&self) -> SecretKey { let mut sk_bytes = self.0.to_bytes(); - Self::from_bytes(&mut sk_bytes).expect("ed25519::SecretKey::from_bytes(to_bytes(k)) != k") + Self::try_from_bytes(&mut sk_bytes).expect("ed25519::SecretKey::from_bytes(to_bytes(k)) != k") } } @@ -212,7 +212,7 @@ impl SecretKey { /// Create an Ed25519 secret key from a byte slice, zeroing the input on success. /// If the bytes do not constitute a valid Ed25519 secret key, an error is /// returned. 
- pub fn from_bytes(mut sk_bytes: impl AsMut<[u8]>) -> Result { + pub fn try_from_bytes(mut sk_bytes: impl AsMut<[u8]>) -> Result { let sk_bytes = sk_bytes.as_mut(); let secret = ed25519::SecretKey::from_bytes(&*sk_bytes) .map_err(|e| DecodingError::failed_to_parse("Ed25519 secret key", e))?; @@ -235,7 +235,7 @@ mod tests { fn prop() -> bool { let kp1 = Keypair::generate(); let mut kp1_enc = kp1.encode(); - let kp2 = Keypair::decode(&mut kp1_enc).unwrap(); + let kp2 = Keypair::try_decode(&mut kp1_enc).unwrap(); eq_keypairs(&kp1, &kp2) && kp1_enc.iter().all(|b| *b == 0) } QuickCheck::new().tests(10).quickcheck(prop as fn() -> _); @@ -246,7 +246,7 @@ mod tests { fn prop() -> bool { let kp1 = Keypair::generate(); let mut sk = kp1.0.secret.to_bytes(); - let kp2 = Keypair::from(SecretKey::from_bytes(&mut sk).unwrap()); + let kp2 = Keypair::from(SecretKey::try_from_bytes(&mut sk).unwrap()); eq_keypairs(&kp1, &kp2) && sk == [0u8; 32] } QuickCheck::new().tests(10).quickcheck(prop as fn() -> _); diff --git a/identity/src/error.rs b/identity/src/error.rs index aa12fa9d7dc..b9f47646830 100644 --- a/identity/src/error.rs +++ b/identity/src/error.rs @@ -31,13 +31,6 @@ pub struct DecodingError { } impl DecodingError { - #[cfg(not(all( - feature = "ecdsa", - feature = "rsa", - feature = "secp256k1", - feature = "ed25519", - not(target_arch = "wasm32") - )))] pub(crate) fn missing_feature(feature_name: &'static str) -> Self { Self { msg: format!("cargo feature `{feature_name}` is not enabled"), @@ -81,6 +74,7 @@ impl DecodingError { } } + #[allow(unused)] pub(crate) fn decoding_unsupported(key_type: &'static str) -> Self { Self { msg: format!("decoding {key_type} key from Protobuf is unsupported"), @@ -88,11 +82,7 @@ impl DecodingError { } } - #[cfg(any( - all(feature = "rsa", not(target_arch = "wasm32")), - feature = "secp256k1", - feature = "ecdsa" - ))] + #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] pub(crate) fn encoding_unsupported(key_type: &'static str) -> Self 
{ Self { msg: format!("encoding {key_type} key to Protobuf is unsupported"), @@ -123,18 +113,10 @@ pub struct SigningError { /// An error during encoding of key material. impl SigningError { #[cfg(any(feature = "secp256k1", feature = "rsa"))] - pub(crate) fn new(msg: S) -> Self { + pub(crate) fn new(msg: S, source: Option>) -> Self { Self { msg: msg.to_string(), - source: None, - } - } - - #[cfg(feature = "rsa")] - pub(crate) fn source(self, source: impl Error + Send + Sync + 'static) -> Self { - Self { - source: Some(Box::new(source)), - ..self + source, } } } diff --git a/identity/src/keypair.rs b/identity/src/keypair.rs index e7e89ca5bd0..bad16f45251 100644 --- a/identity/src/keypair.rs +++ b/identity/src/keypair.rs @@ -19,7 +19,7 @@ // DEALINGS IN THE SOFTWARE. use crate::error::{DecodingError, SigningError}; -use crate::proto; +use crate::{proto, KeyType}; use quick_protobuf::{BytesReader, Writer}; use std::convert::TryFrom; @@ -59,28 +59,28 @@ pub enum Keypair { #[cfg(feature = "ed25519")] #[deprecated( since = "0.1.0", - note = "This enum will be made opaque in the future, use `Keypair::into_ed25519` instead." + note = "This enum will be made opaque in the future, use `Keypair::try_into::()` instead." )] Ed25519(ed25519::Keypair), /// An RSA keypair. #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] #[deprecated( since = "0.1.0", - note = "This enum will be made opaque in the future, use `Keypair::into_rsa` instead." + note = "This enum will be made opaque in the future, use `Keypair::try_into::()` instead." )] Rsa(rsa::Keypair), /// A Secp256k1 keypair. #[cfg(feature = "secp256k1")] #[deprecated( since = "0.1.0", - note = "This enum will be made opaque in the future, use `Keypair::into_secp256k1` instead." + note = "This enum will be made opaque in the future, use `Keypair::try_into::()` instead." )] Secp256k1(secp256k1::Keypair), /// An ECDSA keypair. 
#[cfg(feature = "ecdsa")] #[deprecated( since = "0.1.0", - note = "This enum will be made opaque in the future, use `Keypair::into_ecdsa` instead." + note = "This enum will be made opaque in the future, use `Keypair::try_into::()` instead." )] Ecdsa(ecdsa::Keypair), } @@ -107,42 +107,6 @@ impl Keypair { Keypair::Ecdsa(ecdsa::Keypair::generate()) } - #[cfg(feature = "ed25519")] - pub fn into_ed25519(self) -> Option { - #[allow(deprecated)] - match self { - Keypair::Ed25519(inner) => Some(inner), - _ => None, - } - } - - #[cfg(feature = "secp256k1")] - pub fn into_secp256k1(self) -> Option { - #[allow(deprecated)] - match self { - Keypair::Secp256k1(inner) => Some(inner), - _ => None, - } - } - - #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] - pub fn into_rsa(self) -> Option { - #[allow(deprecated)] - match self { - Keypair::Rsa(inner) => Some(inner), - _ => None, - } - } - - #[cfg(feature = "ecdsa")] - pub fn into_ecdsa(self) -> Option { - #[allow(deprecated)] - match self { - Keypair::Ecdsa(inner) => Some(inner), - _ => None, - } - } - /// Decode an keypair from a DER-encoded secret key in PKCS#8 PrivateKeyInfo /// format (i.e. unencrypted) as defined in [RFC5208]. 
/// @@ -150,7 +114,7 @@ impl Keypair { #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] pub fn rsa_from_pkcs8(pkcs8_der: &mut [u8]) -> Result { #[allow(deprecated)] - rsa::Keypair::from_pkcs8(pkcs8_der).map(Keypair::Rsa) + rsa::Keypair::try_decode(pkcs8_der).map(Keypair::Rsa) } /// Decode a keypair from a DER-encoded Secp256k1 secret key in an ECPrivateKey @@ -160,7 +124,7 @@ impl Keypair { #[cfg(feature = "secp256k1")] pub fn secp256k1_from_der(der: &mut [u8]) -> Result { #[allow(deprecated)] - secp256k1::SecretKey::from_der(der) + secp256k1::SecretKey::try_decode_der(der) .map(|sk| Keypair::Secp256k1(secp256k1::Keypair::from(sk))) } @@ -168,7 +132,7 @@ impl Keypair { pub fn ed25519_from_bytes(bytes: impl AsMut<[u8]>) -> Result { #[allow(deprecated)] Ok(Keypair::Ed25519(ed25519::Keypair::from( - ed25519::SecretKey::from_bytes(bytes)?, + ed25519::SecretKey::try_from_bytes(bytes)?, ))) } @@ -213,22 +177,37 @@ impl Keypair { pub fn to_protobuf_encoding(&self) -> Result, DecodingError> { use quick_protobuf::MessageWrite; - #[cfg(not(feature = "ed25519"))] - return Err(DecodingError::missing_feature("ed25519")); - #[allow(deprecated)] let pk: proto::PrivateKey = match self { #[cfg(feature = "ed25519")] - Self::Ed25519(data) => proto::PrivateKey { - Type: proto::KeyType::Ed25519, - Data: data.encode().to_vec(), - }, - #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] - Self::Rsa(_) => return Err(DecodingError::encoding_unsupported("RSA")), - #[cfg(feature = "secp256k1")] - Self::Secp256k1(_) => return Err(DecodingError::encoding_unsupported("secp256k1")), - #[cfg(feature = "ecdsa")] - Self::Ecdsa(_) => return Err(DecodingError::encoding_unsupported("ECDSA")), + Self::Ed25519(data) => { + #[cfg(not(feature = "ed25519"))] + return Err(DecodingError::encoding_unsupported("ed25519")); + proto::PrivateKey { + Type: KeyType::Ed25519, + Data: data.encode().to_vec(), + } + } + Self::Rsa(_) => { + return Err(DecodingError::encoding_unsupported("RSA")); + } + 
Self::Secp256k1(data) => { + #[cfg(not(feature = "secp256k1"))] + return Err(DecodingError::encoding_unsupported("secp256k1")); + proto::PrivateKey{ + Type: KeyType::Secp256k1, + Data: data.secret().encode().into() + } + } + + Self::Ecdsa(data) => { + #[cfg(not(feature = "ecdsa"))] + return Err(DecodingError::encoding_unsupported("ECDSA")); + proto::PrivateKey{ + Type: KeyType::ECDSA, + Data: data.secret().to_bytes() + } + } }; let mut buf = Vec::with_capacity(pk.get_size()); @@ -248,30 +227,34 @@ impl Keypair { .map_err(|e| DecodingError::bad_protobuf("private key bytes", e)) .map(zeroize::Zeroizing::new)?; + #[allow(deprecated,unreachable_code)] match private_key.Type { - #[cfg(feature = "ed25519")] - proto::KeyType::Ed25519 => - { - #[allow(deprecated)] - ed25519::Keypair::decode(&mut private_key.Data).map(Keypair::Ed25519) + proto::KeyType::Ed25519 => { + #[cfg(feature = "ed25519")] + return ed25519::Keypair::try_decode(&mut private_key.Data).map(Keypair::Ed25519); + Err(DecodingError::missing_feature("ed25519")) + } + proto::KeyType::RSA => { + #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] + return rsa::Keypair::try_decode(&mut private_key.Data).map(Keypair::Rsa); + Err(DecodingError::missing_feature("rsa")) + } + proto::KeyType::Secp256k1 => { + #[cfg(feature = "secp256k1")] + return secp256k1::Keypair::try_from_bytes(&mut private_key.Data) + .map(Keypair::Secp256k1); + Err(DecodingError::missing_feature("secp256k1")) + } + proto::KeyType::ECDSA => { + #[cfg(feature = "ecdsa")] + return ecdsa::Keypair::try_from_bytes(&private_key.Data) + .map(Keypair::Ecdsa); + Err(DecodingError::missing_feature("ECDSA")) } - #[cfg(not(feature = "ed25519"))] - proto::KeyType::Ed25519 => Err(DecodingError::missing_feature("ed25519")), - proto::KeyType::RSA => Err(DecodingError::decoding_unsupported("RSA")), - proto::KeyType::Secp256k1 => Err(DecodingError::decoding_unsupported("secp256k1")), - proto::KeyType::ECDSA => 
Err(DecodingError::decoding_unsupported("ECDSA")), } } } -#[cfg(feature = "ecdsa")] -impl From for Keypair { - fn from(kp: ecdsa::Keypair) -> Self { - #[allow(deprecated)] - Keypair::Ecdsa(kp) - } -} - #[cfg(feature = "ed25519")] impl From for Keypair { fn from(kp: ed25519::Keypair) -> Self { @@ -280,6 +263,14 @@ impl From for Keypair { } } +#[cfg(feature = "ecdsa")] +impl From for Keypair { + fn from(kp: ecdsa::Keypair) -> Self { + #[allow(deprecated)] + Keypair::Ecdsa(kp) + } +} + #[cfg(feature = "secp256k1")] impl From for Keypair { fn from(kp: secp256k1::Keypair) -> Self { @@ -296,6 +287,58 @@ impl From for Keypair { } } +#[cfg(feature = "ed25519")] +impl TryInto for Keypair { + type Error = (); + + fn try_into(self) -> Result { + match self { + #[allow(deprecated)] + Keypair::Ed25519(inner) => Ok(inner), + _ => Err(()), + } + } +} + +#[cfg(feature = "ecdsa")] +impl TryInto for Keypair { + type Error = (); + + fn try_into(self) -> Result { + match self { + #[allow(deprecated)] + Keypair::Ecdsa(inner) => Ok(inner), + _ => Err(()), + } + } +} + +#[cfg(feature = "secp256k1")] +impl TryInto for Keypair { + type Error = (); + + fn try_into(self) -> Result { + match self { + #[allow(deprecated)] + Keypair::Secp256k1(inner) => Ok(inner), + _ => Err(()), + } + } +} + +#[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] +impl TryInto for Keypair { + type Error = (); + + fn try_into(self) -> Result { + match self { + #[allow(deprecated)] + Keypair::Rsa(inner) => Ok(inner), + _ => Err(()), + } + } +} + /// The public key of a node's identity keypair. 
#[derive(Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)] pub enum PublicKey { @@ -351,42 +394,6 @@ impl PublicKey { } } - #[cfg(feature = "ed25519")] - pub fn into_ed25519(self) -> Option { - #[allow(deprecated)] - match self { - PublicKey::Ed25519(inner) => Some(inner), - _ => None, - } - } - - #[cfg(feature = "secp256k1")] - pub fn into_secp256k1(self) -> Option { - #[allow(deprecated)] - match self { - PublicKey::Secp256k1(inner) => Some(inner), - _ => None, - } - } - - #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] - pub fn into_rsa(self) -> Option { - #[allow(deprecated)] - match self { - PublicKey::Rsa(inner) => Some(inner), - _ => None, - } - } - - #[cfg(feature = "ecdsa")] - pub fn into_ecdsa(self) -> Option { - #[allow(deprecated)] - match self { - PublicKey::Ecdsa(inner) => Some(inner), - _ => None, - } - } - /// Encode the public key into a protobuf structure for storage or /// exchange with other nodes. pub fn to_protobuf_encoding(&self) -> Vec { @@ -428,38 +435,29 @@ impl TryFrom for PublicKey { fn try_from(pubkey: proto::PublicKey) -> Result { #[allow(deprecated)] + #[allow(unreachable_code)] match pubkey.Type { - #[cfg(feature = "ed25519")] - proto::KeyType::Ed25519 => { - ed25519::PublicKey::decode(&pubkey.Data).map(PublicKey::Ed25519) - } - #[cfg(not(feature = "ed25519"))] proto::KeyType::Ed25519 => { + #[cfg(feature = "ed25519")] + return ed25519::PublicKey::try_decode(&pubkey.Data).map(PublicKey::Ed25519); log::debug!("support for ed25519 was disabled at compile-time"); Err(DecodingError::missing_feature("ed25519")) } - #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] - proto::KeyType::RSA => rsa::PublicKey::decode_x509(&pubkey.Data).map(PublicKey::Rsa), - #[cfg(any(not(feature = "rsa"), target_arch = "wasm32"))] proto::KeyType::RSA => { + #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] + return rsa::PublicKey::try_decode_x509(&pubkey.Data).map(PublicKey::Rsa); log::debug!("support for RSA was disabled at 
compile-time"); Err(DecodingError::missing_feature("rsa")) } - #[cfg(feature = "secp256k1")] - proto::KeyType::Secp256k1 => { - secp256k1::PublicKey::decode(&pubkey.Data).map(PublicKey::Secp256k1) - } - #[cfg(not(feature = "secp256k1"))] proto::KeyType::Secp256k1 => { + #[cfg(feature = "secp256k1")] + return secp256k1::PublicKey::try_decode(&pubkey.Data).map(PublicKey::Secp256k1); log::debug!("support for secp256k1 was disabled at compile-time"); Err(DecodingError::missing_feature("secp256k1")) } - #[cfg(feature = "ecdsa")] - proto::KeyType::ECDSA => { - ecdsa::PublicKey::decode_der(&pubkey.Data).map(PublicKey::Ecdsa) - } - #[cfg(not(feature = "ecdsa"))] proto::KeyType::ECDSA => { + #[cfg(feature = "ecdsa")] + return ecdsa::PublicKey::try_decode_der(&pubkey.Data).map(PublicKey::Ecdsa); log::debug!("support for ECDSA was disabled at compile-time"); Err(DecodingError::missing_feature("ecdsa")) } @@ -467,6 +465,58 @@ impl TryFrom for PublicKey { } } +#[cfg(feature = "ed25519")] +impl TryInto for PublicKey { + type Error = (); + + fn try_into(self) -> Result { + match self { + #[allow(deprecated)] + PublicKey::Ed25519(inner) => Ok(inner), + _ => Err(()), + } + } +} + +#[cfg(feature = "ecdsa")] +impl TryInto for PublicKey { + type Error = (); + + fn try_into(self) -> Result { + match self { + #[allow(deprecated)] + PublicKey::Ecdsa(inner) => Ok(inner), + _ => Err(()), + } + } +} + +#[cfg(feature = "secp256k1")] +impl TryInto for PublicKey { + type Error = (); + + fn try_into(self) -> Result { + match self { + #[allow(deprecated)] + PublicKey::Secp256k1(inner) => Ok(inner), + _ => Err(()), + } + } +} + +#[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] +impl TryInto for PublicKey { + type Error = (); + + fn try_into(self) -> Result { + match self { + #[allow(deprecated)] + PublicKey::Rsa(inner) => Ok(inner), + _ => Err(()), + } + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/identity/src/lib.rs b/identity/src/lib.rs index 1f92b904bfe..54b2f7aaf0d 
100644 --- a/identity/src/lib.rs +++ b/identity/src/lib.rs @@ -119,3 +119,5 @@ pub use error::{DecodingError, SigningError}; pub use keypair::{Keypair, PublicKey}; #[cfg(feature = "peerid")] pub use peer_id::{ParseError, PeerId}; + +pub type KeyType = proto::KeyType; \ No newline at end of file diff --git a/identity/src/rsa.rs b/identity/src/rsa.rs index 12f5d75e7dd..c45c783ef18 100644 --- a/identity/src/rsa.rs +++ b/identity/src/rsa.rs @@ -31,42 +31,88 @@ use zeroize::Zeroize; /// An RSA keypair. #[derive(Clone)] -pub struct Keypair(Arc); +pub struct Keypair { + inner: Arc, + raw_key: Option>, +} impl std::fmt::Debug for Keypair { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { f.debug_struct("Keypair") - .field("public", self.0.public_key()) + .field("public", self.inner.public_key()) .finish() } } impl Keypair { - /// Decode an RSA keypair from a DER-encoded private key in PKCS#8 PrivateKeyInfo - /// format (i.e. unencrypted) as defined in [RFC5208]. - /// - /// [RFC5208]: https://tools.ietf.org/html/rfc5208#section-5 - pub fn from_pkcs8(der: &mut [u8]) -> Result { - let kp = RsaKeyPair::from_pkcs8(der) - .map_err(|e| DecodingError::failed_to_parse("RSA PKCS#8 PrivateKeyInfo", e))?; - der.zeroize(); - Ok(Keypair(Arc::new(kp))) - } - /// Get the public key from the keypair. pub fn public(&self) -> PublicKey { - PublicKey(self.0.public_key().as_ref().to_vec()) + PublicKey(self.inner.public_key().as_ref().to_vec()) } /// Sign a message with this keypair. 
pub fn sign(&self, data: &[u8]) -> Result, SigningError> { - let mut signature = vec![0; self.0.public_modulus_len()]; + let mut signature = vec![0; self.inner.public_modulus_len()]; let rng = SystemRandom::new(); - match self.0.sign(&RSA_PKCS1_SHA256, &rng, data, &mut signature) { + match self + .inner + .sign(&RSA_PKCS1_SHA256, &rng, data, &mut signature) + { Ok(()) => Ok(signature), - Err(e) => Err(SigningError::new("RSA").source(e)), + Err(e) => Err(SigningError::new("RSA",Some(Box::new(e)))), } } + + /// Decode an RSA keypair from a DER-encoded private key in PKCS#8 PrivateKeyInfo + /// format (i.e. unencrypted) as defined in [RFC5208]. + /// Decoding from DER-encoded private key bytes is also supported. + /// + /// [RFC5208]: https://tools.ietf.org/html/rfc5208#section-5 + pub fn try_decode(bytes: &mut [u8]) -> Result { + let from_pkcs8_error = match RsaKeyPair::from_pkcs8(bytes) { + Ok(kp) => { + let kp = Self { + inner: Arc::new(kp), + raw_key: Some(bytes.to_vec()), + }; + bytes.zeroize(); + return Ok(kp); + } + Err(e) => e, + }; + let from_der_error = match RsaKeyPair::from_der(bytes) { + Ok(kp) => { + let kp = Self { + inner: Arc::new(kp), + raw_key: Some(bytes.to_vec()), + }; + bytes.zeroize(); + return Ok(kp); + } + Err(e) => e, + }; + Err(DecodingError::failed_to_parse( + "Ed25519 keypair", + std::io::Error::new( + std::io::ErrorKind::InvalidInput, + format!( + "Cannot parse key from pkcs8 encoding or der encoding: {}\n{}", + from_pkcs8_error.to_string(), + from_der_error.to_string() + ), + ), + )) + } + + pub fn into_raw(&self) -> Option> { + self.raw_key.clone() + } +} + +impl Drop for Keypair{ + fn drop(&mut self) { + self.raw_key.zeroize() + } } /// An RSA public key. @@ -109,7 +155,7 @@ impl PublicKey { /// Decode an RSA public key from a DER-encoded X.509 SubjectPublicKeyInfo /// structure. See also `encode_x509`. 
- pub fn decode_x509(pk: &[u8]) -> Result { + pub fn try_decode_x509(pk: &[u8]) -> Result { Asn1SubjectPublicKeyInfo::decode(pk) .map_err(|e| DecodingError::failed_to_parse("RSA X.509", e)) .map(|spki| spki.subjectPublicKey.0) @@ -317,22 +363,22 @@ mod tests { impl Arbitrary for SomeKeypair { fn arbitrary(g: &mut Gen) -> SomeKeypair { let mut key = g.choose(&[KEY1, KEY2, KEY3]).unwrap().to_vec(); - SomeKeypair(Keypair::from_pkcs8(&mut key).unwrap()) + SomeKeypair(Keypair::try_decode(&mut key).unwrap()) } } #[test] fn rsa_from_pkcs8() { - assert!(Keypair::from_pkcs8(&mut KEY1.to_vec()).is_ok()); - assert!(Keypair::from_pkcs8(&mut KEY2.to_vec()).is_ok()); - assert!(Keypair::from_pkcs8(&mut KEY3.to_vec()).is_ok()); + assert!(Keypair::try_decode(&mut KEY1.to_vec()).is_ok()); + assert!(Keypair::try_decode(&mut KEY2.to_vec()).is_ok()); + assert!(Keypair::try_decode(&mut KEY3.to_vec()).is_ok()); } #[test] fn rsa_x509_encode_decode() { fn prop(SomeKeypair(kp): SomeKeypair) -> Result { let pk = kp.public(); - PublicKey::decode_x509(&pk.encode_x509()) + PublicKey::try_decode_x509(&pk.encode_x509()) .map_err(|e| e.to_string()) .map(|pk2| pk2 == pk) } diff --git a/identity/src/secp256k1.rs b/identity/src/secp256k1.rs index 119c3ef64e9..117991f78b6 100644 --- a/identity/src/secp256k1.rs +++ b/identity/src/secp256k1.rs @@ -51,6 +51,11 @@ impl Keypair { pub fn secret(&self) -> &SecretKey { &self.secret } + + pub fn try_from_bytes(bytes:impl AsMut<[u8]>) -> Result{ + let secret_key = SecretKey::try_from_bytes(bytes)?; + Ok(secret_key.into()) + } } impl fmt::Debug for Keypair { @@ -97,7 +102,7 @@ impl SecretKey { /// error is returned. /// /// Note that the expected binary format is the same as `libsecp256k1`'s. 
- pub fn from_bytes(mut sk: impl AsMut<[u8]>) -> Result { + pub fn try_from_bytes(mut sk: impl AsMut<[u8]>) -> Result { let sk_bytes = sk.as_mut(); let secret = libsecp256k1::SecretKey::parse_slice(&*sk_bytes) .map_err(|e| DecodingError::failed_to_parse("parse secp256k1 secret key", e))?; @@ -109,7 +114,7 @@ impl SecretKey { /// structure as defined in [RFC5915], zeroing the input slice on success. /// /// [RFC5915]: https://tools.ietf.org/html/rfc5915 - pub fn from_der(mut der: impl AsMut<[u8]>) -> Result { + pub fn try_decode_der(mut der: impl AsMut<[u8]>) -> Result { // TODO: Stricter parsing. let der_obj = der.as_mut(); @@ -118,7 +123,7 @@ impl SecretKey { .and_then(Vec::load) .map_err(|e| DecodingError::failed_to_parse("secp256k1 SecretKey bytes", e))?; - let sk = SecretKey::from_bytes(&mut sk_bytes)?; + let sk = SecretKey::try_from_bytes(&mut sk_bytes)?; sk_bytes.zeroize(); der_obj.zeroize(); Ok(sk) @@ -133,7 +138,7 @@ impl SecretKey { } /// Returns the raw bytes of the secret key. - pub fn to_bytes(&self) -> [u8; 32] { + pub fn encode(&self) -> [u8; 32] { self.0.serialize() } @@ -141,7 +146,7 @@ impl SecretKey { /// ECDSA signature. pub fn sign_hash(&self, msg: &[u8]) -> Result, SigningError> { let m = Message::parse_slice(msg) - .map_err(|_| SigningError::new("failed to parse secp256k1 digest"))?; + .map_err(|_| SigningError::new("failed to parse secp256k1 digest",None))?; Ok(libsecp256k1::sign(&m, &self.0) .0 .serialize_der() @@ -214,7 +219,7 @@ impl PublicKey { /// Decode a public key from a byte slice in the the format produced /// by `encode`. 
- pub fn decode(k: &[u8]) -> Result { + pub fn try_decode(k: &[u8]) -> Result { libsecp256k1::PublicKey::parse_slice(k, Some(libsecp256k1::PublicKeyFormat::Compressed)) .map_err(|e| DecodingError::failed_to_parse("secp256k1 public key", e)) .map(PublicKey) @@ -230,7 +235,7 @@ mod tests { let sk1 = SecretKey::generate(); let mut sk_bytes = [0; 32]; sk_bytes.copy_from_slice(&sk1.0.serialize()[..]); - let sk2 = SecretKey::from_bytes(&mut sk_bytes).unwrap(); + let sk2 = SecretKey::try_from_bytes(&mut sk_bytes).unwrap(); assert_eq!(sk1.0.serialize(), sk2.0.serialize()); assert_eq!(sk_bytes, [0; 32]); } From e3e6355b4e74a98adf2da2167f10bda783beb248 Mon Sep 17 00:00:00 2001 From: drHuangMHT Date: Mon, 27 Mar 2023 11:34:23 +0800 Subject: [PATCH 12/83] implement protobuf encoding for RSA keys --- identity/src/error.rs | 16 ---------------- identity/src/keypair.rs | 7 +++++-- identity/src/rsa.rs | 8 ++++---- 3 files changed, 9 insertions(+), 22 deletions(-) diff --git a/identity/src/error.rs b/identity/src/error.rs index b9f47646830..dcb05576329 100644 --- a/identity/src/error.rs +++ b/identity/src/error.rs @@ -73,22 +73,6 @@ impl DecodingError { source: Some(Box::new(source)), } } - - #[allow(unused)] - pub(crate) fn decoding_unsupported(key_type: &'static str) -> Self { - Self { - msg: format!("decoding {key_type} key from Protobuf is unsupported"), - source: None, - } - } - - #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] - pub(crate) fn encoding_unsupported(key_type: &'static str) -> Self { - Self { - msg: format!("encoding {key_type} key to Protobuf is unsupported"), - source: None, - } - } } impl fmt::Display for DecodingError { diff --git a/identity/src/keypair.rs b/identity/src/keypair.rs index bad16f45251..f9e2b9fefd3 100644 --- a/identity/src/keypair.rs +++ b/identity/src/keypair.rs @@ -188,8 +188,11 @@ impl Keypair { Data: data.encode().to_vec(), } } - Self::Rsa(_) => { - return Err(DecodingError::encoding_unsupported("RSA")); + Self::Rsa(data) => { + 
proto::PrivateKey{ + Type: KeyType::RSA, + Data: data.to_raw_bytes() + } } Self::Secp256k1(data) => { #[cfg(not(feature = "secp256k1"))] diff --git a/identity/src/rsa.rs b/identity/src/rsa.rs index c45c783ef18..2c5b725cdda 100644 --- a/identity/src/rsa.rs +++ b/identity/src/rsa.rs @@ -33,7 +33,7 @@ use zeroize::Zeroize; #[derive(Clone)] pub struct Keypair { inner: Arc, - raw_key: Option>, + raw_key: Vec, } impl std::fmt::Debug for Keypair { @@ -73,7 +73,7 @@ impl Keypair { Ok(kp) => { let kp = Self { inner: Arc::new(kp), - raw_key: Some(bytes.to_vec()), + raw_key: bytes.to_vec(), }; bytes.zeroize(); return Ok(kp); @@ -84,7 +84,7 @@ impl Keypair { Ok(kp) => { let kp = Self { inner: Arc::new(kp), - raw_key: Some(bytes.to_vec()), + raw_key: bytes.to_vec(), }; bytes.zeroize(); return Ok(kp); @@ -104,7 +104,7 @@ impl Keypair { )) } - pub fn into_raw(&self) -> Option> { + pub fn to_raw_bytes(&self) -> Vec { self.raw_key.clone() } } From b91bcc7fe87692a258b152ac246ce82029f0e244 Mon Sep 17 00:00:00 2001 From: drHuangMHT Date: Mon, 27 Mar 2023 11:59:41 +0800 Subject: [PATCH 13/83] fix: change 'encode_unsupported' to 'missing_feature' for branch leads to disabled feature --- identity/src/keypair.rs | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/identity/src/keypair.rs b/identity/src/keypair.rs index f9e2b9fefd3..29b5ce27f6f 100644 --- a/identity/src/keypair.rs +++ b/identity/src/keypair.rs @@ -182,13 +182,15 @@ impl Keypair { #[cfg(feature = "ed25519")] Self::Ed25519(data) => { #[cfg(not(feature = "ed25519"))] - return Err(DecodingError::encoding_unsupported("ed25519")); + return Err(DecodingError::missing_feature("ed25519")); proto::PrivateKey { Type: KeyType::Ed25519, Data: data.encode().to_vec(), } } Self::Rsa(data) => { + #[cfg(not(feature = "rsa"))] + return Err(DecodingError::missing_feature("rsa")); proto::PrivateKey{ Type: KeyType::RSA, Data: data.to_raw_bytes() @@ -196,7 +198,7 @@ impl Keypair { } Self::Secp256k1(data) => { 
#[cfg(not(feature = "secp256k1"))] - return Err(DecodingError::encoding_unsupported("secp256k1")); + return Err(DecodingError::missing_feature("secp256k1")); proto::PrivateKey{ Type: KeyType::Secp256k1, Data: data.secret().encode().into() @@ -205,7 +207,7 @@ impl Keypair { Self::Ecdsa(data) => { #[cfg(not(feature = "ecdsa"))] - return Err(DecodingError::encoding_unsupported("ECDSA")); + return Err(DecodingError::missing_feature("ECDSA")); proto::PrivateKey{ Type: KeyType::ECDSA, Data: data.secret().to_bytes() From 959b90f77db12e3bc7d7119b49c392305c93a886 Mon Sep 17 00:00:00 2001 From: drHuangMHT Date: Mon, 27 Mar 2023 12:09:37 +0800 Subject: [PATCH 14/83] fix: wrongly configured feature attributes when encoding to protobuf --- identity/src/keypair.rs | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/identity/src/keypair.rs b/identity/src/keypair.rs index 29b5ce27f6f..c5c15342ddb 100644 --- a/identity/src/keypair.rs +++ b/identity/src/keypair.rs @@ -172,7 +172,7 @@ impl Keypair { /// Encode a private key as protobuf structure. 
#[cfg_attr( not(feature = "ed25519"), - allow(unreachable_code, unused_variables, unused_mut) + allow(unused_variables, unused_mut) )] pub fn to_protobuf_encoding(&self) -> Result, DecodingError> { use quick_protobuf::MessageWrite; @@ -181,33 +181,27 @@ impl Keypair { let pk: proto::PrivateKey = match self { #[cfg(feature = "ed25519")] Self::Ed25519(data) => { - #[cfg(not(feature = "ed25519"))] - return Err(DecodingError::missing_feature("ed25519")); proto::PrivateKey { Type: KeyType::Ed25519, Data: data.encode().to_vec(), } } + #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] Self::Rsa(data) => { - #[cfg(not(feature = "rsa"))] - return Err(DecodingError::missing_feature("rsa")); proto::PrivateKey{ Type: KeyType::RSA, Data: data.to_raw_bytes() } } + #[cfg(feature = "secp256k1")] Self::Secp256k1(data) => { - #[cfg(not(feature = "secp256k1"))] - return Err(DecodingError::missing_feature("secp256k1")); proto::PrivateKey{ Type: KeyType::Secp256k1, Data: data.secret().encode().into() } } - + #[cfg(feature = "ecdsa")] Self::Ecdsa(data) => { - #[cfg(not(feature = "ecdsa"))] - return Err(DecodingError::missing_feature("ECDSA")); proto::PrivateKey{ Type: KeyType::ECDSA, Data: data.secret().to_bytes() From 20134d1def265a334dc3999a4df7e18bd1a44896 Mon Sep 17 00:00:00 2001 From: drHuangMHT Date: Mon, 27 Mar 2023 19:05:42 +0800 Subject: [PATCH 15/83] fix: clippy and fmt --- identity/src/ecdsa.rs | 6 ++--- identity/src/ed25519.rs | 3 ++- identity/src/keypair.rs | 50 +++++++++++++++------------------------ identity/src/lib.rs | 2 +- identity/src/rsa.rs | 7 +++--- identity/src/secp256k1.rs | 4 ++-- 6 files changed, 30 insertions(+), 42 deletions(-) diff --git a/identity/src/ecdsa.rs b/identity/src/ecdsa.rs index a3fecd12fce..952a8b57e66 100644 --- a/identity/src/ecdsa.rs +++ b/identity/src/ecdsa.rs @@ -61,7 +61,7 @@ impl Keypair { &self.secret } - pub fn try_from_bytes(pk:impl AsRef<[u8]>) -> Result{ + pub fn try_from_bytes(pk: impl AsRef<[u8]>) -> Result { let secret_key 
= SecretKey::try_from_bytes(pk)?; Ok(secret_key.into()) } @@ -265,9 +265,9 @@ mod tests { } #[test] - fn serialize_deserialize(){ + fn serialize_deserialize() { let pair = Keypair::generate(); let bytes_secret = pair.secret().to_bytes(); - assert_eq!(Keypair::try_from_bytes(bytes_secret).unwrap(),pair) + assert_eq!(Keypair::try_from_bytes(bytes_secret).unwrap(), pair) } } diff --git a/identity/src/ed25519.rs b/identity/src/ed25519.rs index bff31a61c1a..86b1e7e5a34 100644 --- a/identity/src/ed25519.rs +++ b/identity/src/ed25519.rs @@ -187,7 +187,8 @@ impl AsRef<[u8]> for SecretKey { impl Clone for SecretKey { fn clone(&self) -> SecretKey { let mut sk_bytes = self.0.to_bytes(); - Self::try_from_bytes(&mut sk_bytes).expect("ed25519::SecretKey::from_bytes(to_bytes(k)) != k") + Self::try_from_bytes(&mut sk_bytes) + .expect("ed25519::SecretKey::from_bytes(to_bytes(k)) != k") } } diff --git a/identity/src/keypair.rs b/identity/src/keypair.rs index c5c15342ddb..78ecb907c96 100644 --- a/identity/src/keypair.rs +++ b/identity/src/keypair.rs @@ -170,43 +170,32 @@ impl Keypair { } /// Encode a private key as protobuf structure. 
- #[cfg_attr( - not(feature = "ed25519"), - allow(unused_variables, unused_mut) - )] + #[cfg_attr(not(feature = "ed25519"), allow(unused_variables, unused_mut))] pub fn to_protobuf_encoding(&self) -> Result, DecodingError> { use quick_protobuf::MessageWrite; #[allow(deprecated)] let pk: proto::PrivateKey = match self { #[cfg(feature = "ed25519")] - Self::Ed25519(data) => { - proto::PrivateKey { - Type: KeyType::Ed25519, - Data: data.encode().to_vec(), - } - } + Self::Ed25519(data) => proto::PrivateKey { + Type: KeyType::Ed25519, + Data: data.encode().to_vec(), + }, #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] - Self::Rsa(data) => { - proto::PrivateKey{ - Type: KeyType::RSA, - Data: data.to_raw_bytes() - } - } + Self::Rsa(data) => proto::PrivateKey { + Type: KeyType::RSA, + Data: data.to_raw_bytes(), + }, #[cfg(feature = "secp256k1")] - Self::Secp256k1(data) => { - proto::PrivateKey{ - Type: KeyType::Secp256k1, - Data: data.secret().encode().into() - } - } + Self::Secp256k1(data) => proto::PrivateKey { + Type: KeyType::Secp256k1, + Data: data.secret().encode().into(), + }, #[cfg(feature = "ecdsa")] - Self::Ecdsa(data) => { - proto::PrivateKey{ - Type: KeyType::ECDSA, - Data: data.secret().to_bytes() - } - } + Self::Ecdsa(data) => proto::PrivateKey { + Type: KeyType::ECDSA, + Data: data.secret().to_bytes(), + }, }; let mut buf = Vec::with_capacity(pk.get_size()); @@ -226,7 +215,7 @@ impl Keypair { .map_err(|e| DecodingError::bad_protobuf("private key bytes", e)) .map(zeroize::Zeroizing::new)?; - #[allow(deprecated,unreachable_code)] + #[allow(deprecated, unreachable_code)] match private_key.Type { proto::KeyType::Ed25519 => { #[cfg(feature = "ed25519")] @@ -246,8 +235,7 @@ impl Keypair { } proto::KeyType::ECDSA => { #[cfg(feature = "ecdsa")] - return ecdsa::Keypair::try_from_bytes(&private_key.Data) - .map(Keypair::Ecdsa); + return ecdsa::Keypair::try_from_bytes(&private_key.Data).map(Keypair::Ecdsa); Err(DecodingError::missing_feature("ECDSA")) } } diff 
--git a/identity/src/lib.rs b/identity/src/lib.rs index 54b2f7aaf0d..d89607a75ac 100644 --- a/identity/src/lib.rs +++ b/identity/src/lib.rs @@ -120,4 +120,4 @@ pub use keypair::{Keypair, PublicKey}; #[cfg(feature = "peerid")] pub use peer_id::{ParseError, PeerId}; -pub type KeyType = proto::KeyType; \ No newline at end of file +pub type KeyType = proto::KeyType; diff --git a/identity/src/rsa.rs b/identity/src/rsa.rs index 2c5b725cdda..b6a4fcb67cc 100644 --- a/identity/src/rsa.rs +++ b/identity/src/rsa.rs @@ -59,7 +59,7 @@ impl Keypair { .sign(&RSA_PKCS1_SHA256, &rng, data, &mut signature) { Ok(()) => Ok(signature), - Err(e) => Err(SigningError::new("RSA",Some(Box::new(e)))), + Err(e) => Err(SigningError::new("RSA", Some(Box::new(e)))), } } @@ -97,8 +97,7 @@ impl Keypair { std::io::ErrorKind::InvalidInput, format!( "Cannot parse key from pkcs8 encoding or der encoding: {}\n{}", - from_pkcs8_error.to_string(), - from_der_error.to_string() + from_pkcs8_error, from_der_error ), ), )) @@ -109,7 +108,7 @@ impl Keypair { } } -impl Drop for Keypair{ +impl Drop for Keypair { fn drop(&mut self) { self.raw_key.zeroize() } diff --git a/identity/src/secp256k1.rs b/identity/src/secp256k1.rs index 117991f78b6..2939df41392 100644 --- a/identity/src/secp256k1.rs +++ b/identity/src/secp256k1.rs @@ -52,7 +52,7 @@ impl Keypair { &self.secret } - pub fn try_from_bytes(bytes:impl AsMut<[u8]>) -> Result{ + pub fn try_from_bytes(bytes: impl AsMut<[u8]>) -> Result { let secret_key = SecretKey::try_from_bytes(bytes)?; Ok(secret_key.into()) } @@ -146,7 +146,7 @@ impl SecretKey { /// ECDSA signature. 
pub fn sign_hash(&self, msg: &[u8]) -> Result, SigningError> { let m = Message::parse_slice(msg) - .map_err(|_| SigningError::new("failed to parse secp256k1 digest",None))?; + .map_err(|_| SigningError::new("failed to parse secp256k1 digest", None))?; Ok(libsecp256k1::sign(&m, &self.0) .0 .serialize_der() From d833904c5c3ae4adcf00ecd0c64adaa223783064 Mon Sep 17 00:00:00 2001 From: drHuangMHT Date: Mon, 27 Mar 2023 19:45:05 +0800 Subject: [PATCH 16/83] doc: documentation for changed methods --- identity/src/ecdsa.rs | 6 ++++-- identity/src/ed25519.rs | 6 +++--- identity/src/error.rs | 2 ++ identity/src/keypair.rs | 4 ++++ identity/src/rsa.rs | 4 +++- identity/src/secp256k1.rs | 6 ++++-- 6 files changed, 20 insertions(+), 8 deletions(-) diff --git a/identity/src/ecdsa.rs b/identity/src/ecdsa.rs index 952a8b57e66..e08e24dea31 100644 --- a/identity/src/ecdsa.rs +++ b/identity/src/ecdsa.rs @@ -61,6 +61,8 @@ impl Keypair { &self.secret } + /// Try to parse an secret key byte array into a ECDSA `SecretKey` + /// and promote it into a `Keypair`. pub fn try_from_bytes(pk: impl AsRef<[u8]>) -> Result { let secret_key = SecretKey::try_from_bytes(pk)?; Ok(secret_key.into()) @@ -107,12 +109,12 @@ impl SecretKey { signature.as_bytes().to_owned() } - /// Encode a secret key into a byte buffer. + /// Convert a secret key into a byte buffer. pub fn to_bytes(&self) -> Vec { self.0.to_bytes().to_vec() } - /// Decode a secret key from a byte buffer. + /// Try to parse a secret key from a byte buffer. 
pub fn try_from_bytes(buf: impl AsRef<[u8]>) -> Result { SigningKey::from_bytes(buf.as_ref()) .map_err(|err| DecodingError::failed_to_parse("ecdsa p256 secret key", err)) diff --git a/identity/src/ed25519.rs b/identity/src/ed25519.rs index 86b1e7e5a34..caac7be37a2 100644 --- a/identity/src/ed25519.rs +++ b/identity/src/ed25519.rs @@ -45,7 +45,7 @@ impl Keypair { self.0.to_bytes() } - /// Decode a keypair from the [binary format](https://datatracker.ietf.org/doc/html/rfc8032#section-5.1.5) + /// Try to decode a keypair from the [binary format](https://datatracker.ietf.org/doc/html/rfc8032#section-5.1.5) /// produced by [`Keypair::encode`], zeroing the input on success. /// /// Note that this binary format is the same as `ed25519_dalek`'s and `ed25519_zebra`'s. @@ -166,7 +166,7 @@ impl PublicKey { self.0.to_bytes() } - /// Decode a public key from a byte array as produced by `encode`. + /// Try to decode a public key from a byte array as produced by `encode`. pub fn try_decode(k: &[u8]) -> Result { ed25519::PublicKey::from_bytes(k) .map_err(|e| DecodingError::failed_to_parse("Ed25519 public key", e)) @@ -210,7 +210,7 @@ impl SecretKey { ) } - /// Create an Ed25519 secret key from a byte slice, zeroing the input on success. + /// Try to create an Ed25519 secret key from a byte slice, zeroing the input on success. /// If the bytes do not constitute a valid Ed25519 secret key, an error is /// returned. pub fn try_from_bytes(mut sk_bytes: impl AsMut<[u8]>) -> Result { diff --git a/identity/src/error.rs b/identity/src/error.rs index dcb05576329..83b0f1c8030 100644 --- a/identity/src/error.rs +++ b/identity/src/error.rs @@ -31,6 +31,7 @@ pub struct DecodingError { } impl DecodingError { + /// The given feature is supported but not enabled at compile time. 
pub(crate) fn missing_feature(feature_name: &'static str) -> Self { Self { msg: format!("cargo feature `{feature_name}` is not enabled"), @@ -44,6 +45,7 @@ impl DecodingError { feature = "ed25519", feature = "rsa" ))] + /// Error occurred when parsing one type into another. pub(crate) fn failed_to_parse(what: &'static str, source: S) -> Self where E: Error + Send + Sync + 'static, diff --git a/identity/src/keypair.rs b/identity/src/keypair.rs index 78ecb907c96..4091dc2d878 100644 --- a/identity/src/keypair.rs +++ b/identity/src/keypair.rs @@ -128,6 +128,10 @@ impl Keypair { .map(|sk| Keypair::Secp256k1(secp256k1::Keypair::from(sk))) } + /// Try to decode a keypair from the [binary format](https://datatracker.ietf.org/doc/html/rfc8032#section-5.1.5) + /// produced by [`Keypair::encode`], zeroing the input on success. + /// + /// Note that this binary format is the same as `ed25519_dalek`'s and `ed25519_zebra`'s. #[cfg(feature = "ed25519")] pub fn ed25519_from_bytes(bytes: impl AsMut<[u8]>) -> Result { #[allow(deprecated)] diff --git a/identity/src/rsa.rs b/identity/src/rsa.rs index b6a4fcb67cc..df45077177c 100644 --- a/identity/src/rsa.rs +++ b/identity/src/rsa.rs @@ -66,7 +66,8 @@ impl Keypair { /// Decode an RSA keypair from a DER-encoded private key in PKCS#8 PrivateKeyInfo /// format (i.e. unencrypted) as defined in [RFC5208]. /// Decoding from DER-encoded private key bytes is also supported. - /// + /// Note that a copy of the undecoded byte array will be stored for encoding. + /// /// [RFC5208]: https://tools.ietf.org/html/rfc5208#section-5 pub fn try_decode(bytes: &mut [u8]) -> Result { let from_pkcs8_error = match RsaKeyPair::from_pkcs8(bytes) { @@ -103,6 +104,7 @@ impl Keypair { )) } + /// Get the byte array used to parse the keypair from. 
pub fn to_raw_bytes(&self) -> Vec { self.raw_key.clone() } diff --git a/identity/src/secp256k1.rs b/identity/src/secp256k1.rs index 2939df41392..73070eadfaf 100644 --- a/identity/src/secp256k1.rs +++ b/identity/src/secp256k1.rs @@ -52,6 +52,8 @@ impl Keypair { &self.secret } + /// Try to parse a secret key byte arrray into a secp256k1 `SecretKey` + /// and promote it into a `Keypair`. pub fn try_from_bytes(bytes: impl AsMut<[u8]>) -> Result { let secret_key = SecretKey::try_from_bytes(bytes)?; Ok(secret_key.into()) @@ -97,7 +99,7 @@ impl SecretKey { SecretKey(libsecp256k1::SecretKey::random(&mut rand::thread_rng())) } - /// Create a secret key from a byte slice, zeroing the slice on success. + /// Try to parse a secret key from a byte slice, zeroing the slice on success. /// If the bytes do not constitute a valid Secp256k1 secret key, an /// error is returned. /// @@ -110,7 +112,7 @@ impl SecretKey { Ok(SecretKey(secret)) } - /// Decode a DER-encoded Secp256k1 secret key in an ECPrivateKey + /// Try to decode a DER-encoded Secp256k1 secret key in an ECPrivateKey /// structure as defined in [RFC5915], zeroing the input slice on success. 
/// /// [RFC5915]: https://tools.ietf.org/html/rfc5915 From 0896a61a3a5c8c46ee73d4427593f8e8627b45d8 Mon Sep 17 00:00:00 2001 From: drHuangMHT Date: Mon, 27 Mar 2023 19:59:24 +0800 Subject: [PATCH 17/83] feat: enforce naming rules --- core/src/signed_envelope.rs | 4 ++-- examples/dcutr/src/main.rs | 2 +- examples/file-sharing/src/network.rs | 2 +- examples/relay-server/src/main.rs | 2 +- identity/src/keypair.rs | 22 +++++++++++----------- identity/src/peer_id.rs | 4 ++-- identity/tests/keypair_api.rs | 4 ++-- misc/keygen/src/config.rs | 2 +- misc/keygen/src/main.rs | 4 ++-- protocols/gossipsub/src/behaviour.rs | 2 +- protocols/gossipsub/src/protocol.rs | 4 ++-- protocols/identify/src/protocol.rs | 6 +++--- transports/noise/src/io/handshake.rs | 4 ++-- transports/plaintext/src/handshake.rs | 4 ++-- transports/tls/src/certificate.rs | 4 ++-- 15 files changed, 35 insertions(+), 35 deletions(-) diff --git a/core/src/signed_envelope.rs b/core/src/signed_envelope.rs index 0eafda4c487..d4e4b47b4bc 100644 --- a/core/src/signed_envelope.rs +++ b/core/src/signed_envelope.rs @@ -76,7 +76,7 @@ impl SignedEnvelope { use quick_protobuf::MessageWrite; let envelope = proto::Envelope { - public_key: self.key.to_protobuf_encoding(), + public_key: self.key.try_to_protobuf_encoding().unwrap(), payload_type: self.payload_type, payload: self.payload, signature: self.signature, @@ -101,7 +101,7 @@ impl SignedEnvelope { proto::Envelope::from_reader(&mut reader, bytes).map_err(DecodeError::from)?; Ok(Self { - key: PublicKey::from_protobuf_encoding(&envelope.public_key)?, + key: PublicKey::try_from_protobuf_encoding(&envelope.public_key)?, payload_type: envelope.payload_type.to_vec(), payload: envelope.payload.to_vec(), signature: envelope.signature.to_vec(), diff --git a/examples/dcutr/src/main.rs b/examples/dcutr/src/main.rs index e7dd716723f..27e28297a71 100644 --- a/examples/dcutr/src/main.rs +++ b/examples/dcutr/src/main.rs @@ -281,5 +281,5 @@ fn generate_ed25519(secret_key_seed: u8) 
-> identity::Keypair { let mut bytes = [0u8; 32]; bytes[0] = secret_key_seed; - identity::Keypair::ed25519_from_bytes(bytes).expect("only errors on wrong length") + identity::Keypair::try_from_bytes_ed25519(bytes).expect("only errors on wrong length") } diff --git a/examples/file-sharing/src/network.rs b/examples/file-sharing/src/network.rs index f21ff95276d..0c91112b99c 100644 --- a/examples/file-sharing/src/network.rs +++ b/examples/file-sharing/src/network.rs @@ -39,7 +39,7 @@ pub async fn new( Some(seed) => { let mut bytes = [0u8; 32]; bytes[0] = seed; - identity::Keypair::ed25519_from_bytes(bytes).unwrap() + identity::Keypair::try_from_bytes_ed25519(bytes).unwrap() } None => identity::Keypair::generate_ed25519(), }; diff --git a/examples/relay-server/src/main.rs b/examples/relay-server/src/main.rs index 583b6695708..68fd32a9666 100644 --- a/examples/relay-server/src/main.rs +++ b/examples/relay-server/src/main.rs @@ -103,7 +103,7 @@ fn generate_ed25519(secret_key_seed: u8) -> identity::Keypair { let mut bytes = [0u8; 32]; bytes[0] = secret_key_seed; - identity::Keypair::ed25519_from_bytes(bytes).expect("only errors on wrong length") + identity::Keypair::try_from_bytes_ed25519(bytes).expect("only errors on wrong length") } #[derive(Debug, Parser)] diff --git a/identity/src/keypair.rs b/identity/src/keypair.rs index 4091dc2d878..946242a7dd2 100644 --- a/identity/src/keypair.rs +++ b/identity/src/keypair.rs @@ -112,7 +112,7 @@ impl Keypair { /// /// [RFC5208]: https://tools.ietf.org/html/rfc5208#section-5 #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] - pub fn rsa_from_pkcs8(pkcs8_der: &mut [u8]) -> Result { + pub fn try_from_pkcs8_rsa(pkcs8_der: &mut [u8]) -> Result { #[allow(deprecated)] rsa::Keypair::try_decode(pkcs8_der).map(Keypair::Rsa) } @@ -122,7 +122,7 @@ impl Keypair { /// /// [RFC5915]: https://tools.ietf.org/html/rfc5915 #[cfg(feature = "secp256k1")] - pub fn secp256k1_from_der(der: &mut [u8]) -> Result { + pub fn 
try_from_der_secp256k1(der: &mut [u8]) -> Result { #[allow(deprecated)] secp256k1::SecretKey::try_decode_der(der) .map(|sk| Keypair::Secp256k1(secp256k1::Keypair::from(sk))) @@ -133,7 +133,7 @@ impl Keypair { /// /// Note that this binary format is the same as `ed25519_dalek`'s and `ed25519_zebra`'s. #[cfg(feature = "ed25519")] - pub fn ed25519_from_bytes(bytes: impl AsMut<[u8]>) -> Result { + pub fn try_from_bytes_ed25519(bytes: impl AsMut<[u8]>) -> Result { #[allow(deprecated)] Ok(Keypair::Ed25519(ed25519::Keypair::from( ed25519::SecretKey::try_from_bytes(bytes)?, @@ -175,7 +175,7 @@ impl Keypair { /// Encode a private key as protobuf structure. #[cfg_attr(not(feature = "ed25519"), allow(unused_variables, unused_mut))] - pub fn to_protobuf_encoding(&self) -> Result, DecodingError> { + pub fn try_to_protobuf_encoding(&self) -> Result, DecodingError> { use quick_protobuf::MessageWrite; #[allow(deprecated)] @@ -211,7 +211,7 @@ impl Keypair { /// Decode a private key from a protobuf structure and parse it as a [`Keypair`]. #[cfg_attr(not(feature = "ed25519"), allow(unused_mut))] - pub fn from_protobuf_encoding(bytes: &[u8]) -> Result { + pub fn try_from_protobuf_encoding(bytes: &[u8]) -> Result { use quick_protobuf::MessageRead; let mut reader = BytesReader::from_bytes(bytes); @@ -387,7 +387,7 @@ impl PublicKey { /// Encode the public key into a protobuf structure for storage or /// exchange with other nodes. - pub fn to_protobuf_encoding(&self) -> Vec { + pub fn try_to_protobuf_encoding(&self) -> Result, DecodingError> { use quick_protobuf::MessageWrite; let public_key = proto::PublicKey::from(self); @@ -398,12 +398,12 @@ impl PublicKey { .write_message(&mut writer) .expect("Encoding to succeed"); - buf + Ok(buf) } /// Decode a public key from a protobuf structure, e.g. read from storage /// or received from another node. 
- pub fn from_protobuf_encoding(bytes: &[u8]) -> Result { + pub fn try_from_protobuf_encoding(bytes: &[u8]) -> Result { use quick_protobuf::MessageRead; let mut reader = BytesReader::from_bytes(bytes); @@ -521,9 +521,9 @@ mod tests { let expected_keypair = Keypair::generate_ed25519(); let expected_peer_id = expected_keypair.public().to_peer_id(); - let encoded = expected_keypair.to_protobuf_encoding().unwrap(); + let encoded = expected_keypair.try_to_protobuf_encoding().unwrap(); - let keypair = Keypair::from_protobuf_encoding(&encoded).unwrap(); + let keypair = Keypair::try_from_protobuf_encoding(&encoded).unwrap(); let peer_id = keypair.public().to_peer_id(); assert_eq!(expected_peer_id, peer_id); @@ -538,7 +538,7 @@ mod tests { let encoded = BASE64_STANDARD.decode(base_64_encoded).unwrap(); - let keypair = Keypair::from_protobuf_encoding(&encoded).unwrap(); + let keypair = Keypair::try_from_protobuf_encoding(&encoded).unwrap(); let peer_id = keypair.public().to_peer_id(); assert_eq!(expected_peer_id, peer_id); diff --git a/identity/src/peer_id.rs b/identity/src/peer_id.rs index ae9ffc80c0a..8987f55c617 100644 --- a/identity/src/peer_id.rs +++ b/identity/src/peer_id.rs @@ -57,7 +57,7 @@ impl PeerId { pub fn from_public_key(key: &crate::keypair::PublicKey) -> PeerId { use multihash::MultihashDigest as _; - let key_enc = key.to_protobuf_encoding(); + let key_enc = key.try_to_protobuf_encoding().unwrap(); let hash_algorithm = if key_enc.len() <= MAX_INLINE_KEY_LENGTH { Code::Identity @@ -132,7 +132,7 @@ impl PeerId { let alg = Code::try_from(self.multihash.code()) .expect("Internal multihash is always a valid `Code`"); - let enc = public_key.to_protobuf_encoding(); + let enc = public_key.try_to_protobuf_encoding().unwrap(); Some(alg.digest(&enc) == self.multihash) } } diff --git a/identity/tests/keypair_api.rs b/identity/tests/keypair_api.rs index c1102278a63..19649053134 100644 --- a/identity/tests/keypair_api.rs +++ b/identity/tests/keypair_api.rs @@ -2,12 +2,12 
@@ use libp2p_identity::Keypair; #[test] fn calling_keypair_api() { - let _ = Keypair::from_protobuf_encoding(&[]); + let _ = Keypair::try_from_protobuf_encoding(&[]); } #[allow(dead_code)] fn using_keypair(kp: Keypair) { - let _ = kp.to_protobuf_encoding(); + let _ = kp.try_to_protobuf_encoding(); let _ = kp.sign(&[]); let _ = kp.public(); } diff --git a/misc/keygen/src/config.rs b/misc/keygen/src/config.rs index 30ec8f8324e..8959a294bbd 100644 --- a/misc/keygen/src/config.rs +++ b/misc/keygen/src/config.rs @@ -18,7 +18,7 @@ impl Config { } pub fn from_key_material(peer_id: PeerId, keypair: &Keypair) -> Result> { - let priv_key = BASE64_STANDARD.encode(keypair.to_protobuf_encoding()?); + let priv_key = BASE64_STANDARD.encode(keypair.try_to_protobuf_encoding()?); let peer_id = peer_id.to_base58(); Ok(Self { identity: Identity { peer_id, priv_key }, diff --git a/misc/keygen/src/main.rs b/misc/keygen/src/main.rs index 64d98005369..ad8a7f62973 100644 --- a/misc/keygen/src/main.rs +++ b/misc/keygen/src/main.rs @@ -54,7 +54,7 @@ fn main() -> Result<(), Box> { Command::From { config } => { let config = Zeroizing::new(config::Config::from_file(config.as_ref())?); - let keypair = identity::Keypair::from_protobuf_encoding(&Zeroizing::new( + let keypair = identity::Keypair::try_from_protobuf_encoding(&Zeroizing::new( BASE64_STANDARD.decode(config.identity.priv_key.as_bytes())?, ))?; @@ -118,7 +118,7 @@ fn main() -> Result<(), Box> { println!( "PeerId: {:?} Keypair: {:?}", local_peer_id, - local_keypair.to_protobuf_encoding() + local_keypair.try_to_protobuf_encoding().unwrap() ); } diff --git a/protocols/gossipsub/src/behaviour.rs b/protocols/gossipsub/src/behaviour.rs index 4b358afdd02..41bf95390b6 100644 --- a/protocols/gossipsub/src/behaviour.rs +++ b/protocols/gossipsub/src/behaviour.rs @@ -178,7 +178,7 @@ impl From for PublishConfig { match authenticity { MessageAuthenticity::Signed(keypair) => { let public_key = keypair.public(); - let key_enc = 
public_key.to_protobuf_encoding(); + let key_enc = public_key.try_to_protobuf_encoding().unwrap(); let key = if key_enc.len() <= 42 { // The public key can be inlined in [`rpc_proto::proto::::Message::from`], so we don't include it // specifically in the [`rpc_proto::proto::Message::key`] field. diff --git a/protocols/gossipsub/src/protocol.rs b/protocols/gossipsub/src/protocol.rs index f7b04269c92..9d6edfbecd1 100644 --- a/protocols/gossipsub/src/protocol.rs +++ b/protocols/gossipsub/src/protocol.rs @@ -237,10 +237,10 @@ impl GossipsubCodec { let public_key = match message .key .as_deref() - .map(PublicKey::from_protobuf_encoding) + .map(PublicKey::try_from_protobuf_encoding) { Some(Ok(key)) => key, - _ => match PublicKey::from_protobuf_encoding(&source.to_bytes()[2..]) { + _ => match PublicKey::try_from_protobuf_encoding(&source.to_bytes()[2..]) { Ok(v) => v, Err(_) => { warn!("Signature verification failed: No valid public key supplied"); diff --git a/protocols/identify/src/protocol.rs b/protocols/identify/src/protocol.rs index 51e530291dc..13b818757cf 100644 --- a/protocols/identify/src/protocol.rs +++ b/protocols/identify/src/protocol.rs @@ -169,7 +169,7 @@ where .map(|addr| addr.to_vec()) .collect(); - let pubkey_bytes = info.public_key.to_protobuf_encoding(); + let pubkey_bytes = info.public_key.try_to_protobuf_encoding().unwrap(); let message = proto::Identify { agentVersion: Some(info.agent_version), @@ -235,7 +235,7 @@ impl TryFrom for Info { addrs }; - let public_key = PublicKey::from_protobuf_encoding(&msg.publicKey.unwrap_or_default())?; + let public_key = PublicKey::try_from_protobuf_encoding(&msg.publicKey.unwrap_or_default())?; let observed_addr = match parse_multiaddr(msg.observedAddr.unwrap_or_default()) { Ok(a) => a, @@ -386,7 +386,7 @@ mod tests { publicKey: Some( identity::Keypair::generate_ed25519() .public() - .to_protobuf_encoding(), + .try_to_protobuf_encoding().unwrap(), ), }; diff --git a/transports/noise/src/io/handshake.rs 
b/transports/noise/src/io/handshake.rs index 672f24ef7ce..b96094b77e3 100644 --- a/transports/noise/src/io/handshake.rs +++ b/transports/noise/src/io/handshake.rs @@ -214,7 +214,7 @@ where let pb = pb_result?; if !pb.identity_key.is_empty() { - let pk = identity::PublicKey::from_protobuf_encoding(&pb.identity_key)?; + let pk = identity::PublicKey::try_from_protobuf_encoding(&pb.identity_key)?; if let Some(ref k) = state.id_remote_pubkey { if k != &pk { return Err(NoiseError::UnexpectedKey); @@ -236,7 +236,7 @@ where T: AsyncWrite + Unpin, { let mut pb = proto::NoiseHandshakePayload { - identity_key: state.identity.public.to_protobuf_encoding(), + identity_key: state.identity.public.try_to_protobuf_encoding().unwrap(), ..Default::default() }; diff --git a/transports/plaintext/src/handshake.rs b/transports/plaintext/src/handshake.rs index 3f70f515a09..70208d084ba 100644 --- a/transports/plaintext/src/handshake.rs +++ b/transports/plaintext/src/handshake.rs @@ -54,7 +54,7 @@ impl HandshakeContext { fn new(config: PlainText2Config) -> Self { let exchange = Exchange { id: Some(config.local_public_key.to_peer_id().to_bytes()), - pubkey: Some(config.local_public_key.to_protobuf_encoding()), + pubkey: Some(config.local_public_key.try_to_protobuf_encoding().unwrap()), }; let mut buf = Vec::with_capacity(exchange.get_size()); let mut writer = Writer::new(&mut buf); @@ -77,7 +77,7 @@ impl HandshakeContext { let mut reader = BytesReader::from_bytes(&exchange_bytes); let prop = Exchange::from_reader(&mut reader, &exchange_bytes)?; - let public_key = PublicKey::from_protobuf_encoding(&prop.pubkey.unwrap_or_default())?; + let public_key = PublicKey::try_from_protobuf_encoding(&prop.pubkey.unwrap_or_default())?; let peer_id = PeerId::from_bytes(&prop.id.unwrap_or_default())?; // Check the validity of the remote's `Exchange`. 
diff --git a/transports/tls/src/certificate.rs b/transports/tls/src/certificate.rs index 6321fa26400..317c7c4474a 100644 --- a/transports/tls/src/certificate.rs +++ b/transports/tls/src/certificate.rs @@ -159,7 +159,7 @@ fn parse_unverified(der_input: &[u8]) -> Result { // required KeyType Type = 1; // required bytes Data = 2; // } - let public_key = identity::PublicKey::from_protobuf_encoding(&public_key) + let public_key = identity::PublicKey::try_from_protobuf_encoding(&public_key) .map_err(|_| webpki::Error::UnknownIssuer)?; let ext = P2pExtension { public_key, @@ -215,7 +215,7 @@ fn make_libp2p_extension( // signature OCTET STRING // } let extension_content = { - let serialized_pubkey = identity_keypair.public().to_protobuf_encoding(); + let serialized_pubkey = identity_keypair.public().try_to_protobuf_encoding().unwrap(); yasna::encode_der(&(serialized_pubkey, signature)) }; From f559cfebec2928a690fc55ccde5eec70f3f218d8 Mon Sep 17 00:00:00 2001 From: drHuangMHT Date: Wed, 29 Mar 2023 18:08:30 +0800 Subject: [PATCH 18/83] hide 'proto::KeyType' --- identity/src/keypair.rs | 10 +++++----- identity/src/lib.rs | 11 ++++++++++- 2 files changed, 15 insertions(+), 6 deletions(-) diff --git a/identity/src/keypair.rs b/identity/src/keypair.rs index 946242a7dd2..4c963fd7d41 100644 --- a/identity/src/keypair.rs +++ b/identity/src/keypair.rs @@ -19,7 +19,7 @@ // DEALINGS IN THE SOFTWARE. 
use crate::error::{DecodingError, SigningError}; -use crate::{proto, KeyType}; +use crate::proto; use quick_protobuf::{BytesReader, Writer}; use std::convert::TryFrom; @@ -182,22 +182,22 @@ impl Keypair { let pk: proto::PrivateKey = match self { #[cfg(feature = "ed25519")] Self::Ed25519(data) => proto::PrivateKey { - Type: KeyType::Ed25519, + Type: proto::KeyType::Ed25519, Data: data.encode().to_vec(), }, #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] Self::Rsa(data) => proto::PrivateKey { - Type: KeyType::RSA, + Type: proto::KeyType::RSA, Data: data.to_raw_bytes(), }, #[cfg(feature = "secp256k1")] Self::Secp256k1(data) => proto::PrivateKey { - Type: KeyType::Secp256k1, + Type: proto::KeyType::Secp256k1, Data: data.secret().encode().into(), }, #[cfg(feature = "ecdsa")] Self::Ecdsa(data) => proto::PrivateKey { - Type: KeyType::ECDSA, + Type: proto::KeyType::ECDSA, Data: data.secret().to_bytes(), }, }; diff --git a/identity/src/lib.rs b/identity/src/lib.rs index d89607a75ac..fcd4fc11f16 100644 --- a/identity/src/lib.rs +++ b/identity/src/lib.rs @@ -120,4 +120,13 @@ pub use keypair::{Keypair, PublicKey}; #[cfg(feature = "peerid")] pub use peer_id::{ParseError, PeerId}; -pub type KeyType = proto::KeyType; +pub enum KeyType{ + #[cfg(feature = "ed25519")] + Ed25519, + #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] + RSA, + #[cfg(feature = "secp256k1")] + Secp256k1, + #[cfg(feature = "ecdsa")] + Ecdsa +} From 672b9b9257881d55a9eb810ea7ccdcd8feb31f56 Mon Sep 17 00:00:00 2001 From: drHuangMHT Date: Wed, 29 Mar 2023 18:20:05 +0800 Subject: [PATCH 19/83] make 'to_protobuf_encoding' for 'Keypair' infallible --- identity/src/keypair.rs | 10 +++++----- identity/tests/keypair_api.rs | 2 +- misc/keygen/src/config.rs | 2 +- misc/keygen/src/main.rs | 2 +- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/identity/src/keypair.rs b/identity/src/keypair.rs index 4c963fd7d41..89278a611b6 100644 --- a/identity/src/keypair.rs +++ b/identity/src/keypair.rs 
@@ -175,7 +175,7 @@ impl Keypair { /// Encode a private key as protobuf structure. #[cfg_attr(not(feature = "ed25519"), allow(unused_variables, unused_mut))] - pub fn try_to_protobuf_encoding(&self) -> Result, DecodingError> { + pub fn to_protobuf_encoding(&self) -> Vec { use quick_protobuf::MessageWrite; #[allow(deprecated)] @@ -206,7 +206,7 @@ impl Keypair { let mut writer = Writer::new(&mut buf); pk.write_message(&mut writer).expect("Encoding to succeed"); - Ok(buf) + buf } /// Decode a private key from a protobuf structure and parse it as a [`Keypair`]. @@ -387,7 +387,7 @@ impl PublicKey { /// Encode the public key into a protobuf structure for storage or /// exchange with other nodes. - pub fn try_to_protobuf_encoding(&self) -> Result, DecodingError> { + pub fn to_protobuf_encoding(&self) -> Vec { use quick_protobuf::MessageWrite; let public_key = proto::PublicKey::from(self); @@ -398,7 +398,7 @@ impl PublicKey { .write_message(&mut writer) .expect("Encoding to succeed"); - Ok(buf) + buf } /// Decode a public key from a protobuf structure, e.g. 
read from storage @@ -521,7 +521,7 @@ mod tests { let expected_keypair = Keypair::generate_ed25519(); let expected_peer_id = expected_keypair.public().to_peer_id(); - let encoded = expected_keypair.try_to_protobuf_encoding().unwrap(); + let encoded = expected_keypair.to_protobuf_encoding(); let keypair = Keypair::try_from_protobuf_encoding(&encoded).unwrap(); let peer_id = keypair.public().to_peer_id(); diff --git a/identity/tests/keypair_api.rs b/identity/tests/keypair_api.rs index 19649053134..356d46389fe 100644 --- a/identity/tests/keypair_api.rs +++ b/identity/tests/keypair_api.rs @@ -7,7 +7,7 @@ fn calling_keypair_api() { #[allow(dead_code)] fn using_keypair(kp: Keypair) { - let _ = kp.try_to_protobuf_encoding(); + let _ = kp.to_protobuf_encoding(); let _ = kp.sign(&[]); let _ = kp.public(); } diff --git a/misc/keygen/src/config.rs b/misc/keygen/src/config.rs index 8959a294bbd..5bd62f39230 100644 --- a/misc/keygen/src/config.rs +++ b/misc/keygen/src/config.rs @@ -18,7 +18,7 @@ impl Config { } pub fn from_key_material(peer_id: PeerId, keypair: &Keypair) -> Result> { - let priv_key = BASE64_STANDARD.encode(keypair.try_to_protobuf_encoding()?); + let priv_key = BASE64_STANDARD.encode(keypair.to_protobuf_encoding()); let peer_id = peer_id.to_base58(); Ok(Self { identity: Identity { peer_id, priv_key }, diff --git a/misc/keygen/src/main.rs b/misc/keygen/src/main.rs index ad8a7f62973..db4679fe5df 100644 --- a/misc/keygen/src/main.rs +++ b/misc/keygen/src/main.rs @@ -118,7 +118,7 @@ fn main() -> Result<(), Box> { println!( "PeerId: {:?} Keypair: {:?}", local_peer_id, - local_keypair.try_to_protobuf_encoding().unwrap() + local_keypair.to_protobuf_encoding() ); } From 33c9b9bf445f0d10f9ca94ad5c73e1c32359b37e Mon Sep 17 00:00:00 2001 From: drHuangMHT Date: Wed, 29 Mar 2023 18:20:38 +0800 Subject: [PATCH 20/83] make 'to_protobuf_encoding' for 'PublicKey' infallible --- core/src/signed_envelope.rs | 2 +- identity/src/peer_id.rs | 4 ++-- 
protocols/gossipsub/src/behaviour.rs | 2 +- protocols/identify/src/protocol.rs | 4 ++-- transports/noise/src/io/handshake.rs | 2 +- transports/plaintext/src/handshake.rs | 2 +- transports/tls/src/certificate.rs | 2 +- 7 files changed, 9 insertions(+), 9 deletions(-) diff --git a/core/src/signed_envelope.rs b/core/src/signed_envelope.rs index d4e4b47b4bc..9e311e1229b 100644 --- a/core/src/signed_envelope.rs +++ b/core/src/signed_envelope.rs @@ -76,7 +76,7 @@ impl SignedEnvelope { use quick_protobuf::MessageWrite; let envelope = proto::Envelope { - public_key: self.key.try_to_protobuf_encoding().unwrap(), + public_key: self.key.to_protobuf_encoding(), payload_type: self.payload_type, payload: self.payload, signature: self.signature, diff --git a/identity/src/peer_id.rs b/identity/src/peer_id.rs index 8987f55c617..ae9ffc80c0a 100644 --- a/identity/src/peer_id.rs +++ b/identity/src/peer_id.rs @@ -57,7 +57,7 @@ impl PeerId { pub fn from_public_key(key: &crate::keypair::PublicKey) -> PeerId { use multihash::MultihashDigest as _; - let key_enc = key.try_to_protobuf_encoding().unwrap(); + let key_enc = key.to_protobuf_encoding(); let hash_algorithm = if key_enc.len() <= MAX_INLINE_KEY_LENGTH { Code::Identity @@ -132,7 +132,7 @@ impl PeerId { let alg = Code::try_from(self.multihash.code()) .expect("Internal multihash is always a valid `Code`"); - let enc = public_key.try_to_protobuf_encoding().unwrap(); + let enc = public_key.to_protobuf_encoding(); Some(alg.digest(&enc) == self.multihash) } } diff --git a/protocols/gossipsub/src/behaviour.rs b/protocols/gossipsub/src/behaviour.rs index 41bf95390b6..4b358afdd02 100644 --- a/protocols/gossipsub/src/behaviour.rs +++ b/protocols/gossipsub/src/behaviour.rs @@ -178,7 +178,7 @@ impl From for PublishConfig { match authenticity { MessageAuthenticity::Signed(keypair) => { let public_key = keypair.public(); - let key_enc = public_key.try_to_protobuf_encoding().unwrap(); + let key_enc = public_key.to_protobuf_encoding(); let key = if 
key_enc.len() <= 42 { // The public key can be inlined in [`rpc_proto::proto::::Message::from`], so we don't include it // specifically in the [`rpc_proto::proto::Message::key`] field. diff --git a/protocols/identify/src/protocol.rs b/protocols/identify/src/protocol.rs index 13b818757cf..25493ac14bf 100644 --- a/protocols/identify/src/protocol.rs +++ b/protocols/identify/src/protocol.rs @@ -169,7 +169,7 @@ where .map(|addr| addr.to_vec()) .collect(); - let pubkey_bytes = info.public_key.try_to_protobuf_encoding().unwrap(); + let pubkey_bytes = info.public_key.to_protobuf_encoding(); let message = proto::Identify { agentVersion: Some(info.agent_version), @@ -386,7 +386,7 @@ mod tests { publicKey: Some( identity::Keypair::generate_ed25519() .public() - .try_to_protobuf_encoding().unwrap(), + .to_protobuf_encoding(), ), }; diff --git a/transports/noise/src/io/handshake.rs b/transports/noise/src/io/handshake.rs index b96094b77e3..ce478297876 100644 --- a/transports/noise/src/io/handshake.rs +++ b/transports/noise/src/io/handshake.rs @@ -236,7 +236,7 @@ where T: AsyncWrite + Unpin, { let mut pb = proto::NoiseHandshakePayload { - identity_key: state.identity.public.try_to_protobuf_encoding().unwrap(), + identity_key: state.identity.public.to_protobuf_encoding(), ..Default::default() }; diff --git a/transports/plaintext/src/handshake.rs b/transports/plaintext/src/handshake.rs index 70208d084ba..b5d931a3f38 100644 --- a/transports/plaintext/src/handshake.rs +++ b/transports/plaintext/src/handshake.rs @@ -54,7 +54,7 @@ impl HandshakeContext { fn new(config: PlainText2Config) -> Self { let exchange = Exchange { id: Some(config.local_public_key.to_peer_id().to_bytes()), - pubkey: Some(config.local_public_key.try_to_protobuf_encoding().unwrap()), + pubkey: Some(config.local_public_key.to_protobuf_encoding()), }; let mut buf = Vec::with_capacity(exchange.get_size()); let mut writer = Writer::new(&mut buf); diff --git a/transports/tls/src/certificate.rs 
b/transports/tls/src/certificate.rs index 317c7c4474a..70e0b76750d 100644 --- a/transports/tls/src/certificate.rs +++ b/transports/tls/src/certificate.rs @@ -215,7 +215,7 @@ fn make_libp2p_extension( // signature OCTET STRING // } let extension_content = { - let serialized_pubkey = identity_keypair.public().try_to_protobuf_encoding().unwrap(); + let serialized_pubkey = identity_keypair.public().to_protobuf_encoding(); yasna::encode_der(&(serialized_pubkey, signature)) }; From f4a764d666d015546830b1ba61fa4918e6cf38a2 Mon Sep 17 00:00:00 2001 From: drHuangMHT Date: Wed, 29 Mar 2023 20:19:34 +0800 Subject: [PATCH 21/83] revert renamed method and deprecate instead --- identity/src/ecdsa.rs | 43 ++++++++++++++++++++++++++++++++++++--- identity/src/ed25519.rs | 43 +++++++++++++++++++++++++++++++++++++++ identity/src/keypair.rs | 17 ++++++++++++++++ identity/src/rsa.rs | 35 +++++++++++++++++++++++++++++-- identity/src/secp256k1.rs | 31 +++++++++++++++++++++++++++- 5 files changed, 163 insertions(+), 6 deletions(-) diff --git a/identity/src/ecdsa.rs b/identity/src/ecdsa.rs index e08e24dea31..28e9f7541a8 100644 --- a/identity/src/ecdsa.rs +++ b/identity/src/ecdsa.rs @@ -114,8 +114,19 @@ impl SecretKey { self.0.to_bytes().to_vec() } + /// Decode a secret key from a byte buffer. + #[deprecated( + since = "0.2.0", + note = "This method name is inaccurate, use `SecretKey::try_from_bytes` instead" + )] + pub fn from_bytes(buf: &[u8]) -> Result { + SigningKey::from_bytes(buf) + .map_err(|err| DecodingError::failed_to_parse("ecdsa p256 secret key", err)) + .map(SecretKey) + } + /// Try to parse a secret key from a byte buffer. - pub fn try_from_bytes(buf: impl AsRef<[u8]>) -> Result { + pub fn try_from_bytes(buf: impl AsRef<[u8]>) -> Result { SigningKey::from_bytes(buf.as_ref()) .map_err(|err| DecodingError::failed_to_parse("ecdsa p256 secret key", err)) .map(SecretKey) @@ -143,7 +154,21 @@ impl PublicKey { } /// Decode a public key from a byte buffer without compression. 
- pub fn try_from_bytes(k: &[u8]) -> Result { + #[deprecated( + since = "0.2.0", + note = "This method name is inaccurate, use `PublicKey::try_from_bytes` instead." + )] + pub fn from_bytes(k: &[u8]) -> Result { + let enc_pt = EncodedPoint::from_bytes(k) + .map_err(|e| DecodingError::failed_to_parse("ecdsa p256 encoded point", e))?; + + VerifyingKey::from_encoded_point(&enc_pt) + .map_err(|err| DecodingError::failed_to_parse("ecdsa p256 public key", err)) + .map(PublicKey) + } + + /// Decode a public key from a byte buffer without compression. + pub fn try_from_bytes(k: impl AsRef<[u8]>) -> Result { let enc_pt = EncodedPoint::from_bytes(k) .map_err(|e| DecodingError::failed_to_parse("ecdsa p256 encoded point", e))?; @@ -164,13 +189,25 @@ impl PublicKey { } /// Decode a public key into a DER encoded byte buffer as defined by SEC1 standard. - pub fn try_decode_der(k: &[u8]) -> Result { + #[deprecated( + since = "0.2.0", + note = "This method name is inaccurate, use `PublicKey::try_decode_der` instead." + )] + pub fn decode_der(k: &[u8]) -> Result { let buf = Self::del_asn1_header(k).ok_or_else(|| { DecodingError::failed_to_parse::("ASN.1-encoded ecdsa p256 public key", None) })?; Self::try_from_bytes(buf) } + /// Decode a public key into a DER encoded byte buffer as defined by SEC1 standard. 
+ pub fn try_decode_der(k: impl AsRef<[u8]>) -> Result { + let buf = Self::del_asn1_header(k.as_ref()).ok_or_else(|| { + DecodingError::failed_to_parse::("ASN.1-encoded ecdsa p256 public key", None) + })?; + Self::try_from_bytes(buf) + } + // ecPublicKey (ANSI X9.62 public key type) OID: 1.2.840.10045.2.1 const EC_PUBLIC_KEY_OID: [u8; 9] = [0x06, 0x07, 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x02, 0x01]; // secp256r1 OID: 1.2.840.10045.3.1.7 diff --git a/identity/src/ed25519.rs b/identity/src/ed25519.rs index caac7be37a2..4c7c4b9bd6d 100644 --- a/identity/src/ed25519.rs +++ b/identity/src/ed25519.rs @@ -45,6 +45,23 @@ impl Keypair { self.0.to_bytes() } + /// Decode a keypair from the [binary format](https://datatracker.ietf.org/doc/html/rfc8032#section-5.1.5) + /// produced by [`Keypair::encode`], zeroing the input on success. + /// + /// Note that this binary format is the same as `ed25519_dalek`'s and `ed25519_zebra`'s. + #[deprecated( + since = "0.2.0", + note = "This method name is inaccurate, use `Keypair::try_decode` instead." + )] + pub fn decode(kp: &mut [u8]) -> Result { + ed25519::Keypair::from_bytes(kp) + .map(|k| { + kp.zeroize(); + Keypair(k) + }) + .map_err(|e| DecodingError::failed_to_parse("Ed25519 keypair", e)) + } + /// Try to decode a keypair from the [binary format](https://datatracker.ietf.org/doc/html/rfc8032#section-5.1.5) /// produced by [`Keypair::encode`], zeroing the input on success. /// @@ -166,6 +183,17 @@ impl PublicKey { self.0.to_bytes() } + /// Decode a public key from a byte array as produced by `encode`. + #[deprecated( + since = "0.2.0", + note = "This method name is inaccurate, use `PublicKey::try_decode` instead." + )] + pub fn decode(k: &[u8]) -> Result { + ed25519::PublicKey::from_bytes(k) + .map_err(|e| DecodingError::failed_to_parse("Ed25519 public key", e)) + .map(PublicKey) + } + /// Try to decode a public key from a byte array as produced by `encode`. 
pub fn try_decode(k: &[u8]) -> Result { ed25519::PublicKey::from_bytes(k) @@ -210,6 +238,21 @@ impl SecretKey { ) } + /// Create an Ed25519 secret key from a byte slice, zeroing the input on success. + /// If the bytes do not constitute a valid Ed25519 secret key, an error is + /// returned. + #[deprecated( + since = "0.2.0", + note = "This method name is inaccurate, use `SecretKey::try_from_bytes` instead." + )] + pub fn from_bytes(mut sk_bytes: impl AsMut<[u8]>) -> Result { + let sk_bytes = sk_bytes.as_mut(); + let secret = ed25519::SecretKey::from_bytes(&*sk_bytes) + .map_err(|e| DecodingError::failed_to_parse("Ed25519 secret key", e))?; + sk_bytes.zeroize(); + Ok(SecretKey(secret)) + } + /// Try to create an Ed25519 secret key from a byte slice, zeroing the input on success. /// If the bytes do not constitute a valid Ed25519 secret key, an error is /// returned. diff --git a/identity/src/keypair.rs b/identity/src/keypair.rs index 89278a611b6..2138895a474 100644 --- a/identity/src/keypair.rs +++ b/identity/src/keypair.rs @@ -385,6 +385,23 @@ impl PublicKey { } } + /// Encode the public key into a protobuf structure for storage or + /// exchange with other nodes. + #[deprecated(since = "0.2.0", note = "This method is infallible, use `to_protobuf_encoding` instead.")] + pub fn try_to_protobuf_encoding(&self) -> Result,DecodingError> { + use quick_protobuf::MessageWrite; + + let public_key = proto::PublicKey::from(self); + + let mut buf = Vec::with_capacity(public_key.get_size()); + let mut writer = Writer::new(&mut buf); + public_key + .write_message(&mut writer) + .expect("Encoding to succeed"); + + Ok(buf) + } + /// Encode the public key into a protobuf structure for storage or /// exchange with other nodes. 
pub fn to_protobuf_encoding(&self) -> Vec { diff --git a/identity/src/rsa.rs b/identity/src/rsa.rs index df45077177c..c5de84bee79 100644 --- a/identity/src/rsa.rs +++ b/identity/src/rsa.rs @@ -45,6 +45,25 @@ impl std::fmt::Debug for Keypair { } impl Keypair { + /// Decode an RSA keypair from a DER-encoded private key in PKCS#8 PrivateKeyInfo + /// format (i.e. unencrypted) as defined in [RFC5208]. + /// + /// [RFC5208]: https://tools.ietf.org/html/rfc5208#section-5 + #[deprecated( + since = "0.2.0", + note = "This method name is inaccurate, use `Keypair::try_from_pkcs8` instead." + )] + pub fn from_pkcs8(der: &mut [u8]) -> Result { + let kp = RsaKeyPair::from_pkcs8(der) + .map_err(|e| DecodingError::failed_to_parse("RSA PKCS#8 PrivateKeyInfo", e))?; + let kp = Keypair { + inner: Arc::new(kp), + raw_key: der.to_vec(), + }; + der.zeroize(); + Ok(kp) + } + /// Get the public key from the keypair. pub fn public(&self) -> PublicKey { PublicKey(self.inner.public_key().as_ref().to_vec()) @@ -67,7 +86,7 @@ impl Keypair { /// format (i.e. unencrypted) as defined in [RFC5208]. /// Decoding from DER-encoded private key bytes is also supported. /// Note that a copy of the undecoded byte array will be stored for encoding. - /// + /// /// [RFC5208]: https://tools.ietf.org/html/rfc5208#section-5 pub fn try_decode(bytes: &mut [u8]) -> Result { let from_pkcs8_error = match RsaKeyPair::from_pkcs8(bytes) { @@ -156,11 +175,23 @@ impl PublicKey { /// Decode an RSA public key from a DER-encoded X.509 SubjectPublicKeyInfo /// structure. See also `encode_x509`. - pub fn try_decode_x509(pk: &[u8]) -> Result { + #[deprecated( + since = "0.2.0", + note = "This method name is inaccurate, use `PublicKey::try_decode_x509` instead." 
+ )] + pub fn decode_x509(pk: &[u8]) -> Result { Asn1SubjectPublicKeyInfo::decode(pk) .map_err(|e| DecodingError::failed_to_parse("RSA X.509", e)) .map(|spki| spki.subjectPublicKey.0) } + + /// Try to decode an RSA public key from a DER-encoded X.509 SubjectPublicKeyInfo + /// structure. See also `encode_x509`. + pub fn try_decode_x509(pk: impl AsRef<[u8]>) -> Result { + Asn1SubjectPublicKeyInfo::decode(pk.as_ref()) + .map_err(|e| DecodingError::failed_to_parse("RSA X.509", e)) + .map(|spki| spki.subjectPublicKey.0) + } } impl fmt::Debug for PublicKey { diff --git a/identity/src/secp256k1.rs b/identity/src/secp256k1.rs index 73070eadfaf..39bc4c0c300 100644 --- a/identity/src/secp256k1.rs +++ b/identity/src/secp256k1.rs @@ -52,7 +52,7 @@ impl Keypair { &self.secret } - /// Try to parse a secret key byte arrray into a secp256k1 `SecretKey` + /// Try to parse a secret key byte arrray into a secp256k1 `SecretKey` /// and promote it into a `Keypair`. pub fn try_from_bytes(bytes: impl AsMut<[u8]>) -> Result { let secret_key = SecretKey::try_from_bytes(bytes)?; @@ -99,6 +99,23 @@ impl SecretKey { SecretKey(libsecp256k1::SecretKey::random(&mut rand::thread_rng())) } + /// Create a secret key from a byte slice, zeroing the slice on success. + /// If the bytes do not constitute a valid Secp256k1 secret key, an + /// error is returned. + /// + /// Note that the expected binary format is the same as `libsecp256k1`'s. + #[deprecated( + since = "0.2.0", + note = "This method name is inaccurate, use `SecretKey::try_from_bytes` instead." + )] + pub fn from_bytes(mut sk: impl AsMut<[u8]>) -> Result { + let sk_bytes = sk.as_mut(); + let secret = libsecp256k1::SecretKey::parse_slice(&*sk_bytes) + .map_err(|e| DecodingError::failed_to_parse("parse secp256k1 secret key", e))?; + sk_bytes.zeroize(); + Ok(SecretKey(secret)) + } + /// Try to parse a secret key from a byte slice, zeroing the slice on success. 
/// If the bytes do not constitute a valid Secp256k1 secret key, an /// error is returned. @@ -221,6 +238,18 @@ impl PublicKey { /// Decode a public key from a byte slice in the the format produced /// by `encode`. + #[deprecated( + since = "0.2.0", + note = "This method name is inaccurate, use `PublicKey::try_decode` instead." + )] + pub fn decode(k: &[u8]) -> Result { + libsecp256k1::PublicKey::parse_slice(k, Some(libsecp256k1::PublicKeyFormat::Compressed)) + .map_err(|e| DecodingError::failed_to_parse("secp256k1 public key", e)) + .map(PublicKey) + } + + /// Try to decode a public key from a byte slice in the the format produced + /// by `encode`. pub fn try_decode(k: &[u8]) -> Result { libsecp256k1::PublicKey::parse_slice(k, Some(libsecp256k1::PublicKeyFormat::Compressed)) .map_err(|e| DecodingError::failed_to_parse("secp256k1 public key", e)) From 8b8bfc5105c3214419ed7d2c0832d8981f67fcd7 Mon Sep 17 00:00:00 2001 From: drHuangMHT Date: Thu, 30 Mar 2023 16:14:58 +0800 Subject: [PATCH 22/83] split 'rsa::Keypair::try_decode' into two separate method split 'rsa::Keypair::try_decode' into 'try_decode_pkcs8' and 'try_decode_der' --- identity/src/keypair.rs | 4 ++-- identity/src/rsa.rs | 53 +++++++++++++++++++---------------------- 2 files changed, 26 insertions(+), 31 deletions(-) diff --git a/identity/src/keypair.rs b/identity/src/keypair.rs index 2138895a474..bacd8de3fa7 100644 --- a/identity/src/keypair.rs +++ b/identity/src/keypair.rs @@ -114,7 +114,7 @@ impl Keypair { #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] pub fn try_from_pkcs8_rsa(pkcs8_der: &mut [u8]) -> Result { #[allow(deprecated)] - rsa::Keypair::try_decode(pkcs8_der).map(Keypair::Rsa) + rsa::Keypair::try_decode_pkcs8(pkcs8_der).map(Keypair::Rsa) } /// Decode a keypair from a DER-encoded Secp256k1 secret key in an ECPrivateKey @@ -228,7 +228,7 @@ impl Keypair { } proto::KeyType::RSA => { #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] - return rsa::Keypair::try_decode(&mut 
private_key.Data).map(Keypair::Rsa); + return rsa::Keypair::try_decode_pkcs8(&mut private_key.Data).map(Keypair::Rsa); Err(DecodingError::missing_feature("rsa")) } proto::KeyType::Secp256k1 => { diff --git a/identity/src/rsa.rs b/identity/src/rsa.rs index c5de84bee79..729816f21e6 100644 --- a/identity/src/rsa.rs +++ b/identity/src/rsa.rs @@ -51,7 +51,7 @@ impl Keypair { /// [RFC5208]: https://tools.ietf.org/html/rfc5208#section-5 #[deprecated( since = "0.2.0", - note = "This method name is inaccurate, use `Keypair::try_from_pkcs8` instead." + note = "This method name is inaccurate, use `Keypair::try_decode_pkcs8` instead." )] pub fn from_pkcs8(der: &mut [u8]) -> Result { let kp = RsaKeyPair::from_pkcs8(der) @@ -82,45 +82,40 @@ impl Keypair { } } - /// Decode an RSA keypair from a DER-encoded private key in PKCS#8 PrivateKeyInfo - /// format (i.e. unencrypted) as defined in [RFC5208]. - /// Decoding from DER-encoded private key bytes is also supported. + /// Try to decode an RSA keypair from a DER-encoded private key. /// Note that a copy of the undecoded byte array will be stored for encoding. - /// - /// [RFC5208]: https://tools.ietf.org/html/rfc5208#section-5 - pub fn try_decode(bytes: &mut [u8]) -> Result { - let from_pkcs8_error = match RsaKeyPair::from_pkcs8(bytes) { + pub fn try_decode_der(bytes: &mut [u8]) -> Result { + match RsaKeyPair::from_der(bytes) { Ok(kp) => { let kp = Self { inner: Arc::new(kp), raw_key: bytes.to_vec(), }; bytes.zeroize(); - return Ok(kp); + Ok(kp) } - Err(e) => e, - }; - let from_der_error = match RsaKeyPair::from_der(bytes) { + Err(e) => Err(DecodingError::failed_to_parse("RSA", e)), + } + } + + /// Try to decode an RSA keypair from a DER-encoded private key in PKCS#8 PrivateKeyInfo + /// format (i.e. unencrypted) as defined in [RFC5208]. + /// Decoding from DER-encoded private key bytes is also supported. + /// Note that a copy of the undecoded byte array will be stored for encoding. 
+ /// + /// [RFC5208]: https://tools.ietf.org/html/rfc5208#section-5 + pub fn try_decode_pkcs8(bytes: &mut [u8]) -> Result { + match RsaKeyPair::from_pkcs8(bytes) { Ok(kp) => { let kp = Self { inner: Arc::new(kp), raw_key: bytes.to_vec(), }; bytes.zeroize(); - return Ok(kp); + Ok(kp) } - Err(e) => e, - }; - Err(DecodingError::failed_to_parse( - "Ed25519 keypair", - std::io::Error::new( - std::io::ErrorKind::InvalidInput, - format!( - "Cannot parse key from pkcs8 encoding or der encoding: {}\n{}", - from_pkcs8_error, from_der_error - ), - ), - )) + Err(e) => Err(DecodingError::failed_to_parse("RSA", e)), + } } /// Get the byte array used to parse the keypair from. @@ -395,15 +390,15 @@ mod tests { impl Arbitrary for SomeKeypair { fn arbitrary(g: &mut Gen) -> SomeKeypair { let mut key = g.choose(&[KEY1, KEY2, KEY3]).unwrap().to_vec(); - SomeKeypair(Keypair::try_decode(&mut key).unwrap()) + SomeKeypair(Keypair::try_decode_pkcs8(&mut key).unwrap()) } } #[test] fn rsa_from_pkcs8() { - assert!(Keypair::try_decode(&mut KEY1.to_vec()).is_ok()); - assert!(Keypair::try_decode(&mut KEY2.to_vec()).is_ok()); - assert!(Keypair::try_decode(&mut KEY3.to_vec()).is_ok()); + assert!(Keypair::try_decode_pkcs8(&mut KEY1.to_vec()).is_ok()); + assert!(Keypair::try_decode_pkcs8(&mut KEY2.to_vec()).is_ok()); + assert!(Keypair::try_decode_pkcs8(&mut KEY3.to_vec()).is_ok()); } #[test] From 921fc4ac483b2ae7652d2deba06db664ed497f4d Mon Sep 17 00:00:00 2001 From: drHuangMHT Date: Thu, 30 Mar 2023 16:31:30 +0800 Subject: [PATCH 23/83] introduce 'ConversionError' for associated 'Error' type in 'TryInto' --- identity/src/error.rs | 32 ++++++++++++++++- identity/src/keypair.rs | 77 +++++++++++++++++++++++++---------------- identity/src/lib.rs | 19 ++++++++-- 3 files changed, 96 insertions(+), 32 deletions(-) diff --git a/identity/src/error.rs b/identity/src/error.rs index 83b0f1c8030..51877dd8ae5 100644 --- a/identity/src/error.rs +++ b/identity/src/error.rs @@ -21,7 +21,9 @@ //! 
Errors during identity key operations. use std::error::Error; -use std::fmt; +use std::fmt::{self, Display}; + +use crate::KeyType; /// An error during decoding of key material. #[derive(Debug)] @@ -118,3 +120,31 @@ impl Error for SigningError { self.source.as_ref().map(|s| &**s as &dyn Error) } } + +#[derive(Debug)] +pub struct ConversionError { + actual: KeyType, +} + +impl ConversionError { + pub fn new(actual: KeyType) -> ConversionError { + ConversionError { + actual, + } + } +} + +impl Display for ConversionError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str(&format!( + "Cannot convert to the given type, the actual key type inside is {}", + self.actual + )) + } +} + +impl Error for ConversionError { + fn source(&self) -> Option<&(dyn Error + 'static)> { + None + } +} diff --git a/identity/src/keypair.rs b/identity/src/keypair.rs index bacd8de3fa7..b3afdbca9ec 100644 --- a/identity/src/keypair.rs +++ b/identity/src/keypair.rs @@ -18,8 +18,8 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::error::{DecodingError, SigningError}; -use crate::proto; +use crate::error::{ConversionError, DecodingError, SigningError}; +use crate::{proto, KeyType}; use quick_protobuf::{BytesReader, Writer}; use std::convert::TryFrom; @@ -280,52 +280,60 @@ impl From for Keypair { #[cfg(feature = "ed25519")] impl TryInto for Keypair { - type Error = (); + type Error = ConversionError; fn try_into(self) -> Result { + #[allow(deprecated)] match self { - #[allow(deprecated)] Keypair::Ed25519(inner) => Ok(inner), - _ => Err(()), + Keypair::Rsa(_) => Err(ConversionError::new(KeyType::RSA)), + Keypair::Secp256k1(_) => Err(ConversionError::new(KeyType::Secp256k1)), + Keypair::Ecdsa(_) => Err(ConversionError::new(KeyType::Ecdsa)), } } } #[cfg(feature = "ecdsa")] impl TryInto for Keypair { - type Error = (); + type Error = ConversionError; fn try_into(self) -> Result { + #[allow(deprecated)] match self { - #[allow(deprecated)] Keypair::Ecdsa(inner) => Ok(inner), - _ => Err(()), + Keypair::Ed25519(_) => Err(ConversionError::new(KeyType::Ed25519)), + Keypair::Rsa(_) => Err(ConversionError::new(KeyType::RSA)), + Keypair::Secp256k1(_) => Err(ConversionError::new(KeyType::Secp256k1)), } } } #[cfg(feature = "secp256k1")] impl TryInto for Keypair { - type Error = (); + type Error = ConversionError; fn try_into(self) -> Result { + #[allow(deprecated)] match self { - #[allow(deprecated)] Keypair::Secp256k1(inner) => Ok(inner), - _ => Err(()), + Keypair::Ed25519(_) => Err(ConversionError::new(KeyType::Ed25519)), + Keypair::Rsa(_) => Err(ConversionError::new(KeyType::RSA)), + Keypair::Ecdsa(_) => Err(ConversionError::new(KeyType::Ecdsa)), } } } #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] impl TryInto for Keypair { - type Error = (); + type Error = ConversionError; fn try_into(self) -> Result { + #[allow(deprecated)] match self { - #[allow(deprecated)] Keypair::Rsa(inner) => Ok(inner), - _ => Err(()), + Keypair::Ed25519(_) => Err(ConversionError::new(KeyType::Ed25519)), + 
Keypair::Secp256k1(_) => Err(ConversionError::new(KeyType::Secp256k1)), + Keypair::Ecdsa(_) => Err(ConversionError::new(KeyType::Ecdsa)), } } } @@ -387,8 +395,11 @@ impl PublicKey { /// Encode the public key into a protobuf structure for storage or /// exchange with other nodes. - #[deprecated(since = "0.2.0", note = "This method is infallible, use `to_protobuf_encoding` instead.")] - pub fn try_to_protobuf_encoding(&self) -> Result,DecodingError> { + #[deprecated( + since = "0.2.0", + note = "This method is infallible, use `to_protobuf_encoding` instead." + )] + pub fn try_to_protobuf_encoding(&self) -> Result, DecodingError> { use quick_protobuf::MessageWrite; let public_key = proto::PublicKey::from(self); @@ -401,7 +412,7 @@ impl PublicKey { Ok(buf) } - + /// Encode the public key into a protobuf structure for storage or /// exchange with other nodes. pub fn to_protobuf_encoding(&self) -> Vec { @@ -475,52 +486,60 @@ impl TryFrom for PublicKey { #[cfg(feature = "ed25519")] impl TryInto for PublicKey { - type Error = (); + type Error = ConversionError; fn try_into(self) -> Result { + #[allow(deprecated)] match self { - #[allow(deprecated)] PublicKey::Ed25519(inner) => Ok(inner), - _ => Err(()), + PublicKey::Rsa(_) => Err(ConversionError::new(KeyType::RSA)), + PublicKey::Secp256k1(_) => Err(ConversionError::new(KeyType::Secp256k1)), + PublicKey::Ecdsa(_) => Err(ConversionError::new(KeyType::Ecdsa)), } } } #[cfg(feature = "ecdsa")] impl TryInto for PublicKey { - type Error = (); + type Error = ConversionError; fn try_into(self) -> Result { + #[allow(deprecated)] match self { - #[allow(deprecated)] PublicKey::Ecdsa(inner) => Ok(inner), - _ => Err(()), + PublicKey::Ed25519(_) => Err(ConversionError::new(KeyType::Ed25519)), + PublicKey::Rsa(_) => Err(ConversionError::new(KeyType::RSA)), + PublicKey::Secp256k1(_) => Err(ConversionError::new(KeyType::Secp256k1)), } } } #[cfg(feature = "secp256k1")] impl TryInto for PublicKey { - type Error = (); + type Error = 
ConversionError; fn try_into(self) -> Result { + #[allow(deprecated)] match self { - #[allow(deprecated)] PublicKey::Secp256k1(inner) => Ok(inner), - _ => Err(()), + PublicKey::Ed25519(_) => Err(ConversionError::new(KeyType::Ed25519)), + PublicKey::Rsa(_) => Err(ConversionError::new(KeyType::RSA)), + PublicKey::Ecdsa(_) => Err(ConversionError::new(KeyType::Ecdsa)), } } } #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] impl TryInto for PublicKey { - type Error = (); + type Error = ConversionError; fn try_into(self) -> Result { + #[allow(deprecated)] match self { - #[allow(deprecated)] PublicKey::Rsa(inner) => Ok(inner), - _ => Err(()), + PublicKey::Ed25519(_) => Err(ConversionError::new(KeyType::Ed25519)), + PublicKey::Secp256k1(_) => Err(ConversionError::new(KeyType::Secp256k1)), + PublicKey::Ecdsa(_) => Err(ConversionError::new(KeyType::Ecdsa)), } } } diff --git a/identity/src/lib.rs b/identity/src/lib.rs index fcd4fc11f16..0cf5cb19c7f 100644 --- a/identity/src/lib.rs +++ b/identity/src/lib.rs @@ -115,12 +115,16 @@ impl From<&PublicKey> for proto::PublicKey { } } +use std::fmt::Display; + pub use error::{DecodingError, SigningError}; pub use keypair::{Keypair, PublicKey}; #[cfg(feature = "peerid")] pub use peer_id::{ParseError, PeerId}; -pub enum KeyType{ +#[derive(Debug, PartialEq, Eq)] +/// The type of key a `KeyPair` is holding. 
+pub enum KeyType { #[cfg(feature = "ed25519")] Ed25519, #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] @@ -128,5 +132,16 @@ pub enum KeyType{ #[cfg(feature = "secp256k1")] Secp256k1, #[cfg(feature = "ecdsa")] - Ecdsa + Ecdsa, +} + +impl Display for KeyType{ + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self{ + KeyType::Ed25519 => f.write_str("Ed25519"), + KeyType::RSA => f.write_str("RSA"), + KeyType::Secp256k1 => f.write_str("Secp256k1"), + KeyType::Ecdsa => f.write_str("Ecdsa"), + } + } } From b555d7fecfaa295cea8ae250769ec5d931c5f829 Mon Sep 17 00:00:00 2001 From: drHuangMHT Date: Tue, 4 Apr 2023 19:22:34 +0800 Subject: [PATCH 24/83] add 'try' prefix to 'into_[keytype]' for 'libp2p_identity::Keypair' --- identity/src/keypair.rs | 43 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) diff --git a/identity/src/keypair.rs b/identity/src/keypair.rs index b3afdbca9ec..bda0a8fecfc 100644 --- a/identity/src/keypair.rs +++ b/identity/src/keypair.rs @@ -107,6 +107,49 @@ impl Keypair { Keypair::Ecdsa(ecdsa::Keypair::generate()) } + pub fn into_ed25519(self) -> Result { + #[allow(deprecated)] + match self { + Keypair::Ecdsa(_) => Err(ConversionError::new(KeyType::Ecdsa)), + Keypair::Ed25519(inner) => Ok(inner), + Keypair::Rsa(_) => Err(ConversionError::new(KeyType::RSA)), + Keypair::Secp256k1(_) => Err(ConversionError::new(KeyType::Secp256k1)), + } + } + + #[cfg(feature = "secp256k1")] + pub fn try_into_secp256k1(self) -> Result { + #[allow(deprecated)] + match self { + Keypair::Ecdsa(_) => Err(ConversionError::new(KeyType::Ecdsa)), + Keypair::Ed25519(_) => Err(ConversionError::new(KeyType::Ed25519)), + Keypair::Rsa(_) => Err(ConversionError::new(KeyType::RSA)), + Keypair::Secp256k1(inner) => Ok(inner), + } + } + + #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] + pub fn try_into_rsa(self) -> Result { + #[allow(deprecated)] + match self { + Keypair::Ecdsa(_) => 
Err(ConversionError::new(KeyType::Ecdsa)), + Keypair::Ed25519(_) => Err(ConversionError::new(KeyType::Ed25519)), + Keypair::Rsa(inner) => Ok(inner), + Keypair::Secp256k1(_) => Err(ConversionError::new(KeyType::Secp256k1)), + } + } + + #[cfg(feature = "ecdsa")] + pub fn try_into_ecdsa(self) -> Result { + #[allow(deprecated)] + match self { + Keypair::Ecdsa(inner) => Ok(inner), + Keypair::Ed25519(_) => Err(ConversionError::new(KeyType::Ed25519)), + Keypair::Rsa(_) => Err(ConversionError::new(KeyType::RSA)), + Keypair::Secp256k1(_) => Err(ConversionError::new(KeyType::Secp256k1)), + } + } + /// Decode an keypair from a DER-encoded secret key in PKCS#8 PrivateKeyInfo /// format (i.e. unencrypted) as defined in [RFC5208]. /// From d65ce3e797daca912f1dc291a7dea29733564f43 Mon Sep 17 00:00:00 2001 From: drHuangMHT Date: Tue, 4 Apr 2023 19:49:28 +0800 Subject: [PATCH 25/83] revert renaming of method in 'libp2p_identity::Keypair' --- identity/src/keypair.rs | 85 ++++++++++++++++++++++++++++++++++------- 1 file changed, 71 insertions(+), 14 deletions(-) diff --git a/identity/src/keypair.rs b/identity/src/keypair.rs index bda0a8fecfc..addd368eaec 100644 --- a/identity/src/keypair.rs +++ b/identity/src/keypair.rs @@ -59,28 +59,28 @@ pub enum Keypair { #[cfg(feature = "ed25519")] #[deprecated( since = "0.1.0", - note = "This enum will be made opaque in the future, use `Keypair::try_into::()` instead." + note = "This enum will be made opaque in the future, use `Keypair::try_into_ed25519` instead." )] Ed25519(ed25519::Keypair), /// An RSA keypair. #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] #[deprecated( since = "0.1.0", - note = "This enum will be made opaque in the future, use `Keypair::try_into::()` instead." + note = "This enum will be made opaque in the future, use `Keypair::try_into_rsa` instead." )] Rsa(rsa::Keypair), /// A Secp256k1 keypair. 
#[cfg(feature = "secp256k1")] #[deprecated( since = "0.1.0", - note = "This enum will be made opaque in the future, use `Keypair::try_into::()` instead." + note = "This enum will be made opaque in the future, use `Keypair::try_into_secp256k1` instead." )] Secp256k1(secp256k1::Keypair), /// An ECDSA keypair. #[cfg(feature = "ecdsa")] #[deprecated( since = "0.1.0", - note = "This enum will be made opaque in the future, use `Keypair::try_into::()` instead." + note = "This enum will be made opaque in the future, use `Keypair::try_into_ecdsa` instead." )] Ecdsa(ecdsa::Keypair), } @@ -107,7 +107,8 @@ impl Keypair { Keypair::Ecdsa(ecdsa::Keypair::generate()) } - pub fn into_ed25519(self) -> Result { + #[cfg(feature = "ed25519")] + pub fn try_into_ed25519(self) -> Result { #[allow(deprecated)] match self { Keypair::Ecdsa(_) => Err(ConversionError::new(KeyType::Ecdsa)), @@ -155,11 +156,40 @@ impl Keypair { /// /// [RFC5208]: https://tools.ietf.org/html/rfc5208#section-5 #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] + #[deprecated( + since = "0.2.0", + note = "This method name is inaccurate, use `Keypair::try_from_pkcs8_rsa` instead." + )] + pub fn rsa_from_pkcs8(pkcs8_der: &mut [u8]) -> Result { + #[allow(deprecated)] + rsa::Keypair::try_decode_pkcs8(pkcs8_der).map(Keypair::Rsa) + } + + /// Try to decode an keypair from a DER-encoded secret key in PKCS#8 PrivateKeyInfo + /// format (i.e. unencrypted) as defined in [RFC5208]. + /// + /// [RFC5208]: https://tools.ietf.org/html/rfc5208#section-5 + #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] pub fn try_from_pkcs8_rsa(pkcs8_der: &mut [u8]) -> Result { #[allow(deprecated)] rsa::Keypair::try_decode_pkcs8(pkcs8_der).map(Keypair::Rsa) } + /// Decode a keypair from a DER-encoded Secp256k1 secret key in an ECPrivateKey + /// structure as defined in [RFC5915]. 
+ /// + /// [RFC5915]: https://tools.ietf.org/html/rfc5915 + #[cfg(feature = "secp256k1")] + #[deprecated( + since = "0.2.0", + note = "This method name is inaccurate, use `Keypair::try_from_der_secp256k1` instead." + )] + pub fn secp256k1_from_der(der: &mut [u8]) -> Result { + #[allow(deprecated)] + secp256k1::SecretKey::try_decode_der(der) + .map(|sk| Keypair::Secp256k1(secp256k1::Keypair::from(sk))) + } + /// Decode a keypair from a DER-encoded Secp256k1 secret key in an ECPrivateKey /// structure as defined in [RFC5915]. /// @@ -171,6 +201,22 @@ impl Keypair { .map(|sk| Keypair::Secp256k1(secp256k1::Keypair::from(sk))) } + /// Try to decode a keypair from the [binary format](https://datatracker.ietf.org/doc/html/rfc8032#section-5.1.5) + /// produced by [`Keypair::encode`], zeroing the input on success. + /// + /// Note that this binary format is the same as `ed25519_dalek`'s and `ed25519_zebra`'s. + #[cfg(feature = "ed25519")] + #[deprecated( + since = "0.2.0", + note = "This method name is inaccurate, use `Keypair::try_from_bytes_ed25519` instead." + )] + pub fn ed25519_from_bytes(bytes: impl AsMut<[u8]>) -> Result { + #[allow(deprecated)] + Ok(Keypair::Ed25519(ed25519::Keypair::from( + ed25519::SecretKey::try_from_bytes(bytes)?, + ))) + } + /// Try to decode a keypair from the [binary format](https://datatracker.ietf.org/doc/html/rfc8032#section-5.1.5) /// produced by [`Keypair::encode`], zeroing the input on success. /// @@ -253,6 +299,15 @@ impl Keypair { } /// Decode a private key from a protobuf structure and parse it as a [`Keypair`]. + #[deprecated( + since = "0.2.0", + note = "This method name is inaccurate, use `Keypair::try_from_protobuf_encoding` instead." + )] + pub fn from_protobuf_encoding(bytes: &[u8]) -> Result { + Self::try_from_protobuf_encoding(bytes) + } + + /// Try to decode a private key from a protobuf structure and parse it as a [`Keypair`]. 
#[cfg_attr(not(feature = "ed25519"), allow(unused_mut))] pub fn try_from_protobuf_encoding(bytes: &[u8]) -> Result { use quick_protobuf::MessageRead; @@ -271,7 +326,9 @@ impl Keypair { } proto::KeyType::RSA => { #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] - return rsa::Keypair::try_decode_pkcs8(&mut private_key.Data).map(Keypair::Rsa); + return rsa::Keypair::try_decode_pkcs8(&mut private_key.Data) + .or(rsa::Keypair::try_decode_der(&mut private_key.Data)) + .map(Keypair::Rsa); Err(DecodingError::missing_feature("rsa")) } proto::KeyType::Secp256k1 => { @@ -289,14 +346,6 @@ impl Keypair { } } -#[cfg(feature = "ed25519")] -impl From for Keypair { - fn from(kp: ed25519::Keypair) -> Self { - #[allow(deprecated)] - Keypair::Ed25519(kp) - } -} - #[cfg(feature = "ecdsa")] impl From for Keypair { fn from(kp: ecdsa::Keypair) -> Self { @@ -305,6 +354,14 @@ impl From for Keypair { } } +#[cfg(feature = "ed25519")] +impl From for Keypair { + fn from(kp: ed25519::Keypair) -> Self { + #[allow(deprecated)] + Keypair::Ed25519(kp) + } +} + #[cfg(feature = "secp256k1")] impl From for Keypair { fn from(kp: secp256k1::Keypair) -> Self { From c587b73c028516b384b28a297abc3b5cfd8b5871 Mon Sep 17 00:00:00 2001 From: drHuangMHT Date: Tue, 4 Apr 2023 20:01:40 +0800 Subject: [PATCH 26/83] recommend key promotion for deprecated methods in 'libp2p_identity::Keypair' --- identity/src/keypair.rs | 43 +++++------------------------------------ 1 file changed, 5 insertions(+), 38 deletions(-) diff --git a/identity/src/keypair.rs b/identity/src/keypair.rs index addd368eaec..2fefa60d873 100644 --- a/identity/src/keypair.rs +++ b/identity/src/keypair.rs @@ -158,23 +158,13 @@ impl Keypair { #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] #[deprecated( since = "0.2.0", - note = "This method name is inaccurate, use `Keypair::try_from_pkcs8_rsa` instead." 
+ note = "Deprecated, use `rsa::Keypair::try_decode_pkcs8` or `rsa::Keypair::try_decode_der` and promote it into `Keypair` instead." )] pub fn rsa_from_pkcs8(pkcs8_der: &mut [u8]) -> Result { #[allow(deprecated)] rsa::Keypair::try_decode_pkcs8(pkcs8_der).map(Keypair::Rsa) } - /// Try to decode an keypair from a DER-encoded secret key in PKCS#8 PrivateKeyInfo - /// format (i.e. unencrypted) as defined in [RFC5208]. - /// - /// [RFC5208]: https://tools.ietf.org/html/rfc5208#section-5 - #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] - pub fn try_from_pkcs8_rsa(pkcs8_der: &mut [u8]) -> Result { - #[allow(deprecated)] - rsa::Keypair::try_decode_pkcs8(pkcs8_der).map(Keypair::Rsa) - } - /// Decode a keypair from a DER-encoded Secp256k1 secret key in an ECPrivateKey /// structure as defined in [RFC5915]. /// @@ -182,7 +172,7 @@ impl Keypair { #[cfg(feature = "secp256k1")] #[deprecated( since = "0.2.0", - note = "This method name is inaccurate, use `Keypair::try_from_der_secp256k1` instead." + note = "Deprecated, use `secp256k1::Keypair::try_from_bytes` and promote it into `Keypair` instead." )] pub fn secp256k1_from_der(der: &mut [u8]) -> Result { #[allow(deprecated)] @@ -190,25 +180,14 @@ impl Keypair { .map(|sk| Keypair::Secp256k1(secp256k1::Keypair::from(sk))) } - /// Decode a keypair from a DER-encoded Secp256k1 secret key in an ECPrivateKey - /// structure as defined in [RFC5915]. - /// - /// [RFC5915]: https://tools.ietf.org/html/rfc5915 - #[cfg(feature = "secp256k1")] - pub fn try_from_der_secp256k1(der: &mut [u8]) -> Result { - #[allow(deprecated)] - secp256k1::SecretKey::try_decode_der(der) - .map(|sk| Keypair::Secp256k1(secp256k1::Keypair::from(sk))) - } - - /// Try to decode a keypair from the [binary format](https://datatracker.ietf.org/doc/html/rfc8032#section-5.1.5) - /// produced by [`Keypair::encode`], zeroing the input on success. 
+ /// Decode a keypair from the [binary format](https://datatracker.ietf.org/doc/html/rfc8032#section-5.1.5) + /// produced by `ed25519::Keypair::encode`, zeroing the input on success. /// /// Note that this binary format is the same as `ed25519_dalek`'s and `ed25519_zebra`'s. #[cfg(feature = "ed25519")] #[deprecated( since = "0.2.0", - note = "This method name is inaccurate, use `Keypair::try_from_bytes_ed25519` instead." + note = "Deprecated, use `ed25519::Keypair::try_decode` and promote it into `Keypair` instead." )] pub fn ed25519_from_bytes(bytes: impl AsMut<[u8]>) -> Result { #[allow(deprecated)] @@ -217,18 +196,6 @@ impl Keypair { ))) } - /// Try to decode a keypair from the [binary format](https://datatracker.ietf.org/doc/html/rfc8032#section-5.1.5) - /// produced by [`Keypair::encode`], zeroing the input on success. - /// - /// Note that this binary format is the same as `ed25519_dalek`'s and `ed25519_zebra`'s. - #[cfg(feature = "ed25519")] - pub fn try_from_bytes_ed25519(bytes: impl AsMut<[u8]>) -> Result { - #[allow(deprecated)] - Ok(Keypair::Ed25519(ed25519::Keypair::from( - ed25519::SecretKey::try_from_bytes(bytes)?, - ))) - } - /// Sign a message using the private key of this keypair, producing /// a signature that can be verified using the corresponding public key. 
pub fn sign(&self, msg: &[u8]) -> Result, SigningError> { From 2a77a267fff5846e91d805046ffcd805b3fabc82 Mon Sep 17 00:00:00 2001 From: drHuangMHT Date: Tue, 4 Apr 2023 20:20:39 +0800 Subject: [PATCH 27/83] revert renaming of method in 'libp2p_identity::secp256k1' --- identity/src/secp256k1.rs | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/identity/src/secp256k1.rs b/identity/src/secp256k1.rs index 39bc4c0c300..59908769280 100644 --- a/identity/src/secp256k1.rs +++ b/identity/src/secp256k1.rs @@ -129,6 +129,18 @@ impl SecretKey { Ok(SecretKey(secret)) } + /// Decode a DER-encoded Secp256k1 secret key in an ECPrivateKey + /// structure as defined in [RFC5915], zeroing the input slice on success. + /// + /// [RFC5915]: https://tools.ietf.org/html/rfc5915 + #[deprecated( + since = "0.2.0", + note = "This method name is inaccurate, use `SecretKey::try_decode_der` instead." + )] + pub fn decode_der(der: impl AsMut<[u8]>) -> Result { + Self::try_decode_der(der) + } + /// Try to decode a DER-encoded Secp256k1 secret key in an ECPrivateKey /// structure as defined in [RFC5915], zeroing the input slice on success. /// @@ -157,7 +169,7 @@ impl SecretKey { } /// Returns the raw bytes of the secret key. 
- pub fn encode(&self) -> [u8; 32] { + pub fn to_bytes(&self) -> [u8; 32] { self.0.serialize() } From 7d0ceaacdaf3c0a62a9e20164256fa7316b14bb1 Mon Sep 17 00:00:00 2001 From: drHuangMHT Date: Tue, 4 Apr 2023 20:25:12 +0800 Subject: [PATCH 28/83] renaming 'ConversionError' to 'OtherVariantError' --- identity/src/error.rs | 13 +++--- identity/src/keypair.rs | 100 ++++++++++++++++++++-------------------- 2 files changed, 57 insertions(+), 56 deletions(-) diff --git a/identity/src/error.rs b/identity/src/error.rs index 51877dd8ae5..25188fc36e2 100644 --- a/identity/src/error.rs +++ b/identity/src/error.rs @@ -122,19 +122,20 @@ impl Error for SigningError { } #[derive(Debug)] -pub struct ConversionError { +/// Error produced when trying to convert `libp2p_identity::Keypair` to a more concrete keypair failed. +pub struct OtherVariantError { actual: KeyType, } -impl ConversionError { - pub fn new(actual: KeyType) -> ConversionError { - ConversionError { +impl OtherVariantError { + pub fn new(actual: KeyType) -> OtherVariantError { + OtherVariantError { actual, } } } -impl Display for ConversionError { +impl Display for OtherVariantError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.write_str(&format!( "Cannot convert to the given type, the actual key type inside is {}", @@ -143,7 +144,7 @@ impl Display for ConversionError { } } -impl Error for ConversionError { +impl Error for OtherVariantError { fn source(&self) -> Option<&(dyn Error + 'static)> { None } diff --git a/identity/src/keypair.rs b/identity/src/keypair.rs index 2fefa60d873..58ecbef56e2 100644 --- a/identity/src/keypair.rs +++ b/identity/src/keypair.rs @@ -18,7 +18,7 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::error::{ConversionError, DecodingError, SigningError}; +use crate::error::{OtherVariantError, DecodingError, SigningError}; use crate::{proto, KeyType}; use quick_protobuf::{BytesReader, Writer}; use std::convert::TryFrom; @@ -108,46 +108,46 @@ impl Keypair { } #[cfg(feature = "ed25519")] - pub fn try_into_ed25519(self) -> Result { + pub fn try_into_ed25519(self) -> Result { #[allow(deprecated)] match self { - Keypair::Ecdsa(_) => Err(ConversionError::new(KeyType::Ecdsa)), + Keypair::Ecdsa(_) => Err(OtherVariantError::new(KeyType::Ecdsa)), Keypair::Ed25519(inner) => Ok(inner), - Keypair::Rsa(_) => Err(ConversionError::new(KeyType::RSA)), - Keypair::Secp256k1(_) => Err(ConversionError::new(KeyType::Secp256k1)), + Keypair::Rsa(_) => Err(OtherVariantError::new(KeyType::RSA)), + Keypair::Secp256k1(_) => Err(OtherVariantError::new(KeyType::Secp256k1)), } } #[cfg(feature = "secp256k1")] - pub fn try_into_secp256k1(self) -> Result { + pub fn try_into_secp256k1(self) -> Result { #[allow(deprecated)] match self { - Keypair::Ecdsa(_) => Err(ConversionError::new(KeyType::Ecdsa)), - Keypair::Ed25519(_) => Err(ConversionError::new(KeyType::Ed25519)), - Keypair::Rsa(_) => Err(ConversionError::new(KeyType::RSA)), + Keypair::Ecdsa(_) => Err(OtherVariantError::new(KeyType::Ecdsa)), + Keypair::Ed25519(_) => Err(OtherVariantError::new(KeyType::Ed25519)), + Keypair::Rsa(_) => Err(OtherVariantError::new(KeyType::RSA)), Keypair::Secp256k1(inner) => Ok(inner), } } #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] - pub fn try_into_rsa(self) -> Result { + pub fn try_into_rsa(self) -> Result { #[allow(deprecated)] match self { - Keypair::Ecdsa(_) => Err(ConversionError::new(KeyType::Ecdsa)), - Keypair::Ed25519(_) => Err(ConversionError::new(KeyType::Ed25519)), + Keypair::Ecdsa(_) => Err(OtherVariantError::new(KeyType::Ecdsa)), + Keypair::Ed25519(_) => Err(OtherVariantError::new(KeyType::Ed25519)), Keypair::Rsa(inner) => Ok(inner), - Keypair::Secp256k1(_) => 
Err(ConversionError::new(KeyType::Secp256k1)), + Keypair::Secp256k1(_) => Err(OtherVariantError::new(KeyType::Secp256k1)), } } #[cfg(feature = "ecdsa")] - pub fn try_into_ecdsa(self) -> Result { + pub fn try_into_ecdsa(self) -> Result { #[allow(deprecated)] match self { Keypair::Ecdsa(inner) => Ok(inner), - Keypair::Ed25519(_) => Err(ConversionError::new(KeyType::Ed25519)), - Keypair::Rsa(_) => Err(ConversionError::new(KeyType::RSA)), - Keypair::Secp256k1(_) => Err(ConversionError::new(KeyType::Secp256k1)), + Keypair::Ed25519(_) => Err(OtherVariantError::new(KeyType::Ed25519)), + Keypair::Rsa(_) => Err(OtherVariantError::new(KeyType::RSA)), + Keypair::Secp256k1(_) => Err(OtherVariantError::new(KeyType::Secp256k1)), } } @@ -249,7 +249,7 @@ impl Keypair { #[cfg(feature = "secp256k1")] Self::Secp256k1(data) => proto::PrivateKey { Type: proto::KeyType::Secp256k1, - Data: data.secret().encode().into(), + Data: data.secret().to_bytes().into(), }, #[cfg(feature = "ecdsa")] Self::Ecdsa(data) => proto::PrivateKey { @@ -347,60 +347,60 @@ impl From for Keypair { #[cfg(feature = "ed25519")] impl TryInto for Keypair { - type Error = ConversionError; + type Error = OtherVariantError; fn try_into(self) -> Result { #[allow(deprecated)] match self { Keypair::Ed25519(inner) => Ok(inner), - Keypair::Rsa(_) => Err(ConversionError::new(KeyType::RSA)), - Keypair::Secp256k1(_) => Err(ConversionError::new(KeyType::Secp256k1)), - Keypair::Ecdsa(_) => Err(ConversionError::new(KeyType::Ecdsa)), + Keypair::Rsa(_) => Err(OtherVariantError::new(KeyType::RSA)), + Keypair::Secp256k1(_) => Err(OtherVariantError::new(KeyType::Secp256k1)), + Keypair::Ecdsa(_) => Err(OtherVariantError::new(KeyType::Ecdsa)), } } } #[cfg(feature = "ecdsa")] impl TryInto for Keypair { - type Error = ConversionError; + type Error = OtherVariantError; fn try_into(self) -> Result { #[allow(deprecated)] match self { Keypair::Ecdsa(inner) => Ok(inner), - Keypair::Ed25519(_) => Err(ConversionError::new(KeyType::Ed25519)), - 
Keypair::Rsa(_) => Err(ConversionError::new(KeyType::RSA)), - Keypair::Secp256k1(_) => Err(ConversionError::new(KeyType::Secp256k1)), + Keypair::Ed25519(_) => Err(OtherVariantError::new(KeyType::Ed25519)), + Keypair::Rsa(_) => Err(OtherVariantError::new(KeyType::RSA)), + Keypair::Secp256k1(_) => Err(OtherVariantError::new(KeyType::Secp256k1)), } } } #[cfg(feature = "secp256k1")] impl TryInto for Keypair { - type Error = ConversionError; + type Error = OtherVariantError; fn try_into(self) -> Result { #[allow(deprecated)] match self { Keypair::Secp256k1(inner) => Ok(inner), - Keypair::Ed25519(_) => Err(ConversionError::new(KeyType::Ed25519)), - Keypair::Rsa(_) => Err(ConversionError::new(KeyType::RSA)), - Keypair::Ecdsa(_) => Err(ConversionError::new(KeyType::Ecdsa)), + Keypair::Ed25519(_) => Err(OtherVariantError::new(KeyType::Ed25519)), + Keypair::Rsa(_) => Err(OtherVariantError::new(KeyType::RSA)), + Keypair::Ecdsa(_) => Err(OtherVariantError::new(KeyType::Ecdsa)), } } } #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] impl TryInto for Keypair { - type Error = ConversionError; + type Error = OtherVariantError; fn try_into(self) -> Result { #[allow(deprecated)] match self { Keypair::Rsa(inner) => Ok(inner), - Keypair::Ed25519(_) => Err(ConversionError::new(KeyType::Ed25519)), - Keypair::Secp256k1(_) => Err(ConversionError::new(KeyType::Secp256k1)), - Keypair::Ecdsa(_) => Err(ConversionError::new(KeyType::Ecdsa)), + Keypair::Ed25519(_) => Err(OtherVariantError::new(KeyType::Ed25519)), + Keypair::Secp256k1(_) => Err(OtherVariantError::new(KeyType::Secp256k1)), + Keypair::Ecdsa(_) => Err(OtherVariantError::new(KeyType::Ecdsa)), } } } @@ -553,60 +553,60 @@ impl TryFrom for PublicKey { #[cfg(feature = "ed25519")] impl TryInto for PublicKey { - type Error = ConversionError; + type Error = OtherVariantError; fn try_into(self) -> Result { #[allow(deprecated)] match self { PublicKey::Ed25519(inner) => Ok(inner), - PublicKey::Rsa(_) => 
Err(ConversionError::new(KeyType::RSA)), - PublicKey::Secp256k1(_) => Err(ConversionError::new(KeyType::Secp256k1)), - PublicKey::Ecdsa(_) => Err(ConversionError::new(KeyType::Ecdsa)), + PublicKey::Rsa(_) => Err(OtherVariantError::new(KeyType::RSA)), + PublicKey::Secp256k1(_) => Err(OtherVariantError::new(KeyType::Secp256k1)), + PublicKey::Ecdsa(_) => Err(OtherVariantError::new(KeyType::Ecdsa)), } } } #[cfg(feature = "ecdsa")] impl TryInto for PublicKey { - type Error = ConversionError; + type Error = OtherVariantError; fn try_into(self) -> Result { #[allow(deprecated)] match self { PublicKey::Ecdsa(inner) => Ok(inner), - PublicKey::Ed25519(_) => Err(ConversionError::new(KeyType::Ed25519)), - PublicKey::Rsa(_) => Err(ConversionError::new(KeyType::RSA)), - PublicKey::Secp256k1(_) => Err(ConversionError::new(KeyType::Secp256k1)), + PublicKey::Ed25519(_) => Err(OtherVariantError::new(KeyType::Ed25519)), + PublicKey::Rsa(_) => Err(OtherVariantError::new(KeyType::RSA)), + PublicKey::Secp256k1(_) => Err(OtherVariantError::new(KeyType::Secp256k1)), } } } #[cfg(feature = "secp256k1")] impl TryInto for PublicKey { - type Error = ConversionError; + type Error = OtherVariantError; fn try_into(self) -> Result { #[allow(deprecated)] match self { PublicKey::Secp256k1(inner) => Ok(inner), - PublicKey::Ed25519(_) => Err(ConversionError::new(KeyType::Ed25519)), - PublicKey::Rsa(_) => Err(ConversionError::new(KeyType::RSA)), - PublicKey::Ecdsa(_) => Err(ConversionError::new(KeyType::Ecdsa)), + PublicKey::Ed25519(_) => Err(OtherVariantError::new(KeyType::Ed25519)), + PublicKey::Rsa(_) => Err(OtherVariantError::new(KeyType::RSA)), + PublicKey::Ecdsa(_) => Err(OtherVariantError::new(KeyType::Ecdsa)), } } } #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] impl TryInto for PublicKey { - type Error = ConversionError; + type Error = OtherVariantError; fn try_into(self) -> Result { #[allow(deprecated)] match self { PublicKey::Rsa(inner) => Ok(inner), - PublicKey::Ed25519(_) => 
Err(ConversionError::new(KeyType::Ed25519)), - PublicKey::Secp256k1(_) => Err(ConversionError::new(KeyType::Secp256k1)), - PublicKey::Ecdsa(_) => Err(ConversionError::new(KeyType::Ecdsa)), + PublicKey::Ed25519(_) => Err(OtherVariantError::new(KeyType::Ed25519)), + PublicKey::Secp256k1(_) => Err(OtherVariantError::new(KeyType::Secp256k1)), + PublicKey::Ecdsa(_) => Err(OtherVariantError::new(KeyType::Ecdsa)), } } } From c846a3dd98fddfdd35fa6c69f72bf8f6f011f880 Mon Sep 17 00:00:00 2001 From: drHuangMHT Date: Tue, 4 Apr 2023 20:28:12 +0800 Subject: [PATCH 29/83] revert wrongly renamed 'secp256k1::SecretKey::from_der' --- identity/src/secp256k1.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/identity/src/secp256k1.rs b/identity/src/secp256k1.rs index 59908769280..47e578edd0f 100644 --- a/identity/src/secp256k1.rs +++ b/identity/src/secp256k1.rs @@ -137,7 +137,7 @@ impl SecretKey { since = "0.2.0", note = "This method name is inaccurate, use `SecretKey::try_decode_der` instead." 
)] - pub fn decode_der(der: impl AsMut<[u8]>) -> Result { + pub fn from_der(der: impl AsMut<[u8]>) -> Result { Self::try_decode_der(der) } From a3eae1c20bc63feb0bc9917f36349fc02ad12b63 Mon Sep 17 00:00:00 2001 From: drHuangMHT Date: Tue, 4 Apr 2023 20:37:17 +0800 Subject: [PATCH 30/83] formatting --- examples/dcutr/src/main.rs | 2 +- examples/file-sharing/src/network.rs | 2 +- examples/relay-server/src/main.rs | 3 +-- identity/src/error.rs | 4 +--- identity/src/keypair.rs | 2 +- identity/src/lib.rs | 4 ++-- 6 files changed, 7 insertions(+), 10 deletions(-) diff --git a/examples/dcutr/src/main.rs b/examples/dcutr/src/main.rs index 27e28297a71..fd455cbaa86 100644 --- a/examples/dcutr/src/main.rs +++ b/examples/dcutr/src/main.rs @@ -281,5 +281,5 @@ fn generate_ed25519(secret_key_seed: u8) -> identity::Keypair { let mut bytes = [0u8; 32]; bytes[0] = secret_key_seed; - identity::Keypair::try_from_bytes_ed25519(bytes).expect("only errors on wrong length") + identity::ed25519::Keypair::try_decode(&mut bytes).expect("only errors on wrong length").into() } diff --git a/examples/file-sharing/src/network.rs b/examples/file-sharing/src/network.rs index 0c91112b99c..64bf8f48b97 100644 --- a/examples/file-sharing/src/network.rs +++ b/examples/file-sharing/src/network.rs @@ -39,7 +39,7 @@ pub async fn new( Some(seed) => { let mut bytes = [0u8; 32]; bytes[0] = seed; - identity::Keypair::try_from_bytes_ed25519(bytes).unwrap() + identity::ed25519::Keypair::try_decode(&mut bytes).unwrap().into() } None => identity::Keypair::generate_ed25519(), }; diff --git a/examples/relay-server/src/main.rs b/examples/relay-server/src/main.rs index 68fd32a9666..0c64317336b 100644 --- a/examples/relay-server/src/main.rs +++ b/examples/relay-server/src/main.rs @@ -102,8 +102,7 @@ struct Behaviour { fn generate_ed25519(secret_key_seed: u8) -> identity::Keypair { let mut bytes = [0u8; 32]; bytes[0] = secret_key_seed; - - identity::Keypair::try_from_bytes_ed25519(bytes).expect("only errors on wrong 
length") + identity::ed25519::Keypair::try_decode(&mut bytes).expect("only errors on wrong length").into() } #[derive(Debug, Parser)] diff --git a/identity/src/error.rs b/identity/src/error.rs index 25188fc36e2..c604f978b23 100644 --- a/identity/src/error.rs +++ b/identity/src/error.rs @@ -129,9 +129,7 @@ pub struct OtherVariantError { impl OtherVariantError { pub fn new(actual: KeyType) -> OtherVariantError { - OtherVariantError { - actual, - } + OtherVariantError { actual } } } diff --git a/identity/src/keypair.rs b/identity/src/keypair.rs index 58ecbef56e2..bb1b1ebf8e1 100644 --- a/identity/src/keypair.rs +++ b/identity/src/keypair.rs @@ -18,7 +18,7 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::error::{OtherVariantError, DecodingError, SigningError}; +use crate::error::{DecodingError, OtherVariantError, SigningError}; use crate::{proto, KeyType}; use quick_protobuf::{BytesReader, Writer}; use std::convert::TryFrom; diff --git a/identity/src/lib.rs b/identity/src/lib.rs index 0cf5cb19c7f..2595b206522 100644 --- a/identity/src/lib.rs +++ b/identity/src/lib.rs @@ -135,9 +135,9 @@ pub enum KeyType { Ecdsa, } -impl Display for KeyType{ +impl Display for KeyType { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self{ + match self { KeyType::Ed25519 => f.write_str("Ed25519"), KeyType::RSA => f.write_str("RSA"), KeyType::Secp256k1 => f.write_str("Secp256k1"), From 3adb27f8f115e57deff8fe347861f5c5dda21a81 Mon Sep 17 00:00:00 2001 From: drHuangMHT Date: Tue, 4 Apr 2023 20:48:08 +0800 Subject: [PATCH 31/83] revert removal of methods in 'libp2p_identity::PublicKey' and rename instead --- identity/src/keypair.rs | 52 +++++++++++++++++++---------------------- 1 file changed, 24 insertions(+), 28 deletions(-) diff --git a/identity/src/keypair.rs b/identity/src/keypair.rs index bb1b1ebf8e1..798fa1bc644 100644 --- a/identity/src/keypair.rs +++ b/identity/src/keypair.rs @@ -109,46 
+109,22 @@ impl Keypair { #[cfg(feature = "ed25519")] pub fn try_into_ed25519(self) -> Result { - #[allow(deprecated)] - match self { - Keypair::Ecdsa(_) => Err(OtherVariantError::new(KeyType::Ecdsa)), - Keypair::Ed25519(inner) => Ok(inner), - Keypair::Rsa(_) => Err(OtherVariantError::new(KeyType::RSA)), - Keypair::Secp256k1(_) => Err(OtherVariantError::new(KeyType::Secp256k1)), - } + self.try_into() } #[cfg(feature = "secp256k1")] pub fn try_into_secp256k1(self) -> Result { - #[allow(deprecated)] - match self { - Keypair::Ecdsa(_) => Err(OtherVariantError::new(KeyType::Ecdsa)), - Keypair::Ed25519(_) => Err(OtherVariantError::new(KeyType::Ed25519)), - Keypair::Rsa(_) => Err(OtherVariantError::new(KeyType::RSA)), - Keypair::Secp256k1(inner) => Ok(inner), - } + self.try_into() } #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] pub fn try_into_rsa(self) -> Result { - #[allow(deprecated)] - match self { - Keypair::Ecdsa(_) => Err(OtherVariantError::new(KeyType::Ecdsa)), - Keypair::Ed25519(_) => Err(OtherVariantError::new(KeyType::Ed25519)), - Keypair::Rsa(inner) => Ok(inner), - Keypair::Secp256k1(_) => Err(OtherVariantError::new(KeyType::Secp256k1)), - } + self.try_into() } #[cfg(feature = "ecdsa")] pub fn try_into_ecdsa(self) -> Result { - #[allow(deprecated)] - match self { - Keypair::Ecdsa(inner) => Ok(inner), - Keypair::Ed25519(_) => Err(OtherVariantError::new(KeyType::Ed25519)), - Keypair::Rsa(_) => Err(OtherVariantError::new(KeyType::RSA)), - Keypair::Secp256k1(_) => Err(OtherVariantError::new(KeyType::Secp256k1)), - } + self.try_into() } /// Decode an keypair from a DER-encoded secret key in PKCS#8 PrivateKeyInfo @@ -460,6 +436,26 @@ impl PublicKey { } } + #[cfg(feature = "ed25519")] + pub fn try_into_ed25519(self) -> Result { + self.try_into() + } + + #[cfg(feature = "secp256k1")] + pub fn try_into_secp256k1(self) -> Result { + self.try_into() + } + + #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] + pub fn try_into_rsa(self) -> Result { + 
self.try_into() + } + + #[cfg(feature = "ecdsa")] + pub fn try_into_ecdsa(self) -> Result { + self.try_into() + } + /// Encode the public key into a protobuf structure for storage or /// exchange with other nodes. #[deprecated( From 43a33ce0e427c2439ea3f897960ef73301de3b34 Mon Sep 17 00:00:00 2001 From: drHuangMHT Date: Tue, 4 Apr 2023 20:57:53 +0800 Subject: [PATCH 32/83] clippy --- identity/src/rsa.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/identity/src/rsa.rs b/identity/src/rsa.rs index 729816f21e6..ebd3c269d21 100644 --- a/identity/src/rsa.rs +++ b/identity/src/rsa.rs @@ -405,7 +405,7 @@ mod tests { fn rsa_x509_encode_decode() { fn prop(SomeKeypair(kp): SomeKeypair) -> Result { let pk = kp.public(); - PublicKey::try_decode_x509(&pk.encode_x509()) + PublicKey::try_decode_x509(pk.encode_x509()) .map_err(|e| e.to_string()) .map(|pk2| pk2 == pk) } From 9ae47cf6940197b53bd04f823a89c25a6c61b6c4 Mon Sep 17 00:00:00 2001 From: drHuangMHT Date: Tue, 4 Apr 2023 20:59:43 +0800 Subject: [PATCH 33/83] remove unnecessary libp2p_identity::PublicKey::try_to_protobuf_encoding --- identity/src/keypair.rs | 20 -------------------- 1 file changed, 20 deletions(-) diff --git a/identity/src/keypair.rs b/identity/src/keypair.rs index 798fa1bc644..8c737935da4 100644 --- a/identity/src/keypair.rs +++ b/identity/src/keypair.rs @@ -456,26 +456,6 @@ impl PublicKey { self.try_into() } - /// Encode the public key into a protobuf structure for storage or - /// exchange with other nodes. - #[deprecated( - since = "0.2.0", - note = "This method is infallible, use `to_protobuf_encoding` instead." 
- )] - pub fn try_to_protobuf_encoding(&self) -> Result, DecodingError> { - use quick_protobuf::MessageWrite; - - let public_key = proto::PublicKey::from(self); - - let mut buf = Vec::with_capacity(public_key.get_size()); - let mut writer = Writer::new(&mut buf); - public_key - .write_message(&mut writer) - .expect("Encoding to succeed"); - - Ok(buf) - } - /// Encode the public key into a protobuf structure for storage or /// exchange with other nodes. pub fn to_protobuf_encoding(&self) -> Vec { From b25a4467beac5b55ad364111603eb318a09d61a0 Mon Sep 17 00:00:00 2001 From: drHuangMHT Date: Tue, 4 Apr 2023 21:04:11 +0800 Subject: [PATCH 34/83] revert rename of 'libp2p_identity::PublicKey::from_protobuf_encoding' --- identity/src/keypair.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/identity/src/keypair.rs b/identity/src/keypair.rs index 8c737935da4..2dc5371c144 100644 --- a/identity/src/keypair.rs +++ b/identity/src/keypair.rs @@ -472,6 +472,16 @@ impl PublicKey { buf } + /// Decode a public key from a protobuf structure, e.g. read from storage + /// or received from another node. + #[deprecated( + since = "0.2.0", + note = "This method name is inaccurate, use `PublicKey::try_from_protobuf_encoding` instead." + )] + pub fn from_protobuf_encoding(bytes: &[u8]) -> Result { + Self::try_from_protobuf_encoding(bytes) + } + /// Decode a public key from a protobuf structure, e.g. read from storage /// or received from another node. 
pub fn try_from_protobuf_encoding(bytes: &[u8]) -> Result { From d936772c4590fec88206aa73974d89011bec6c6a Mon Sep 17 00:00:00 2001 From: drHuangMHT Date: Wed, 5 Apr 2023 10:02:49 +0800 Subject: [PATCH 35/83] improve deprecation messages --- identity/src/ecdsa.rs | 6 +++--- identity/src/ed25519.rs | 6 +++--- identity/src/keypair.rs | 4 ++-- identity/src/rsa.rs | 4 ++-- identity/src/secp256k1.rs | 6 +++--- 5 files changed, 13 insertions(+), 13 deletions(-) diff --git a/identity/src/ecdsa.rs b/identity/src/ecdsa.rs index 28e9f7541a8..29d0055008c 100644 --- a/identity/src/ecdsa.rs +++ b/identity/src/ecdsa.rs @@ -117,7 +117,7 @@ impl SecretKey { /// Decode a secret key from a byte buffer. #[deprecated( since = "0.2.0", - note = "This method name is inaccurate, use `SecretKey::try_from_bytes` instead" + note = "This method name does not follow Rust naming conventions, use `SecretKey::try_from_bytes` instead" )] pub fn from_bytes(buf: &[u8]) -> Result { SigningKey::from_bytes(buf) @@ -156,7 +156,7 @@ impl PublicKey { /// Decode a public key from a byte buffer without compression. #[deprecated( since = "0.2.0", - note = "This method name is inaccurate, use `PublicKey::try_from_bytes` instead." + note = "This method name does not follow Rust naming conventions, use `PublicKey::try_from_bytes` instead." )] pub fn from_bytes(k: &[u8]) -> Result { let enc_pt = EncodedPoint::from_bytes(k) @@ -191,7 +191,7 @@ impl PublicKey { /// Decode a public key into a DER encoded byte buffer as defined by SEC1 standard. #[deprecated( since = "0.2.0", - note = "This method name is inaccurate, use `PublicKey::try_decode_der` instead." + note = "This method name does not follow Rust naming conventions, use `PublicKey::try_decode_der` instead." 
)] pub fn decode_der(k: &[u8]) -> Result { let buf = Self::del_asn1_header(k).ok_or_else(|| { diff --git a/identity/src/ed25519.rs b/identity/src/ed25519.rs index 4c7c4b9bd6d..a6f0a902550 100644 --- a/identity/src/ed25519.rs +++ b/identity/src/ed25519.rs @@ -51,7 +51,7 @@ impl Keypair { /// Note that this binary format is the same as `ed25519_dalek`'s and `ed25519_zebra`'s. #[deprecated( since = "0.2.0", - note = "This method name is inaccurate, use `Keypair::try_decode` instead." + note = "This method name does not follow Rust naming conventions, use `Keypair::try_decode` instead." )] pub fn decode(kp: &mut [u8]) -> Result { ed25519::Keypair::from_bytes(kp) @@ -186,7 +186,7 @@ impl PublicKey { /// Decode a public key from a byte array as produced by `encode`. #[deprecated( since = "0.2.0", - note = "This method name is inaccurate, use `PublicKey::try_decode` instead." + note = "This method name does not follow Rust naming conventions, use `PublicKey::try_decode` instead." )] pub fn decode(k: &[u8]) -> Result { ed25519::PublicKey::from_bytes(k) @@ -243,7 +243,7 @@ impl SecretKey { /// returned. #[deprecated( since = "0.2.0", - note = "This method name is inaccurate, use `SecretKey::try_from_bytes` instead." + note = "This method name does not follow Rust naming conventions, use `SecretKey::try_from_bytes` instead." )] pub fn from_bytes(mut sk_bytes: impl AsMut<[u8]>) -> Result { let sk_bytes = sk_bytes.as_mut(); diff --git a/identity/src/keypair.rs b/identity/src/keypair.rs index 2dc5371c144..7dd1e05de1d 100644 --- a/identity/src/keypair.rs +++ b/identity/src/keypair.rs @@ -244,7 +244,7 @@ impl Keypair { /// Decode a private key from a protobuf structure and parse it as a [`Keypair`]. #[deprecated( since = "0.2.0", - note = "This method name is inaccurate, use `Keypair::try_from_protobuf_encoding` instead." + note = "This method name does not follow Rust naming conventions, use `Keypair::try_from_protobuf_encoding` instead." 
)] pub fn from_protobuf_encoding(bytes: &[u8]) -> Result { Self::try_from_protobuf_encoding(bytes) @@ -476,7 +476,7 @@ impl PublicKey { /// or received from another node. #[deprecated( since = "0.2.0", - note = "This method name is inaccurate, use `PublicKey::try_from_protobuf_encoding` instead." + note = "This method name does not follow Rust naming conventions, use `PublicKey::try_from_protobuf_encoding` instead." )] pub fn from_protobuf_encoding(bytes: &[u8]) -> Result { Self::try_from_protobuf_encoding(bytes) diff --git a/identity/src/rsa.rs b/identity/src/rsa.rs index ebd3c269d21..32d654932d3 100644 --- a/identity/src/rsa.rs +++ b/identity/src/rsa.rs @@ -51,7 +51,7 @@ impl Keypair { /// [RFC5208]: https://tools.ietf.org/html/rfc5208#section-5 #[deprecated( since = "0.2.0", - note = "This method name is inaccurate, use `Keypair::try_decode_pkcs8` instead." + note = "This method name does not follow Rust naming conventions, use `Keypair::try_decode_pkcs8` instead." )] pub fn from_pkcs8(der: &mut [u8]) -> Result { let kp = RsaKeyPair::from_pkcs8(der) @@ -172,7 +172,7 @@ impl PublicKey { /// structure. See also `encode_x509`. #[deprecated( since = "0.2.0", - note = "This method name is inaccurate, use `PublicKey::try_decode_x509` instead." + note = "This method name does not follow Rust naming conventions, use `PublicKey::try_decode_x509` instead." )] pub fn decode_x509(pk: &[u8]) -> Result { Asn1SubjectPublicKeyInfo::decode(pk) diff --git a/identity/src/secp256k1.rs b/identity/src/secp256k1.rs index 47e578edd0f..3682f8b2152 100644 --- a/identity/src/secp256k1.rs +++ b/identity/src/secp256k1.rs @@ -106,7 +106,7 @@ impl SecretKey { /// Note that the expected binary format is the same as `libsecp256k1`'s. #[deprecated( since = "0.2.0", - note = "This method name is inaccurate, use `SecretKey::try_from_bytes` instead." + note = "This method name does not follow Rust naming conventions, use `SecretKey::try_from_bytes` instead." 
)] pub fn from_bytes(mut sk: impl AsMut<[u8]>) -> Result { let sk_bytes = sk.as_mut(); @@ -135,7 +135,7 @@ impl SecretKey { /// [RFC5915]: https://tools.ietf.org/html/rfc5915 #[deprecated( since = "0.2.0", - note = "This method name is inaccurate, use `SecretKey::try_decode_der` instead." + note = "This method name does not follow Rust naming conventions, use `SecretKey::try_decode_der` instead." )] pub fn from_der(der: impl AsMut<[u8]>) -> Result { Self::try_decode_der(der) @@ -252,7 +252,7 @@ impl PublicKey { /// by `encode`. #[deprecated( since = "0.2.0", - note = "This method name is inaccurate, use `PublicKey::try_decode` instead." + note = "This method name does not follow Rust naming conventions, use `PublicKey::try_decode` instead." )] pub fn decode(k: &[u8]) -> Result { libsecp256k1::PublicKey::parse_slice(k, Some(libsecp256k1::PublicKeyFormat::Compressed)) From 284736c959f7d33922dfebbd146fd402086a0307 Mon Sep 17 00:00:00 2001 From: drHuangMHT Date: Wed, 5 Apr 2023 10:09:43 +0800 Subject: [PATCH 36/83] delegate deprecated methods to suggested ones --- identity/src/ecdsa.rs | 11 ++--------- identity/src/ed25519.rs | 19 ++++--------------- identity/src/rsa.rs | 13 ++----------- identity/src/secp256k1.rs | 12 +++--------- 4 files changed, 11 insertions(+), 44 deletions(-) diff --git a/identity/src/ecdsa.rs b/identity/src/ecdsa.rs index 29d0055008c..c2ac772e766 100644 --- a/identity/src/ecdsa.rs +++ b/identity/src/ecdsa.rs @@ -120,9 +120,7 @@ impl SecretKey { note = "This method name does not follow Rust naming conventions, use `SecretKey::try_from_bytes` instead" )] pub fn from_bytes(buf: &[u8]) -> Result { - SigningKey::from_bytes(buf) - .map_err(|err| DecodingError::failed_to_parse("ecdsa p256 secret key", err)) - .map(SecretKey) + Self::try_from_bytes(buf) } /// Try to parse a secret key from a byte buffer. @@ -159,12 +157,7 @@ impl PublicKey { note = "This method name does not follow Rust naming conventions, use `PublicKey::try_from_bytes` instead." 
)] pub fn from_bytes(k: &[u8]) -> Result { - let enc_pt = EncodedPoint::from_bytes(k) - .map_err(|e| DecodingError::failed_to_parse("ecdsa p256 encoded point", e))?; - - VerifyingKey::from_encoded_point(&enc_pt) - .map_err(|err| DecodingError::failed_to_parse("ecdsa p256 public key", err)) - .map(PublicKey) + Self::try_from_bytes(k) } /// Decode a public key from a byte buffer without compression. diff --git a/identity/src/ed25519.rs b/identity/src/ed25519.rs index a6f0a902550..bbce0a6a22e 100644 --- a/identity/src/ed25519.rs +++ b/identity/src/ed25519.rs @@ -54,12 +54,7 @@ impl Keypair { note = "This method name does not follow Rust naming conventions, use `Keypair::try_decode` instead." )] pub fn decode(kp: &mut [u8]) -> Result { - ed25519::Keypair::from_bytes(kp) - .map(|k| { - kp.zeroize(); - Keypair(k) - }) - .map_err(|e| DecodingError::failed_to_parse("Ed25519 keypair", e)) + Self::try_decode(kp) } /// Try to decode a keypair from the [binary format](https://datatracker.ietf.org/doc/html/rfc8032#section-5.1.5) @@ -189,9 +184,7 @@ impl PublicKey { note = "This method name does not follow Rust naming conventions, use `PublicKey::try_decode` instead." )] pub fn decode(k: &[u8]) -> Result { - ed25519::PublicKey::from_bytes(k) - .map_err(|e| DecodingError::failed_to_parse("Ed25519 public key", e)) - .map(PublicKey) + Self::try_decode(k) } /// Try to decode a public key from a byte array as produced by `encode`. @@ -245,12 +238,8 @@ impl SecretKey { since = "0.2.0", note = "This method name does not follow Rust naming conventions, use `SecretKey::try_from_bytes` instead." 
)] - pub fn from_bytes(mut sk_bytes: impl AsMut<[u8]>) -> Result { - let sk_bytes = sk_bytes.as_mut(); - let secret = ed25519::SecretKey::from_bytes(&*sk_bytes) - .map_err(|e| DecodingError::failed_to_parse("Ed25519 secret key", e))?; - sk_bytes.zeroize(); - Ok(SecretKey(secret)) + pub fn from_bytes(sk_bytes: impl AsMut<[u8]>) -> Result { + Self::try_from_bytes(sk_bytes) } /// Try to create an Ed25519 secret key from a byte slice, zeroing the input on success. diff --git a/identity/src/rsa.rs b/identity/src/rsa.rs index 32d654932d3..5914b4072ee 100644 --- a/identity/src/rsa.rs +++ b/identity/src/rsa.rs @@ -54,14 +54,7 @@ impl Keypair { note = "This method name does not follow Rust naming conventions, use `Keypair::try_decode_pkcs8` instead." )] pub fn from_pkcs8(der: &mut [u8]) -> Result { - let kp = RsaKeyPair::from_pkcs8(der) - .map_err(|e| DecodingError::failed_to_parse("RSA PKCS#8 PrivateKeyInfo", e))?; - let kp = Keypair { - inner: Arc::new(kp), - raw_key: der.to_vec(), - }; - der.zeroize(); - Ok(kp) + Self::try_decode_pkcs8(der) } /// Get the public key from the keypair. @@ -175,9 +168,7 @@ impl PublicKey { note = "This method name does not follow Rust naming conventions, use `PublicKey::try_decode_x509` instead." )] pub fn decode_x509(pk: &[u8]) -> Result { - Asn1SubjectPublicKeyInfo::decode(pk) - .map_err(|e| DecodingError::failed_to_parse("RSA X.509", e)) - .map(|spki| spki.subjectPublicKey.0) + Self::try_decode_x509(pk) } /// Try to decode an RSA public key from a DER-encoded X.509 SubjectPublicKeyInfo diff --git a/identity/src/secp256k1.rs b/identity/src/secp256k1.rs index 3682f8b2152..088d40a455e 100644 --- a/identity/src/secp256k1.rs +++ b/identity/src/secp256k1.rs @@ -108,12 +108,8 @@ impl SecretKey { since = "0.2.0", note = "This method name does not follow Rust naming conventions, use `SecretKey::try_from_bytes` instead." 
)] - pub fn from_bytes(mut sk: impl AsMut<[u8]>) -> Result { - let sk_bytes = sk.as_mut(); - let secret = libsecp256k1::SecretKey::parse_slice(&*sk_bytes) - .map_err(|e| DecodingError::failed_to_parse("parse secp256k1 secret key", e))?; - sk_bytes.zeroize(); - Ok(SecretKey(secret)) + pub fn from_bytes(sk: impl AsMut<[u8]>) -> Result { + Self::try_from_bytes(sk) } /// Try to parse a secret key from a byte slice, zeroing the slice on success. @@ -255,9 +251,7 @@ impl PublicKey { note = "This method name does not follow Rust naming conventions, use `PublicKey::try_decode` instead." )] pub fn decode(k: &[u8]) -> Result { - libsecp256k1::PublicKey::parse_slice(k, Some(libsecp256k1::PublicKeyFormat::Compressed)) - .map_err(|e| DecodingError::failed_to_parse("secp256k1 public key", e)) - .map(PublicKey) + Self::try_decode(k) } /// Try to decode a public key from a byte slice in the the format produced From 2f02c04c79b722e0266a3e3a17cf4731a3a41643 Mon Sep 17 00:00:00 2001 From: drHuangMHT Date: Wed, 5 Apr 2023 10:36:41 +0800 Subject: [PATCH 37/83] enforce 'decode' rule for protobuf encoding --- core/src/signed_envelope.rs | 4 +-- identity/src/keypair.rs | 36 +++++++++++++++++---------- identity/src/peer_id.rs | 4 +-- identity/tests/keypair_api.rs | 4 +-- misc/keygen/src/config.rs | 2 +- misc/keygen/src/main.rs | 4 +-- protocols/gossipsub/src/behaviour.rs | 2 +- protocols/gossipsub/src/protocol.rs | 4 +-- protocols/identify/src/protocol.rs | 6 ++--- transports/noise/src/io/handshake.rs | 4 +-- transports/plaintext/src/handshake.rs | 4 +-- transports/tls/src/certificate.rs | 4 +-- 12 files changed, 44 insertions(+), 34 deletions(-) diff --git a/core/src/signed_envelope.rs b/core/src/signed_envelope.rs index 9e311e1229b..014aa912ee5 100644 --- a/core/src/signed_envelope.rs +++ b/core/src/signed_envelope.rs @@ -76,7 +76,7 @@ impl SignedEnvelope { use quick_protobuf::MessageWrite; let envelope = proto::Envelope { - public_key: self.key.to_protobuf_encoding(), + public_key: 
self.key.encode_protobuf_encoding(), payload_type: self.payload_type, payload: self.payload, signature: self.signature, @@ -101,7 +101,7 @@ impl SignedEnvelope { proto::Envelope::from_reader(&mut reader, bytes).map_err(DecodeError::from)?; Ok(Self { - key: PublicKey::try_from_protobuf_encoding(&envelope.public_key)?, + key: PublicKey::try_decode_protobuf_encoding(&envelope.public_key)?, payload_type: envelope.payload_type.to_vec(), payload: envelope.payload.to_vec(), signature: envelope.signature.to_vec(), diff --git a/identity/src/keypair.rs b/identity/src/keypair.rs index 7dd1e05de1d..08ae2196975 100644 --- a/identity/src/keypair.rs +++ b/identity/src/keypair.rs @@ -206,8 +206,13 @@ impl Keypair { } /// Encode a private key as protobuf structure. - #[cfg_attr(not(feature = "ed25519"), allow(unused_variables, unused_mut))] + #[deprecated(since = "0.2.0", note = "Renamed to `encode_protobuf_encoding`")] pub fn to_protobuf_encoding(&self) -> Vec { + self.encode_protobuf_encoding() + } + + /// Encode a private key as protobuf structure. + pub fn encode_protobuf_encoding(&self) -> Vec { use quick_protobuf::MessageWrite; #[allow(deprecated)] @@ -244,15 +249,15 @@ impl Keypair { /// Decode a private key from a protobuf structure and parse it as a [`Keypair`]. #[deprecated( since = "0.2.0", - note = "This method name does not follow Rust naming conventions, use `Keypair::try_from_protobuf_encoding` instead." + note = "This method name does not follow Rust naming conventions, use `Keypair::try_decode_protobuf_encoding` instead." )] pub fn from_protobuf_encoding(bytes: &[u8]) -> Result { - Self::try_from_protobuf_encoding(bytes) + Self::try_decode_protobuf_encoding(bytes) } /// Try to decode a private key from a protobuf structure and parse it as a [`Keypair`]. 
#[cfg_attr(not(feature = "ed25519"), allow(unused_mut))] - pub fn try_from_protobuf_encoding(bytes: &[u8]) -> Result { + pub fn try_decode_protobuf_encoding(bytes: &[u8]) -> Result { use quick_protobuf::MessageRead; let mut reader = BytesReader::from_bytes(bytes); @@ -269,9 +274,7 @@ impl Keypair { } proto::KeyType::RSA => { #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] - return rsa::Keypair::try_decode_pkcs8(&mut private_key.Data) - .or(rsa::Keypair::try_decode_der(&mut private_key.Data)) - .map(Keypair::Rsa); + return rsa::Keypair::try_decode_pkcs8(&mut private_key.Data).map(Keypair::Rsa); Err(DecodingError::missing_feature("rsa")) } proto::KeyType::Secp256k1 => { @@ -458,7 +461,14 @@ impl PublicKey { /// Encode the public key into a protobuf structure for storage or /// exchange with other nodes. + #[deprecated(since = "0.2.0", note = "Renamed to `encode_protobuf_encoding`")] pub fn to_protobuf_encoding(&self) -> Vec { + self.encode_protobuf_encoding() + } + + /// Encode the public key into a protobuf structure for storage or + /// exchange with other nodes. + pub fn encode_protobuf_encoding(&self) -> Vec { use quick_protobuf::MessageWrite; let public_key = proto::PublicKey::from(self); @@ -476,15 +486,15 @@ impl PublicKey { /// or received from another node. #[deprecated( since = "0.2.0", - note = "This method name does not follow Rust naming conventions, use `PublicKey::try_from_protobuf_encoding` instead." + note = "This method name does not follow Rust naming conventions, use `PublicKey::try_decode_protobuf_encoding` instead." )] pub fn from_protobuf_encoding(bytes: &[u8]) -> Result { - Self::try_from_protobuf_encoding(bytes) + Self::try_decode_protobuf_encoding(bytes) } /// Decode a public key from a protobuf structure, e.g. read from storage /// or received from another node. 
- pub fn try_from_protobuf_encoding(bytes: &[u8]) -> Result { + pub fn try_decode_protobuf_encoding(bytes: &[u8]) -> Result { use quick_protobuf::MessageRead; let mut reader = BytesReader::from_bytes(bytes); @@ -610,9 +620,9 @@ mod tests { let expected_keypair = Keypair::generate_ed25519(); let expected_peer_id = expected_keypair.public().to_peer_id(); - let encoded = expected_keypair.to_protobuf_encoding(); + let encoded = expected_keypair.encode_protobuf_encoding(); - let keypair = Keypair::try_from_protobuf_encoding(&encoded).unwrap(); + let keypair = Keypair::try_decode_protobuf_encoding(&encoded).unwrap(); let peer_id = keypair.public().to_peer_id(); assert_eq!(expected_peer_id, peer_id); @@ -627,7 +637,7 @@ mod tests { let encoded = BASE64_STANDARD.decode(base_64_encoded).unwrap(); - let keypair = Keypair::try_from_protobuf_encoding(&encoded).unwrap(); + let keypair = Keypair::try_decode_protobuf_encoding(&encoded).unwrap(); let peer_id = keypair.public().to_peer_id(); assert_eq!(expected_peer_id, peer_id); diff --git a/identity/src/peer_id.rs b/identity/src/peer_id.rs index ae9ffc80c0a..7bf50950dfe 100644 --- a/identity/src/peer_id.rs +++ b/identity/src/peer_id.rs @@ -57,7 +57,7 @@ impl PeerId { pub fn from_public_key(key: &crate::keypair::PublicKey) -> PeerId { use multihash::MultihashDigest as _; - let key_enc = key.to_protobuf_encoding(); + let key_enc = key.encode_protobuf_encoding(); let hash_algorithm = if key_enc.len() <= MAX_INLINE_KEY_LENGTH { Code::Identity @@ -132,7 +132,7 @@ impl PeerId { let alg = Code::try_from(self.multihash.code()) .expect("Internal multihash is always a valid `Code`"); - let enc = public_key.to_protobuf_encoding(); + let enc = public_key.encode_protobuf_encoding(); Some(alg.digest(&enc) == self.multihash) } } diff --git a/identity/tests/keypair_api.rs b/identity/tests/keypair_api.rs index 356d46389fe..a135395da4d 100644 --- a/identity/tests/keypair_api.rs +++ b/identity/tests/keypair_api.rs @@ -2,12 +2,12 @@ use 
libp2p_identity::Keypair; #[test] fn calling_keypair_api() { - let _ = Keypair::try_from_protobuf_encoding(&[]); + let _ = Keypair::try_decode_protobuf_encoding(&[]); } #[allow(dead_code)] fn using_keypair(kp: Keypair) { - let _ = kp.to_protobuf_encoding(); + let _ = kp.encode_protobuf_encoding(); let _ = kp.sign(&[]); let _ = kp.public(); } diff --git a/misc/keygen/src/config.rs b/misc/keygen/src/config.rs index 5bd62f39230..d5286552428 100644 --- a/misc/keygen/src/config.rs +++ b/misc/keygen/src/config.rs @@ -18,7 +18,7 @@ impl Config { } pub fn from_key_material(peer_id: PeerId, keypair: &Keypair) -> Result> { - let priv_key = BASE64_STANDARD.encode(keypair.to_protobuf_encoding()); + let priv_key = BASE64_STANDARD.encode(keypair.encode_protobuf_encoding()); let peer_id = peer_id.to_base58(); Ok(Self { identity: Identity { peer_id, priv_key }, diff --git a/misc/keygen/src/main.rs b/misc/keygen/src/main.rs index db4679fe5df..ead72205ce6 100644 --- a/misc/keygen/src/main.rs +++ b/misc/keygen/src/main.rs @@ -54,7 +54,7 @@ fn main() -> Result<(), Box> { Command::From { config } => { let config = Zeroizing::new(config::Config::from_file(config.as_ref())?); - let keypair = identity::Keypair::try_from_protobuf_encoding(&Zeroizing::new( + let keypair = identity::Keypair::try_decode_protobuf_encoding(&Zeroizing::new( BASE64_STANDARD.decode(config.identity.priv_key.as_bytes())?, ))?; @@ -118,7 +118,7 @@ fn main() -> Result<(), Box> { println!( "PeerId: {:?} Keypair: {:?}", local_peer_id, - local_keypair.to_protobuf_encoding() + local_keypair.encode_protobuf_encoding() ); } diff --git a/protocols/gossipsub/src/behaviour.rs b/protocols/gossipsub/src/behaviour.rs index 4b358afdd02..013c2dae08c 100644 --- a/protocols/gossipsub/src/behaviour.rs +++ b/protocols/gossipsub/src/behaviour.rs @@ -178,7 +178,7 @@ impl From for PublishConfig { match authenticity { MessageAuthenticity::Signed(keypair) => { let public_key = keypair.public(); - let key_enc = 
public_key.to_protobuf_encoding(); + let key_enc = public_key.encode_protobuf_encoding(); let key = if key_enc.len() <= 42 { // The public key can be inlined in [`rpc_proto::proto::::Message::from`], so we don't include it // specifically in the [`rpc_proto::proto::Message::key`] field. diff --git a/protocols/gossipsub/src/protocol.rs b/protocols/gossipsub/src/protocol.rs index 9d6edfbecd1..89c418afdfb 100644 --- a/protocols/gossipsub/src/protocol.rs +++ b/protocols/gossipsub/src/protocol.rs @@ -237,10 +237,10 @@ impl GossipsubCodec { let public_key = match message .key .as_deref() - .map(PublicKey::try_from_protobuf_encoding) + .map(PublicKey::try_decode_protobuf_encoding) { Some(Ok(key)) => key, - _ => match PublicKey::try_from_protobuf_encoding(&source.to_bytes()[2..]) { + _ => match PublicKey::try_decode_protobuf_encoding(&source.to_bytes()[2..]) { Ok(v) => v, Err(_) => { warn!("Signature verification failed: No valid public key supplied"); diff --git a/protocols/identify/src/protocol.rs b/protocols/identify/src/protocol.rs index 25493ac14bf..53e9e79f0d5 100644 --- a/protocols/identify/src/protocol.rs +++ b/protocols/identify/src/protocol.rs @@ -169,7 +169,7 @@ where .map(|addr| addr.to_vec()) .collect(); - let pubkey_bytes = info.public_key.to_protobuf_encoding(); + let pubkey_bytes = info.public_key.encode_protobuf_encoding(); let message = proto::Identify { agentVersion: Some(info.agent_version), @@ -235,7 +235,7 @@ impl TryFrom for Info { addrs }; - let public_key = PublicKey::try_from_protobuf_encoding(&msg.publicKey.unwrap_or_default())?; + let public_key = PublicKey::try_decode_protobuf_encoding(&msg.publicKey.unwrap_or_default())?; let observed_addr = match parse_multiaddr(msg.observedAddr.unwrap_or_default()) { Ok(a) => a, @@ -386,7 +386,7 @@ mod tests { publicKey: Some( identity::Keypair::generate_ed25519() .public() - .to_protobuf_encoding(), + .encode_protobuf_encoding(), ), }; diff --git a/transports/noise/src/io/handshake.rs 
b/transports/noise/src/io/handshake.rs index ce478297876..5b2f63d1449 100644 --- a/transports/noise/src/io/handshake.rs +++ b/transports/noise/src/io/handshake.rs @@ -214,7 +214,7 @@ where let pb = pb_result?; if !pb.identity_key.is_empty() { - let pk = identity::PublicKey::try_from_protobuf_encoding(&pb.identity_key)?; + let pk = identity::PublicKey::try_decode_protobuf_encoding(&pb.identity_key)?; if let Some(ref k) = state.id_remote_pubkey { if k != &pk { return Err(NoiseError::UnexpectedKey); @@ -236,7 +236,7 @@ where T: AsyncWrite + Unpin, { let mut pb = proto::NoiseHandshakePayload { - identity_key: state.identity.public.to_protobuf_encoding(), + identity_key: state.identity.public.encode_protobuf_encoding(), ..Default::default() }; diff --git a/transports/plaintext/src/handshake.rs b/transports/plaintext/src/handshake.rs index b5d931a3f38..eae855e991f 100644 --- a/transports/plaintext/src/handshake.rs +++ b/transports/plaintext/src/handshake.rs @@ -54,7 +54,7 @@ impl HandshakeContext { fn new(config: PlainText2Config) -> Self { let exchange = Exchange { id: Some(config.local_public_key.to_peer_id().to_bytes()), - pubkey: Some(config.local_public_key.to_protobuf_encoding()), + pubkey: Some(config.local_public_key.encode_protobuf_encoding()), }; let mut buf = Vec::with_capacity(exchange.get_size()); let mut writer = Writer::new(&mut buf); @@ -77,7 +77,7 @@ impl HandshakeContext { let mut reader = BytesReader::from_bytes(&exchange_bytes); let prop = Exchange::from_reader(&mut reader, &exchange_bytes)?; - let public_key = PublicKey::try_from_protobuf_encoding(&prop.pubkey.unwrap_or_default())?; + let public_key = PublicKey::try_decode_protobuf_encoding(&prop.pubkey.unwrap_or_default())?; let peer_id = PeerId::from_bytes(&prop.id.unwrap_or_default())?; // Check the validity of the remote's `Exchange`. 
diff --git a/transports/tls/src/certificate.rs b/transports/tls/src/certificate.rs index 70e0b76750d..ced60e5597a 100644 --- a/transports/tls/src/certificate.rs +++ b/transports/tls/src/certificate.rs @@ -159,7 +159,7 @@ fn parse_unverified(der_input: &[u8]) -> Result { // required KeyType Type = 1; // required bytes Data = 2; // } - let public_key = identity::PublicKey::try_from_protobuf_encoding(&public_key) + let public_key = identity::PublicKey::try_decode_protobuf_encoding(&public_key) .map_err(|_| webpki::Error::UnknownIssuer)?; let ext = P2pExtension { public_key, @@ -215,7 +215,7 @@ fn make_libp2p_extension( // signature OCTET STRING // } let extension_content = { - let serialized_pubkey = identity_keypair.public().to_protobuf_encoding(); + let serialized_pubkey = identity_keypair.public().encode_protobuf_encoding(); yasna::encode_der(&(serialized_pubkey, signature)) }; From b0a33fd8a64083baa176a6f6c4ad00e0356cb0ef Mon Sep 17 00:00:00 2001 From: drHuangMHT Date: Wed, 5 Apr 2023 10:37:47 +0800 Subject: [PATCH 38/83] formatting --- identity/src/error.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/identity/src/error.rs b/identity/src/error.rs index c604f978b23..1e71c84d31f 100644 --- a/identity/src/error.rs +++ b/identity/src/error.rs @@ -121,8 +121,8 @@ impl Error for SigningError { } } +/// Error produced when failing to convert `libp2p_identity::Keypair` to a more concrete keypair. #[derive(Debug)] -/// Error produced when trying to convert `libp2p_identity::Keypair` to a more concrete keypair failed. 
pub struct OtherVariantError { actual: KeyType, } From cb40674a7a9ad087ba9b8a7f7f44cc939817cc5f Mon Sep 17 00:00:00 2001 From: drHuangMHT Date: Wed, 5 Apr 2023 10:38:14 +0800 Subject: [PATCH 39/83] remove support for DER-encoded RSA key --- identity/src/rsa.rs | 18 +----------------- 1 file changed, 1 insertion(+), 17 deletions(-) diff --git a/identity/src/rsa.rs b/identity/src/rsa.rs index 5914b4072ee..66ba35b9706 100644 --- a/identity/src/rsa.rs +++ b/identity/src/rsa.rs @@ -75,22 +75,6 @@ impl Keypair { } } - /// Try to decode an RSA keypair from a DER-encoded private key. - /// Note that a copy of the undecoded byte array will be stored for encoding. - pub fn try_decode_der(bytes: &mut [u8]) -> Result { - match RsaKeyPair::from_der(bytes) { - Ok(kp) => { - let kp = Self { - inner: Arc::new(kp), - raw_key: bytes.to_vec(), - }; - bytes.zeroize(); - Ok(kp) - } - Err(e) => Err(DecodingError::failed_to_parse("RSA", e)), - } - } - /// Try to decode an RSA keypair from a DER-encoded private key in PKCS#8 PrivateKeyInfo /// format (i.e. unencrypted) as defined in [RFC5208]. /// Decoding from DER-encoded private key bytes is also supported. @@ -112,7 +96,7 @@ impl Keypair { } /// Get the byte array used to parse the keypair from. 
- pub fn to_raw_bytes(&self) -> Vec { + pub(crate) fn to_raw_bytes(&self) -> Vec { self.raw_key.clone() } } From 03213682e73580f85b6698154c9426f234a5b521 Mon Sep 17 00:00:00 2001 From: drHuangMHT Date: Wed, 5 Apr 2023 10:46:14 +0800 Subject: [PATCH 40/83] syncing 'keypair_dummy.rs' with 'keypair.rs' --- identity/src/keypair_dummy.rs | 40 ++++++++++++++++++++++++++++++----- 1 file changed, 35 insertions(+), 5 deletions(-) diff --git a/identity/src/keypair_dummy.rs b/identity/src/keypair_dummy.rs index 4e1bb61f15c..55be856dba6 100644 --- a/identity/src/keypair_dummy.rs +++ b/identity/src/keypair_dummy.rs @@ -32,11 +32,24 @@ impl Keypair { unreachable!("Can never construct empty enum") } - pub fn to_protobuf_encoding(&self) -> Result, DecodingError> { - unreachable!("Can never construct empty enum") + #[deprecated(since = "0.2.0", note = "Renamed to `encode_protobuf_encoding`")] + pub fn to_protobuf_encoding(&self) -> Vec { + unreachable!("Can never encode empty enum") + } + + pub fn encode_protobuf_encoding(&self) -> Vec { + unreachable!("Can never encode empty enum") } - pub fn from_protobuf_encoding(_: &[u8]) -> Result { + #[deprecated( + since = "0.2.0", + note = "This method name does not follow Rust naming conventions, use `Keypair::try_decode_protobuf_encoding` instead." 
+ )] + pub fn from_protobuf_encoding(bytes: &[u8]) -> Result { + Self::try_decode_protobuf_encoding(bytes) + } + + pub fn try_decode_protobuf_encoding(_: &[u8]) -> Result { Err(DecodingError::missing_feature( "ecdsa|rsa|ed25519|secp256k1", )) @@ -53,10 +66,27 @@ impl PublicKey { } pub fn to_protobuf_encoding(&self) -> Vec { - unreachable!("Can never construct empty enum") + unreachable!("Can never encode empty enum") + } + + #[deprecated(since = "0.2.0", note = "Renamed to `encode_protobuf_encoding`")] + pub fn to_protobuf_encoding(&self) -> Vec { + unreachable!("Can never encode empty enum") + } + + pub fn encode_protobuf_encoding(&self) -> Vec { + unreachable!("Can never encode empty enum") + } + + #[deprecated( + since = "0.2.0", + note = "This method name does not follow Rust naming conventions, use `PublicKey::try_decode_protobuf_encoding` instead." + )] + pub fn from_protobuf_encoding(bytes: &[u8]) -> Result { + Self::try_decode_protobuf_encoding(bytes) } - pub fn from_protobuf_encoding(_: &[u8]) -> Result { + pub fn try_decode_protobuf_encoding(_: &[u8]) -> Result { Err(DecodingError::missing_feature( "ecdsa|rsa|ed25519|secp256k1", )) From 667fa77a5bcb4dc085cfc467f86b4071402d0084 Mon Sep 17 00:00:00 2001 From: drHuangMHT Date: Wed, 5 Apr 2023 17:10:27 +0800 Subject: [PATCH 41/83] remove unnecessary 'Error' trait impl for 'OtherVariantError' --- identity/src/error.rs | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/identity/src/error.rs b/identity/src/error.rs index 1e71c84d31f..5c1811782f2 100644 --- a/identity/src/error.rs +++ b/identity/src/error.rs @@ -142,8 +142,4 @@ impl Display for OtherVariantError { } } -impl Error for OtherVariantError { - fn source(&self) -> Option<&(dyn Error + 'static)> { - None - } -} +impl Error for OtherVariantError {} From c97d661b7dca8979d8057da6f2a9b886229c9d10 Mon Sep 17 00:00:00 2001 From: drHuangMHT Date: Mon, 10 Apr 2023 14:30:16 +0800 Subject: [PATCH 42/83] revert renaming of 'into' methods on 
'Keypair' and deprecate instead --- identity/src/keypair.rs | 48 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) diff --git a/identity/src/keypair.rs b/identity/src/keypair.rs index 08ae2196975..769a7ae2cee 100644 --- a/identity/src/keypair.rs +++ b/identity/src/keypair.rs @@ -107,21 +107,69 @@ impl Keypair { Keypair::Ecdsa(ecdsa::Keypair::generate()) } + #[cfg(feature = "ed25519")] + #[deprecated( + since = "0.2.0", + note = "This method name does not follow Rust naming conventions, use `Keypair::try_into_ed25519` instead." + )] + pub fn into_ed25519(self) -> Option { + match self.try_into() { + Ok(k) => Some(k), + Err(_) => None, + } + } + #[cfg(feature = "ed25519")] pub fn try_into_ed25519(self) -> Result { self.try_into() } + #[cfg(feature = "secp256k1")] + #[deprecated( + since = "0.2.0", + note = "This method name does not follow Rust naming conventions, use `Keypair::try_into_secp256k1` instead." + )] + pub fn into_secp256k1(self) -> Option { + match self.try_into() { + Ok(k) => Some(k), + Err(_) => None, + } + } + #[cfg(feature = "secp256k1")] pub fn try_into_secp256k1(self) -> Result { self.try_into() } + #[cfg(feature = "rsa")] + #[deprecated( + since = "0.2.0", + note = "This method name does not follow Rust naming conventions, use `Keypair::try_into_rsa` instead." + )] + pub fn into_rsa(self) -> Option { + match self.try_into() { + Ok(k) => Some(k), + Err(_) => None, + } + } + #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] pub fn try_into_rsa(self) -> Result { self.try_into() } + #[cfg(feature = "ecdsa")] + #[deprecated( + since = "0.2.0", + note = "This method name does not follow Rust naming conventions, use `Keypair::try_into_ecdsa` instead." 
+ )] + pub fn into_ecdsa(self) -> Option { + match self.try_into() { + Ok(k) => Some(k), + Err(_) => None, + } + } + #[cfg(feature = "ecdsa")] pub fn try_into_ecdsa(self) -> Result { self.try_into() From a04b17fb1eefd681d0f484ec1ba89bd3412f7944 Mon Sep 17 00:00:00 2001 From: drHuangMHT Date: Mon, 10 Apr 2023 14:53:57 +0800 Subject: [PATCH 43/83] prepare keys for testing --- identity/src/rsa.rs | 6 +++--- identity/src/test/private_key_ed25519.pk8 | Bin 0 -> 48 bytes .../{rsa-2048.pk8 => private_key_rsa-2048.pk8} | Bin .../{rsa-3072.pk8 => private_key_rsa-3072.pk8} | Bin .../{rsa-4096.pk8 => private_key_rsa-4096.pk8} | Bin identity/src/test/private_key_secp256k1.pk8 | Bin 0 -> 135 bytes identity/src/test/private_key_secp256r1.pk8 | Bin 0 -> 138 bytes identity/src/test/private_key_secp384r1.pk8 | Bin 0 -> 185 bytes 8 files changed, 3 insertions(+), 3 deletions(-) create mode 100644 identity/src/test/private_key_ed25519.pk8 rename identity/src/test/{rsa-2048.pk8 => private_key_rsa-2048.pk8} (100%) rename identity/src/test/{rsa-3072.pk8 => private_key_rsa-3072.pk8} (100%) rename identity/src/test/{rsa-4096.pk8 => private_key_rsa-4096.pk8} (100%) create mode 100644 identity/src/test/private_key_secp256k1.pk8 create mode 100644 identity/src/test/private_key_secp256r1.pk8 create mode 100644 identity/src/test/private_key_secp384r1.pk8 diff --git a/identity/src/rsa.rs b/identity/src/rsa.rs index 66ba35b9706..705091052d7 100644 --- a/identity/src/rsa.rs +++ b/identity/src/rsa.rs @@ -355,9 +355,9 @@ mod tests { use super::*; use quickcheck::*; - const KEY1: &[u8] = include_bytes!("test/rsa-2048.pk8"); - const KEY2: &[u8] = include_bytes!("test/rsa-3072.pk8"); - const KEY3: &[u8] = include_bytes!("test/rsa-4096.pk8"); + const KEY1: &[u8] = include_bytes!("test/private_key_rsa-2048.pk8"); + const KEY2: &[u8] = include_bytes!("test/private_key_rsa-3072.pk8"); + const KEY3: &[u8] = include_bytes!("test/private_key_rsa-4096.pk8"); #[derive(Clone, Debug)] struct 
SomeKeypair(Keypair); diff --git a/identity/src/test/private_key_ed25519.pk8 b/identity/src/test/private_key_ed25519.pk8 new file mode 100644 index 0000000000000000000000000000000000000000..59519a13dfb705d665acf5c8f0bf5728ba3d230f GIT binary patch literal 48 zcmV-00MGw0E&>4nFa-t!D`jv5A_O4S4%j_fq@$V!_~T@Y8iljsV!^2P*2XWV3a>_| GFF|8$6cWt< literal 0 HcmV?d00001 diff --git a/identity/src/test/rsa-2048.pk8 b/identity/src/test/private_key_rsa-2048.pk8 similarity index 100% rename from identity/src/test/rsa-2048.pk8 rename to identity/src/test/private_key_rsa-2048.pk8 diff --git a/identity/src/test/rsa-3072.pk8 b/identity/src/test/private_key_rsa-3072.pk8 similarity index 100% rename from identity/src/test/rsa-3072.pk8 rename to identity/src/test/private_key_rsa-3072.pk8 diff --git a/identity/src/test/rsa-4096.pk8 b/identity/src/test/private_key_rsa-4096.pk8 similarity index 100% rename from identity/src/test/rsa-4096.pk8 rename to identity/src/test/private_key_rsa-4096.pk8 diff --git a/identity/src/test/private_key_secp256k1.pk8 b/identity/src/test/private_key_secp256k1.pk8 new file mode 100644 index 0000000000000000000000000000000000000000..5271b799a20c24b047eac6e49dcd4d74d2e03c4b GIT binary patch literal 135 zcmV;20C@i}frJ7905A{+2P%e0&OHJF1_djD1ON&IZ7^#B0RaRckv!!IAY)fe7U-Vi zc)&4nMoLLHqP9=w`93sywKLckp+o~h00cDRloz?ozQHqW!A^AD*{H0WE8rU2Qp>ZE pX26wmByfw=c{OGUt`UZC*1;wsoOFmNB%l9|LEML04tZ_p9&{v?HG%*D literal 0 HcmV?d00001 diff --git a/identity/src/test/private_key_secp256r1.pk8 b/identity/src/test/private_key_secp256r1.pk8 new file mode 100644 index 0000000000000000000000000000000000000000..81813a77b3e5703d99ca6318ee0862171d0a96f6 GIT binary patch literal 138 zcmXqLY-eI*Fc4;A*J|@PXUoLM#sOw9GqSVf8e}suGO{QXoHWsAT(-o<`RQAKo_}7) zl^mu=&f2zFU=!b|P1BX{FfVjrc4A=R(UTIooXHWw5tw&7E>8J($<)(NloV%O;$rF( qHm+q}SGDAghINqSB_3baZ~rQ{y`Lk(ctBRVxV~_yW&P%Jb?X7gpE3*p literal 0 HcmV?d00001 diff --git a/identity/src/test/private_key_secp384r1.pk8 b/identity/src/test/private_key_secp384r1.pk8 new 
file mode 100644 index 0000000000000000000000000000000000000000..486992f38347aecf8afa89dfd77eecfb52907c52 GIT binary patch literal 185 zcmV;q07m~Xfwlqx05A{+2P%e0&OHJF1_djD1OOrgfu1mdn*sp=1Tb@pT3Sh$R{Oug z;@(v58pbwauz{bf Date: Mon, 10 Apr 2023 10:17:45 +0100 Subject: [PATCH 44/83] Add roundtrip tests --- Cargo.lock | 11 ++++- examples/dcutr/src/main.rs | 4 +- examples/file-sharing/src/network.rs | 4 +- examples/relay-server/src/main.rs | 4 +- identity/Cargo.toml | 7 +-- identity/src/keypair.rs | 70 +++++++++++++++++++++++++--- protocols/identify/src/protocol.rs | 3 +- 7 files changed, 87 insertions(+), 16 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 738ceb1f385..030506ec287 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1745,6 +1745,12 @@ version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ebdb29d2ea9ed0083cd8cece49bbd968021bd99b0849edb4a9a7ee0fdf6a4e0" +[[package]] +name = "hex-literal" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fe2267d4ed49bc07b63801559be28c718ea06c4738b7a03c94df7386d2cde46" + [[package]] name = "hex_fmt" version = "0.3.0" @@ -2439,6 +2445,7 @@ dependencies = [ "bs58", "criterion", "ed25519-dalek", + "hex-literal 0.4.1", "libsecp256k1", "log", "multiaddr", @@ -2868,7 +2875,7 @@ dependencies = [ "futures", "futures-rustls", "hex", - "hex-literal", + "hex-literal 0.3.4", "libp2p-core", "libp2p-identity", "libp2p-swarm", @@ -2919,7 +2926,7 @@ dependencies = [ "futures", "futures-timer", "hex", - "hex-literal", + "hex-literal 0.3.4", "if-watch", "libp2p-core", "libp2p-identity", diff --git a/examples/dcutr/src/main.rs b/examples/dcutr/src/main.rs index fd455cbaa86..406b360cd8f 100644 --- a/examples/dcutr/src/main.rs +++ b/examples/dcutr/src/main.rs @@ -281,5 +281,7 @@ fn generate_ed25519(secret_key_seed: u8) -> identity::Keypair { let mut bytes = [0u8; 32]; bytes[0] = secret_key_seed; - identity::ed25519::Keypair::try_decode(&mut 
bytes).expect("only errors on wrong length").into() + identity::ed25519::Keypair::try_decode(&mut bytes) + .expect("only errors on wrong length") + .into() } diff --git a/examples/file-sharing/src/network.rs b/examples/file-sharing/src/network.rs index 64bf8f48b97..86d231a7cff 100644 --- a/examples/file-sharing/src/network.rs +++ b/examples/file-sharing/src/network.rs @@ -39,7 +39,9 @@ pub async fn new( Some(seed) => { let mut bytes = [0u8; 32]; bytes[0] = seed; - identity::ed25519::Keypair::try_decode(&mut bytes).unwrap().into() + identity::ed25519::Keypair::try_decode(&mut bytes) + .unwrap() + .into() } None => identity::Keypair::generate_ed25519(), }; diff --git a/examples/relay-server/src/main.rs b/examples/relay-server/src/main.rs index 0c64317336b..c0f18dfa433 100644 --- a/examples/relay-server/src/main.rs +++ b/examples/relay-server/src/main.rs @@ -102,7 +102,9 @@ struct Behaviour { fn generate_ed25519(secret_key_seed: u8) -> identity::Keypair { let mut bytes = [0u8; 32]; bytes[0] = secret_key_seed; - identity::ed25519::Keypair::try_decode(&mut bytes).expect("only errors on wrong length").into() + identity::ed25519::Keypair::try_decode(&mut bytes) + .expect("only errors on wrong length") + .into() } #[derive(Debug, Parser)] diff --git a/identity/Cargo.toml b/identity/Cargo.toml index 83af0c96201..efc82762330 100644 --- a/identity/Cargo.toml +++ b/identity/Cargo.toml @@ -41,11 +41,12 @@ ed25519 = [ "ed25519-dalek", "prost", "rand", "zeroize" ] peerid = [ "multihash", "multiaddr", "bs58", "rand", "thiserror" ] [dev-dependencies] -quickcheck = { package = "quickcheck-ext", path = "../misc/quickcheck-ext" } base64 = "0.21.0" -serde_json = "1.0" -rmp-serde = "1.0" criterion = "0.4" +hex-literal = "0.4.1" +quickcheck = { package = "quickcheck-ext", path = "../misc/quickcheck-ext" } +rmp-serde = "1.0" +serde_json = "1.0" [[bench]] name = "peer_id" diff --git a/identity/src/keypair.rs b/identity/src/keypair.rs index 769a7ae2cee..b73a2d46592 100644 --- 
a/identity/src/keypair.rs +++ b/identity/src/keypair.rs @@ -664,16 +664,72 @@ mod tests { #[test] #[cfg(feature = "ed25519")] - fn keypair_protobuf_roundtrip() { - let expected_keypair = Keypair::generate_ed25519(); - let expected_peer_id = expected_keypair.public().to_peer_id(); + fn keypair_protobuf_roundtrip_ed25519() { + let priv_key = Keypair::try_decode_protobuf_encoding(&hex_literal::hex!("080112407e0830617c4a7de83925dfb2694556b12936c477a0e1feb2e148ec9da60fee7d1ed1e8fae2c4a144b8be8fd4b47bf3d3b34b871c3cacf6010f0e42d474fce27e")).unwrap(); + let pub_key = PublicKey::try_decode_protobuf_encoding(&hex_literal::hex!( + "080112201ed1e8fae2c4a144b8be8fd4b47bf3d3b34b871c3cacf6010f0e42d474fce27e" + )) + .unwrap(); - let encoded = expected_keypair.encode_protobuf_encoding(); + roundtrip_protobuf_encoding(&priv_key, &pub_key); + } - let keypair = Keypair::try_decode_protobuf_encoding(&encoded).unwrap(); - let peer_id = keypair.public().to_peer_id(); + #[test] + #[cfg(feature = "secp256k1")] + fn keypair_protobuf_roundtrip_secp256k1() { + let priv_key = Keypair::try_decode_protobuf_encoding(&hex_literal::hex!( + "080212201e4f6a12b43bec6871976295bcb13aace62a7e7b821334125d3ed3b720af419f" + )) + .unwrap(); + let pub_key = PublicKey::try_decode_protobuf_encoding(&hex_literal::hex!( + "0802122102f0a81ddde0a3180610155ff3b2d98d683a6831fad0c84ba36cd49b81eaa7cf8f" + )) + .unwrap(); - assert_eq!(expected_peer_id, peer_id); + roundtrip_protobuf_encoding(&priv_key, &pub_key); + } + + #[test] + #[cfg(feature = "ecdsa")] + fn keypair_protobuf_roundtrip_ecdsa() { + let priv_key = Keypair::try_decode_protobuf_encoding(&hex_literal::hex!( + "08031220f0d87659b402f0d47589e7670ca0954036f87b2fbf11fafbc66f4de7c3eb10a2" + )) + .unwrap(); + let pub_key = 
PublicKey::try_decode_protobuf_encoding(&hex_literal::hex!("0803125b3059301306072a8648ce3d020106082a8648ce3d03010703420004de6af15d8bc9b7f7c6eb8b32888d0da721d33f16af062306bafc64cdad741240cd61d6d9884c4899308ea25513a5cc03495ff88200dc7ae8e603ceb6698d2fee")).unwrap(); + + roundtrip_protobuf_encoding(&priv_key, &pub_key); + } + + #[test] + #[cfg(feature = "rsa")] + fn keypair_protobuf_roundtrip_rsa() { + let priv_key = Keypair::try_decode_protobuf_encoding(&hex_literal::hex!("080012c81230820944020100300d06092a864886f70d01010105000482092e3082092a02010002820201009c897f33e0d0b3297f2fe404ea5b7a98096b329693292aefc2d05ef1e82fd0e121ce74ec77d75ef4b532fa34dee2a19626f3389c6d2bb9b8de614e138302bc4254727a7ee35f7827f1094403bc2fe8e1f64d0e8a2a77e8f3a879f69f94a71f3589de184f5910d6b5270f58e684f71ddd3a3f486a4cb2c390194ee6e9b65f9f1dff7b8f6c0bf4e0c4ac683bd4ba2d2fd022fdaaa3db75e90e16662fc4b3aca4c9aa65514d51690cd372c2b96c61a1ed4f9298ec213d5398aa9120379477118391104deb77ab157a59b70714e95caa9b55d15fa386b0c80f36e50d738bdd10e0baa3c3eafb4703dec3d6a757601f18541eb87ae9111f60eae17d843cf1047dbf5a8982ad9ef0aa88f59b17689f1210a305f7da8a012c1a58e4e82b48811618e98cef13c9eb28ce6fcc589ea5d902149ee4f49f8b39758b349ca90be5a8bddf4a46bacaaa48aec1c0c6e996ab13f2cb351c351d40b0a7b8e0c12b366a8555c392b0aadf71fe746eb4f8ea0b829da6ddcc39081abdd40ea2f3d8778b9a3f06a480ef34234975e919c0d64d818f2e904a9f251c8669dbb1666cb2c28e955446fc7efd460d4677ed922ccff1e24bb5a8699e050075c7897a64daa1bc2f05e4132e76c4f72baea5d073042254236c116ea3e40540bb7986468b4468aadfadad068331ef9dbe13e4012196e8eb9f8cdba096c35f09e80893ea68f3253dc41053983855e50203010001028202002699dd6d4c960a68443dea0bb04308b32f37690d2a92ef4c9a8cc9acfba5b6eb9d6b8cf7b701bc1fba032d2216886a725d7e82ca483d8d19e274ba4d23746c3a2b1ae3cc2083ad5ca41ab5d3f9f712858e38284ab7f843d0ba0e015c0ecb3b6df766763632ef6d12d4e3faf73578bebb8c1e88dbf5b7eb73c059eda55a5cb01f349e229af143dc9d832a5cfeb33e6b58f717f8995987f5058d4e7b9f14f390db4e1297feea016eb141ce74ed1e125133db21acb0f1af88a91f0a83ca2fa678fc2fba1743b643a
09d38fe1d1102d1eb6639304d61ec7c190c5f6576c5d9a8ccd2198a398ae75333feb51324ffc60b38cb2e90d8a2694b7c0048f47016bb15cb36c482e038e455254e35fc4f0e0babc84e046bd441b0291412c784e4e9639664cad07cb09a01626049cdbfd1d9ad75b314448df811f4988c6e64d93ebefc602b574d0763e31e9d567c891349cfe75f0ca37429b743d6452d1fffc1f9f4901e5f68772b4f24542d654fd29b893e44c85e6037bba304d48873721131f18248b16bd71384abd00f9336c73f071a4ca2456878070f9704ed7df0cd64e5c3e5949a78968525865b96e71d5015dc68bff857f2bba05a3976d83d8866d4dfe8caac144741ae97879a765dc0d4c7c34aa79ef6ebc86b5bf32b50ad995780f5f1a6c052eec5671164f407061a9c6bd49251b1bb7803bb222f5d859c321601236dd893dc9d810282010100cf13fe9908fe59e947122d5606cf9f70c123b7cb43a1916463e729f01dc31c3b70cb6a37bde542ecdc6029cea39b28c99c6395d0aaa29c1c4cf14b3fed9e0fcd793e31b7a09930352261c03b3dc0b66a62f8ae3771b705382cfeb6130d4a7e5b4854117a05767b99915099e2d542fc3fa505a0dbe217b169b46714384774380408bd8b3dbf0c9a177bbd3e64af115988159f485d70c885171007646765b50eb9bbebfabe60e71c69b2b822a124e235ad05f2b55cda9ddc78d671436981a3064a80c29bb37e6b5581a9372a6366c79af695a39ea0f3839ed77ec3985252f2e126955774727955b63ccbeff64208fd7280e8ba52e4297cb6bf72b44b07618923610282010100c184cd27d3a643df768764a7c66de40c222bdb4b7e02c35aa1e4a8377676247c629df58ecb5bb541fb4aac1bde35057b0b266bddd818876909b8fff1aca4859515069258d84b0c5178e4bff6842c68d39cad9a3a03aa6533fa76b92c995f381eb9c83f5e6118fd962807c931b7ca50dc20b261f2a71928f3e882af4da979cef843970cb2af68b86477b92ca90c8c0f1d640d39e943704366314c446f7a54851419e60f4e92e1e69bd52ee7294f9eddc6dc873144b0d0d9f13eb8d6aa955cf11edbd5a0673d8b70ef937e54fdaade185facc8437496d43a53169342280718a3679170ef4a0e582af4db598210fb64616f0d8daa08519d875e37c4d02e1af1c5050282010100c14865648c3b74cac3b698b06a4d130218947131fd9f69e8ed42d0273a706a02a546888f1ce547f173c52260a8dee354436fc45f6f55b626c83e94c147d637e3cede1963cf380d021b64681c2388a3fb6b03b9013157e63c47eb3b214f4f8fdf3e04920775dfe080375da7354d5f67b9341babc87121324c7ac197e2ebf6f36df8868ad8086207d6117e5325812ecd85b2c0e8b7a6d4d33cf28e23ce4ae593a
8135ab0c1500b87beb4bd203d8f02c19d0d273cd73d8b094594cb4563ce47cf506d1cb85df28ad6d5de8f0a369bb185d7d1565672deb8a4e37983b1c26d801c5d7a19962c5f4a7c7e04d0a6e77e22aae4ddd54417890dca39aa23d4c03feed4210282010100915975de1c121d9892264f6bd496655ad7afa91ea29ee0ac0a3cfc3bec3600618c90a80780a67915fdf0b0249e59a4ac2e4bc568f30e3966a36ed88e64e58d8fd4230378c7bc569c3af955558b20effb410b0373df9cf4367e40fe04898e0350d0a99f2efc2f1108df3839dda5f5c7960ed8ecc89cc9410131fa364156b1aecab9b992480387dc3759d533be25366d83ddca315d0ad21f4d7a69965d44bc86d7fa3bd9f3624f5a2e6188c1073e4e4cb5389e325b2d93309f0a453ab71548a1b253dbb886d2ab114060bfda864cf853c648b88231e7b7afb70895c272de219b5a06db945f4336e5ccd393ff25522cab220644091a06731361a8f1a28b7ea169210282010100bd80196d3d11a8257b5f439776388f4d53e4da3690f710e9aff3e3e970e545ec92d285e7049da000d5364dd7f550c17cf662d516282fe89813cab322ce5aad5cc744c52a024dd1a94aa9484037281637d1c8e3503b6ed6231225c93f7865d29269c899bbf5d248cf9d41f9aee9b9cb2afac172ba17c2df0699c6604b4ce7ab95c91c5f7fc7804f2bde268a7e15c512920f7325cfba47463da1c201549fc44c2bc4fbe5d8619cde9733470c5e38b996f5c3633c6311af88663ce4d2d0dc415ac5c8258e1aa7659f9f35d4b90b7b9a5a888867d75636e6443cce5391c57d48d56409029edef53e1a5130eb1fa708758bc821e15f7c53edf6d4c6f868a6b5b0c1e6")).unwrap(); + let pub_key = 
PublicKey::try_decode_protobuf_encoding(&hex_literal::hex!("080012a60430820222300d06092a864886f70d01010105000382020f003082020a02820201009c897f33e0d0b3297f2fe404ea5b7a98096b329693292aefc2d05ef1e82fd0e121ce74ec77d75ef4b532fa34dee2a19626f3389c6d2bb9b8de614e138302bc4254727a7ee35f7827f1094403bc2fe8e1f64d0e8a2a77e8f3a879f69f94a71f3589de184f5910d6b5270f58e684f71ddd3a3f486a4cb2c390194ee6e9b65f9f1dff7b8f6c0bf4e0c4ac683bd4ba2d2fd022fdaaa3db75e90e16662fc4b3aca4c9aa65514d51690cd372c2b96c61a1ed4f9298ec213d5398aa9120379477118391104deb77ab157a59b70714e95caa9b55d15fa386b0c80f36e50d738bdd10e0baa3c3eafb4703dec3d6a757601f18541eb87ae9111f60eae17d843cf1047dbf5a8982ad9ef0aa88f59b17689f1210a305f7da8a012c1a58e4e82b48811618e98cef13c9eb28ce6fcc589ea5d902149ee4f49f8b39758b349ca90be5a8bddf4a46bacaaa48aec1c0c6e996ab13f2cb351c351d40b0a7b8e0c12b366a8555c392b0aadf71fe746eb4f8ea0b829da6ddcc39081abdd40ea2f3d8778b9a3f06a480ef34234975e919c0d64d818f2e904a9f251c8669dbb1666cb2c28e955446fc7efd460d4677ed922ccff1e24bb5a8699e050075c7897a64daa1bc2f05e4132e76c4f72baea5d073042254236c116ea3e40540bb7986468b4468aadfadad068331ef9dbe13e4012196e8eb9f8cdba096c35f09e80893ea68f3253dc41053983855e50203010001")).unwrap(); + + roundtrip_protobuf_encoding(&priv_key, &pub_key); + } + + fn roundtrip_protobuf_encoding(private_key: &Keypair, public_key: &PublicKey) { + assert_eq!(&private_key.public(), public_key); + + let encoded_priv = private_key.encode_protobuf_encoding(); + let decoded_priv = Keypair::try_decode_protobuf_encoding(&encoded_priv).unwrap(); + + assert_eq!( + private_key.public().to_peer_id(), + decoded_priv.public().to_peer_id(), + "PeerId from roundtripped private key should be the same" + ); + + let encoded_public = private_key.public().encode_protobuf_encoding(); + let decoded_public = PublicKey::try_decode_protobuf_encoding(&encoded_public).unwrap(); + + assert_eq!( + private_key.public().to_peer_id(), + decoded_public.to_peer_id(), + "PeerId from roundtripped public key should be the same" + ); } #[test] 
diff --git a/protocols/identify/src/protocol.rs b/protocols/identify/src/protocol.rs index 53e9e79f0d5..82b3f7c4089 100644 --- a/protocols/identify/src/protocol.rs +++ b/protocols/identify/src/protocol.rs @@ -235,7 +235,8 @@ impl TryFrom for Info { addrs }; - let public_key = PublicKey::try_decode_protobuf_encoding(&msg.publicKey.unwrap_or_default())?; + let public_key = + PublicKey::try_decode_protobuf_encoding(&msg.publicKey.unwrap_or_default())?; let observed_addr = match parse_multiaddr(msg.observedAddr.unwrap_or_default()) { Ok(a) => a, From a3285d732e849e454e5e3f520d2ee3ca3e0a721c Mon Sep 17 00:00:00 2001 From: Thomas Eizinger Date: Mon, 10 Apr 2023 10:42:29 +0100 Subject: [PATCH 45/83] Undo mdns changes --- protocols/mdns/CHANGELOG.md | 65 +++++++++++------------- protocols/mdns/Cargo.toml | 12 ++--- protocols/mdns/src/behaviour.rs | 72 ++++++++++++++++++++++++--- protocols/mdns/tests/use-async-std.rs | 20 ++++---- protocols/mdns/tests/use-tokio.rs | 16 +++--- 5 files changed, 119 insertions(+), 66 deletions(-) diff --git a/protocols/mdns/CHANGELOG.md b/protocols/mdns/CHANGELOG.md index 04a8162e065..5d3b4f79c55 100644 --- a/protocols/mdns/CHANGELOG.md +++ b/protocols/mdns/CHANGELOG.md @@ -1,16 +1,9 @@ -# 0.44.0 [unreleased] - -- Change `mdns::Event` to hold `Vec` and remove `DiscoveredAddrsIter` and `ExpiredAddrsIter`. - See [PR 3621]. - -[PR 3621]: https://github.com/libp2p/rust-libp2p/pull/3621 - -# 0.43.1 [unreleased] +## 0.43.1 - Derive `Clone` for `mdns::Event`. See [PR 3606]. [PR 3606]: https://github.com/libp2p/rust-libp2p/pull/3606 -# 0.43.0 +## 0.43.0 - Update to `libp2p-core` `v0.39.0`. @@ -25,7 +18,7 @@ [PR 3153]: https://github.com/libp2p/rust-libp2p/pull/3153 [PR 3367]: https://github.com/libp2p/rust-libp2p/pull/3367 -# 0.42.0 +## 0.42.0 - Update to `libp2p-core` `v0.38.0`. @@ -52,7 +45,7 @@ and move and rename `Mdns` to `async_io::Behaviour`. See [PR 3096]. 
[PR 3102]: https://github.com/libp2p/rust-libp2p/pull/3102 [PR 3090]: https://github.com/libp2p/rust-libp2p/pull/3090 -# 0.41.0 +## 0.41.0 - Remove default features. If you previously depended on `async-io` you need to enable this explicitly now. See [PR 2918]. @@ -71,7 +64,7 @@ and move and rename `Mdns` to `async_io::Behaviour`. See [PR 3096]. [PR 2977]: https://github.com/libp2p/rust-libp2p/pull/2977 [PR 2978]: https://github.com/libp2p/rust-libp2p/pull/2978 -# 0.40.0 +## 0.40.0 - Update to `libp2p-swarm` `v0.39.0`. @@ -85,30 +78,30 @@ and move and rename `Mdns` to `async_io::Behaviour`. See [PR 3096]. [PR 2748]: https://github.com/libp2p/rust-libp2p/pull/2748 -# 0.39.0 +## 0.39.0 - Update to `libp2p-swarm` `v0.38.0`. - Update to `if-watch` `v1.1.1`. - Update to `libp2p-core` `v0.35.0`. -# 0.38.0 +## 0.38.0 - Update to `libp2p-core` `v0.34.0`. - Update to `libp2p-swarm` `v0.37.0`. -# 0.37.0 +## 0.37.0 - Update to `libp2p-core` `v0.33.0`. - Update to `libp2p-swarm` `v0.36.0`. -# 0.36.0 +## 0.36.0 - Update to `libp2p-swarm` `v0.35.0`. -# 0.35.0 [2022-02-22] +## 0.35.0 [2022-02-22] - Update to `libp2p-core` `v0.32.0`. @@ -118,7 +111,7 @@ and move and rename `Mdns` to `async_io::Behaviour`. See [PR 3096]. [PR 2445]: https://github.com/libp2p/rust-libp2p/pull/2445 -# 0.34.0 [2022-01-27] +## 0.34.0 [2022-01-27] - Update dependencies. @@ -143,11 +136,11 @@ and move and rename `Mdns` to `async_io::Behaviour`. See [PR 3096]. [PR 2383]: https://github.com/libp2p/rust-libp2p/pull/2383 -# 0.33.0 [2021-11-16] +## 0.33.0 [2021-11-16] - Update dependencies. -# 0.32.0 [2021-11-01] +## 0.32.0 [2021-11-01] - Make default features of `libp2p-core` optional. [PR 2181](https://github.com/libp2p/rust-libp2p/pull/2181) @@ -163,27 +156,27 @@ and move and rename `Mdns` to `async_io::Behaviour`. See [PR 3096]. 
[PR 2161]: https://github.com/libp2p/rust-libp2p/pull/2161/ [PR 2212]: https://github.com/libp2p/rust-libp2p/pull/2212/ -# 0.31.0 [2021-07-12] +## 0.31.0 [2021-07-12] - Update dependencies. -# 0.30.2 [2021-05-06] +## 0.30.2 [2021-05-06] - Fix discovered event emission. [PR 2065](https://github.com/libp2p/rust-libp2p/pull/2065) -# 0.30.1 [2021-04-21] +## 0.30.1 [2021-04-21] - Fix timely discovery of peers after listening on a new address. [PR 2053](https://github.com/libp2p/rust-libp2p/pull/2053/) -# 0.30.0 [2021-04-13] +## 0.30.0 [2021-04-13] - Derive `Debug` and `Clone` for `MdnsConfig`. - Update `libp2p-swarm`. -# 0.29.0 [2021-03-17] +## 0.29.0 [2021-03-17] - Introduce `MdnsConfig` with configurable TTL of discovered peer records and configurable multicast query interval. The default @@ -200,19 +193,19 @@ and move and rename `Mdns` to `async_io::Behaviour`. See [PR 3096]. - Update `libp2p-swarm`. -# 0.28.1 [2021-02-15] +## 0.28.1 [2021-02-15] - Update dependencies. -# 0.28.0 [2021-01-12] +## 0.28.0 [2021-01-12] - Update dependencies. -# 0.27.0 [2020-12-17] +## 0.27.0 [2020-12-17] - Update `libp2p-swarm` and `libp2p-core`. -# 0.26.0 [2020-12-08] +## 0.26.0 [2020-12-08] - Create multiple multicast response packets as required to avoid hitting the limit of 9000 bytes per MDNS packet. @@ -229,35 +222,35 @@ and move and rename `Mdns` to `async_io::Behaviour`. See [PR 3096]. still being polled by the `tokio` runtime. [PR 1830](https://github.com/libp2p/rust-libp2p/pull/1830). -# 0.25.0 [2020-11-25] +## 0.25.0 [2020-11-25] - Update `libp2p-swarm` and `libp2p-core`. -# 0.24.0 [2020-11-09] +## 0.24.0 [2020-11-09] - Update dependencies. -# 0.23.0 [2020-10-16] +## 0.23.0 [2020-10-16] - Update `libp2p-swarm` and `libp2p-core`. - Double receive buffer to 4KiB. [PR 1779](https://github.com/libp2p/rust-libp2p/pull/1779/files). -# 0.22.0 [2020-09-09] +## 0.22.0 [2020-09-09] - Update `libp2p-swarm` and `libp2p-core`. 
-# 0.21.0 [2020-08-18] +## 0.21.0 [2020-08-18] - Bump `libp2p-core` and `libp2p-swarm` dependencies. - Allow libp2p-mdns to use either async-std or tokio to drive required UDP socket ([PR 1699](https://github.com/libp2p/rust-libp2p/pull/1699)). -# 0.20.0 [2020-07-01] +## 0.20.0 [2020-07-01] - Updated dependencies. -# 0.19.2 [2020-06-22] +## 0.19.2 [2020-06-22] - Updated dependencies. diff --git a/protocols/mdns/Cargo.toml b/protocols/mdns/Cargo.toml index 37a2e4fc7e1..9a7d5b0f633 100644 --- a/protocols/mdns/Cargo.toml +++ b/protocols/mdns/Cargo.toml @@ -2,7 +2,7 @@ name = "libp2p-mdns" edition = "2021" rust-version = "1.62.0" -version = "0.43.0" +version = "0.43.1" description = "Implementation of the libp2p mDNS discovery method" authors = ["Parity Technologies "] license = "MIT" @@ -11,18 +11,18 @@ keywords = ["peer-to-peer", "libp2p", "networking"] categories = ["network-programming", "asynchronous"] [dependencies] -async-io = { version = "1.3.1", optional = true } +async-io = { version = "1.13.0", optional = true } data-encoding = "2.3.2" -futures = "0.3.27" +futures = "0.3.28" if-watch = "3.0.0" libp2p-core = { version = "0.39.0", path = "../../core" } -libp2p-swarm = { version = "0.42.0", path = "../../swarm" } +libp2p-swarm = { version = "0.42.1", path = "../../swarm" } libp2p-identity = { version = "0.1.0", path = "../../identity" } log = "0.4.14" rand = "0.8.3" smallvec = "1.6.1" socket2 = { version = "0.4.0", features = ["all"] } -tokio = { version = "1.19", default-features = false, features = ["net", "time"], optional = true} +tokio = { version = "1.27", default-features = false, features = ["net", "time"], optional = true} trust-dns-proto = { version = "0.22.0", default-features = false, features = ["mdns", "tokio-runtime"] } void = "1.0.2" @@ -37,7 +37,7 @@ libp2p-noise = { path = "../../transports/noise" } libp2p-swarm = { path = "../../swarm", features = ["tokio", "async-std"] } libp2p-tcp = { path = "../../transports/tcp", features = ["tokio", 
"async-io"] } libp2p-yamux = { path = "../../muxers/yamux" } -tokio = { version = "1.19", default-features = false, features = ["macros", "rt", "rt-multi-thread", "time"] } +tokio = { version = "1.27", default-features = false, features = ["macros", "rt", "rt-multi-thread", "time"] } libp2p-swarm-test = { path = "../../swarm-test" } [[test]] diff --git a/protocols/mdns/src/behaviour.rs b/protocols/mdns/src/behaviour.rs index 5186ce91cb7..92e38c04998 100644 --- a/protocols/mdns/src/behaviour.rs +++ b/protocols/mdns/src/behaviour.rs @@ -285,7 +285,7 @@ where } } // Emit discovered event. - let mut discovered = Vec::new(); + let mut discovered = SmallVec::<[(PeerId, Multiaddr); 4]>::new(); for iface_state in self.iface_states.values_mut() { while let Poll::Ready((peer, addr, expiration)) = iface_state.poll(cx, &self.listen_addresses) @@ -304,13 +304,15 @@ where } } if !discovered.is_empty() { - let event = Event::Discovered(discovered); + let event = Event::Discovered(DiscoveredAddrsIter { + inner: discovered.into_iter(), + }); return Poll::Ready(ToSwarm::GenerateEvent(event)); } // Emit expired event. let now = Instant::now(); let mut closest_expiration = None; - let mut expired = Vec::new(); + let mut expired = SmallVec::<[(PeerId, Multiaddr); 4]>::new(); self.discovered_nodes.retain(|(peer, addr, expiration)| { if *expiration <= now { log::info!("expired: {} {}", peer, addr); @@ -321,7 +323,9 @@ where true }); if !expired.is_empty() { - let event = Event::Expired(expired); + let event = Event::Expired(ExpiredAddrsIter { + inner: expired.into_iter(), + }); return Poll::Ready(ToSwarm::GenerateEvent(event)); } if let Some(closest_expiration) = closest_expiration { @@ -338,11 +342,67 @@ where #[derive(Debug, Clone)] pub enum Event { /// Discovered nodes through mDNS. - Discovered(Vec<(PeerId, Multiaddr)>), + Discovered(DiscoveredAddrsIter), /// The given combinations of `PeerId` and `Multiaddr` have expired. /// /// Each discovered record has a time-to-live. 
When this TTL expires and the address hasn't /// been refreshed, we remove it from the list and emit it as an `Expired` event. - Expired(Vec<(PeerId, Multiaddr)>), + Expired(ExpiredAddrsIter), +} + +/// Iterator that produces the list of addresses that have been discovered. +#[derive(Clone)] +pub struct DiscoveredAddrsIter { + inner: smallvec::IntoIter<[(PeerId, Multiaddr); 4]>, +} + +impl Iterator for DiscoveredAddrsIter { + type Item = (PeerId, Multiaddr); + + #[inline] + fn next(&mut self) -> Option { + self.inner.next() + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + self.inner.size_hint() + } +} + +impl ExactSizeIterator for DiscoveredAddrsIter {} + +impl fmt::Debug for DiscoveredAddrsIter { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt.debug_struct("DiscoveredAddrsIter").finish() + } +} + +/// Iterator that produces the list of addresses that have expired. +#[derive(Clone)] +pub struct ExpiredAddrsIter { + inner: smallvec::IntoIter<[(PeerId, Multiaddr); 4]>, +} + +impl Iterator for ExpiredAddrsIter { + type Item = (PeerId, Multiaddr); + + #[inline] + fn next(&mut self) -> Option { + self.inner.next() + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + self.inner.size_hint() + } +} + +impl ExactSizeIterator for ExpiredAddrsIter {} + +impl fmt::Debug for ExpiredAddrsIter { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt.debug_struct("ExpiredAddrsIter").finish() + } } diff --git a/protocols/mdns/tests/use-async-std.rs b/protocols/mdns/tests/use-async-std.rs index bfc3cd1201d..139fcca1d50 100644 --- a/protocols/mdns/tests/use-async-std.rs +++ b/protocols/mdns/tests/use-async-std.rs @@ -61,13 +61,13 @@ async fn test_expired_async_std() { loop { match futures::future::select(a.next_behaviour_event(), b.next_behaviour_event()).await { - Either::Left((Event::Expired(peers), _)) => { - if peers.into_iter().any(|(p, _)| p == b_peer_id) { + Either::Left((Event::Expired(mut peers), _)) => { + if 
peers.any(|(p, _)| p == b_peer_id) { return; } } - Either::Right((Event::Expired(peers), _)) => { - if peers.into_iter().any(|(p, _)| p == a_peer_id) { + Either::Right((Event::Expired(mut peers), _)) => { + if peers.any(|(p, _)| p == a_peer_id) { return; } } @@ -93,8 +93,8 @@ async fn test_no_expiration_on_close_async_std() { // 1. Connect via address from mDNS event loop { - if let Event::Discovered(peers) = a.next_behaviour_event().await { - if let Some((_, addr)) = peers.into_iter().find(|(p, _)| p == &b_peer_id) { + if let Event::Discovered(mut peers) = a.next_behaviour_event().await { + if let Some((_, addr)) = peers.find(|(p, _)| p == &b_peer_id) { a.dial_and_wait(addr).await; break; } @@ -130,13 +130,13 @@ async fn run_discovery_test(config: Config) { while !discovered_a && !discovered_b { match futures::future::select(a.next_behaviour_event(), b.next_behaviour_event()).await { - Either::Left((Event::Discovered(peers), _)) => { - if peers.into_iter().any(|(p, _)| p == b_peer_id) { + Either::Left((Event::Discovered(mut peers), _)) => { + if peers.any(|(p, _)| p == b_peer_id) { discovered_b = true; } } - Either::Right((Event::Discovered(peers), _)) => { - if peers.into_iter().any(|(p, _)| p == a_peer_id) { + Either::Right((Event::Discovered(mut peers), _)) => { + if peers.any(|(p, _)| p == a_peer_id) { discovered_a = true; } } diff --git a/protocols/mdns/tests/use-tokio.rs b/protocols/mdns/tests/use-tokio.rs index 229418437f4..e18ae28fee7 100644 --- a/protocols/mdns/tests/use-tokio.rs +++ b/protocols/mdns/tests/use-tokio.rs @@ -59,13 +59,13 @@ async fn test_expired_tokio() { loop { match futures::future::select(a.next_behaviour_event(), b.next_behaviour_event()).await { - Either::Left((Event::Expired(peers), _)) => { - if peers.into_iter().any(|(p, _)| p == b_peer_id) { + Either::Left((Event::Expired(mut peers), _)) => { + if peers.any(|(p, _)| p == b_peer_id) { return; } } - Either::Right((Event::Expired(peers), _)) => { - if peers.into_iter().any(|(p, _)| p 
== a_peer_id) { + Either::Right((Event::Expired(mut peers), _)) => { + if peers.any(|(p, _)| p == a_peer_id) { return; } } @@ -86,13 +86,13 @@ async fn run_discovery_test(config: Config) { while !discovered_a && !discovered_b { match futures::future::select(a.next_behaviour_event(), b.next_behaviour_event()).await { - Either::Left((Event::Discovered(peers), _)) => { - if peers.into_iter().any(|(p, _)| p == b_peer_id) { + Either::Left((Event::Discovered(mut peers), _)) => { + if peers.any(|(p, _)| p == b_peer_id) { discovered_b = true; } } - Either::Right((Event::Discovered(peers), _)) => { - if peers.into_iter().any(|(p, _)| p == a_peer_id) { + Either::Right((Event::Discovered(mut peers), _)) => { + if peers.any(|(p, _)| p == a_peer_id) { discovered_a = true; } } From f5cf83466d22a7a11e0158bf11b6c5991885c0a4 Mon Sep 17 00:00:00 2001 From: Thomas Eizinger Date: Mon, 10 Apr 2023 11:01:46 +0100 Subject: [PATCH 46/83] Fix cargo.lock --- Cargo.lock | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a72d6a2aee0..4e01a5dd32a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1799,12 +1799,6 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" -[[package]] -name = "hex-literal" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bcb5b3e439c92a7191df2f9bbe733de8de55c3f86368cdb1c63f8be7e9e328e" - [[package]] name = "hex-literal" version = "0.4.1" @@ -2506,7 +2500,7 @@ dependencies = [ "bs58", "criterion", "ed25519-dalek", - "hex-literal 0.4.1", + "hex-literal", "libsecp256k1", "log", "multiaddr", @@ -2935,7 +2929,7 @@ dependencies = [ "futures", "futures-rustls", "hex", - "hex-literal 0.3.4", + "hex-literal", "libp2p-core", "libp2p-identity", "libp2p-swarm", @@ -2986,7 +2980,7 @@ dependencies = [ "futures", "futures-timer", "hex", - "hex-literal 0.3.4", + 
"hex-literal", "if-watch", "libp2p-core", "libp2p-identity", From 74e7cf3403125d2fa38efd77ed67a76f6a57ad82 Mon Sep 17 00:00:00 2001 From: drHuangMHT Date: Tue, 11 Apr 2023 22:07:30 +0800 Subject: [PATCH 47/83] repetitive 'encoding' method naming removed --- core/src/signed_envelope.rs | 4 +-- identity/src/keypair.rs | 50 +++++++++++++-------------- identity/src/keypair_dummy.rs | 20 +++++------ identity/src/peer_id.rs | 4 +-- identity/tests/keypair_api.rs | 4 +-- misc/keygen/src/config.rs | 2 +- misc/keygen/src/main.rs | 4 +-- protocols/gossipsub/src/behaviour.rs | 2 +- protocols/gossipsub/src/protocol.rs | 4 +-- protocols/identify/src/protocol.rs | 6 ++-- transports/noise/src/io/handshake.rs | 4 +-- transports/plaintext/src/handshake.rs | 4 +-- transports/tls/src/certificate.rs | 4 +-- 13 files changed, 56 insertions(+), 56 deletions(-) diff --git a/core/src/signed_envelope.rs b/core/src/signed_envelope.rs index 014aa912ee5..a9d7ecd5208 100644 --- a/core/src/signed_envelope.rs +++ b/core/src/signed_envelope.rs @@ -76,7 +76,7 @@ impl SignedEnvelope { use quick_protobuf::MessageWrite; let envelope = proto::Envelope { - public_key: self.key.encode_protobuf_encoding(), + public_key: self.key.encode_protobuf(), payload_type: self.payload_type, payload: self.payload, signature: self.signature, @@ -101,7 +101,7 @@ impl SignedEnvelope { proto::Envelope::from_reader(&mut reader, bytes).map_err(DecodeError::from)?; Ok(Self { - key: PublicKey::try_decode_protobuf_encoding(&envelope.public_key)?, + key: PublicKey::try_decode_protobuf(&envelope.public_key)?, payload_type: envelope.payload_type.to_vec(), payload: envelope.payload.to_vec(), signature: envelope.signature.to_vec(), diff --git a/identity/src/keypair.rs b/identity/src/keypair.rs index b73a2d46592..745499a2d26 100644 --- a/identity/src/keypair.rs +++ b/identity/src/keypair.rs @@ -254,13 +254,13 @@ impl Keypair { } /// Encode a private key as protobuf structure. 
- #[deprecated(since = "0.2.0", note = "Renamed to `encode_protobuf_encoding`")] + #[deprecated(since = "0.2.0", note = "Renamed to `encode_protobuf`")] pub fn to_protobuf_encoding(&self) -> Vec { - self.encode_protobuf_encoding() + self.encode_protobuf() } /// Encode a private key as protobuf structure. - pub fn encode_protobuf_encoding(&self) -> Vec { + pub fn encode_protobuf(&self) -> Vec { use quick_protobuf::MessageWrite; #[allow(deprecated)] @@ -297,15 +297,15 @@ impl Keypair { /// Decode a private key from a protobuf structure and parse it as a [`Keypair`]. #[deprecated( since = "0.2.0", - note = "This method name does not follow Rust naming conventions, use `Keypair::try_decode_protobuf_encoding` instead." + note = "This method name does not follow Rust naming conventions, use `Keypair::try_decode_protobuf` instead." )] pub fn from_protobuf_encoding(bytes: &[u8]) -> Result { - Self::try_decode_protobuf_encoding(bytes) + Self::try_decode_protobuf(bytes) } /// Try to decode a private key from a protobuf structure and parse it as a [`Keypair`]. #[cfg_attr(not(feature = "ed25519"), allow(unused_mut))] - pub fn try_decode_protobuf_encoding(bytes: &[u8]) -> Result { + pub fn try_decode_protobuf(bytes: &[u8]) -> Result { use quick_protobuf::MessageRead; let mut reader = BytesReader::from_bytes(bytes); @@ -509,14 +509,14 @@ impl PublicKey { /// Encode the public key into a protobuf structure for storage or /// exchange with other nodes. - #[deprecated(since = "0.2.0", note = "Renamed to `encode_protobuf_encoding`")] + #[deprecated(since = "0.2.0", note = "Renamed to `encode_protobuf`")] pub fn to_protobuf_encoding(&self) -> Vec { - self.encode_protobuf_encoding() + self.encode_protobuf() } /// Encode the public key into a protobuf structure for storage or /// exchange with other nodes. 
- pub fn encode_protobuf_encoding(&self) -> Vec { + pub fn encode_protobuf(&self) -> Vec { use quick_protobuf::MessageWrite; let public_key = proto::PublicKey::from(self); @@ -534,15 +534,15 @@ impl PublicKey { /// or received from another node. #[deprecated( since = "0.2.0", - note = "This method name does not follow Rust naming conventions, use `PublicKey::try_decode_protobuf_encoding` instead." + note = "This method name does not follow Rust naming conventions, use `PublicKey::try_decode_protobuf` instead." )] pub fn from_protobuf_encoding(bytes: &[u8]) -> Result { - Self::try_decode_protobuf_encoding(bytes) + Self::try_decode_protobuf(bytes) } /// Decode a public key from a protobuf structure, e.g. read from storage /// or received from another node. - pub fn try_decode_protobuf_encoding(bytes: &[u8]) -> Result { + pub fn try_decode_protobuf(bytes: &[u8]) -> Result { use quick_protobuf::MessageRead; let mut reader = BytesReader::from_bytes(bytes); @@ -665,8 +665,8 @@ mod tests { #[test] #[cfg(feature = "ed25519")] fn keypair_protobuf_roundtrip_ed25519() { - let priv_key = Keypair::try_decode_protobuf_encoding(&hex_literal::hex!("080112407e0830617c4a7de83925dfb2694556b12936c477a0e1feb2e148ec9da60fee7d1ed1e8fae2c4a144b8be8fd4b47bf3d3b34b871c3cacf6010f0e42d474fce27e")).unwrap(); - let pub_key = PublicKey::try_decode_protobuf_encoding(&hex_literal::hex!( + let priv_key = Keypair::try_decode_protobuf(&hex_literal::hex!("080112407e0830617c4a7de83925dfb2694556b12936c477a0e1feb2e148ec9da60fee7d1ed1e8fae2c4a144b8be8fd4b47bf3d3b34b871c3cacf6010f0e42d474fce27e")).unwrap(); + let pub_key = PublicKey::try_decode_protobuf(&hex_literal::hex!( "080112201ed1e8fae2c4a144b8be8fd4b47bf3d3b34b871c3cacf6010f0e42d474fce27e" )) .unwrap(); @@ -677,11 +677,11 @@ mod tests { #[test] #[cfg(feature = "secp256k1")] fn keypair_protobuf_roundtrip_secp256k1() { - let priv_key = Keypair::try_decode_protobuf_encoding(&hex_literal::hex!( + let priv_key = 
Keypair::try_decode_protobuf(&hex_literal::hex!( "080212201e4f6a12b43bec6871976295bcb13aace62a7e7b821334125d3ed3b720af419f" )) .unwrap(); - let pub_key = PublicKey::try_decode_protobuf_encoding(&hex_literal::hex!( + let pub_key = PublicKey::try_decode_protobuf(&hex_literal::hex!( "0802122102f0a81ddde0a3180610155ff3b2d98d683a6831fad0c84ba36cd49b81eaa7cf8f" )) .unwrap(); @@ -692,11 +692,11 @@ mod tests { #[test] #[cfg(feature = "ecdsa")] fn keypair_protobuf_roundtrip_ecdsa() { - let priv_key = Keypair::try_decode_protobuf_encoding(&hex_literal::hex!( + let priv_key = Keypair::try_decode_protobuf(&hex_literal::hex!( "08031220f0d87659b402f0d47589e7670ca0954036f87b2fbf11fafbc66f4de7c3eb10a2" )) .unwrap(); - let pub_key = PublicKey::try_decode_protobuf_encoding(&hex_literal::hex!("0803125b3059301306072a8648ce3d020106082a8648ce3d03010703420004de6af15d8bc9b7f7c6eb8b32888d0da721d33f16af062306bafc64cdad741240cd61d6d9884c4899308ea25513a5cc03495ff88200dc7ae8e603ceb6698d2fee")).unwrap(); + let pub_key = PublicKey::try_decode_protobuf(&hex_literal::hex!("0803125b3059301306072a8648ce3d020106082a8648ce3d03010703420004de6af15d8bc9b7f7c6eb8b32888d0da721d33f16af062306bafc64cdad741240cd61d6d9884c4899308ea25513a5cc03495ff88200dc7ae8e603ceb6698d2fee")).unwrap(); roundtrip_protobuf_encoding(&priv_key, &pub_key); } @@ -704,8 +704,8 @@ mod tests { #[test] #[cfg(feature = "rsa")] fn keypair_protobuf_roundtrip_rsa() { - let priv_key = 
Keypair::try_decode_protobuf_encoding(&hex_literal::hex!("080012c81230820944020100300d06092a864886f70d01010105000482092e3082092a02010002820201009c897f33e0d0b3297f2fe404ea5b7a98096b329693292aefc2d05ef1e82fd0e121ce74ec77d75ef4b532fa34dee2a19626f3389c6d2bb9b8de614e138302bc4254727a7ee35f7827f1094403bc2fe8e1f64d0e8a2a77e8f3a879f69f94a71f3589de184f5910d6b5270f58e684f71ddd3a3f486a4cb2c390194ee6e9b65f9f1dff7b8f6c0bf4e0c4ac683bd4ba2d2fd022fdaaa3db75e90e16662fc4b3aca4c9aa65514d51690cd372c2b96c61a1ed4f9298ec213d5398aa9120379477118391104deb77ab157a59b70714e95caa9b55d15fa386b0c80f36e50d738bdd10e0baa3c3eafb4703dec3d6a757601f18541eb87ae9111f60eae17d843cf1047dbf5a8982ad9ef0aa88f59b17689f1210a305f7da8a012c1a58e4e82b48811618e98cef13c9eb28ce6fcc589ea5d902149ee4f49f8b39758b349ca90be5a8bddf4a46bacaaa48aec1c0c6e996ab13f2cb351c351d40b0a7b8e0c12b366a8555c392b0aadf71fe746eb4f8ea0b829da6ddcc39081abdd40ea2f3d8778b9a3f06a480ef34234975e919c0d64d818f2e904a9f251c8669dbb1666cb2c28e955446fc7efd460d4677ed922ccff1e24bb5a8699e050075c7897a64daa1bc2f05e4132e76c4f72baea5d073042254236c116ea3e40540bb7986468b4468aadfadad068331ef9dbe13e4012196e8eb9f8cdba096c35f09e80893ea68f3253dc41053983855e50203010001028202002699dd6d4c960a68443dea0bb04308b32f37690d2a92ef4c9a8cc9acfba5b6eb9d6b8cf7b701bc1fba032d2216886a725d7e82ca483d8d19e274ba4d23746c3a2b1ae3cc2083ad5ca41ab5d3f9f712858e38284ab7f843d0ba0e015c0ecb3b6df766763632ef6d12d4e3faf73578bebb8c1e88dbf5b7eb73c059eda55a5cb01f349e229af143dc9d832a5cfeb33e6b58f717f8995987f5058d4e7b9f14f390db4e1297feea016eb141ce74ed1e125133db21acb0f1af88a91f0a83ca2fa678fc2fba1743b643a09d38fe1d1102d1eb6639304d61ec7c190c5f6576c5d9a8ccd2198a398ae75333feb51324ffc60b38cb2e90d8a2694b7c0048f47016bb15cb36c482e038e455254e35fc4f0e0babc84e046bd441b0291412c784e4e9639664cad07cb09a01626049cdbfd1d9ad75b314448df811f4988c6e64d93ebefc602b574d0763e31e9d567c891349cfe75f0ca37429b743d6452d1fffc1f9f4901e5f68772b4f24542d654fd29b893e44c85e6037bba304d48873721131f18248b16bd71384abd00f9336c73f071a4ca2456878070f9704ed7df0
cd64e5c3e5949a78968525865b96e71d5015dc68bff857f2bba05a3976d83d8866d4dfe8caac144741ae97879a765dc0d4c7c34aa79ef6ebc86b5bf32b50ad995780f5f1a6c052eec5671164f407061a9c6bd49251b1bb7803bb222f5d859c321601236dd893dc9d810282010100cf13fe9908fe59e947122d5606cf9f70c123b7cb43a1916463e729f01dc31c3b70cb6a37bde542ecdc6029cea39b28c99c6395d0aaa29c1c4cf14b3fed9e0fcd793e31b7a09930352261c03b3dc0b66a62f8ae3771b705382cfeb6130d4a7e5b4854117a05767b99915099e2d542fc3fa505a0dbe217b169b46714384774380408bd8b3dbf0c9a177bbd3e64af115988159f485d70c885171007646765b50eb9bbebfabe60e71c69b2b822a124e235ad05f2b55cda9ddc78d671436981a3064a80c29bb37e6b5581a9372a6366c79af695a39ea0f3839ed77ec3985252f2e126955774727955b63ccbeff64208fd7280e8ba52e4297cb6bf72b44b07618923610282010100c184cd27d3a643df768764a7c66de40c222bdb4b7e02c35aa1e4a8377676247c629df58ecb5bb541fb4aac1bde35057b0b266bddd818876909b8fff1aca4859515069258d84b0c5178e4bff6842c68d39cad9a3a03aa6533fa76b92c995f381eb9c83f5e6118fd962807c931b7ca50dc20b261f2a71928f3e882af4da979cef843970cb2af68b86477b92ca90c8c0f1d640d39e943704366314c446f7a54851419e60f4e92e1e69bd52ee7294f9eddc6dc873144b0d0d9f13eb8d6aa955cf11edbd5a0673d8b70ef937e54fdaade185facc8437496d43a53169342280718a3679170ef4a0e582af4db598210fb64616f0d8daa08519d875e37c4d02e1af1c5050282010100c14865648c3b74cac3b698b06a4d130218947131fd9f69e8ed42d0273a706a02a546888f1ce547f173c52260a8dee354436fc45f6f55b626c83e94c147d637e3cede1963cf380d021b64681c2388a3fb6b03b9013157e63c47eb3b214f4f8fdf3e04920775dfe080375da7354d5f67b9341babc87121324c7ac197e2ebf6f36df8868ad8086207d6117e5325812ecd85b2c0e8b7a6d4d33cf28e23ce4ae593a8135ab0c1500b87beb4bd203d8f02c19d0d273cd73d8b094594cb4563ce47cf506d1cb85df28ad6d5de8f0a369bb185d7d1565672deb8a4e37983b1c26d801c5d7a19962c5f4a7c7e04d0a6e77e22aae4ddd54417890dca39aa23d4c03feed4210282010100915975de1c121d9892264f6bd496655ad7afa91ea29ee0ac0a3cfc3bec3600618c90a80780a67915fdf0b0249e59a4ac2e4bc568f30e3966a36ed88e64e58d8fd4230378c7bc569c3af955558b20effb410b0373df9cf4367e40fe04898e0350d0a99f2efc2f1108df3839
dda5f5c7960ed8ecc89cc9410131fa364156b1aecab9b992480387dc3759d533be25366d83ddca315d0ad21f4d7a69965d44bc86d7fa3bd9f3624f5a2e6188c1073e4e4cb5389e325b2d93309f0a453ab71548a1b253dbb886d2ab114060bfda864cf853c648b88231e7b7afb70895c272de219b5a06db945f4336e5ccd393ff25522cab220644091a06731361a8f1a28b7ea169210282010100bd80196d3d11a8257b5f439776388f4d53e4da3690f710e9aff3e3e970e545ec92d285e7049da000d5364dd7f550c17cf662d516282fe89813cab322ce5aad5cc744c52a024dd1a94aa9484037281637d1c8e3503b6ed6231225c93f7865d29269c899bbf5d248cf9d41f9aee9b9cb2afac172ba17c2df0699c6604b4ce7ab95c91c5f7fc7804f2bde268a7e15c512920f7325cfba47463da1c201549fc44c2bc4fbe5d8619cde9733470c5e38b996f5c3633c6311af88663ce4d2d0dc415ac5c8258e1aa7659f9f35d4b90b7b9a5a888867d75636e6443cce5391c57d48d56409029edef53e1a5130eb1fa708758bc821e15f7c53edf6d4c6f868a6b5b0c1e6")).unwrap(); - let pub_key = PublicKey::try_decode_protobuf_encoding(&hex_literal::hex!("080012a60430820222300d06092a864886f70d01010105000382020f003082020a02820201009c897f33e0d0b3297f2fe404ea5b7a98096b329693292aefc2d05ef1e82fd0e121ce74ec77d75ef4b532fa34dee2a19626f3389c6d2bb9b8de614e138302bc4254727a7ee35f7827f1094403bc2fe8e1f64d0e8a2a77e8f3a879f69f94a71f3589de184f5910d6b5270f58e684f71ddd3a3f486a4cb2c390194ee6e9b65f9f1dff7b8f6c0bf4e0c4ac683bd4ba2d2fd022fdaaa3db75e90e16662fc4b3aca4c9aa65514d51690cd372c2b96c61a1ed4f9298ec213d5398aa9120379477118391104deb77ab157a59b70714e95caa9b55d15fa386b0c80f36e50d738bdd10e0baa3c3eafb4703dec3d6a757601f18541eb87ae9111f60eae17d843cf1047dbf5a8982ad9ef0aa88f59b17689f1210a305f7da8a012c1a58e4e82b48811618e98cef13c9eb28ce6fcc589ea5d902149ee4f49f8b39758b349ca90be5a8bddf4a46bacaaa48aec1c0c6e996ab13f2cb351c351d40b0a7b8e0c12b366a8555c392b0aadf71fe746eb4f8ea0b829da6ddcc39081abdd40ea2f3d8778b9a3f06a480ef34234975e919c0d64d818f2e904a9f251c8669dbb1666cb2c28e955446fc7efd460d4677ed922ccff1e24bb5a8699e050075c7897a64daa1bc2f05e4132e76c4f72baea5d073042254236c116ea3e40540bb7986468b4468aadfadad068331ef9dbe13e4012196e8eb9f8cdba096c35f09e80893ea68f3253dc410
53983855e50203010001")).unwrap(); + let priv_key = Keypair::try_decode_protobuf(&hex_literal::hex!("080012c81230820944020100300d06092a864886f70d01010105000482092e3082092a02010002820201009c897f33e0d0b3297f2fe404ea5b7a98096b329693292aefc2d05ef1e82fd0e121ce74ec77d75ef4b532fa34dee2a19626f3389c6d2bb9b8de614e138302bc4254727a7ee35f7827f1094403bc2fe8e1f64d0e8a2a77e8f3a879f69f94a71f3589de184f5910d6b5270f58e684f71ddd3a3f486a4cb2c390194ee6e9b65f9f1dff7b8f6c0bf4e0c4ac683bd4ba2d2fd022fdaaa3db75e90e16662fc4b3aca4c9aa65514d51690cd372c2b96c61a1ed4f9298ec213d5398aa9120379477118391104deb77ab157a59b70714e95caa9b55d15fa386b0c80f36e50d738bdd10e0baa3c3eafb4703dec3d6a757601f18541eb87ae9111f60eae17d843cf1047dbf5a8982ad9ef0aa88f59b17689f1210a305f7da8a012c1a58e4e82b48811618e98cef13c9eb28ce6fcc589ea5d902149ee4f49f8b39758b349ca90be5a8bddf4a46bacaaa48aec1c0c6e996ab13f2cb351c351d40b0a7b8e0c12b366a8555c392b0aadf71fe746eb4f8ea0b829da6ddcc39081abdd40ea2f3d8778b9a3f06a480ef34234975e919c0d64d818f2e904a9f251c8669dbb1666cb2c28e955446fc7efd460d4677ed922ccff1e24bb5a8699e050075c7897a64daa1bc2f05e4132e76c4f72baea5d073042254236c116ea3e40540bb7986468b4468aadfadad068331ef9dbe13e4012196e8eb9f8cdba096c35f09e80893ea68f3253dc41053983855e50203010001028202002699dd6d4c960a68443dea0bb04308b32f37690d2a92ef4c9a8cc9acfba5b6eb9d6b8cf7b701bc1fba032d2216886a725d7e82ca483d8d19e274ba4d23746c3a2b1ae3cc2083ad5ca41ab5d3f9f712858e38284ab7f843d0ba0e015c0ecb3b6df766763632ef6d12d4e3faf73578bebb8c1e88dbf5b7eb73c059eda55a5cb01f349e229af143dc9d832a5cfeb33e6b58f717f8995987f5058d4e7b9f14f390db4e1297feea016eb141ce74ed1e125133db21acb0f1af88a91f0a83ca2fa678fc2fba1743b643a09d38fe1d1102d1eb6639304d61ec7c190c5f6576c5d9a8ccd2198a398ae75333feb51324ffc60b38cb2e90d8a2694b7c0048f47016bb15cb36c482e038e455254e35fc4f0e0babc84e046bd441b0291412c784e4e9639664cad07cb09a01626049cdbfd1d9ad75b314448df811f4988c6e64d93ebefc602b574d0763e31e9d567c891349cfe75f0ca37429b743d6452d1fffc1f9f4901e5f68772b4f24542d654fd29b893e44c85e6037bba304d48873721131f18248b16bd71384
abd00f9336c73f071a4ca2456878070f9704ed7df0cd64e5c3e5949a78968525865b96e71d5015dc68bff857f2bba05a3976d83d8866d4dfe8caac144741ae97879a765dc0d4c7c34aa79ef6ebc86b5bf32b50ad995780f5f1a6c052eec5671164f407061a9c6bd49251b1bb7803bb222f5d859c321601236dd893dc9d810282010100cf13fe9908fe59e947122d5606cf9f70c123b7cb43a1916463e729f01dc31c3b70cb6a37bde542ecdc6029cea39b28c99c6395d0aaa29c1c4cf14b3fed9e0fcd793e31b7a09930352261c03b3dc0b66a62f8ae3771b705382cfeb6130d4a7e5b4854117a05767b99915099e2d542fc3fa505a0dbe217b169b46714384774380408bd8b3dbf0c9a177bbd3e64af115988159f485d70c885171007646765b50eb9bbebfabe60e71c69b2b822a124e235ad05f2b55cda9ddc78d671436981a3064a80c29bb37e6b5581a9372a6366c79af695a39ea0f3839ed77ec3985252f2e126955774727955b63ccbeff64208fd7280e8ba52e4297cb6bf72b44b07618923610282010100c184cd27d3a643df768764a7c66de40c222bdb4b7e02c35aa1e4a8377676247c629df58ecb5bb541fb4aac1bde35057b0b266bddd818876909b8fff1aca4859515069258d84b0c5178e4bff6842c68d39cad9a3a03aa6533fa76b92c995f381eb9c83f5e6118fd962807c931b7ca50dc20b261f2a71928f3e882af4da979cef843970cb2af68b86477b92ca90c8c0f1d640d39e943704366314c446f7a54851419e60f4e92e1e69bd52ee7294f9eddc6dc873144b0d0d9f13eb8d6aa955cf11edbd5a0673d8b70ef937e54fdaade185facc8437496d43a53169342280718a3679170ef4a0e582af4db598210fb64616f0d8daa08519d875e37c4d02e1af1c5050282010100c14865648c3b74cac3b698b06a4d130218947131fd9f69e8ed42d0273a706a02a546888f1ce547f173c52260a8dee354436fc45f6f55b626c83e94c147d637e3cede1963cf380d021b64681c2388a3fb6b03b9013157e63c47eb3b214f4f8fdf3e04920775dfe080375da7354d5f67b9341babc87121324c7ac197e2ebf6f36df8868ad8086207d6117e5325812ecd85b2c0e8b7a6d4d33cf28e23ce4ae593a8135ab0c1500b87beb4bd203d8f02c19d0d273cd73d8b094594cb4563ce47cf506d1cb85df28ad6d5de8f0a369bb185d7d1565672deb8a4e37983b1c26d801c5d7a19962c5f4a7c7e04d0a6e77e22aae4ddd54417890dca39aa23d4c03feed4210282010100915975de1c121d9892264f6bd496655ad7afa91ea29ee0ac0a3cfc3bec3600618c90a80780a67915fdf0b0249e59a4ac2e4bc568f30e3966a36ed88e64e58d8fd4230378c7bc569c3af955558b20effb410b0373df9c
f4367e40fe04898e0350d0a99f2efc2f1108df3839dda5f5c7960ed8ecc89cc9410131fa364156b1aecab9b992480387dc3759d533be25366d83ddca315d0ad21f4d7a69965d44bc86d7fa3bd9f3624f5a2e6188c1073e4e4cb5389e325b2d93309f0a453ab71548a1b253dbb886d2ab114060bfda864cf853c648b88231e7b7afb70895c272de219b5a06db945f4336e5ccd393ff25522cab220644091a06731361a8f1a28b7ea169210282010100bd80196d3d11a8257b5f439776388f4d53e4da3690f710e9aff3e3e970e545ec92d285e7049da000d5364dd7f550c17cf662d516282fe89813cab322ce5aad5cc744c52a024dd1a94aa9484037281637d1c8e3503b6ed6231225c93f7865d29269c899bbf5d248cf9d41f9aee9b9cb2afac172ba17c2df0699c6604b4ce7ab95c91c5f7fc7804f2bde268a7e15c512920f7325cfba47463da1c201549fc44c2bc4fbe5d8619cde9733470c5e38b996f5c3633c6311af88663ce4d2d0dc415ac5c8258e1aa7659f9f35d4b90b7b9a5a888867d75636e6443cce5391c57d48d56409029edef53e1a5130eb1fa708758bc821e15f7c53edf6d4c6f868a6b5b0c1e6")).unwrap(); + let pub_key = PublicKey::try_decode_protobuf(&hex_literal::hex!("080012a60430820222300d06092a864886f70d01010105000382020f003082020a02820201009c897f33e0d0b3297f2fe404ea5b7a98096b329693292aefc2d05ef1e82fd0e121ce74ec77d75ef4b532fa34dee2a19626f3389c6d2bb9b8de614e138302bc4254727a7ee35f7827f1094403bc2fe8e1f64d0e8a2a77e8f3a879f69f94a71f3589de184f5910d6b5270f58e684f71ddd3a3f486a4cb2c390194ee6e9b65f9f1dff7b8f6c0bf4e0c4ac683bd4ba2d2fd022fdaaa3db75e90e16662fc4b3aca4c9aa65514d51690cd372c2b96c61a1ed4f9298ec213d5398aa9120379477118391104deb77ab157a59b70714e95caa9b55d15fa386b0c80f36e50d738bdd10e0baa3c3eafb4703dec3d6a757601f18541eb87ae9111f60eae17d843cf1047dbf5a8982ad9ef0aa88f59b17689f1210a305f7da8a012c1a58e4e82b48811618e98cef13c9eb28ce6fcc589ea5d902149ee4f49f8b39758b349ca90be5a8bddf4a46bacaaa48aec1c0c6e996ab13f2cb351c351d40b0a7b8e0c12b366a8555c392b0aadf71fe746eb4f8ea0b829da6ddcc39081abdd40ea2f3d8778b9a3f06a480ef34234975e919c0d64d818f2e904a9f251c8669dbb1666cb2c28e955446fc7efd460d4677ed922ccff1e24bb5a8699e050075c7897a64daa1bc2f05e4132e76c4f72baea5d073042254236c116ea3e40540bb7986468b4468aadfadad068331ef9dbe13e4012196e8eb9f8
cdba096c35f09e80893ea68f3253dc41053983855e50203010001")).unwrap(); roundtrip_protobuf_encoding(&priv_key, &pub_key); } @@ -713,8 +713,8 @@ mod tests { fn roundtrip_protobuf_encoding(private_key: &Keypair, public_key: &PublicKey) { assert_eq!(&private_key.public(), public_key); - let encoded_priv = private_key.encode_protobuf_encoding(); - let decoded_priv = Keypair::try_decode_protobuf_encoding(&encoded_priv).unwrap(); + let encoded_priv = private_key.encode_protobuf(); + let decoded_priv = Keypair::try_decode_protobuf(&encoded_priv).unwrap(); assert_eq!( private_key.public().to_peer_id(), @@ -722,8 +722,8 @@ mod tests { "PeerId from roundtripped private key should be the same" ); - let encoded_public = private_key.public().encode_protobuf_encoding(); - let decoded_public = PublicKey::try_decode_protobuf_encoding(&encoded_public).unwrap(); + let encoded_public = private_key.public().encode_protobuf(); + let decoded_public = PublicKey::try_decode_protobuf(&encoded_public).unwrap(); assert_eq!( private_key.public().to_peer_id(), @@ -741,7 +741,7 @@ mod tests { let encoded = BASE64_STANDARD.decode(base_64_encoded).unwrap(); - let keypair = Keypair::try_decode_protobuf_encoding(&encoded).unwrap(); + let keypair = Keypair::try_decode_protobuf(&encoded).unwrap(); let peer_id = keypair.public().to_peer_id(); assert_eq!(expected_peer_id, peer_id); diff --git a/identity/src/keypair_dummy.rs b/identity/src/keypair_dummy.rs index 55be856dba6..a3b5b3e5603 100644 --- a/identity/src/keypair_dummy.rs +++ b/identity/src/keypair_dummy.rs @@ -32,24 +32,24 @@ impl Keypair { unreachable!("Can never construct empty enum") } - #[deprecated(since = "0.2.0", note = "Renamed to `encode_protobuf_encoding`")] + #[deprecated(since = "0.2.0", note = "Renamed to `encode_protobuf`")] pub fn to_protobuf_encoding(&self) -> Vec { unreachable!("Can never encode empty enum") } - pub fn encode_protobuf_encoding(&self) -> Vec { + pub fn encode_protobuf(&self) -> Vec { unreachable!("Can never encode 
empty enum") } #[deprecated( since = "0.2.0", - note = "This method name does not follow Rust naming conventions, use `Keypair::try_decode_protobuf_encoding` instead." + note = "This method name does not follow Rust naming conventions, use `Keypair::try_decode_protobuf` instead." )] pub fn from_protobuf_encoding(bytes: &[u8]) -> Result { - Self::try_decode_protobuf_encoding(bytes) + Self::try_decode_protobuf(bytes) } - pub fn try_decode_protobuf_encoding(_: &[u8]) -> Result { + pub fn try_decode_protobuf(_: &[u8]) -> Result { Err(DecodingError::missing_feature( "ecdsa|rsa|ed25519|secp256k1", )) @@ -69,24 +69,24 @@ impl PublicKey { unreachable!("Can never encode empty enum") } - #[deprecated(since = "0.2.0", note = "Renamed to `encode_protobuf_encoding`")] + #[deprecated(since = "0.2.0", note = "Renamed to `encode_protobuf`")] pub fn to_protobuf_encoding(&self) -> Vec { unreachable!("Can never encode empty enum") } - pub fn encode_protobuf_encoding(&self) -> Vec { + pub fn encode_protobuf(&self) -> Vec { unreachable!("Can never encode empty enum") } #[deprecated( since = "0.2.0", - note = "This method name does not follow Rust naming conventions, use `PublicKey::try_decode_protobuf_encoding` instead." + note = "This method name does not follow Rust naming conventions, use `PublicKey::try_decode_protobuf` instead." )] pub fn from_protobuf_encoding(bytes: &[u8]) -> Result { - Self::try_decode_protobuf_encoding(bytes) + Self::try_decode_protobuf(bytes) } - pub fn try_decode_protobuf_encoding(_: &[u8]) -> Result { + pub fn try_decode_protobuf(_: &[u8]) -> Result { Err(DecodingError::missing_feature( "ecdsa|rsa|ed25519|secp256k1", )) diff --git a/identity/src/peer_id.rs b/identity/src/peer_id.rs index b33420aa353..788e0b79666 100644 --- a/identity/src/peer_id.rs +++ b/identity/src/peer_id.rs @@ -66,7 +66,7 @@ impl fmt::Display for PeerId { impl PeerId { /// Builds a `PeerId` from a public key. 
pub fn from_public_key(key: &crate::keypair::PublicKey) -> PeerId { - let key_enc = key.encode_protobuf_encoding(); + let key_enc = key.encode_protobuf(); let multihash = if key_enc.len() <= MAX_INLINE_KEY_LENGTH { Multihash::wrap(MULTIHASH_IDENTITY_CODE, &key_enc) @@ -141,7 +141,7 @@ impl PeerId { let alg = Code::try_from(self.multihash.code()) .expect("Internal multihash is always a valid `Code`"); - let enc = public_key.encode_protobuf_encoding(); + let enc = public_key.encode_protobuf(); Some(alg.digest(&enc) == self.multihash) } } diff --git a/identity/tests/keypair_api.rs b/identity/tests/keypair_api.rs index a135395da4d..f0d16da2899 100644 --- a/identity/tests/keypair_api.rs +++ b/identity/tests/keypair_api.rs @@ -2,12 +2,12 @@ use libp2p_identity::Keypair; #[test] fn calling_keypair_api() { - let _ = Keypair::try_decode_protobuf_encoding(&[]); + let _ = Keypair::try_decode_protobuf(&[]); } #[allow(dead_code)] fn using_keypair(kp: Keypair) { - let _ = kp.encode_protobuf_encoding(); + let _ = kp.encode_protobuf(); let _ = kp.sign(&[]); let _ = kp.public(); } diff --git a/misc/keygen/src/config.rs b/misc/keygen/src/config.rs index d5286552428..61199cf7d63 100644 --- a/misc/keygen/src/config.rs +++ b/misc/keygen/src/config.rs @@ -18,7 +18,7 @@ impl Config { } pub fn from_key_material(peer_id: PeerId, keypair: &Keypair) -> Result> { - let priv_key = BASE64_STANDARD.encode(keypair.encode_protobuf_encoding()); + let priv_key = BASE64_STANDARD.encode(keypair.encode_protobuf()); let peer_id = peer_id.to_base58(); Ok(Self { identity: Identity { peer_id, priv_key }, diff --git a/misc/keygen/src/main.rs b/misc/keygen/src/main.rs index ead72205ce6..f8760d8d156 100644 --- a/misc/keygen/src/main.rs +++ b/misc/keygen/src/main.rs @@ -54,7 +54,7 @@ fn main() -> Result<(), Box> { Command::From { config } => { let config = Zeroizing::new(config::Config::from_file(config.as_ref())?); - let keypair = identity::Keypair::try_decode_protobuf_encoding(&Zeroizing::new( + let keypair 
= identity::Keypair::try_decode_protobuf(&Zeroizing::new( BASE64_STANDARD.decode(config.identity.priv_key.as_bytes())?, ))?; @@ -118,7 +118,7 @@ fn main() -> Result<(), Box> { println!( "PeerId: {:?} Keypair: {:?}", local_peer_id, - local_keypair.encode_protobuf_encoding() + local_keypair.encode_protobuf() ); } diff --git a/protocols/gossipsub/src/behaviour.rs b/protocols/gossipsub/src/behaviour.rs index 4674412a2b7..fb58e55f24a 100644 --- a/protocols/gossipsub/src/behaviour.rs +++ b/protocols/gossipsub/src/behaviour.rs @@ -203,7 +203,7 @@ impl From for PublishConfig { match authenticity { MessageAuthenticity::Signed(keypair) => { let public_key = keypair.public(); - let key_enc = public_key.encode_protobuf_encoding(); + let key_enc = public_key.encode_protobuf(); let key = if key_enc.len() <= 42 { // The public key can be inlined in [`rpc_proto::proto::::Message::from`], so we don't include it // specifically in the [`rpc_proto::proto::Message::key`] field. diff --git a/protocols/gossipsub/src/protocol.rs b/protocols/gossipsub/src/protocol.rs index 89c418afdfb..682389b0634 100644 --- a/protocols/gossipsub/src/protocol.rs +++ b/protocols/gossipsub/src/protocol.rs @@ -237,10 +237,10 @@ impl GossipsubCodec { let public_key = match message .key .as_deref() - .map(PublicKey::try_decode_protobuf_encoding) + .map(PublicKey::try_decode_protobuf) { Some(Ok(key)) => key, - _ => match PublicKey::try_decode_protobuf_encoding(&source.to_bytes()[2..]) { + _ => match PublicKey::try_decode_protobuf(&source.to_bytes()[2..]) { Ok(v) => v, Err(_) => { warn!("Signature verification failed: No valid public key supplied"); diff --git a/protocols/identify/src/protocol.rs b/protocols/identify/src/protocol.rs index 82b3f7c4089..1d1833d65cd 100644 --- a/protocols/identify/src/protocol.rs +++ b/protocols/identify/src/protocol.rs @@ -169,7 +169,7 @@ where .map(|addr| addr.to_vec()) .collect(); - let pubkey_bytes = info.public_key.encode_protobuf_encoding(); + let pubkey_bytes = 
info.public_key.encode_protobuf(); let message = proto::Identify { agentVersion: Some(info.agent_version), @@ -236,7 +236,7 @@ impl TryFrom for Info { }; let public_key = - PublicKey::try_decode_protobuf_encoding(&msg.publicKey.unwrap_or_default())?; + PublicKey::try_decode_protobuf(&msg.publicKey.unwrap_or_default())?; let observed_addr = match parse_multiaddr(msg.observedAddr.unwrap_or_default()) { Ok(a) => a, @@ -387,7 +387,7 @@ mod tests { publicKey: Some( identity::Keypair::generate_ed25519() .public() - .encode_protobuf_encoding(), + .encode_protobuf(), ), }; diff --git a/transports/noise/src/io/handshake.rs b/transports/noise/src/io/handshake.rs index 5b2f63d1449..e9428f8441c 100644 --- a/transports/noise/src/io/handshake.rs +++ b/transports/noise/src/io/handshake.rs @@ -214,7 +214,7 @@ where let pb = pb_result?; if !pb.identity_key.is_empty() { - let pk = identity::PublicKey::try_decode_protobuf_encoding(&pb.identity_key)?; + let pk = identity::PublicKey::try_decode_protobuf(&pb.identity_key)?; if let Some(ref k) = state.id_remote_pubkey { if k != &pk { return Err(NoiseError::UnexpectedKey); @@ -236,7 +236,7 @@ where T: AsyncWrite + Unpin, { let mut pb = proto::NoiseHandshakePayload { - identity_key: state.identity.public.encode_protobuf_encoding(), + identity_key: state.identity.public.encode_protobuf(), ..Default::default() }; diff --git a/transports/plaintext/src/handshake.rs b/transports/plaintext/src/handshake.rs index eae855e991f..fb156190c57 100644 --- a/transports/plaintext/src/handshake.rs +++ b/transports/plaintext/src/handshake.rs @@ -54,7 +54,7 @@ impl HandshakeContext { fn new(config: PlainText2Config) -> Self { let exchange = Exchange { id: Some(config.local_public_key.to_peer_id().to_bytes()), - pubkey: Some(config.local_public_key.encode_protobuf_encoding()), + pubkey: Some(config.local_public_key.encode_protobuf()), }; let mut buf = Vec::with_capacity(exchange.get_size()); let mut writer = Writer::new(&mut buf); @@ -77,7 +77,7 @@ impl 
HandshakeContext { let mut reader = BytesReader::from_bytes(&exchange_bytes); let prop = Exchange::from_reader(&mut reader, &exchange_bytes)?; - let public_key = PublicKey::try_decode_protobuf_encoding(&prop.pubkey.unwrap_or_default())?; + let public_key = PublicKey::try_decode_protobuf(&prop.pubkey.unwrap_or_default())?; let peer_id = PeerId::from_bytes(&prop.id.unwrap_or_default())?; // Check the validity of the remote's `Exchange`. diff --git a/transports/tls/src/certificate.rs b/transports/tls/src/certificate.rs index ced60e5597a..8531ade72fa 100644 --- a/transports/tls/src/certificate.rs +++ b/transports/tls/src/certificate.rs @@ -159,7 +159,7 @@ fn parse_unverified(der_input: &[u8]) -> Result { // required KeyType Type = 1; // required bytes Data = 2; // } - let public_key = identity::PublicKey::try_decode_protobuf_encoding(&public_key) + let public_key = identity::PublicKey::try_decode_protobuf(&public_key) .map_err(|_| webpki::Error::UnknownIssuer)?; let ext = P2pExtension { public_key, @@ -215,7 +215,7 @@ fn make_libp2p_extension( // signature OCTET STRING // } let extension_content = { - let serialized_pubkey = identity_keypair.public().encode_protobuf_encoding(); + let serialized_pubkey = identity_keypair.public().encode_protobuf(); yasna::encode_der(&(serialized_pubkey, signature)) }; From 4285bda0cc75cfe7a389abfee02badd84c7fa918 Mon Sep 17 00:00:00 2001 From: drHuangMHT Date: Tue, 11 Apr 2023 22:12:50 +0800 Subject: [PATCH 48/83] use '.ok()' combinator to replace 'match' expression --- identity/src/keypair.rs | 20 ++++---------------- 1 file changed, 4 insertions(+), 16 deletions(-) diff --git a/identity/src/keypair.rs b/identity/src/keypair.rs index 745499a2d26..391be2db372 100644 --- a/identity/src/keypair.rs +++ b/identity/src/keypair.rs @@ -113,10 +113,7 @@ impl Keypair { note = "This method name does not follow Rust naming conventions, use `Keypair::try_into_ed25519` instead." 
)] pub fn into_ed25519(self) -> Option { - match self.try_into() { - Ok(k) => Some(k), - Err(_) => None, - } + self.try_into().ok() } #[cfg(feature = "ed25519")] @@ -130,10 +127,7 @@ impl Keypair { note = "This method name does not follow Rust naming conventions, use `Keypair::try_into_secp256k1` instead." )] pub fn into_secp256k1(self) -> Option { - match self.try_into() { - Ok(k) => Some(k), - Err(_) => None, - } + self.try_into().ok() } #[cfg(feature = "secp256k1")] @@ -147,10 +141,7 @@ impl Keypair { note = "This method name does not follow Rust naming conventions, use `Keypair::try_into_rsa` instead." )] pub fn into_rsa(self) -> Option { - match self.try_into() { - Ok(k) => Some(k), - Err(_) => None, - } + self.try_into().ok() } #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] @@ -164,10 +155,7 @@ impl Keypair { note = "This method name does not follow Rust naming conventions, use `Keypair::try_into_ecdsa` instead." )] pub fn into_ecdsa(self) -> Option { - match self.try_into() { - Ok(k) => Some(k), - Err(_) => None, - } + self.try_into().ok() } #[cfg(feature = "ecdsa")] From 73d9e55bfe3593cc4734860242ba1abc8c2a5a44 Mon Sep 17 00:00:00 2001 From: drHuangMHT Date: Tue, 11 Apr 2023 22:17:08 +0800 Subject: [PATCH 49/83] put link to libp2p spec in 'encode_protobuf' documentation --- identity/src/keypair.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/identity/src/keypair.rs b/identity/src/keypair.rs index 391be2db372..09526dfb8b2 100644 --- a/identity/src/keypair.rs +++ b/identity/src/keypair.rs @@ -248,6 +248,8 @@ impl Keypair { } /// Encode a private key as protobuf structure. + /// + /// See for details on the encoding. 
pub fn encode_protobuf(&self) -> Vec { use quick_protobuf::MessageWrite; From 6120997b1ff5c59467f44030661dba1dc2f7ad7f Mon Sep 17 00:00:00 2001 From: drHuangMHT Date: Tue, 11 Apr 2023 22:25:57 +0800 Subject: [PATCH 50/83] remove roundtrip test for 'ecdsa::Keypair' --- identity/src/ecdsa.rs | 6 ------ 1 file changed, 6 deletions(-) diff --git a/identity/src/ecdsa.rs b/identity/src/ecdsa.rs index c2ac772e766..de8882dec21 100644 --- a/identity/src/ecdsa.rs +++ b/identity/src/ecdsa.rs @@ -296,10 +296,4 @@ mod tests { assert!(!pk.verify(invalid_msg, &sig)); } - #[test] - fn serialize_deserialize() { - let pair = Keypair::generate(); - let bytes_secret = pair.secret().to_bytes(); - assert_eq!(Keypair::try_from_bytes(bytes_secret).unwrap(), pair) - } } From 785ca7c1e75b0733194d5eb21c86ca842db9ccd1 Mon Sep 17 00:00:00 2001 From: drHuangMHT Date: Tue, 11 Apr 2023 22:28:11 +0800 Subject: [PATCH 51/83] remove 'Eq' impl for 'ecdsa::Keypair' and 'ecdsa::SecretKey' --- identity/src/ecdsa.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/identity/src/ecdsa.rs b/identity/src/ecdsa.rs index de8882dec21..f141bf639c3 100644 --- a/identity/src/ecdsa.rs +++ b/identity/src/ecdsa.rs @@ -34,7 +34,7 @@ use p256::{ use void::Void; /// An ECDSA keypair. -#[derive(Clone, PartialEq, Eq)] +#[derive(Clone)] pub struct Keypair { secret: SecretKey, public: PublicKey, @@ -93,7 +93,7 @@ impl From for SecretKey { } /// An ECDSA secret key. 
-#[derive(Clone, PartialEq, Eq)] +#[derive(Clone)] pub struct SecretKey(SigningKey); impl SecretKey { From faa4d42c384a8d4ee9320d03627f776be2f7eec1 Mon Sep 17 00:00:00 2001 From: drHuangMHT Date: Tue, 11 Apr 2023 22:31:16 +0800 Subject: [PATCH 52/83] correct diff for 'ecdsa::from_bytes' --- identity/src/ecdsa.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/identity/src/ecdsa.rs b/identity/src/ecdsa.rs index f141bf639c3..f519e37dc7a 100644 --- a/identity/src/ecdsa.rs +++ b/identity/src/ecdsa.rs @@ -119,7 +119,7 @@ impl SecretKey { since = "0.2.0", note = "This method name does not follow Rust naming conventions, use `SecretKey::try_from_bytes` instead" )] - pub fn from_bytes(buf: &[u8]) -> Result { + pub fn from_bytes(buf: &[u8]) -> Result { Self::try_from_bytes(buf) } From 9914eab67c927548b7d7b868f7b77bd8a1a60975 Mon Sep 17 00:00:00 2001 From: drHuangMHT Date: Tue, 11 Apr 2023 22:33:44 +0800 Subject: [PATCH 53/83] deprecation note correction for 'Keypair::rsa_from_pkcs8' --- identity/src/keypair.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/identity/src/keypair.rs b/identity/src/keypair.rs index 09526dfb8b2..ad61aca2576 100644 --- a/identity/src/keypair.rs +++ b/identity/src/keypair.rs @@ -170,7 +170,7 @@ impl Keypair { #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] #[deprecated( since = "0.2.0", - note = "Deprecated, use `rsa::Keypair::try_decode_pkcs8` or `rsa::Keypair::try_decode_der` and promote it into `Keypair` instead." + note = "Deprecated, use `rsa::Keypair::try_decode_pkcs8` and promote it into `Keypair` instead." 
)] pub fn rsa_from_pkcs8(pkcs8_der: &mut [u8]) -> Result { #[allow(deprecated)] From 1090f14381c6f6a0e5a0eeb8306ab25b832d1602 Mon Sep 17 00:00:00 2001 From: drHuangMHT Date: Tue, 11 Apr 2023 22:34:31 +0800 Subject: [PATCH 54/83] remove feature cfg for KeyType --- identity/src/lib.rs | 4 ---- 1 file changed, 4 deletions(-) diff --git a/identity/src/lib.rs b/identity/src/lib.rs index f2e21ff436c..dbf855301d1 100644 --- a/identity/src/lib.rs +++ b/identity/src/lib.rs @@ -131,13 +131,9 @@ pub use peer_id::{ParseError, PeerId}; #[derive(Debug, PartialEq, Eq)] /// The type of key a `KeyPair` is holding. pub enum KeyType { - #[cfg(feature = "ed25519")] Ed25519, - #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] RSA, - #[cfg(feature = "secp256k1")] Secp256k1, - #[cfg(feature = "ecdsa")] Ecdsa, } From 278f19efe20d1a6443fe49bbc0b437258b8065e8 Mon Sep 17 00:00:00 2001 From: drHuangMHT Date: Tue, 11 Apr 2023 22:38:50 +0800 Subject: [PATCH 55/83] add feature cfg to branches in 'TryInto' impl of 'Keypair' --- identity/src/keypair.rs | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/identity/src/keypair.rs b/identity/src/keypair.rs index ad61aca2576..df6fac3037f 100644 --- a/identity/src/keypair.rs +++ b/identity/src/keypair.rs @@ -370,8 +370,11 @@ impl TryInto for Keypair { #[allow(deprecated)] match self { Keypair::Ed25519(inner) => Ok(inner), + #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] Keypair::Rsa(_) => Err(OtherVariantError::new(KeyType::RSA)), + #[cfg(feature = "secp256k1")] Keypair::Secp256k1(_) => Err(OtherVariantError::new(KeyType::Secp256k1)), + #[cfg(feature = "ecdsa")] Keypair::Ecdsa(_) => Err(OtherVariantError::new(KeyType::Ecdsa)), } } @@ -385,8 +388,11 @@ impl TryInto for Keypair { #[allow(deprecated)] match self { Keypair::Ecdsa(inner) => Ok(inner), + #[cfg(feature = "ed25519")] Keypair::Ed25519(_) => Err(OtherVariantError::new(KeyType::Ed25519)), + #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] Keypair::Rsa(_) => 
Err(OtherVariantError::new(KeyType::RSA)), + #[cfg(feature = "secp256k1")] Keypair::Secp256k1(_) => Err(OtherVariantError::new(KeyType::Secp256k1)), } } @@ -400,8 +406,11 @@ impl TryInto for Keypair { #[allow(deprecated)] match self { Keypair::Secp256k1(inner) => Ok(inner), + #[cfg(feature = "ed25519")] Keypair::Ed25519(_) => Err(OtherVariantError::new(KeyType::Ed25519)), + #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] Keypair::Rsa(_) => Err(OtherVariantError::new(KeyType::RSA)), + #[cfg(feature = "ecdsa")] Keypair::Ecdsa(_) => Err(OtherVariantError::new(KeyType::Ecdsa)), } } @@ -415,8 +424,11 @@ impl TryInto for Keypair { #[allow(deprecated)] match self { Keypair::Rsa(inner) => Ok(inner), + #[cfg(feature = "ed25519")] Keypair::Ed25519(_) => Err(OtherVariantError::new(KeyType::Ed25519)), + #[cfg(feature = "secp256k1")] Keypair::Secp256k1(_) => Err(OtherVariantError::new(KeyType::Secp256k1)), + #[cfg(feature = "ecdsa")] Keypair::Ecdsa(_) => Err(OtherVariantError::new(KeyType::Ecdsa)), } } From 4b103bba01fae960f7d9d07b3d7f5cbc7c300c82 Mon Sep 17 00:00:00 2001 From: drHuangMHT Date: Tue, 11 Apr 2023 22:44:36 +0800 Subject: [PATCH 56/83] add feature cfg to branches in 'TryInto' impl of 'PublicKey' --- identity/src/keypair.rs | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/identity/src/keypair.rs b/identity/src/keypair.rs index df6fac3037f..4f5a3ebcced 100644 --- a/identity/src/keypair.rs +++ b/identity/src/keypair.rs @@ -248,7 +248,7 @@ impl Keypair { } /// Encode a private key as protobuf structure. - /// + /// /// See for details on the encoding. 
pub fn encode_protobuf(&self) -> Vec { use quick_protobuf::MessageWrite; @@ -605,8 +605,11 @@ impl TryInto for PublicKey { #[allow(deprecated)] match self { PublicKey::Ed25519(inner) => Ok(inner), + #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] PublicKey::Rsa(_) => Err(OtherVariantError::new(KeyType::RSA)), + #[cfg(feature = "secp256k1")] PublicKey::Secp256k1(_) => Err(OtherVariantError::new(KeyType::Secp256k1)), + #[cfg(feature = "ecdsa")] PublicKey::Ecdsa(_) => Err(OtherVariantError::new(KeyType::Ecdsa)), } } @@ -620,8 +623,11 @@ impl TryInto for PublicKey { #[allow(deprecated)] match self { PublicKey::Ecdsa(inner) => Ok(inner), + #[cfg(feature = "ed25519")] PublicKey::Ed25519(_) => Err(OtherVariantError::new(KeyType::Ed25519)), + #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] PublicKey::Rsa(_) => Err(OtherVariantError::new(KeyType::RSA)), + #[cfg(feature = "secp256k1")] PublicKey::Secp256k1(_) => Err(OtherVariantError::new(KeyType::Secp256k1)), } } @@ -635,8 +641,11 @@ impl TryInto for PublicKey { #[allow(deprecated)] match self { PublicKey::Secp256k1(inner) => Ok(inner), + #[cfg(feature = "ed25519")] PublicKey::Ed25519(_) => Err(OtherVariantError::new(KeyType::Ed25519)), + #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] PublicKey::Rsa(_) => Err(OtherVariantError::new(KeyType::RSA)), + #[cfg(feature = "ecdsa")] PublicKey::Ecdsa(_) => Err(OtherVariantError::new(KeyType::Ecdsa)), } } @@ -650,8 +659,11 @@ impl TryInto for PublicKey { #[allow(deprecated)] match self { PublicKey::Rsa(inner) => Ok(inner), + #[cfg(feature = "ed25519")] PublicKey::Ed25519(_) => Err(OtherVariantError::new(KeyType::Ed25519)), + #[cfg(feature = "secp256k1")] PublicKey::Secp256k1(_) => Err(OtherVariantError::new(KeyType::Secp256k1)), + #[cfg(feature = "ecdsa")] PublicKey::Ecdsa(_) => Err(OtherVariantError::new(KeyType::Ecdsa)), } } From bd0284bfbea65364295b030eb7c782f60650994e Mon Sep 17 00:00:00 2001 From: drHuangMHT Date: Tue, 11 Apr 2023 22:53:30 +0800 
Subject: [PATCH 57/83] fix missing target exclusion on 'Keypair::into_rsa' --- identity/src/keypair.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/identity/src/keypair.rs b/identity/src/keypair.rs index 4f5a3ebcced..92f2be64887 100644 --- a/identity/src/keypair.rs +++ b/identity/src/keypair.rs @@ -135,7 +135,7 @@ impl Keypair { self.try_into() } - #[cfg(feature = "rsa")] + #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] #[deprecated( since = "0.2.0", note = "This method name does not follow Rust naming conventions, use `Keypair::try_into_rsa` instead." From bbf59d8ec5361d52d7994fb0e1283bf66fe46035 Mon Sep 17 00:00:00 2001 From: drHuangMHT Date: Tue, 11 Apr 2023 22:54:37 +0800 Subject: [PATCH 58/83] formatting --- identity/src/ecdsa.rs | 1 - protocols/gossipsub/src/protocol.rs | 6 +----- protocols/identify/src/protocol.rs | 3 +-- 3 files changed, 2 insertions(+), 8 deletions(-) diff --git a/identity/src/ecdsa.rs b/identity/src/ecdsa.rs index f519e37dc7a..e54c7e9d5f7 100644 --- a/identity/src/ecdsa.rs +++ b/identity/src/ecdsa.rs @@ -295,5 +295,4 @@ mod tests { let invalid_msg = "h3ll0 w0rld".as_bytes(); assert!(!pk.verify(invalid_msg, &sig)); } - } diff --git a/protocols/gossipsub/src/protocol.rs b/protocols/gossipsub/src/protocol.rs index 682389b0634..98e05567929 100644 --- a/protocols/gossipsub/src/protocol.rs +++ b/protocols/gossipsub/src/protocol.rs @@ -234,11 +234,7 @@ impl GossipsubCodec { // If there is a key value in the protobuf, use that key otherwise the key must be // obtained from the inlined source peer_id. 
- let public_key = match message - .key - .as_deref() - .map(PublicKey::try_decode_protobuf) - { + let public_key = match message.key.as_deref().map(PublicKey::try_decode_protobuf) { Some(Ok(key)) => key, _ => match PublicKey::try_decode_protobuf(&source.to_bytes()[2..]) { Ok(v) => v, diff --git a/protocols/identify/src/protocol.rs b/protocols/identify/src/protocol.rs index 1d1833d65cd..1a10b591278 100644 --- a/protocols/identify/src/protocol.rs +++ b/protocols/identify/src/protocol.rs @@ -235,8 +235,7 @@ impl TryFrom for Info { addrs }; - let public_key = - PublicKey::try_decode_protobuf(&msg.publicKey.unwrap_or_default())?; + let public_key = PublicKey::try_decode_protobuf(&msg.publicKey.unwrap_or_default())?; let observed_addr = match parse_multiaddr(msg.observedAddr.unwrap_or_default()) { Ok(a) => a, From 1a442facc3c3a4c7027b152f370bb6ba1e09c33c Mon Sep 17 00:00:00 2001 From: drHuangMHT Date: Wed, 12 Apr 2023 09:48:14 +0800 Subject: [PATCH 59/83] remove duplicate 'to_protobuf_encoding' in 'keypair_dummy.rs' --- identity/src/keypair_dummy.rs | 4 ---- 1 file changed, 4 deletions(-) diff --git a/identity/src/keypair_dummy.rs b/identity/src/keypair_dummy.rs index a3b5b3e5603..ea2741c8b6d 100644 --- a/identity/src/keypair_dummy.rs +++ b/identity/src/keypair_dummy.rs @@ -65,10 +65,6 @@ impl PublicKey { unreachable!("Can never construct empty enum") } - pub fn to_protobuf_encoding(&self) -> Vec { - unreachable!("Can never encode empty enum") - } - #[deprecated(since = "0.2.0", note = "Renamed to `encode_protobuf`")] pub fn to_protobuf_encoding(&self) -> Vec { unreachable!("Can never encode empty enum") From 676b7e67888002f28e633c89c8c23c8e37b910ec Mon Sep 17 00:00:00 2001 From: drHuangMHT Date: Wed, 12 Apr 2023 13:20:22 +0800 Subject: [PATCH 60/83] revert removal of 'into' implementation on 'PublicKey' and deprecate instead --- identity/src/keypair.rs | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/identity/src/keypair.rs 
b/identity/src/keypair.rs index 92f2be64887..add6c2008e6 100644 --- a/identity/src/keypair.rs +++ b/identity/src/keypair.rs @@ -489,21 +489,42 @@ impl PublicKey { } } + + #[cfg(feature = "ed25519")] + pub fn into_ed25519(self) -> Option { + self.try_into().ok() + } + #[cfg(feature = "ed25519")] pub fn try_into_ed25519(self) -> Result { self.try_into() } + #[cfg(feature = "secp256k1")] + pub fn into_secp256k1(self) -> Option { + self.try_into().ok() + } + #[cfg(feature = "secp256k1")] pub fn try_into_secp256k1(self) -> Result { self.try_into() } + #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] + pub fn into_rsa(self) -> Option { + self.try_into().ok() + } + #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] pub fn try_into_rsa(self) -> Result { self.try_into() } + #[cfg(feature = "ecdsa")] + pub fn into_ecdsa(self) -> Option { + self.try_into().ok() + } + #[cfg(feature = "ecdsa")] pub fn try_into_ecdsa(self) -> Result { self.try_into() From ffacd944a263c20922c494f02a705a70e2e071ea Mon Sep 17 00:00:00 2001 From: drHuangMHT Date: Fri, 14 Apr 2023 17:24:41 +0800 Subject: [PATCH 61/83] clippy fix --- identity/src/rsa.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/identity/src/rsa.rs b/identity/src/rsa.rs index a10ef335da2..1dc0a3af0aa 100644 --- a/identity/src/rsa.rs +++ b/identity/src/rsa.rs @@ -375,7 +375,7 @@ mod tests { fn rsa_x509_encode_decode() { fn prop(SomeKeypair(kp): SomeKeypair) -> Result { let pk = kp.public(); - PublicKey::try_decode_x509(&pk.encode_x509()) + PublicKey::try_decode_x509(pk.encode_x509()) .map_err(|e| e.to_string()) .map(|pk2| pk2 == pk) } From 6c8b5c76054dbcad5f5df024339b5c152a6afa21 Mon Sep 17 00:00:00 2001 From: drHuangMHT Date: Mon, 17 Apr 2023 08:32:14 +0800 Subject: [PATCH 62/83] refactor 'match' expression to '?' 
operator --- identity/src/rsa.rs | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/identity/src/rsa.rs b/identity/src/rsa.rs index 1dc0a3af0aa..00846ef897a 100644 --- a/identity/src/rsa.rs +++ b/identity/src/rsa.rs @@ -59,17 +59,14 @@ impl Keypair { /// /// [RFC5208]: https://tools.ietf.org/html/rfc5208#section-5 pub fn try_decode_pkcs8(bytes: &mut [u8]) -> Result { - match RsaKeyPair::from_pkcs8(bytes) { - Ok(kp) => { - let kp = Self { - inner: Arc::new(kp), - raw_key: bytes.to_vec(), - }; - bytes.zeroize(); - Ok(kp) - } - Err(e) => Err(DecodingError::failed_to_parse("RSA", e)), - } + let kp = RsaKeyPair::from_pkcs8(bytes) + .map_err(|e| DecodingError::failed_to_parse("RSA PKCS#8 PrivateKeyInfo", e))?; + let kp = Keypair { + inner: Arc::new(kp), + raw_key: bytes.to_vec(), + }; + bytes.zeroize(); + Ok(kp) } /// Get the public key from the keypair. From c231e1d2d48c0dd58c8cbbde8e8ecfddfd370e32 Mon Sep 17 00:00:00 2001 From: drHuangMHT Date: Mon, 17 Apr 2023 08:36:09 +0800 Subject: [PATCH 63/83] rename test keys --- identity/src/rsa.rs | 6 +++--- .../test/{private_key_ed25519.pk8 => ed25519.pk8} | Bin .../test/{private_key_rsa-2048.pk8 => rsa-2048.pk8} | Bin .../test/{private_key_rsa-3072.pk8 => rsa-3072.pk8} | Bin .../test/{private_key_rsa-4096.pk8 => rsa-4096.pk8} | Bin .../{private_key_secp256k1.pk8 => secp256k1.pk8} | Bin .../{private_key_secp256r1.pk8 => secp256r1.pk8} | Bin .../{private_key_secp384r1.pk8 => secp384r1.pk8} | Bin 8 files changed, 3 insertions(+), 3 deletions(-) rename identity/src/test/{private_key_ed25519.pk8 => ed25519.pk8} (100%) rename identity/src/test/{private_key_rsa-2048.pk8 => rsa-2048.pk8} (100%) rename identity/src/test/{private_key_rsa-3072.pk8 => rsa-3072.pk8} (100%) rename identity/src/test/{private_key_rsa-4096.pk8 => rsa-4096.pk8} (100%) rename identity/src/test/{private_key_secp256k1.pk8 => secp256k1.pk8} (100%) rename identity/src/test/{private_key_secp256r1.pk8 => secp256r1.pk8} (100%) 
rename identity/src/test/{private_key_secp384r1.pk8 => secp384r1.pk8} (100%) diff --git a/identity/src/rsa.rs b/identity/src/rsa.rs index 00846ef897a..809ce9ba46e 100644 --- a/identity/src/rsa.rs +++ b/identity/src/rsa.rs @@ -347,9 +347,9 @@ mod tests { use super::*; use quickcheck::*; - const KEY1: &[u8] = include_bytes!("test/private_key_rsa-2048.pk8"); - const KEY2: &[u8] = include_bytes!("test/private_key_rsa-3072.pk8"); - const KEY3: &[u8] = include_bytes!("test/private_key_rsa-4096.pk8"); + const KEY1: &[u8] = include_bytes!("test/rsa-2048.pk8"); + const KEY2: &[u8] = include_bytes!("test/rsa-3072.pk8"); + const KEY3: &[u8] = include_bytes!("test/rsa-4096.pk8"); #[derive(Clone, Debug)] struct SomeKeypair(Keypair); diff --git a/identity/src/test/private_key_ed25519.pk8 b/identity/src/test/ed25519.pk8 similarity index 100% rename from identity/src/test/private_key_ed25519.pk8 rename to identity/src/test/ed25519.pk8 diff --git a/identity/src/test/private_key_rsa-2048.pk8 b/identity/src/test/rsa-2048.pk8 similarity index 100% rename from identity/src/test/private_key_rsa-2048.pk8 rename to identity/src/test/rsa-2048.pk8 diff --git a/identity/src/test/private_key_rsa-3072.pk8 b/identity/src/test/rsa-3072.pk8 similarity index 100% rename from identity/src/test/private_key_rsa-3072.pk8 rename to identity/src/test/rsa-3072.pk8 diff --git a/identity/src/test/private_key_rsa-4096.pk8 b/identity/src/test/rsa-4096.pk8 similarity index 100% rename from identity/src/test/private_key_rsa-4096.pk8 rename to identity/src/test/rsa-4096.pk8 diff --git a/identity/src/test/private_key_secp256k1.pk8 b/identity/src/test/secp256k1.pk8 similarity index 100% rename from identity/src/test/private_key_secp256k1.pk8 rename to identity/src/test/secp256k1.pk8 diff --git a/identity/src/test/private_key_secp256r1.pk8 b/identity/src/test/secp256r1.pk8 similarity index 100% rename from identity/src/test/private_key_secp256r1.pk8 rename to identity/src/test/secp256r1.pk8 diff --git 
a/identity/src/test/private_key_secp384r1.pk8 b/identity/src/test/secp384r1.pk8 similarity index 100% rename from identity/src/test/private_key_secp384r1.pk8 rename to identity/src/test/secp384r1.pk8 From 4ecffb3970de7c20a5f5ee46512acfafd21d773f Mon Sep 17 00:00:00 2001 From: drHuangMHT Date: Mon, 17 Apr 2023 08:38:06 +0800 Subject: [PATCH 64/83] revert 'try_decode_x509' signature change --- identity/src/rsa.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/identity/src/rsa.rs b/identity/src/rsa.rs index 809ce9ba46e..77a8df0d5fc 100644 --- a/identity/src/rsa.rs +++ b/identity/src/rsa.rs @@ -149,8 +149,8 @@ impl PublicKey { /// Try to decode an RSA public key from a DER-encoded X.509 SubjectPublicKeyInfo /// structure. See also `encode_x509`. - pub fn try_decode_x509(pk: impl AsRef<[u8]>) -> Result { - Asn1SubjectPublicKeyInfo::decode(pk.as_ref()) + pub fn try_decode_x509(pk: &[u8]) -> Result { + Asn1SubjectPublicKeyInfo::decode(pk) .map_err(|e| DecodingError::failed_to_parse("RSA X.509", e)) .map(|spki| spki.subjectPublicKey.0) } @@ -372,7 +372,7 @@ mod tests { fn rsa_x509_encode_decode() { fn prop(SomeKeypair(kp): SomeKeypair) -> Result { let pk = kp.public(); - PublicKey::try_decode_x509(pk.encode_x509()) + PublicKey::try_decode_x509(&pk.encode_x509()) .map_err(|e| e.to_string()) .map(|pk2| pk2 == pk) } From c958c049fae9f284df9bfabbca568536e2a6abcb Mon Sep 17 00:00:00 2001 From: drHuangMHT Date: Mon, 17 Apr 2023 08:42:20 +0800 Subject: [PATCH 65/83] revert 'to_protobuf_encoding' signature change --- identity/src/keypair.rs | 4 ++-- identity/src/keypair_dummy.rs | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/identity/src/keypair.rs b/identity/src/keypair.rs index d21cbf572f6..c09033aa120 100644 --- a/identity/src/keypair.rs +++ b/identity/src/keypair.rs @@ -243,8 +243,8 @@ impl Keypair { /// Encode a private key as protobuf structure. 
#[deprecated(since = "0.2.0", note = "Renamed to `encode_protobuf`")] - pub fn to_protobuf_encoding(&self) -> Vec { - self.encode_protobuf() + pub fn to_protobuf_encoding(&self) -> Result, DecodingError> { + Ok(self.encode_protobuf()) } /// Encode a private key as protobuf structure. diff --git a/identity/src/keypair_dummy.rs b/identity/src/keypair_dummy.rs index ea2741c8b6d..c561b6fee5a 100644 --- a/identity/src/keypair_dummy.rs +++ b/identity/src/keypair_dummy.rs @@ -33,7 +33,7 @@ impl Keypair { } #[deprecated(since = "0.2.0", note = "Renamed to `encode_protobuf`")] - pub fn to_protobuf_encoding(&self) -> Vec { + pub fn to_protobuf_encoding(&self) -> Result, DecodingError> { unreachable!("Can never encode empty enum") } From b613d1c2576d92352cf77c51862040f99ab9c37a Mon Sep 17 00:00:00 2001 From: drHuangMHT Date: Mon, 17 Apr 2023 10:59:52 +0800 Subject: [PATCH 66/83] implement encoding to pkcs#8 for ecdsa keys --- Cargo.lock | 207 ++++++++++++++++++++++++++++++++-------- identity/Cargo.toml | 3 +- identity/src/ecdsa.rs | 7 +- identity/src/keypair.rs | 2 +- 4 files changed, 176 insertions(+), 43 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ce57bf43d82..c9aa985b12a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -508,6 +508,12 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "349a06037c7bf932dd7e7d1f653678b2038b9ad46a74102f1fc7bd7872678cce" +[[package]] +name = "base16ct" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c7f02d4ea65f2c1853089ffd8d2787bdbc63de2f0d29dedbcf8ccdfa0ccd4cf" + [[package]] name = "base64" version = "0.13.1" @@ -855,9 +861,9 @@ dependencies = [ [[package]] name = "const-oid" -version = "0.9.1" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cec318a675afcb6a1ea1d4340e2d377e56e47c266f28043ceccbf4412ddfdd3b" +checksum = "520fbf3c07483f94e3e3ca9d0cfd913d7718ef2483d2cfd91c0d9e91474ab913" 
[[package]] name = "core-foundation" @@ -1020,6 +1026,18 @@ dependencies = [ "zeroize", ] +[[package]] +name = "crypto-bigint" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c2538c4e68e52548bacb3e83ac549f903d44f011ac9d5abb5e132e67d0808f7" +dependencies = [ + "generic-array", + "rand_core 0.6.4", + "subtle", + "zeroize", +] + [[package]] name = "crypto-common" version = "0.1.6" @@ -1206,7 +1224,18 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f1a467a65c5e759bce6e65eaf91cc29f466cdc57cb65777bd646872a8a1fd4de" dependencies = [ "const-oid", - "pem-rfc7468", + "pem-rfc7468 0.6.0", + "zeroize", +] + +[[package]] +name = "der" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "82b10af9f9f9f2134a42d3f8aa74658660f2e0234b0eb81bd171df8aa32779ed" +dependencies = [ + "const-oid", + "pem-rfc7468 0.7.0", "zeroize", ] @@ -1285,6 +1314,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8168378f4e5023e7218c89c891c0fd8ecdb5e5e4f18cb78f38cf245dd021e76f" dependencies = [ "block-buffer 0.10.3", + "const-oid", "crypto-common", "subtle", ] @@ -1324,21 +1354,22 @@ version = "0.14.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "413301934810f597c1d19ca71c8710e99a3f1ba28a0d2ebc01551a2daeea3c5c" dependencies = [ - "der", - "elliptic-curve", - "rfc6979", + "der 0.6.1", + "elliptic-curve 0.12.3", + "rfc6979 0.3.1", "signature 1.6.4", ] [[package]] name = "ecdsa" -version = "0.15.1" +version = "0.16.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12844141594ad74185a926d030f3b605f6a903b4e3fec351f3ea338ac5b7637e" +checksum = "a48e5d537b8a30c0b023116d981b16334be1485af7ca68db3a2b7024cbc957fd" dependencies = [ - "der", - "elliptic-curve", - "rfc6979", + "der 0.7.3", + "digest 0.10.6", + "elliptic-curve 0.13.4", + "rfc6979 0.4.0", "signature 2.0.0", ] @@ -1377,18 +1408,38 @@ 
version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e7bb888ab5300a19b8e5bceef25ac745ad065f3c9f7efc6de1b91958110891d3" dependencies = [ - "base16ct", - "crypto-bigint", - "der", + "base16ct 0.1.1", + "crypto-bigint 0.4.9", + "der 0.6.1", "digest 0.10.6", - "ff", + "ff 0.12.1", "generic-array", - "group", + "group 0.12.1", "hkdf", - "pem-rfc7468", - "pkcs8", + "pem-rfc7468 0.6.0", + "pkcs8 0.9.0", + "rand_core 0.6.4", + "sec1 0.3.0", + "subtle", + "zeroize", +] + +[[package]] +name = "elliptic-curve" +version = "0.13.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75c71eaa367f2e5d556414a8eea812bc62985c879748d6403edabd9cb03f16e7" +dependencies = [ + "base16ct 0.2.0", + "crypto-bigint 0.5.1", + "digest 0.10.6", + "ff 0.13.0", + "generic-array", + "group 0.13.0", + "pem-rfc7468 0.7.0", + "pkcs8 0.10.2", "rand_core 0.6.4", - "sec1", + "sec1 0.7.1", "subtle", "zeroize", ] @@ -1474,6 +1525,16 @@ dependencies = [ "subtle", ] +[[package]] +name = "ff" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ded41244b729663b1e574f1b4fb731469f69f79c17667b5d776b16cda0479449" +dependencies = [ + "rand_core 0.6.4", + "subtle", +] + [[package]] name = "fiat-crypto" version = "0.1.17" @@ -1654,6 +1715,7 @@ checksum = "bff49e947297f3312447abdca79f45f4738097cc82b06e72054d2223f601f1b9" dependencies = [ "typenum", "version_check", + "zeroize", ] [[package]] @@ -1718,7 +1780,18 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5dfbfb3a6cfbd390d5c9564ab283a0349b9b9fcd46a706c1eb10e0db70bfbac7" dependencies = [ - "ff", + "ff 0.12.1", + "rand_core 0.6.4", + "subtle", +] + +[[package]] +name = "group" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" +dependencies = [ + "ff 0.13.0", "rand_core 0.6.4", "subtle", ] 
@@ -2501,19 +2574,20 @@ dependencies = [ "base64 0.21.0", "bs58", "criterion", + "ecdsa 0.16.6", "ed25519-dalek", "hex-literal", "libsecp256k1", "log", "multiaddr", "multihash", - "p256 0.12.0", + "p256 0.13.2", "quick-protobuf", "quickcheck-ext", "rand 0.8.5", "ring", "rmp-serde", - "sec1", + "sec1 0.3.0", "serde", "serde_json", "sha2 0.10.6", @@ -3495,18 +3569,18 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "51f44edd08f51e2ade572f141051021c5af22677e42b7dd28a88155151c33594" dependencies = [ "ecdsa 0.14.8", - "elliptic-curve", + "elliptic-curve 0.12.3", "sha2 0.10.6", ] [[package]] name = "p256" -version = "0.12.0" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49c124b3cbce43bcbac68c58ec181d98ed6cc7e6d0aa7c3ba97b2563410b0e55" +checksum = "c9863ad85fa8f4460f9c48cb909d38a0d689dba1f6f6988a5e3e0d31071bcd4b" dependencies = [ - "ecdsa 0.15.1", - "elliptic-curve", + "ecdsa 0.16.6", + "elliptic-curve 0.13.4", "primeorder", "sha2 0.10.6", ] @@ -3518,7 +3592,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dfc8c5bf642dde52bb9e87c0ecd8ca5a76faac2eeed98dedb7c717997e1080aa" dependencies = [ "ecdsa 0.14.8", - "elliptic-curve", + "elliptic-curve 0.12.3", "sha2 0.10.6", ] @@ -3616,6 +3690,15 @@ dependencies = [ "base64ct", ] +[[package]] +name = "pem-rfc7468" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88b39c9bfcfc231068454382784bb460aae594343fb030d46e9f50a645418412" +dependencies = [ + "base64ct", +] + [[package]] name = "percent-encoding" version = "2.2.0" @@ -3677,8 +3760,18 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9eca2c590a5f85da82668fa685c09ce2888b9430e83299debf1f34b65fd4a4ba" dependencies = [ - "der", - "spki", + "der 0.6.1", + "spki 0.6.0", +] + +[[package]] +name = "pkcs8" +version = "0.10.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" +dependencies = [ + "der 0.7.3", + "spki 0.7.1", ] [[package]] @@ -3777,11 +3870,11 @@ checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" [[package]] name = "primeorder" -version = "0.12.1" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b54f7131b3dba65a2f414cf5bd25b66d4682e4608610668eae785750ba4c5b2" +checksum = "cf8d3875361e28f7753baefef104386e7aa47642c93023356d97fdef4003bfb5" dependencies = [ - "elliptic-curve", + "elliptic-curve 0.13.4", ] [[package]] @@ -4146,11 +4239,21 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7743f17af12fa0b03b803ba12cd6a8d9483a587e89c69445e3909655c0b9fabb" dependencies = [ - "crypto-bigint", + "crypto-bigint 0.4.9", "hmac 0.12.1", "zeroize", ] +[[package]] +name = "rfc6979" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8dd2a808d456c4a54e300a23e9f5a67e122c3024119acbfd73e3bf664491cb2" +dependencies = [ + "hmac 0.12.1", + "subtle", +] + [[package]] name = "ring" version = "0.16.20" @@ -4389,10 +4492,24 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3be24c1842290c45df0a7bf069e0c268a747ad05a192f2fd7dcfdbc1cba40928" dependencies = [ - "base16ct", - "der", + "base16ct 0.1.1", + "der 0.6.1", "generic-array", - "pkcs8", + "pkcs8 0.9.0", + "subtle", + "zeroize", +] + +[[package]] +name = "sec1" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48518a2b5775ba8ca5b46596aae011caa431e6ce7e4a67ead66d92f08884220e" +dependencies = [ + "base16ct 0.2.0", + "der 0.7.3", + "generic-array", + "pkcs8 0.10.2", "subtle", "zeroize", ] @@ -4623,7 +4740,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"67cf02bbac7a337dc36e4f5a693db6c21e7863f45070f7064577eb4367a3212b" dependencies = [ "base64ct", - "der", + "der 0.6.1", +] + +[[package]] +name = "spki" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37a5be806ab6f127c3da44b7378837ebf01dadca8510a0e572460216b228bd0e" +dependencies = [ + "base64ct", + "der 0.7.3", ] [[package]] @@ -5386,7 +5513,7 @@ dependencies = [ "ccm", "curve25519-dalek 3.2.0", "der-parser 8.1.0", - "elliptic-curve", + "elliptic-curve 0.12.3", "hkdf", "hmac 0.10.1", "log", @@ -5399,7 +5526,7 @@ dependencies = [ "rcgen 0.9.3", "ring", "rustls 0.19.1", - "sec1", + "sec1 0.3.0", "serde", "sha-1", "sha2 0.9.9", diff --git a/identity/Cargo.toml b/identity/Cargo.toml index 3ebae8949f6..25335f6f38c 100644 --- a/identity/Cargo.toml +++ b/identity/Cargo.toml @@ -19,7 +19,7 @@ libsecp256k1 = { version = "0.7.0", optional = true } log = "0.4" multiaddr = { version = "0.17.1", optional = true } multihash = { version = "0.17.0", default-features = false, features = ["std"], optional = true } -p256 = { version = "0.12", default-features = false, features = ["ecdsa", "std"], optional = true } +p256 = { version = "0.13", default-features = false, features = ["ecdsa", "std","pkcs8"], optional = true } quick-protobuf = { version = "0.8.1", optional = true } rand = { version = "0.8", optional = true } sec1 = { version = "0.3.0", features = ["std"], optional = true } # Activate `std` feature until https://github.com/RustCrypto/traits/pull/1131 is released. 
@@ -28,6 +28,7 @@ sha2 = { version = "0.10.0", optional = true } thiserror = { version = "1.0", optional = true } void = { version = "1.0", optional = true } zeroize = { version = "1.6", optional = true } +ecdsa = {version = "0.16.6", features = ["pem"]} [target.'cfg(not(target_arch = "wasm32"))'.dependencies] ring = { version = "0.16.9", features = ["alloc", "std"], default-features = false, optional = true} diff --git a/identity/src/ecdsa.rs b/identity/src/ecdsa.rs index 67f4840a695..c109906b483 100644 --- a/identity/src/ecdsa.rs +++ b/identity/src/ecdsa.rs @@ -29,6 +29,7 @@ use p256::{ signature::{Signer, Verifier}, Signature, SigningKey, VerifyingKey, }, + elliptic_curve::pkcs8::EncodePrivateKey, EncodedPoint, }; use void::Void; @@ -125,10 +126,14 @@ impl SecretKey { /// Try to parse a secret key from a byte buffer containing raw scalar of the key. pub fn try_from_bytes(buf: impl AsRef<[u8]>) -> Result { - SigningKey::from_bytes(buf.as_ref()) + SigningKey::from_bytes(buf.as_ref().into()) .map_err(|err| DecodingError::failed_to_parse("ecdsa p256 secret key", err)) .map(SecretKey) } + + pub fn encode_pkcs8_der(&self) -> Vec { + self.0.to_pkcs8_der().unwrap().to_bytes().to_vec() + } } impl fmt::Debug for SecretKey { diff --git a/identity/src/keypair.rs b/identity/src/keypair.rs index c09033aa120..11e408316e2 100644 --- a/identity/src/keypair.rs +++ b/identity/src/keypair.rs @@ -273,7 +273,7 @@ impl Keypair { #[cfg(feature = "ecdsa")] Self::Ecdsa(data) => proto::PrivateKey { Type: proto::KeyType::ECDSA, - Data: data.secret().to_bytes(), + Data: data.secret().encode_pkcs8_der(), }, }; From 24a5a86f7452e0475045ecc56b9a859f7eabc249 Mon Sep 17 00:00:00 2001 From: drHuangMHT Date: Tue, 25 Apr 2023 20:04:03 +0800 Subject: [PATCH 67/83] formatting --- identity/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/identity/src/lib.rs b/identity/src/lib.rs index 87b02cd5a2e..da5ab73f735 100644 --- a/identity/src/lib.rs +++ b/identity/src/lib.rs @@ 
-126,8 +126,8 @@ pub use keypair::{Keypair, PublicKey}; #[cfg(feature = "peerid")] pub use peer_id::{ParseError, PeerId}; -#[derive(Debug, PartialEq, Eq)] /// The type of key a `KeyPair` is holding. +#[derive(Debug, PartialEq, Eq)] pub enum KeyType { Ed25519, RSA, From aeea24b653e376cec550d3a02f35a4ff67ba7c18 Mon Sep 17 00:00:00 2001 From: drHuangMHT Date: Wed, 26 Apr 2023 13:38:55 +0800 Subject: [PATCH 68/83] remove 'deprecation since' --- identity/src/ecdsa.rs | 3 --- identity/src/ed25519.rs | 10 ++-------- identity/src/keypair.rs | 6 +----- identity/src/keypair_dummy.rs | 6 ++---- identity/src/rsa.rs | 3 +-- identity/src/secp256k1.rs | 10 ++-------- 6 files changed, 8 insertions(+), 30 deletions(-) diff --git a/identity/src/ecdsa.rs b/identity/src/ecdsa.rs index c109906b483..f99c334ffd5 100644 --- a/identity/src/ecdsa.rs +++ b/identity/src/ecdsa.rs @@ -117,7 +117,6 @@ impl SecretKey { /// Decode a secret key from a byte buffer containing raw scalar of the key. #[deprecated( - since = "0.2.0", note = "This method name does not follow Rust naming conventions, use `SecretKey::try_from_bytes` instead" )] pub fn from_bytes(buf: &[u8]) -> Result { @@ -158,7 +157,6 @@ impl PublicKey { /// Decode a public key from a byte buffer containing raw components of a key with or without compression. #[deprecated( - since = "0.2.0", note = "This method name does not follow Rust naming conventions, use `PublicKey::try_from_bytes` instead." )] pub fn from_bytes(k: &[u8]) -> Result { @@ -188,7 +186,6 @@ impl PublicKey { /// Decode a public key into a DER encoded byte buffer as defined by SEC1 standard. #[deprecated( - since = "0.2.0", note = "This method name does not follow Rust naming conventions, use `PublicKey::try_decode_der` instead." 
)] pub fn decode_der(k: &[u8]) -> Result { diff --git a/identity/src/ed25519.rs b/identity/src/ed25519.rs index 90782de378a..4887a425e41 100644 --- a/identity/src/ed25519.rs +++ b/identity/src/ed25519.rs @@ -41,7 +41,7 @@ impl Keypair { /// Encode the keypair into a byte array by concatenating the bytes /// of the secret scalar and the compressed public point, /// an informal standard for encoding Ed25519 keypairs. - #[deprecated(since = "0.2.0", note = "Renamed to `Keypair::to_bytes`")] + #[deprecated(note = "Renamed to `Keypair::to_bytes`")] pub fn encode(&self) -> [u8; 64] { self.to_bytes() } @@ -58,7 +58,6 @@ impl Keypair { /// /// Note that this binary format is the same as `ed25519_dalek`'s and `ed25519_zebra`'s. #[deprecated( - since = "0.2.0", note = "This method name does not follow Rust naming conventions, use `Keypair::try_from_bytes` instead." )] pub fn decode(kp: &mut [u8]) -> Result { @@ -184,10 +183,7 @@ impl PublicKey { /// Encode the public key into a byte array in compressed form, i.e. /// where one coordinate is represented by a single bit. - #[deprecated( - since = "0.2.0", - note = "Renamed to `PublicKey::to_bytes` to reflect actual behaviour." - )] + #[deprecated(note = "Renamed to `PublicKey::to_bytes` to reflect actual behaviour.")] pub fn encode(&self) -> [u8; 32] { self.to_bytes() } @@ -200,7 +196,6 @@ impl PublicKey { /// Decode a public key from a byte array as produced by `to_bytes`. #[deprecated( - since = "0.2.0", note = "This method name does not follow Rust naming conventions, use `PublicKey::try_from_bytes` instead." )] pub fn decode(k: &[u8]) -> Result { @@ -255,7 +250,6 @@ impl SecretKey { /// If the bytes do not constitute a valid Ed25519 secret key, an error is /// returned. #[deprecated( - since = "0.2.0", note = "This method name does not follow Rust naming conventions, use `SecretKey::try_from_bytes` instead." 
)] #[allow(unused_mut)] diff --git a/identity/src/keypair.rs b/identity/src/keypair.rs index 7bf2cc213bf..3180ed5db09 100644 --- a/identity/src/keypair.rs +++ b/identity/src/keypair.rs @@ -165,7 +165,6 @@ impl Keypair { /// [RFC5208]: https://tools.ietf.org/html/rfc5208#section-5 #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] #[deprecated( - since = "0.2.0", note = "Deprecated, use `rsa::Keypair::try_decode_pkcs8` and promote it into `Keypair` instead." )] pub fn rsa_from_pkcs8(pkcs8_der: &mut [u8]) -> Result { @@ -179,7 +178,6 @@ impl Keypair { /// [RFC5915]: https://tools.ietf.org/html/rfc5915 #[cfg(feature = "secp256k1")] #[deprecated( - since = "0.2.0", note = "Deprecated, use `secp256k1::Keypair::try_from_bytes` and promote it into `Keypair` instead." )] pub fn secp256k1_from_der(der: &mut [u8]) -> Result { @@ -194,7 +192,6 @@ impl Keypair { /// Note that this binary format is the same as `ed25519_dalek`'s and `ed25519_zebra`'s. #[cfg(feature = "ed25519")] #[deprecated( - since = "0.2.0", note = "Deprecated, use `ed25519::Keypair::try_decode` and promote it into `Keypair` instead." )] pub fn ed25519_from_bytes(bytes: impl AsMut<[u8]>) -> Result { @@ -238,7 +235,7 @@ impl Keypair { } /// Encode a private key as protobuf structure. - #[deprecated(since = "0.2.0", note = "Renamed to `encode_protobuf`")] + #[deprecated(note = "Renamed to `encode_protobuf`")] pub fn to_protobuf_encoding(&self) -> Result, DecodingError> { Ok(self.encode_protobuf()) } @@ -282,7 +279,6 @@ impl Keypair { /// Decode a private key from a protobuf structure and parse it as a [`Keypair`]. #[deprecated( - since = "0.2.0", note = "This method name does not follow Rust naming conventions, use `Keypair::try_decode_protobuf` instead." 
)] pub fn from_protobuf_encoding(bytes: &[u8]) -> Result { diff --git a/identity/src/keypair_dummy.rs b/identity/src/keypair_dummy.rs index c561b6fee5a..7ae7c5030f6 100644 --- a/identity/src/keypair_dummy.rs +++ b/identity/src/keypair_dummy.rs @@ -32,7 +32,7 @@ impl Keypair { unreachable!("Can never construct empty enum") } - #[deprecated(since = "0.2.0", note = "Renamed to `encode_protobuf`")] + #[deprecated(note = "Renamed to `encode_protobuf`")] pub fn to_protobuf_encoding(&self) -> Result, DecodingError> { unreachable!("Can never encode empty enum") } @@ -42,7 +42,6 @@ impl Keypair { } #[deprecated( - since = "0.2.0", note = "This method name does not follow Rust naming conventions, use `Keypair::try_decode_protobuf` instead." )] pub fn from_protobuf_encoding(bytes: &[u8]) -> Result { @@ -65,7 +64,7 @@ impl PublicKey { unreachable!("Can never construct empty enum") } - #[deprecated(since = "0.2.0", note = "Renamed to `encode_protobuf`")] + #[deprecated(note = "Renamed to `encode_protobuf`")] pub fn to_protobuf_encoding(&self) -> Vec { unreachable!("Can never encode empty enum") } @@ -75,7 +74,6 @@ impl PublicKey { } #[deprecated( - since = "0.2.0", note = "This method name does not follow Rust naming conventions, use `PublicKey::try_decode_protobuf` instead." )] pub fn from_protobuf_encoding(bytes: &[u8]) -> Result { diff --git a/identity/src/rsa.rs b/identity/src/rsa.rs index 77a8df0d5fc..3c7d8989590 100644 --- a/identity/src/rsa.rs +++ b/identity/src/rsa.rs @@ -49,7 +49,7 @@ impl Keypair { /// format (i.e. unencrypted) as defined in [RFC5208]. /// /// [RFC5208]: https://tools.ietf.org/html/rfc5208#section-5 - #[deprecated(since = "0.2.0", note = "Renamed to `Keypair::try_decode_pkcs8`.")] + #[deprecated(note = "Renamed to `Keypair::try_decode_pkcs8`.")] pub fn from_pkcs8(der: &mut [u8]) -> Result { Self::try_decode_pkcs8(der) } @@ -140,7 +140,6 @@ impl PublicKey { /// Decode an RSA public key from a DER-encoded X.509 SubjectPublicKeyInfo /// structure. 
See also `encode_x509`. #[deprecated( - since = "0.2.0", note = "This method name does not follow Rust naming conventions, use `PublicKey::try_decode_x509` instead." )] pub fn decode_x509(pk: &[u8]) -> Result { diff --git a/identity/src/secp256k1.rs b/identity/src/secp256k1.rs index f0bf591e7da..0b4b97710b9 100644 --- a/identity/src/secp256k1.rs +++ b/identity/src/secp256k1.rs @@ -105,7 +105,6 @@ impl SecretKey { /// /// Note that the expected binary format is the same as `libsecp256k1`'s. #[deprecated( - since = "0.2.0", note = "This method name does not follow Rust naming conventions, use `SecretKey::try_from_bytes` instead." )] #[allow(unused_mut)] @@ -131,7 +130,6 @@ impl SecretKey { /// /// [RFC5915]: https://tools.ietf.org/html/rfc5915 #[deprecated( - since = "0.2.0", note = "This method name does not follow Rust naming conventions, use `SecretKey::try_decode_der` instead." )] pub fn from_der(der: impl AsMut<[u8]>) -> Result { @@ -236,7 +234,7 @@ impl PublicKey { /// Encode the public key in compressed form, i.e. with one coordinate /// represented by a single bit. - #[deprecated(since = "0.2.0", note = "Renamed to `PublicKey::to_bytes`.")] + #[deprecated(note = "Renamed to `PublicKey::to_bytes`.")] pub fn encode(&self) -> [u8; 33] { self.to_bytes() } @@ -248,10 +246,7 @@ impl PublicKey { } /// Encode the public key in uncompressed form. - #[deprecated( - since = "0.2.0", - note = "Renamed to `PublicKey::to_bytes_uncompressed`." - )] + #[deprecated(note = "Renamed to `PublicKey::to_bytes_uncompressed`.")] pub fn encode_uncompressed(&self) -> [u8; 65] { self.to_bytes_uncompressed() } @@ -264,7 +259,6 @@ impl PublicKey { /// Decode a public key from a byte slice in the the format produced /// by `encode`. #[deprecated( - since = "0.2.0", note = "This method name does not follow Rust naming conventions, use `PublicKey::try_from_bytes` instead." 
)] pub fn decode(k: &[u8]) -> Result { From 54e2fad9fe3517f495563fb1bbfa8f87f6d42aed Mon Sep 17 00:00:00 2001 From: drHuangMHT Date: Wed, 26 Apr 2023 13:43:45 +0800 Subject: [PATCH 69/83] replace 'unwrap' with 'expect' --- identity/src/ecdsa.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/identity/src/ecdsa.rs b/identity/src/ecdsa.rs index f99c334ffd5..1ed6e937a0a 100644 --- a/identity/src/ecdsa.rs +++ b/identity/src/ecdsa.rs @@ -131,7 +131,7 @@ impl SecretKey { } pub fn encode_pkcs8_der(&self) -> Vec { - self.0.to_pkcs8_der().unwrap().to_bytes().to_vec() + self.0.to_pkcs8_der().expect("Encoding to pkcs#8 should succeed").to_bytes().to_vec() } } From 2d244c79ca90d5f3f8aaff4bd1e5f23957f740b8 Mon Sep 17 00:00:00 2001 From: drHuangMHT Date: Wed, 26 Apr 2023 13:46:06 +0800 Subject: [PATCH 70/83] remove redundant 'Deprecated,' message --- identity/src/keypair.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/identity/src/keypair.rs b/identity/src/keypair.rs index 3180ed5db09..e484683cb21 100644 --- a/identity/src/keypair.rs +++ b/identity/src/keypair.rs @@ -165,7 +165,7 @@ impl Keypair { /// [RFC5208]: https://tools.ietf.org/html/rfc5208#section-5 #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] #[deprecated( - note = "Deprecated, use `rsa::Keypair::try_decode_pkcs8` and promote it into `Keypair` instead." + note = "Use `rsa::Keypair::try_decode_pkcs8` and promote it into `Keypair` instead." )] pub fn rsa_from_pkcs8(pkcs8_der: &mut [u8]) -> Result { #[allow(deprecated)] @@ -178,7 +178,7 @@ impl Keypair { /// [RFC5915]: https://tools.ietf.org/html/rfc5915 #[cfg(feature = "secp256k1")] #[deprecated( - note = "Deprecated, use `secp256k1::Keypair::try_from_bytes` and promote it into `Keypair` instead." + note = "Use `secp256k1::Keypair::try_from_bytes` and promote it into `Keypair` instead." 
)] pub fn secp256k1_from_der(der: &mut [u8]) -> Result { #[allow(deprecated)] @@ -192,7 +192,7 @@ impl Keypair { /// Note that this binary format is the same as `ed25519_dalek`'s and `ed25519_zebra`'s. #[cfg(feature = "ed25519")] #[deprecated( - note = "Deprecated, use `ed25519::Keypair::try_decode` and promote it into `Keypair` instead." + note = "Use `ed25519::Keypair::try_decode` and promote it into `Keypair` instead." )] pub fn ed25519_from_bytes(bytes: impl AsMut<[u8]>) -> Result { #[allow(deprecated)] From 2c188ca987876f4c056aba7021389396937e26dd Mon Sep 17 00:00:00 2001 From: drHuangMHT Date: Wed, 26 Apr 2023 13:53:20 +0800 Subject: [PATCH 71/83] rename 'to_raw_bytes' to 'encode_pkcs8_der' --- identity/src/keypair.rs | 2 +- identity/src/rsa.rs | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/identity/src/keypair.rs b/identity/src/keypair.rs index e484683cb21..e6e7362156f 100644 --- a/identity/src/keypair.rs +++ b/identity/src/keypair.rs @@ -256,7 +256,7 @@ impl Keypair { #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] Self::Rsa(data) => proto::PrivateKey { Type: proto::KeyType::RSA, - Data: data.to_raw_bytes(), + Data: data.encode_pkcs8_der(), }, #[cfg(feature = "secp256k1")] Self::Secp256k1(data) => proto::PrivateKey { diff --git a/identity/src/rsa.rs b/identity/src/rsa.rs index 3c7d8989590..a5197bc7544 100644 --- a/identity/src/rsa.rs +++ b/identity/src/rsa.rs @@ -33,7 +33,7 @@ use zeroize::Zeroize; #[derive(Clone)] pub struct Keypair { inner: Arc, - raw_key: Vec, + key: Vec, } impl std::fmt::Debug for Keypair { @@ -63,7 +63,7 @@ impl Keypair { .map_err(|e| DecodingError::failed_to_parse("RSA PKCS#8 PrivateKeyInfo", e))?; let kp = Keypair { inner: Arc::new(kp), - raw_key: bytes.to_vec(), + key: bytes.to_vec(), }; bytes.zeroize(); Ok(kp) @@ -88,14 +88,14 @@ impl Keypair { } /// Get the byte array used to parse the keypair from. 
- pub(crate) fn to_raw_bytes(&self) -> Vec { - self.raw_key.clone() + pub(crate) fn encode_pkcs8_der(&self) -> Vec { + self.key.clone() } } impl Drop for Keypair { fn drop(&mut self) { - self.raw_key.zeroize() + self.key.zeroize() } } From ba9f4edd62b9cb828abef3f276022b6b2067ac92 Mon Sep 17 00:00:00 2001 From: drHuangMHT Date: Wed, 26 Apr 2023 14:17:50 +0800 Subject: [PATCH 72/83] fix uncaught deprecated method --- identity/src/keypair.rs | 2 +- identity/src/lib.rs | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/identity/src/keypair.rs b/identity/src/keypair.rs index e6e7362156f..e94ed143c56 100644 --- a/identity/src/keypair.rs +++ b/identity/src/keypair.rs @@ -251,7 +251,7 @@ impl Keypair { #[cfg(feature = "ed25519")] Self::Ed25519(data) => proto::PrivateKey { Type: proto::KeyType::Ed25519, - Data: data.encode().to_vec(), + Data: data.to_bytes().to_vec(), }, #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] Self::Rsa(data) => proto::PrivateKey { diff --git a/identity/src/lib.rs b/identity/src/lib.rs index da5ab73f735..35130c34096 100644 --- a/identity/src/lib.rs +++ b/identity/src/lib.rs @@ -100,7 +100,7 @@ impl From<&PublicKey> for proto::PublicKey { #[cfg(feature = "ed25519")] PublicKey::Ed25519(key) => proto::PublicKey { Type: proto::KeyType::Ed25519, - Data: key.encode().to_vec(), + Data: key.to_bytes().to_vec(), }, #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] PublicKey::Rsa(key) => proto::PublicKey { @@ -110,7 +110,7 @@ impl From<&PublicKey> for proto::PublicKey { #[cfg(feature = "secp256k1")] PublicKey::Secp256k1(key) => proto::PublicKey { Type: proto::KeyType::Secp256k1, - Data: key.encode().to_vec(), + Data: key.to_bytes().to_vec(), }, #[cfg(feature = "ecdsa")] PublicKey::Ecdsa(key) => proto::PublicKey { From 1b11e4adae47633bc4095ae390b4ce9a8393a1b4 Mon Sep 17 00:00:00 2001 From: drHuangMHT Date: Thu, 27 Apr 2023 16:36:10 +0800 Subject: [PATCH 73/83] downgrade 'p256' and remove 'ecdsa' --- Cargo.lock | 203 
+++++++++----------------------------------- identity/Cargo.toml | 3 +- 2 files changed, 39 insertions(+), 167 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d4660f84d29..4d591b3a60d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -508,12 +508,6 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "349a06037c7bf932dd7e7d1f653678b2038b9ad46a74102f1fc7bd7872678cce" -[[package]] -name = "base16ct" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c7f02d4ea65f2c1853089ffd8d2787bdbc63de2f0d29dedbcf8ccdfa0ccd4cf" - [[package]] name = "base64" version = "0.13.1" @@ -1026,18 +1020,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "crypto-bigint" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c2538c4e68e52548bacb3e83ac549f903d44f011ac9d5abb5e132e67d0808f7" -dependencies = [ - "generic-array", - "rand_core 0.6.4", - "subtle", - "zeroize", -] - [[package]] name = "crypto-common" version = "0.1.6" @@ -1224,18 +1206,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f1a467a65c5e759bce6e65eaf91cc29f466cdc57cb65777bd646872a8a1fd4de" dependencies = [ "const-oid", - "pem-rfc7468 0.6.0", - "zeroize", -] - -[[package]] -name = "der" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82b10af9f9f9f2134a42d3f8aa74658660f2e0234b0eb81bd171df8aa32779ed" -dependencies = [ - "const-oid", - "pem-rfc7468 0.7.0", + "pem-rfc7468", "zeroize", ] @@ -1314,7 +1285,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8168378f4e5023e7218c89c891c0fd8ecdb5e5e4f18cb78f38cf245dd021e76f" dependencies = [ "block-buffer 0.10.3", - "const-oid", "crypto-common", "subtle", ] @@ -1354,22 +1324,21 @@ version = "0.14.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"413301934810f597c1d19ca71c8710e99a3f1ba28a0d2ebc01551a2daeea3c5c" dependencies = [ - "der 0.6.1", - "elliptic-curve 0.12.3", - "rfc6979 0.3.1", + "der", + "elliptic-curve", + "rfc6979", "signature 1.6.4", ] [[package]] name = "ecdsa" -version = "0.16.6" +version = "0.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a48e5d537b8a30c0b023116d981b16334be1485af7ca68db3a2b7024cbc957fd" +checksum = "12844141594ad74185a926d030f3b605f6a903b4e3fec351f3ea338ac5b7637e" dependencies = [ - "der 0.7.3", - "digest 0.10.6", - "elliptic-curve 0.13.4", - "rfc6979 0.4.0", + "der", + "elliptic-curve", + "rfc6979", "signature 2.0.0", ] @@ -1408,38 +1377,18 @@ version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e7bb888ab5300a19b8e5bceef25ac745ad065f3c9f7efc6de1b91958110891d3" dependencies = [ - "base16ct 0.1.1", - "crypto-bigint 0.4.9", - "der 0.6.1", + "base16ct", + "crypto-bigint", + "der", "digest 0.10.6", - "ff 0.12.1", + "ff", "generic-array", - "group 0.12.1", + "group", "hkdf", - "pem-rfc7468 0.6.0", - "pkcs8 0.9.0", + "pem-rfc7468", + "pkcs8", "rand_core 0.6.4", - "sec1 0.3.0", - "subtle", - "zeroize", -] - -[[package]] -name = "elliptic-curve" -version = "0.13.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75c71eaa367f2e5d556414a8eea812bc62985c879748d6403edabd9cb03f16e7" -dependencies = [ - "base16ct 0.2.0", - "crypto-bigint 0.5.1", - "digest 0.10.6", - "ff 0.13.0", - "generic-array", - "group 0.13.0", - "pem-rfc7468 0.7.0", - "pkcs8 0.10.2", - "rand_core 0.6.4", - "sec1 0.7.1", + "sec1", "subtle", "zeroize", ] @@ -1525,16 +1474,6 @@ dependencies = [ "subtle", ] -[[package]] -name = "ff" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ded41244b729663b1e574f1b4fb731469f69f79c17667b5d776b16cda0479449" -dependencies = [ - "rand_core 0.6.4", - "subtle", -] - [[package]] name = "fiat-crypto" version = "0.1.17" @@ -1715,7 
+1654,6 @@ checksum = "bff49e947297f3312447abdca79f45f4738097cc82b06e72054d2223f601f1b9" dependencies = [ "typenum", "version_check", - "zeroize", ] [[package]] @@ -1780,18 +1718,7 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5dfbfb3a6cfbd390d5c9564ab283a0349b9b9fcd46a706c1eb10e0db70bfbac7" dependencies = [ - "ff 0.12.1", - "rand_core 0.6.4", - "subtle", -] - -[[package]] -name = "group" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" -dependencies = [ - "ff 0.13.0", + "ff", "rand_core 0.6.4", "subtle", ] @@ -2574,20 +2501,19 @@ dependencies = [ "base64 0.21.0", "bs58", "criterion", - "ecdsa 0.16.6", "ed25519-dalek", "hex-literal", "libsecp256k1", "log", "multiaddr", "multihash", - "p256 0.13.2", + "p256 0.12.0", "quick-protobuf", "quickcheck-ext", "rand 0.8.5", "ring", "rmp-serde", - "sec1 0.3.0", + "sec1", "serde", "serde_json", "sha2 0.10.6", @@ -3569,18 +3495,18 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "51f44edd08f51e2ade572f141051021c5af22677e42b7dd28a88155151c33594" dependencies = [ "ecdsa 0.14.8", - "elliptic-curve 0.12.3", + "elliptic-curve", "sha2 0.10.6", ] [[package]] name = "p256" -version = "0.13.2" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9863ad85fa8f4460f9c48cb909d38a0d689dba1f6f6988a5e3e0d31071bcd4b" +checksum = "49c124b3cbce43bcbac68c58ec181d98ed6cc7e6d0aa7c3ba97b2563410b0e55" dependencies = [ - "ecdsa 0.16.6", - "elliptic-curve 0.13.4", + "ecdsa 0.15.1", + "elliptic-curve", "primeorder", "sha2 0.10.6", ] @@ -3592,7 +3518,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dfc8c5bf642dde52bb9e87c0ecd8ca5a76faac2eeed98dedb7c717997e1080aa" dependencies = [ "ecdsa 0.14.8", - "elliptic-curve 0.12.3", + "elliptic-curve", "sha2 0.10.6", ] @@ -3690,15 +3616,6 @@ 
dependencies = [ "base64ct", ] -[[package]] -name = "pem-rfc7468" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88b39c9bfcfc231068454382784bb460aae594343fb030d46e9f50a645418412" -dependencies = [ - "base64ct", -] - [[package]] name = "percent-encoding" version = "2.2.0" @@ -3760,18 +3677,8 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9eca2c590a5f85da82668fa685c09ce2888b9430e83299debf1f34b65fd4a4ba" dependencies = [ - "der 0.6.1", - "spki 0.6.0", -] - -[[package]] -name = "pkcs8" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" -dependencies = [ - "der 0.7.3", - "spki 0.7.1", + "der", + "spki", ] [[package]] @@ -3870,11 +3777,11 @@ checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" [[package]] name = "primeorder" -version = "0.13.1" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf8d3875361e28f7753baefef104386e7aa47642c93023356d97fdef4003bfb5" +checksum = "0b54f7131b3dba65a2f414cf5bd25b66d4682e4608610668eae785750ba4c5b2" dependencies = [ - "elliptic-curve 0.13.4", + "elliptic-curve", ] [[package]] @@ -4239,21 +4146,11 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7743f17af12fa0b03b803ba12cd6a8d9483a587e89c69445e3909655c0b9fabb" dependencies = [ - "crypto-bigint 0.4.9", + "crypto-bigint", "hmac 0.12.1", "zeroize", ] -[[package]] -name = "rfc6979" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8dd2a808d456c4a54e300a23e9f5a67e122c3024119acbfd73e3bf664491cb2" -dependencies = [ - "hmac 0.12.1", - "subtle", -] - [[package]] name = "ring" version = "0.16.20" @@ -4492,24 +4389,10 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"3be24c1842290c45df0a7bf069e0c268a747ad05a192f2fd7dcfdbc1cba40928" dependencies = [ - "base16ct 0.1.1", - "der 0.6.1", + "base16ct", + "der", "generic-array", - "pkcs8 0.9.0", - "subtle", - "zeroize", -] - -[[package]] -name = "sec1" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48518a2b5775ba8ca5b46596aae011caa431e6ce7e4a67ead66d92f08884220e" -dependencies = [ - "base16ct 0.2.0", - "der 0.7.3", - "generic-array", - "pkcs8 0.10.2", + "pkcs8", "subtle", "zeroize", ] @@ -4740,17 +4623,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "67cf02bbac7a337dc36e4f5a693db6c21e7863f45070f7064577eb4367a3212b" dependencies = [ "base64ct", - "der 0.6.1", -] - -[[package]] -name = "spki" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37a5be806ab6f127c3da44b7378837ebf01dadca8510a0e572460216b228bd0e" -dependencies = [ - "base64ct", - "der 0.7.3", + "der", ] [[package]] @@ -5513,7 +5386,7 @@ dependencies = [ "ccm", "curve25519-dalek 3.2.0", "der-parser 8.1.0", - "elliptic-curve 0.12.3", + "elliptic-curve", "hkdf", "hmac 0.10.1", "log", @@ -5526,7 +5399,7 @@ dependencies = [ "rcgen 0.9.3", "ring", "rustls 0.19.1", - "sec1 0.3.0", + "sec1", "serde", "sha-1", "sha2 0.9.9", diff --git a/identity/Cargo.toml b/identity/Cargo.toml index 25335f6f38c..c7e28fb761a 100644 --- a/identity/Cargo.toml +++ b/identity/Cargo.toml @@ -19,7 +19,7 @@ libsecp256k1 = { version = "0.7.0", optional = true } log = "0.4" multiaddr = { version = "0.17.1", optional = true } multihash = { version = "0.17.0", default-features = false, features = ["std"], optional = true } -p256 = { version = "0.13", default-features = false, features = ["ecdsa", "std","pkcs8"], optional = true } +p256 = { version = "0.12", default-features = false, features = ["ecdsa", "std", "pem"], optional = true } quick-protobuf = { version = "0.8.1", optional = true } rand = { version = "0.8", optional = true 
} sec1 = { version = "0.3.0", features = ["std"], optional = true } # Activate `std` feature until https://github.com/RustCrypto/traits/pull/1131 is released. @@ -28,7 +28,6 @@ sha2 = { version = "0.10.0", optional = true } thiserror = { version = "1.0", optional = true } void = { version = "1.0", optional = true } zeroize = { version = "1.6", optional = true } -ecdsa = {version = "0.16.6", features = ["pem"]} [target.'cfg(not(target_arch = "wasm32"))'.dependencies] ring = { version = "0.16.9", features = ["alloc", "std"], default-features = false, optional = true} From 01b4fd654f73daf044c7d6413b20074f900f19ca Mon Sep 17 00:00:00 2001 From: Thomas Eizinger Date: Fri, 5 May 2023 12:50:14 +0200 Subject: [PATCH 74/83] Undo ecdsa changes --- identity/src/ecdsa.rs | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) diff --git a/identity/src/ecdsa.rs b/identity/src/ecdsa.rs index c57133c4e84..90a8c3089c4 100644 --- a/identity/src/ecdsa.rs +++ b/identity/src/ecdsa.rs @@ -62,13 +62,6 @@ impl Keypair { pub fn secret(&self) -> &SecretKey { &self.secret } - - /// Try to parse an secret key byte array into a ECDSA `SecretKey` - /// and promote it into a `Keypair`. - pub fn try_from_bytes(pk: impl AsRef<[u8]>) -> Result { - let secret_key = SecretKey::try_from_bytes(pk)?; - Ok(secret_key.into()) - } } impl fmt::Debug for Keypair { @@ -212,9 +205,9 @@ impl PublicKey { Self::try_decode_der(k) } - /// Decode a public key into a DER encoded byte buffer as defined by SEC1 standard. - pub fn try_decode_der(k: impl AsRef<[u8]>) -> Result { - let buf = Self::del_asn1_header(k.as_ref()).ok_or_else(|| { + /// Try to decode a public key from a DER encoded byte buffer as defined by SEC1 standard. 
+ pub fn try_decode_der(k: &[u8]) -> Result { + let buf = Self::del_asn1_header(k).ok_or_else(|| { DecodingError::failed_to_parse::("ASN.1-encoded ecdsa p256 public key", None) })?; Self::try_from_bytes(buf) From 46700cf804cbf8df09b011f1814dfd0805d516a7 Mon Sep 17 00:00:00 2001 From: Thomas Eizinger Date: Fri, 5 May 2023 12:51:12 +0200 Subject: [PATCH 75/83] Minimize diff --- identity/src/keypair.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/identity/src/keypair.rs b/identity/src/keypair.rs index 6b0cc80be8e..2027cfb9ca7 100644 --- a/identity/src/keypair.rs +++ b/identity/src/keypair.rs @@ -771,7 +771,7 @@ mod tests { let priv_key = Keypair::from_protobuf_encoding(&hex_literal::hex!( "08031279307702010104203E5B1FE9712E6C314942A750BD67485DE3C1EFE85B1BFB520AE8F9AE3DFA4A4CA00A06082A8648CE3D030107A14403420004DE3D300FA36AE0E8F5D530899D83ABAB44ABF3161F162A4BC901D8E6ECDA020E8B6D5F8DA30525E71D6851510C098E5C47C646A597FB4DCEC034E9F77C409E62" )) - .unwrap(); + .unwrap(); let pub_key = PublicKey::try_decode_protobuf(&hex_literal::hex!("0803125b3059301306072a8648ce3d020106082a8648ce3d03010703420004de3d300fa36ae0e8f5d530899d83abab44abf3161f162a4bc901d8e6ecda020e8b6d5f8da30525e71d6851510c098e5c47c646a597fb4dcec034e9f77c409e62")).unwrap(); roundtrip_protobuf_encoding(&priv_key, &pub_key); From 76620734a133bcf2cde094110f90b0cb1a2fa19b Mon Sep 17 00:00:00 2001 From: drHuangMHT Date: Fri, 5 May 2023 19:08:15 +0800 Subject: [PATCH 76/83] lock file update --- Cargo.lock | 82 ++++-------------------------------------------------- 1 file changed, 5 insertions(+), 77 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 89c0f56d185..1c1e3e299fa 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1026,18 +1026,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "crypto-bigint" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf4c2f4e1afd912bc40bfd6fed5d9dc1f288e0ba01bfcc835cc5bc3eb13efe15" -dependencies = [ - 
"generic-array", - "rand_core 0.6.4", - "subtle", - "zeroize", -] - [[package]] name = "crypto-common" version = "0.1.6" @@ -1263,17 +1251,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "der" -version = "0.7.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05e58dffcdcc8ee7b22f0c1f71a69243d7c2d9ad87b5a14361f2424a1565c219" -dependencies = [ - "const-oid", - "pem-rfc7468 0.7.0", - "zeroize", -] - [[package]] name = "der-parser" version = "7.0.0" @@ -1400,10 +1377,9 @@ version = "0.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "12844141594ad74185a926d030f3b605f6a903b4e3fec351f3ea338ac5b7637e" dependencies = [ - "der 0.7.5", - "digest 0.10.6", - "elliptic-curve 0.13.4", - "rfc6979 0.4.0", + "der", + "elliptic-curve", + "rfc6979", "signature 2.0.0", ] @@ -1453,27 +1429,7 @@ dependencies = [ "pem-rfc7468", "pkcs8", "rand_core 0.6.4", - "sec1", - "subtle", - "zeroize", -] - -[[package]] -name = "elliptic-curve" -version = "0.13.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75c71eaa367f2e5d556414a8eea812bc62985c879748d6403edabd9cb03f16e7" -dependencies = [ - "base16ct 0.2.0", - "crypto-bigint 0.5.2", - "digest 0.10.6", - "ff 0.13.0", - "generic-array", - "group 0.13.0", - "pem-rfc7468 0.7.0", - "pkcs8 0.10.2", - "rand_core 0.6.4", - "sec1 0.7.1", + "sec1 0.3.0", "subtle", "zeroize", ] @@ -3793,16 +3749,6 @@ dependencies = [ "spki", ] -[[package]] -name = "pkcs8" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" -dependencies = [ - "der 0.7.5", - "spki 0.7.1", -] - [[package]] name = "pkg-config" version = "0.3.26" @@ -4533,14 +4479,6 @@ name = "sec1" version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "48518a2b5775ba8ca5b46596aae011caa431e6ce7e4a67ead66d92f08884220e" -dependencies = [ - "base16ct 0.2.0", - 
"der 0.7.5", - "generic-array", - "pkcs8 0.10.2", - "subtle", - "zeroize", -] [[package]] name = "semver" @@ -4781,16 +4719,6 @@ dependencies = [ "der", ] -[[package]] -name = "spki" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37a5be806ab6f127c3da44b7378837ebf01dadca8510a0e572460216b228bd0e" -dependencies = [ - "base64ct", - "der 0.7.5", -] - [[package]] name = "static_assertions" version = "1.1.0" @@ -5570,7 +5498,7 @@ dependencies = [ "rcgen 0.9.3", "ring", "rustls 0.19.1", - "sec1", + "sec1 0.3.0", "serde", "sha-1", "sha2 0.9.9", From e4b6ec7d7ada83e5e4fc594fadeee7ed0cad4af9 Mon Sep 17 00:00:00 2001 From: drHuangMHT Date: Fri, 5 May 2023 21:38:58 +0800 Subject: [PATCH 77/83] fix: wrong dependency 'p256' version --- Cargo.lock | 190 ++++++++++++++++++++++++++++++++-------- identity/Cargo.toml | 2 +- identity/src/keypair.rs | 2 +- 3 files changed, 157 insertions(+), 37 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1c1e3e299fa..ed3da67f673 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -517,6 +517,12 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "349a06037c7bf932dd7e7d1f653678b2038b9ad46a74102f1fc7bd7872678cce" +[[package]] +name = "base16ct" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c7f02d4ea65f2c1853089ffd8d2787bdbc63de2f0d29dedbcf8ccdfa0ccd4cf" + [[package]] name = "base64" version = "0.13.1" @@ -1026,6 +1032,18 @@ dependencies = [ "zeroize", ] +[[package]] +name = "crypto-bigint" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf4c2f4e1afd912bc40bfd6fed5d9dc1f288e0ba01bfcc835cc5bc3eb13efe15" +dependencies = [ + "generic-array", + "rand_core 0.6.4", + "subtle", + "zeroize", +] + [[package]] name = "crypto-common" version = "0.1.6" @@ -1247,7 +1265,18 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"f1a467a65c5e759bce6e65eaf91cc29f466cdc57cb65777bd646872a8a1fd4de" dependencies = [ "const-oid", - "pem-rfc7468", + "pem-rfc7468 0.6.0", + "zeroize", +] + +[[package]] +name = "der" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05e58dffcdcc8ee7b22f0c1f71a69243d7c2d9ad87b5a14361f2424a1565c219" +dependencies = [ + "const-oid", + "pem-rfc7468 0.7.0", "zeroize", ] @@ -1326,6 +1355,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8168378f4e5023e7218c89c891c0fd8ecdb5e5e4f18cb78f38cf245dd021e76f" dependencies = [ "block-buffer 0.10.3", + "const-oid", "crypto-common", "subtle", ] @@ -1365,21 +1395,22 @@ version = "0.14.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "413301934810f597c1d19ca71c8710e99a3f1ba28a0d2ebc01551a2daeea3c5c" dependencies = [ - "der", - "elliptic-curve", - "rfc6979", + "der 0.6.1", + "elliptic-curve 0.12.3", + "rfc6979 0.3.1", "signature 1.6.4", ] [[package]] name = "ecdsa" -version = "0.15.1" +version = "0.16.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12844141594ad74185a926d030f3b605f6a903b4e3fec351f3ea338ac5b7637e" +checksum = "a48e5d537b8a30c0b023116d981b16334be1485af7ca68db3a2b7024cbc957fd" dependencies = [ - "der", - "elliptic-curve", - "rfc6979", + "der 0.7.5", + "digest 0.10.6", + "elliptic-curve 0.13.4", + "rfc6979 0.4.0", "signature 2.0.0", ] @@ -1418,22 +1449,42 @@ version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e7bb888ab5300a19b8e5bceef25ac745ad065f3c9f7efc6de1b91958110891d3" dependencies = [ - "base16ct", - "crypto-bigint", - "der", + "base16ct 0.1.1", + "crypto-bigint 0.4.9", + "der 0.6.1", "digest 0.10.6", - "ff", + "ff 0.12.1", "generic-array", - "group", + "group 0.12.1", "hkdf", - "pem-rfc7468", - "pkcs8", + "pem-rfc7468 0.6.0", + "pkcs8 0.9.0", "rand_core 0.6.4", "sec1 0.3.0", "subtle", "zeroize", ] +[[package]] +name = 
"elliptic-curve" +version = "0.13.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75c71eaa367f2e5d556414a8eea812bc62985c879748d6403edabd9cb03f16e7" +dependencies = [ + "base16ct 0.2.0", + "crypto-bigint 0.5.2", + "digest 0.10.6", + "ff 0.13.0", + "generic-array", + "group 0.13.0", + "pem-rfc7468 0.7.0", + "pkcs8 0.10.2", + "rand_core 0.6.4", + "sec1 0.7.1", + "subtle", + "zeroize", +] + [[package]] name = "enum-as-inner" version = "0.5.1" @@ -1515,6 +1566,16 @@ dependencies = [ "subtle", ] +[[package]] +name = "ff" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ded41244b729663b1e574f1b4fb731469f69f79c17667b5d776b16cda0479449" +dependencies = [ + "rand_core 0.6.4", + "subtle", +] + [[package]] name = "fiat-crypto" version = "0.1.17" @@ -1708,6 +1769,7 @@ checksum = "bff49e947297f3312447abdca79f45f4738097cc82b06e72054d2223f601f1b9" dependencies = [ "typenum", "version_check", + "zeroize", ] [[package]] @@ -1772,7 +1834,18 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5dfbfb3a6cfbd390d5c9564ab283a0349b9b9fcd46a706c1eb10e0db70bfbac7" dependencies = [ - "ff", + "ff 0.12.1", + "rand_core 0.6.4", + "subtle", +] + +[[package]] +name = "group" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" +dependencies = [ + "ff 0.13.0", "rand_core 0.6.4", "subtle", ] @@ -2561,7 +2634,7 @@ dependencies = [ "log", "multiaddr", "multihash", - "p256 0.12.0", + "p256 0.13.2", "quick-protobuf", "quickcheck-ext", "rand 0.8.5", @@ -3563,18 +3636,18 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "51f44edd08f51e2ade572f141051021c5af22677e42b7dd28a88155151c33594" dependencies = [ "ecdsa 0.14.8", - "elliptic-curve", + "elliptic-curve 0.12.3", "sha2 0.10.6", ] [[package]] name = "p256" -version = "0.12.0" +version = 
"0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49c124b3cbce43bcbac68c58ec181d98ed6cc7e6d0aa7c3ba97b2563410b0e55" +checksum = "c9863ad85fa8f4460f9c48cb909d38a0d689dba1f6f6988a5e3e0d31071bcd4b" dependencies = [ - "ecdsa 0.15.1", - "elliptic-curve", + "ecdsa 0.16.6", + "elliptic-curve 0.13.4", "primeorder", "sha2 0.10.6", ] @@ -3586,7 +3659,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dfc8c5bf642dde52bb9e87c0ecd8ca5a76faac2eeed98dedb7c717997e1080aa" dependencies = [ "ecdsa 0.14.8", - "elliptic-curve", + "elliptic-curve 0.12.3", "sha2 0.10.6", ] @@ -3684,6 +3757,15 @@ dependencies = [ "base64ct", ] +[[package]] +name = "pem-rfc7468" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88b39c9bfcfc231068454382784bb460aae594343fb030d46e9f50a645418412" +dependencies = [ + "base64ct", +] + [[package]] name = "percent-encoding" version = "2.2.0" @@ -3745,8 +3827,18 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9eca2c590a5f85da82668fa685c09ce2888b9430e83299debf1f34b65fd4a4ba" dependencies = [ - "der", - "spki", + "der 0.6.1", + "spki 0.6.0", +] + +[[package]] +name = "pkcs8" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" +dependencies = [ + "der 0.7.5", + "spki 0.7.2", ] [[package]] @@ -3845,11 +3937,11 @@ checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" [[package]] name = "primeorder" -version = "0.12.1" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b54f7131b3dba65a2f414cf5bd25b66d4682e4608610668eae785750ba4c5b2" +checksum = "cf8d3875361e28f7753baefef104386e7aa47642c93023356d97fdef4003bfb5" dependencies = [ - "elliptic-curve", + "elliptic-curve 0.13.4", ] [[package]] @@ -4214,11 +4306,21 @@ version = 
"0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7743f17af12fa0b03b803ba12cd6a8d9483a587e89c69445e3909655c0b9fabb" dependencies = [ - "crypto-bigint", + "crypto-bigint 0.4.9", "hmac 0.12.1", "zeroize", ] +[[package]] +name = "rfc6979" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8dd2a808d456c4a54e300a23e9f5a67e122c3024119acbfd73e3bf664491cb2" +dependencies = [ + "hmac 0.12.1", + "subtle", +] + [[package]] name = "ring" version = "0.16.20" @@ -4466,10 +4568,10 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3be24c1842290c45df0a7bf069e0c268a747ad05a192f2fd7dcfdbc1cba40928" dependencies = [ - "base16ct", - "der", + "base16ct 0.1.1", + "der 0.6.1", "generic-array", - "pkcs8", + "pkcs8 0.9.0", "subtle", "zeroize", ] @@ -4479,6 +4581,14 @@ name = "sec1" version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "48518a2b5775ba8ca5b46596aae011caa431e6ce7e4a67ead66d92f08884220e" +dependencies = [ + "base16ct 0.2.0", + "der 0.7.5", + "generic-array", + "pkcs8 0.10.2", + "subtle", + "zeroize", +] [[package]] name = "semver" @@ -4716,7 +4826,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "67cf02bbac7a337dc36e4f5a693db6c21e7863f45070f7064577eb4367a3212b" dependencies = [ "base64ct", - "der", + "der 0.6.1", +] + +[[package]] +name = "spki" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d1e996ef02c474957d681f1b05213dfb0abab947b446a62d37770b23500184a" +dependencies = [ + "base64ct", + "der 0.7.5", ] [[package]] @@ -5485,7 +5605,7 @@ dependencies = [ "ccm", "curve25519-dalek 3.2.0", "der-parser 8.1.0", - "elliptic-curve", + "elliptic-curve 0.12.3", "hkdf", "hmac 0.10.1", "log", diff --git a/identity/Cargo.toml b/identity/Cargo.toml index c4ee6e6343b..5508e7c77f0 100644 --- a/identity/Cargo.toml +++ b/identity/Cargo.toml @@ 
-19,7 +19,7 @@ libsecp256k1 = { version = "0.7.0", optional = true } log = "0.4" multiaddr = { version = "0.17.1", optional = true } multihash = { version = "0.17.0", default-features = false, features = ["std"], optional = true } -p256 = { version = "0.12", default-features = false, features = ["ecdsa", "std", "pem"], optional = true } +p256 = { version = "0.13", default-features = false, features = ["ecdsa", "std", "pem"], optional = true } quick-protobuf = { version = "0.8.1", optional = true } rand = { version = "0.8", optional = true } sec1 = { version = "0.7", default-features = false, optional = true } diff --git a/identity/src/keypair.rs b/identity/src/keypair.rs index e9f4c757ac5..13487f4894a 100644 --- a/identity/src/keypair.rs +++ b/identity/src/keypair.rs @@ -764,7 +764,7 @@ mod tests { #[test] #[cfg(all(feature = "ecdsa", feature = "peerid"))] fn keypair_protobuf_roundtrip_ecdsa() { - let priv_key = Keypair::from_protobuf_encoding(&hex_literal::hex!( + let priv_key = Keypair::try_decode_protobuf(&hex_literal::hex!( "08031279307702010104203E5B1FE9712E6C314942A750BD67485DE3C1EFE85B1BFB520AE8F9AE3DFA4A4CA00A06082A8648CE3D030107A14403420004DE3D300FA36AE0E8F5D530899D83ABAB44ABF3161F162A4BC901D8E6ECDA020E8B6D5F8DA30525E71D6851510C098E5C47C646A597FB4DCEC034E9F77C409E62" )) .unwrap(); From 75ee978ca81e4bd94ee9d2d7261fe250540026d7 Mon Sep 17 00:00:00 2001 From: drHuangMHT Date: Fri, 5 May 2023 21:43:56 +0800 Subject: [PATCH 78/83] delete unused test keys --- identity/src/test/ed25519.pk8 | Bin 48 -> 0 bytes identity/src/test/secp256k1.pk8 | Bin 135 -> 0 bytes identity/src/test/secp256r1.pk8 | Bin 138 -> 0 bytes identity/src/test/secp384r1.pk8 | Bin 185 -> 0 bytes 4 files changed, 0 insertions(+), 0 deletions(-) delete mode 100644 identity/src/test/ed25519.pk8 delete mode 100644 identity/src/test/secp256k1.pk8 delete mode 100644 identity/src/test/secp256r1.pk8 delete mode 100644 identity/src/test/secp384r1.pk8 diff --git a/identity/src/test/ed25519.pk8 
b/identity/src/test/ed25519.pk8 deleted file mode 100644 index 59519a13dfb705d665acf5c8f0bf5728ba3d230f..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 48 zcmV-00MGw0E&>4nFa-t!D`jv5A_O4S4%j_fq@$V!_~T@Y8iljsV!^2P*2XWV3a>_| GFF|8$6cWt< diff --git a/identity/src/test/secp256k1.pk8 b/identity/src/test/secp256k1.pk8 deleted file mode 100644 index 5271b799a20c24b047eac6e49dcd4d74d2e03c4b..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 135 zcmV;20C@i}frJ7905A{+2P%e0&OHJF1_djD1ON&IZ7^#B0RaRckv!!IAY)fe7U-Vi zc)&4nMoLLHqP9=w`93sywKLckp+o~h00cDRloz?ozQHqW!A^AD*{H0WE8rU2Qp>ZE pX26wmByfw=c{OGUt`UZC*1;wsoOFmNB%l9|LEML04tZ_p9&{v?HG%*D diff --git a/identity/src/test/secp256r1.pk8 b/identity/src/test/secp256r1.pk8 deleted file mode 100644 index 81813a77b3e5703d99ca6318ee0862171d0a96f6..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 138 zcmXqLY-eI*Fc4;A*J|@PXUoLM#sOw9GqSVf8e}suGO{QXoHWsAT(-o<`RQAKo_}7) zl^mu=&f2zFU=!b|P1BX{FfVjrc4A=R(UTIooXHWw5tw&7E>8J($<)(NloV%O;$rF( qHm+q}SGDAghINqSB_3baZ~rQ{y`Lk(ctBRVxV~_yW&P%Jb?X7gpE3*p diff --git a/identity/src/test/secp384r1.pk8 b/identity/src/test/secp384r1.pk8 deleted file mode 100644 index 486992f38347aecf8afa89dfd77eecfb52907c52..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 185 zcmV;q07m~Xfwlqx05A{+2P%e0&OHJF1_djD1OOrgfu1mdn*sp=1Tb@pT3Sh$R{Oug z;@(v58pbwauz{bf Date: Fri, 5 May 2023 22:01:26 +0800 Subject: [PATCH 79/83] revert RSA changes --- identity/src/error.rs | 16 +++++++++++++++ identity/src/keypair.rs | 41 +++++++++++++++------------------------ identity/src/rsa.rs | 39 +++++++++---------------------------- misc/keygen/src/config.rs | 2 +- 4 files changed, 42 insertions(+), 56 deletions(-) diff --git a/identity/src/error.rs b/identity/src/error.rs index 9ba693b7056..92361f17288 100644 --- a/identity/src/error.rs +++ b/identity/src/error.rs @@ -76,6 +76,22 @@ impl 
DecodingError { source: Some(Box::new(source)), } } + + #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] + pub(crate) fn decoding_unsupported(key_type: &'static str) -> Self { + Self { + msg: format!("decoding {key_type} key from Protobuf is unsupported"), + source: None, + } + } + + #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] + pub(crate) fn encoding_unsupported(key_type: &'static str) -> Self { + Self { + msg: format!("encoding {key_type} key to Protobuf is unsupported"), + source: None, + } + } } impl fmt::Display for DecodingError { diff --git a/identity/src/keypair.rs b/identity/src/keypair.rs index 13487f4894a..ec19aa1ff0f 100644 --- a/identity/src/keypair.rs +++ b/identity/src/keypair.rs @@ -20,7 +20,7 @@ use crate::error::{DecodingError, OtherVariantError, SigningError}; use crate::proto; -use quick_protobuf::{BytesReader, Writer}; +use quick_protobuf::{BytesReader, MessageWrite, Writer}; use std::convert::TryFrom; #[cfg(feature = "ed25519")] @@ -236,15 +236,13 @@ impl Keypair { /// Encode a private key as protobuf structure. pub fn to_protobuf_encoding(&self) -> Result, DecodingError> { - Ok(self.encode_protobuf()) + self.encode_protobuf() } /// Encode a private key as protobuf structure. /// /// See for details on the encoding. 
- pub fn encode_protobuf(&self) -> Vec { - use quick_protobuf::MessageWrite; - + pub fn encode_protobuf(&self) -> Result, DecodingError> { #[allow(deprecated)] let pk: proto::PrivateKey = match self { #[cfg(feature = "ed25519")] @@ -253,10 +251,7 @@ impl Keypair { Data: data.to_bytes().to_vec(), }, #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] - Self::Rsa(data) => proto::PrivateKey { - Type: proto::KeyType::RSA, - Data: data.encode_pkcs8_der(), - }, + Self::Rsa(_) => return Err(DecodingError::encoding_unsupported("RSA")), #[cfg(feature = "secp256k1")] Self::Secp256k1(data) => proto::PrivateKey { Type: proto::KeyType::Secp256k1, @@ -273,7 +268,7 @@ impl Keypair { let mut writer = Writer::new(&mut buf); pk.write_message(&mut writer).expect("Encoding to succeed"); - buf + Ok(buf) } /// Decode a private key from a protobuf structure and parse it as a [`Keypair`]. @@ -302,11 +297,7 @@ impl Keypair { .map(Keypair::Ed25519); Err(DecodingError::missing_feature("ed25519")) } - proto::KeyType::RSA => { - #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] - return rsa::Keypair::try_decode_pkcs8(&mut private_key.Data).map(Keypair::Rsa); - Err(DecodingError::missing_feature("rsa")) - } + proto::KeyType::RSA => Err(DecodingError::decoding_unsupported("RSA")), proto::KeyType::Secp256k1 => { #[cfg(feature = "secp256k1")] return secp256k1::Keypair::try_from_bytes(&mut private_key.Data) @@ -544,8 +535,6 @@ impl PublicKey { /// Encode the public key into a protobuf structure for storage or /// exchange with other nodes. 
pub fn encode_protobuf(&self) -> Vec { - use quick_protobuf::MessageWrite; - let public_key = proto::PublicKey::from(self); let mut buf = Vec::with_capacity(public_key.get_size()); @@ -773,19 +762,21 @@ mod tests { roundtrip_protobuf_encoding(&priv_key, &pub_key); } - #[test] - #[cfg(feature = "rsa")] - fn keypair_protobuf_roundtrip_rsa() { - let priv_key = Keypair::try_decode_protobuf(&hex_literal::hex!("080012c81230820944020100300d06092a864886f70d01010105000482092e3082092a02010002820201009c897f33e0d0b3297f2fe404ea5b7a98096b329693292aefc2d05ef1e82fd0e121ce74ec77d75ef4b532fa34dee2a19626f3389c6d2bb9b8de614e138302bc4254727a7ee35f7827f1094403bc2fe8e1f64d0e8a2a77e8f3a879f69f94a71f3589de184f5910d6b5270f58e684f71ddd3a3f486a4cb2c390194ee6e9b65f9f1dff7b8f6c0bf4e0c4ac683bd4ba2d2fd022fdaaa3db75e90e16662fc4b3aca4c9aa65514d51690cd372c2b96c61a1ed4f9298ec213d5398aa9120379477118391104deb77ab157a59b70714e95caa9b55d15fa386b0c80f36e50d738bdd10e0baa3c3eafb4703dec3d6a757601f18541eb87ae9111f60eae17d843cf1047dbf5a8982ad9ef0aa88f59b17689f1210a305f7da8a012c1a58e4e82b48811618e98cef13c9eb28ce6fcc589ea5d902149ee4f49f8b39758b349ca90be5a8bddf4a46bacaaa48aec1c0c6e996ab13f2cb351c351d40b0a7b8e0c12b366a8555c392b0aadf71fe746eb4f8ea0b829da6ddcc39081abdd40ea2f3d8778b9a3f06a480ef34234975e919c0d64d818f2e904a9f251c8669dbb1666cb2c28e955446fc7efd460d4677ed922ccff1e24bb5a8699e050075c7897a64daa1bc2f05e4132e76c4f72baea5d073042254236c116ea3e40540bb7986468b4468aadfadad068331ef9dbe13e4012196e8eb9f8cdba096c35f09e80893ea68f3253dc41053983855e50203010001028202002699dd6d4c960a68443dea0bb04308b32f37690d2a92ef4c9a8cc9acfba5b6eb9d6b8cf7b701bc1fba032d2216886a725d7e82ca483d8d19e274ba4d23746c3a2b1ae3cc2083ad5ca41ab5d3f9f712858e38284ab7f843d0ba0e015c0ecb3b6df766763632ef6d12d4e3faf73578bebb8c1e88dbf5b7eb73c059eda55a5cb01f349e229af143dc9d832a5cfeb33e6b58f717f8995987f5058d4e7b9f14f390db4e1297feea016eb141ce74ed1e125133db21acb0f1af88a91f0a83ca2fa678fc2fba1743b643a09d38fe1d1102d1eb6639304d61ec7c190c5f6576c5d9a8ccd2198a398ae75333f
eb51324ffc60b38cb2e90d8a2694b7c0048f47016bb15cb36c482e038e455254e35fc4f0e0babc84e046bd441b0291412c784e4e9639664cad07cb09a01626049cdbfd1d9ad75b314448df811f4988c6e64d93ebefc602b574d0763e31e9d567c891349cfe75f0ca37429b743d6452d1fffc1f9f4901e5f68772b4f24542d654fd29b893e44c85e6037bba304d48873721131f18248b16bd71384abd00f9336c73f071a4ca2456878070f9704ed7df0cd64e5c3e5949a78968525865b96e71d5015dc68bff857f2bba05a3976d83d8866d4dfe8caac144741ae97879a765dc0d4c7c34aa79ef6ebc86b5bf32b50ad995780f5f1a6c052eec5671164f407061a9c6bd49251b1bb7803bb222f5d859c321601236dd893dc9d810282010100cf13fe9908fe59e947122d5606cf9f70c123b7cb43a1916463e729f01dc31c3b70cb6a37bde542ecdc6029cea39b28c99c6395d0aaa29c1c4cf14b3fed9e0fcd793e31b7a09930352261c03b3dc0b66a62f8ae3771b705382cfeb6130d4a7e5b4854117a05767b99915099e2d542fc3fa505a0dbe217b169b46714384774380408bd8b3dbf0c9a177bbd3e64af115988159f485d70c885171007646765b50eb9bbebfabe60e71c69b2b822a124e235ad05f2b55cda9ddc78d671436981a3064a80c29bb37e6b5581a9372a6366c79af695a39ea0f3839ed77ec3985252f2e126955774727955b63ccbeff64208fd7280e8ba52e4297cb6bf72b44b07618923610282010100c184cd27d3a643df768764a7c66de40c222bdb4b7e02c35aa1e4a8377676247c629df58ecb5bb541fb4aac1bde35057b0b266bddd818876909b8fff1aca4859515069258d84b0c5178e4bff6842c68d39cad9a3a03aa6533fa76b92c995f381eb9c83f5e6118fd962807c931b7ca50dc20b261f2a71928f3e882af4da979cef843970cb2af68b86477b92ca90c8c0f1d640d39e943704366314c446f7a54851419e60f4e92e1e69bd52ee7294f9eddc6dc873144b0d0d9f13eb8d6aa955cf11edbd5a0673d8b70ef937e54fdaade185facc8437496d43a53169342280718a3679170ef4a0e582af4db598210fb64616f0d8daa08519d875e37c4d02e1af1c5050282010100c14865648c3b74cac3b698b06a4d130218947131fd9f69e8ed42d0273a706a02a546888f1ce547f173c52260a8dee354436fc45f6f55b626c83e94c147d637e3cede1963cf380d021b64681c2388a3fb6b03b9013157e63c47eb3b214f4f8fdf3e04920775dfe080375da7354d5f67b9341babc87121324c7ac197e2ebf6f36df8868ad8086207d6117e5325812ecd85b2c0e8b7a6d4d33cf28e23ce4ae593a8135ab0c1500b87beb4bd203d8f02c19d0d273cd73d8b094594cb4563ce47cf506
d1cb85df28ad6d5de8f0a369bb185d7d1565672deb8a4e37983b1c26d801c5d7a19962c5f4a7c7e04d0a6e77e22aae4ddd54417890dca39aa23d4c03feed4210282010100915975de1c121d9892264f6bd496655ad7afa91ea29ee0ac0a3cfc3bec3600618c90a80780a67915fdf0b0249e59a4ac2e4bc568f30e3966a36ed88e64e58d8fd4230378c7bc569c3af955558b20effb410b0373df9cf4367e40fe04898e0350d0a99f2efc2f1108df3839dda5f5c7960ed8ecc89cc9410131fa364156b1aecab9b992480387dc3759d533be25366d83ddca315d0ad21f4d7a69965d44bc86d7fa3bd9f3624f5a2e6188c1073e4e4cb5389e325b2d93309f0a453ab71548a1b253dbb886d2ab114060bfda864cf853c648b88231e7b7afb70895c272de219b5a06db945f4336e5ccd393ff25522cab220644091a06731361a8f1a28b7ea169210282010100bd80196d3d11a8257b5f439776388f4d53e4da3690f710e9aff3e3e970e545ec92d285e7049da000d5364dd7f550c17cf662d516282fe89813cab322ce5aad5cc744c52a024dd1a94aa9484037281637d1c8e3503b6ed6231225c93f7865d29269c899bbf5d248cf9d41f9aee9b9cb2afac172ba17c2df0699c6604b4ce7ab95c91c5f7fc7804f2bde268a7e15c512920f7325cfba47463da1c201549fc44c2bc4fbe5d8619cde9733470c5e38b996f5c3633c6311af88663ce4d2d0dc415ac5c8258e1aa7659f9f35d4b90b7b9a5a888867d75636e6443cce5391c57d48d56409029edef53e1a5130eb1fa708758bc821e15f7c53edf6d4c6f868a6b5b0c1e6")).unwrap(); - let pub_key = 
PublicKey::try_decode_protobuf(&hex_literal::hex!("080012a60430820222300d06092a864886f70d01010105000382020f003082020a02820201009c897f33e0d0b3297f2fe404ea5b7a98096b329693292aefc2d05ef1e82fd0e121ce74ec77d75ef4b532fa34dee2a19626f3389c6d2bb9b8de614e138302bc4254727a7ee35f7827f1094403bc2fe8e1f64d0e8a2a77e8f3a879f69f94a71f3589de184f5910d6b5270f58e684f71ddd3a3f486a4cb2c390194ee6e9b65f9f1dff7b8f6c0bf4e0c4ac683bd4ba2d2fd022fdaaa3db75e90e16662fc4b3aca4c9aa65514d51690cd372c2b96c61a1ed4f9298ec213d5398aa9120379477118391104deb77ab157a59b70714e95caa9b55d15fa386b0c80f36e50d738bdd10e0baa3c3eafb4703dec3d6a757601f18541eb87ae9111f60eae17d843cf1047dbf5a8982ad9ef0aa88f59b17689f1210a305f7da8a012c1a58e4e82b48811618e98cef13c9eb28ce6fcc589ea5d902149ee4f49f8b39758b349ca90be5a8bddf4a46bacaaa48aec1c0c6e996ab13f2cb351c351d40b0a7b8e0c12b366a8555c392b0aadf71fe746eb4f8ea0b829da6ddcc39081abdd40ea2f3d8778b9a3f06a480ef34234975e919c0d64d818f2e904a9f251c8669dbb1666cb2c28e955446fc7efd460d4677ed922ccff1e24bb5a8699e050075c7897a64daa1bc2f05e4132e76c4f72baea5d073042254236c116ea3e40540bb7986468b4468aadfadad068331ef9dbe13e4012196e8eb9f8cdba096c35f09e80893ea68f3253dc41053983855e50203010001")).unwrap(); + // #[test] + // #[cfg(feature = "rsa")] + // fn keypair_protobuf_roundtrip_rsa() { + // let priv_key = 
Keypair::try_decode_protobuf(&hex_literal::hex!("080012c81230820944020100300d06092a864886f70d01010105000482092e3082092a02010002820201009c897f33e0d0b3297f2fe404ea5b7a98096b329693292aefc2d05ef1e82fd0e121ce74ec77d75ef4b532fa34dee2a19626f3389c6d2bb9b8de614e138302bc4254727a7ee35f7827f1094403bc2fe8e1f64d0e8a2a77e8f3a879f69f94a71f3589de184f5910d6b5270f58e684f71ddd3a3f486a4cb2c390194ee6e9b65f9f1dff7b8f6c0bf4e0c4ac683bd4ba2d2fd022fdaaa3db75e90e16662fc4b3aca4c9aa65514d51690cd372c2b96c61a1ed4f9298ec213d5398aa9120379477118391104deb77ab157a59b70714e95caa9b55d15fa386b0c80f36e50d738bdd10e0baa3c3eafb4703dec3d6a757601f18541eb87ae9111f60eae17d843cf1047dbf5a8982ad9ef0aa88f59b17689f1210a305f7da8a012c1a58e4e82b48811618e98cef13c9eb28ce6fcc589ea5d902149ee4f49f8b39758b349ca90be5a8bddf4a46bacaaa48aec1c0c6e996ab13f2cb351c351d40b0a7b8e0c12b366a8555c392b0aadf71fe746eb4f8ea0b829da6ddcc39081abdd40ea2f3d8778b9a3f06a480ef34234975e919c0d64d818f2e904a9f251c8669dbb1666cb2c28e955446fc7efd460d4677ed922ccff1e24bb5a8699e050075c7897a64daa1bc2f05e4132e76c4f72baea5d073042254236c116ea3e40540bb7986468b4468aadfadad068331ef9dbe13e4012196e8eb9f8cdba096c35f09e80893ea68f3253dc41053983855e50203010001028202002699dd6d4c960a68443dea0bb04308b32f37690d2a92ef4c9a8cc9acfba5b6eb9d6b8cf7b701bc1fba032d2216886a725d7e82ca483d8d19e274ba4d23746c3a2b1ae3cc2083ad5ca41ab5d3f9f712858e38284ab7f843d0ba0e015c0ecb3b6df766763632ef6d12d4e3faf73578bebb8c1e88dbf5b7eb73c059eda55a5cb01f349e229af143dc9d832a5cfeb33e6b58f717f8995987f5058d4e7b9f14f390db4e1297feea016eb141ce74ed1e125133db21acb0f1af88a91f0a83ca2fa678fc2fba1743b643a09d38fe1d1102d1eb6639304d61ec7c190c5f6576c5d9a8ccd2198a398ae75333feb51324ffc60b38cb2e90d8a2694b7c0048f47016bb15cb36c482e038e455254e35fc4f0e0babc84e046bd441b0291412c784e4e9639664cad07cb09a01626049cdbfd1d9ad75b314448df811f4988c6e64d93ebefc602b574d0763e31e9d567c891349cfe75f0ca37429b743d6452d1fffc1f9f4901e5f68772b4f24542d654fd29b893e44c85e6037bba304d48873721131f18248b16bd71384abd00f9336c73f071a4ca2456878070f9704ed7df0cd64e5c3e
5949a78968525865b96e71d5015dc68bff857f2bba05a3976d83d8866d4dfe8caac144741ae97879a765dc0d4c7c34aa79ef6ebc86b5bf32b50ad995780f5f1a6c052eec5671164f407061a9c6bd49251b1bb7803bb222f5d859c321601236dd893dc9d810282010100cf13fe9908fe59e947122d5606cf9f70c123b7cb43a1916463e729f01dc31c3b70cb6a37bde542ecdc6029cea39b28c99c6395d0aaa29c1c4cf14b3fed9e0fcd793e31b7a09930352261c03b3dc0b66a62f8ae3771b705382cfeb6130d4a7e5b4854117a05767b99915099e2d542fc3fa505a0dbe217b169b46714384774380408bd8b3dbf0c9a177bbd3e64af115988159f485d70c885171007646765b50eb9bbebfabe60e71c69b2b822a124e235ad05f2b55cda9ddc78d671436981a3064a80c29bb37e6b5581a9372a6366c79af695a39ea0f3839ed77ec3985252f2e126955774727955b63ccbeff64208fd7280e8ba52e4297cb6bf72b44b07618923610282010100c184cd27d3a643df768764a7c66de40c222bdb4b7e02c35aa1e4a8377676247c629df58ecb5bb541fb4aac1bde35057b0b266bddd818876909b8fff1aca4859515069258d84b0c5178e4bff6842c68d39cad9a3a03aa6533fa76b92c995f381eb9c83f5e6118fd962807c931b7ca50dc20b261f2a71928f3e882af4da979cef843970cb2af68b86477b92ca90c8c0f1d640d39e943704366314c446f7a54851419e60f4e92e1e69bd52ee7294f9eddc6dc873144b0d0d9f13eb8d6aa955cf11edbd5a0673d8b70ef937e54fdaade185facc8437496d43a53169342280718a3679170ef4a0e582af4db598210fb64616f0d8daa08519d875e37c4d02e1af1c5050282010100c14865648c3b74cac3b698b06a4d130218947131fd9f69e8ed42d0273a706a02a546888f1ce547f173c52260a8dee354436fc45f6f55b626c83e94c147d637e3cede1963cf380d021b64681c2388a3fb6b03b9013157e63c47eb3b214f4f8fdf3e04920775dfe080375da7354d5f67b9341babc87121324c7ac197e2ebf6f36df8868ad8086207d6117e5325812ecd85b2c0e8b7a6d4d33cf28e23ce4ae593a8135ab0c1500b87beb4bd203d8f02c19d0d273cd73d8b094594cb4563ce47cf506d1cb85df28ad6d5de8f0a369bb185d7d1565672deb8a4e37983b1c26d801c5d7a19962c5f4a7c7e04d0a6e77e22aae4ddd54417890dca39aa23d4c03feed4210282010100915975de1c121d9892264f6bd496655ad7afa91ea29ee0ac0a3cfc3bec3600618c90a80780a67915fdf0b0249e59a4ac2e4bc568f30e3966a36ed88e64e58d8fd4230378c7bc569c3af955558b20effb410b0373df9cf4367e40fe04898e0350d0a99f2efc2f1108df3839dda5f5c79
60ed8ecc89cc9410131fa364156b1aecab9b992480387dc3759d533be25366d83ddca315d0ad21f4d7a69965d44bc86d7fa3bd9f3624f5a2e6188c1073e4e4cb5389e325b2d93309f0a453ab71548a1b253dbb886d2ab114060bfda864cf853c648b88231e7b7afb70895c272de219b5a06db945f4336e5ccd393ff25522cab220644091a06731361a8f1a28b7ea169210282010100bd80196d3d11a8257b5f439776388f4d53e4da3690f710e9aff3e3e970e545ec92d285e7049da000d5364dd7f550c17cf662d516282fe89813cab322ce5aad5cc744c52a024dd1a94aa9484037281637d1c8e3503b6ed6231225c93f7865d29269c899bbf5d248cf9d41f9aee9b9cb2afac172ba17c2df0699c6604b4ce7ab95c91c5f7fc7804f2bde268a7e15c512920f7325cfba47463da1c201549fc44c2bc4fbe5d8619cde9733470c5e38b996f5c3633c6311af88663ce4d2d0dc415ac5c8258e1aa7659f9f35d4b90b7b9a5a888867d75636e6443cce5391c57d48d56409029edef53e1a5130eb1fa708758bc821e15f7c53edf6d4c6f868a6b5b0c1e6")).unwrap(); + // let pub_key = PublicKey::try_decode_protobuf(&hex_literal::hex!("080012a60430820222300d06092a864886f70d01010105000382020f003082020a02820201009c897f33e0d0b3297f2fe404ea5b7a98096b329693292aefc2d05ef1e82fd0e121ce74ec77d75ef4b532fa34dee2a19626f3389c6d2bb9b8de614e138302bc4254727a7ee35f7827f1094403bc2fe8e1f64d0e8a2a77e8f3a879f69f94a71f3589de184f5910d6b5270f58e684f71ddd3a3f486a4cb2c390194ee6e9b65f9f1dff7b8f6c0bf4e0c4ac683bd4ba2d2fd022fdaaa3db75e90e16662fc4b3aca4c9aa65514d51690cd372c2b96c61a1ed4f9298ec213d5398aa9120379477118391104deb77ab157a59b70714e95caa9b55d15fa386b0c80f36e50d738bdd10e0baa3c3eafb4703dec3d6a757601f18541eb87ae9111f60eae17d843cf1047dbf5a8982ad9ef0aa88f59b17689f1210a305f7da8a012c1a58e4e82b48811618e98cef13c9eb28ce6fcc589ea5d902149ee4f49f8b39758b349ca90be5a8bddf4a46bacaaa48aec1c0c6e996ab13f2cb351c351d40b0a7b8e0c12b366a8555c392b0aadf71fe746eb4f8ea0b829da6ddcc39081abdd40ea2f3d8778b9a3f06a480ef34234975e919c0d64d818f2e904a9f251c8669dbb1666cb2c28e955446fc7efd460d4677ed922ccff1e24bb5a8699e050075c7897a64daa1bc2f05e4132e76c4f72baea5d073042254236c116ea3e40540bb7986468b4468aadfadad068331ef9dbe13e4012196e8eb9f8cdba096c35f09e80893ea68f3253dc41053983855e502030
10001")).unwrap(); - roundtrip_protobuf_encoding(&priv_key, &pub_key); - } + // roundtrip_protobuf_encoding(&priv_key, &pub_key); + // } fn roundtrip_protobuf_encoding(private_key: &Keypair, public_key: &PublicKey) { assert_eq!(&private_key.public(), public_key); - let encoded_priv = private_key.encode_protobuf(); + let encoded_priv = private_key + .encode_protobuf() + .expect("key to support protobuf encoding"); let decoded_priv = Keypair::try_decode_protobuf(&encoded_priv).unwrap(); assert_eq!( diff --git a/identity/src/rsa.rs b/identity/src/rsa.rs index 1403baadec4..afc685b19b0 100644 --- a/identity/src/rsa.rs +++ b/identity/src/rsa.rs @@ -31,15 +31,12 @@ use zeroize::Zeroize; /// An RSA keypair. #[derive(Clone)] -pub struct Keypair { - inner: Arc, - key: Vec, -} +pub struct Keypair(Arc); impl std::fmt::Debug for Keypair { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { f.debug_struct("Keypair") - .field("public", self.inner.public_key()) + .field("public", self.0.public_key()) .finish() } } @@ -58,45 +55,27 @@ impl Keypair { /// format (i.e. unencrypted) as defined in [RFC5208]. /// /// [RFC5208]: https://tools.ietf.org/html/rfc5208#section-5 - pub fn try_decode_pkcs8(bytes: &mut [u8]) -> Result { - let kp = RsaKeyPair::from_pkcs8(bytes) + pub fn try_decode_pkcs8(der: &mut [u8]) -> Result { + let kp = RsaKeyPair::from_pkcs8(der) .map_err(|e| DecodingError::failed_to_parse("RSA PKCS#8 PrivateKeyInfo", e))?; - let kp = Keypair { - inner: Arc::new(kp), - key: bytes.to_vec(), - }; - bytes.zeroize(); - Ok(kp) + der.zeroize(); + Ok(Keypair(Arc::new(kp))) } /// Get the public key from the keypair. pub fn public(&self) -> PublicKey { - PublicKey(self.inner.public_key().as_ref().to_vec()) + PublicKey(self.0.public_key().as_ref().to_vec()) } /// Sign a message with this keypair. 
pub fn sign(&self, data: &[u8]) -> Result, SigningError> { - let mut signature = vec![0; self.inner.public_modulus_len()]; + let mut signature = vec![0; self.0.public_modulus_len()]; let rng = SystemRandom::new(); - match self - .inner - .sign(&RSA_PKCS1_SHA256, &rng, data, &mut signature) - { + match self.0.sign(&RSA_PKCS1_SHA256, &rng, data, &mut signature) { Ok(()) => Ok(signature), Err(e) => Err(SigningError::new("RSA", Some(Box::new(e)))), } } - - /// Get the byte array used to parse the keypair from. - pub(crate) fn encode_pkcs8_der(&self) -> Vec { - self.key.clone() - } -} - -impl Drop for Keypair { - fn drop(&mut self) { - self.key.zeroize() - } } /// An RSA public key. diff --git a/misc/keygen/src/config.rs b/misc/keygen/src/config.rs index 39377e9e876..8e4e1ce8d54 100644 --- a/misc/keygen/src/config.rs +++ b/misc/keygen/src/config.rs @@ -21,7 +21,7 @@ impl Config { peer_id: PeerId, keypair: &Keypair, ) -> Result> { - let priv_key = BASE64_STANDARD.encode(keypair.encode_protobuf()); + let priv_key = BASE64_STANDARD.encode(keypair.encode_protobuf()?); let peer_id = peer_id.to_base58(); Ok(Self { identity: Identity { peer_id, priv_key }, From b23e111239ed6d7bfd16867a517180c5544557be Mon Sep 17 00:00:00 2001 From: drHuangMHT Date: Fri, 5 May 2023 22:21:35 +0800 Subject: [PATCH 80/83] remove feature flag on 'decoding_unsupported' --- identity/src/error.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/identity/src/error.rs b/identity/src/error.rs index 92361f17288..b27582c7f2c 100644 --- a/identity/src/error.rs +++ b/identity/src/error.rs @@ -77,7 +77,6 @@ impl DecodingError { } } - #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] pub(crate) fn decoding_unsupported(key_type: &'static str) -> Self { Self { msg: format!("decoding {key_type} key from Protobuf is unsupported"), From 4d7b51615f9ddbd9c08a3dd8acda1e7c256888a3 Mon Sep 17 00:00:00 2001 From: drHuangMHT Date: Sat, 6 May 2023 15:55:56 +0800 Subject: [PATCH 81/83] fix: duplicated outdated test 
--- identity/src/keypair.rs | 17 +---------------- 1 file changed, 1 insertion(+), 16 deletions(-) diff --git a/identity/src/keypair.rs b/identity/src/keypair.rs index a24407d8e5f..58a019e34e6 100644 --- a/identity/src/keypair.rs +++ b/identity/src/keypair.rs @@ -735,21 +735,6 @@ mod tests { roundtrip_protobuf_encoding(&priv_key, &pub_key); } - #[test] - #[cfg(feature = "secp256k1")] - fn keypair_protobuf_roundtrip_secp256k1() { - let priv_key = Keypair::try_decode_protobuf(&hex_literal::hex!( - "080212201e4f6a12b43bec6871976295bcb13aace62a7e7b821334125d3ed3b720af419f" - )) - .unwrap(); - let pub_key = PublicKey::try_decode_protobuf(&hex_literal::hex!( - "0802122102f0a81ddde0a3180610155ff3b2d98d683a6831fad0c84ba36cd49b81eaa7cf8f" - )) - .unwrap(); - - roundtrip_protobuf_encoding(&priv_key, &pub_key); - } - #[test] #[cfg(all(feature = "ecdsa", feature = "peerid"))] fn keypair_protobuf_roundtrip_ecdsa() { @@ -774,7 +759,7 @@ mod tests { #[test] #[cfg(all(feature = "secp256k1", feature = "peerid"))] fn keypair_protobuf_roundtrip_secp256k1() { - let priv_key = Keypair::from_protobuf_encoding(&hex_literal::hex!( + let priv_key = Keypair::try_decode_protobuf(&hex_literal::hex!( "0802122053DADF1D5A164D6B4ACDB15E24AA4C5B1D3461BDBD42ABEDB0A4404D56CED8FB" )) .unwrap(); From 11748b5e662f33f040c2e6dc3dd4e263adb95241 Mon Sep 17 00:00:00 2001 From: drHuangMHT Date: Wed, 27 Dec 2023 16:14:35 +0800 Subject: [PATCH 82/83] upstream sync --- .cargo/config.toml | 3 - .github/ISSUE_TEMPLATE/bug_report.md | 54 - .github/ISSUE_TEMPLATE/bug_report.yml | 55 + .github/ISSUE_TEMPLATE/config.yml | 3 - .github/ISSUE_TEMPLATE/enhancement.md | 22 - .github/ISSUE_TEMPLATE/enhancement.yml | 31 + .github/ISSUE_TEMPLATE/feature_request.md | 34 - .github/ISSUE_TEMPLATE/feature_request.yml | 42 + .../actions/cargo-semver-checks/action.yml | 36 - .github/dependabot.yml | 14 + .github/mergify.yml | 24 +- .github/workflows/cache-factory.yml | 4 +- .github/workflows/cargo-audit.yml | 2 +- 
.github/workflows/cargo-deny-pr.yml | 28 - .github/workflows/ci.yml | 211 +- .github/workflows/docker-image.yml | 44 + .github/workflows/docs.yml | 6 +- .github/workflows/interop-test.yml | 45 +- .github/workflows/semantic-pull-request.yml | 32 +- CHANGELOG.md | 5 +- CONTRIBUTING.md | 40 + Cargo.lock | 4569 ++++++++++------- Cargo.toml | 125 +- README.md | 22 +- ROADMAP.md | 197 +- SECURITY.md | 2 - clippy.toml | 1 + core/CHANGELOG.md | 56 +- core/Cargo.toml | 35 +- core/src/either.rs | 10 +- core/src/lib.rs | 81 +- core/src/muxing.rs | 3 +- core/src/muxing/singleton.rs | 101 - core/src/peer_record.rs | 5 +- core/src/signed_envelope.rs | 3 +- core/src/transport.rs | 41 +- core/src/transport/and_then.rs | 8 +- core/src/transport/boxed.rs | 24 +- core/src/transport/choice.rs | 66 +- core/src/transport/dummy.rs | 6 +- core/src/transport/global_only.rs | 349 ++ core/src/transport/map.rs | 8 +- core/src/transport/map_err.rs | 10 +- core/src/transport/memory.rs | 74 +- core/src/transport/optional.rs | 8 +- core/src/transport/timeout.rs | 8 +- core/src/transport/upgrade.rs | 68 +- core/src/upgrade.rs | 45 +- core/src/upgrade/apply.rs | 41 +- core/src/upgrade/select.rs | 39 +- core/src/upgrade/transfer.rs | 220 - core/tests/transport_upgrade.rs | 24 +- docs/coding-guidelines.md | 10 +- docs/maintainer-handbook.md | 68 + docs/release.md | 41 +- examples/README.md | 4 +- examples/autonat/Cargo.toml | 17 +- examples/autonat/README.md | 41 + examples/autonat/src/bin/autonat_client.rs | 47 +- examples/autonat/src/bin/autonat_server.rs | 47 +- examples/browser-webrtc/Cargo.toml | 46 + examples/browser-webrtc/README.md | 20 + examples/browser-webrtc/src/lib.rs | 110 + examples/browser-webrtc/src/main.rs | 155 + examples/browser-webrtc/static/index.html | 23 + examples/chat-example/Cargo.toml | 14 - examples/chat-example/src/main.rs | 172 - examples/chat/Cargo.toml | 20 + examples/chat/README.md | 30 + examples/chat/src/main.rs | 134 + examples/dcutr/Cargo.toml | 18 +- 
examples/dcutr/README.md | 35 + examples/dcutr/src/main.rs | 174 +- .../distributed-key-value-store/Cargo.toml | 16 +- .../distributed-key-value-store/README.md | 42 + .../distributed-key-value-store/src/main.rs | 148 +- examples/file-sharing/Cargo.toml | 23 +- examples/file-sharing/README.md | 72 + examples/file-sharing/src/main.rs | 70 +- examples/file-sharing/src/network.rs | 227 +- examples/identify/Cargo.toml | 14 +- examples/identify/README.md | 23 + examples/identify/src/main.rs | 65 +- examples/ipfs-kad/Cargo.toml | 18 +- examples/ipfs-kad/README.md | 91 + examples/ipfs-kad/src/main.rs | 185 +- examples/ipfs-private/Cargo.toml | 20 +- examples/ipfs-private/README.md | 40 + examples/ipfs-private/src/main.rs | 177 +- examples/metrics/Cargo.toml | 22 +- examples/metrics/README.md | 79 + examples/metrics/docker-compose.yml | 23 + examples/metrics/otel-collector-config.yaml | 25 + examples/metrics/src/http_service.rs | 17 +- examples/metrics/src/main.rs | 141 +- examples/ping-example/Cargo.toml | 13 - examples/ping/Cargo.toml | 19 + examples/ping/README.md | 30 + examples/{ping-example => ping}/src/main.rs | 69 +- examples/relay-server/Cargo.toml | 15 +- examples/relay-server/README.md | 28 + examples/relay-server/src/main.rs | 79 +- examples/rendezvous/Cargo.toml | 16 +- examples/rendezvous/README.md | 51 + examples/rendezvous/src/bin/rzv-discover.rs | 56 +- examples/rendezvous/src/bin/rzv-identify.rs | 78 +- examples/rendezvous/src/bin/rzv-register.rs | 80 +- examples/rendezvous/src/main.rs | 79 +- examples/upnp/Cargo.toml | 18 + examples/upnp/README.md | 23 + examples/upnp/src/main.rs | 74 + hole-punching-tests/Cargo.toml | 18 + hole-punching-tests/Dockerfile | 19 + hole-punching-tests/src/main.rs | 369 ++ hole-punching-tests/version.json | 8 + identity/CHANGELOG.md | 71 +- identity/Cargo.toml | 46 +- identity/src/ecdsa.rs | 31 +- identity/src/ed25519.rs | 130 +- identity/src/error.rs | 22 +- identity/src/keypair.rs | 794 +-- identity/src/keypair_dummy.rs | 
93 - identity/src/lib.rs | 26 +- identity/src/peer_id.rs | 93 +- identity/src/rsa.rs | 27 +- identity/src/secp256k1.rs | 70 +- identity/tests/keypair_api.rs | 1 + interop-tests/Cargo.toml | 53 +- interop-tests/Dockerfile | 15 - interop-tests/Dockerfile.chromium | 26 + interop-tests/Dockerfile.native | 21 + interop-tests/README.md | 33 +- interop-tests/chromium-ping-version.json | 11 + ...-version.json => native-ping-version.json} | 4 +- interop-tests/pkg/readme.md | 6 + interop-tests/src/arch.rs | 308 ++ interop-tests/src/bin/config/mod.rs | 44 + interop-tests/src/bin/native_ping.rs | 23 + interop-tests/src/bin/ping.rs | 267 - interop-tests/src/bin/wasm_ping.rs | 239 + interop-tests/src/lib.rs | 274 + libp2p/CHANGELOG.md | 103 +- libp2p/Cargo.toml | 68 +- libp2p/src/bandwidth.rs | 5 + libp2p/src/builder.rs | 603 +++ libp2p/src/builder/phase.rs | 139 + libp2p/src/builder/phase/bandwidth_logging.rs | 88 + libp2p/src/builder/phase/bandwidth_metrics.rs | 69 + libp2p/src/builder/phase/behaviour.rs | 90 + libp2p/src/builder/phase/build.rs | 31 + libp2p/src/builder/phase/dns.rs | 117 + libp2p/src/builder/phase/identity.rs | 21 + libp2p/src/builder/phase/other_transport.rs | 269 + libp2p/src/builder/phase/provider.rs | 46 + libp2p/src/builder/phase/quic.rs | 316 ++ libp2p/src/builder/phase/relay.rs | 143 + libp2p/src/builder/phase/swarm.rs | 60 + libp2p/src/builder/phase/tcp.rs | 251 + libp2p/src/builder/phase/websocket.rs | 229 + libp2p/src/builder/select_muxer.rs | 98 + libp2p/src/builder/select_security.rs | 115 + libp2p/src/lib.rs | 177 +- libp2p/src/transport_ext.rs | 19 +- libp2p/src/tutorials/hole_punching.rs | 31 +- libp2p/src/tutorials/ping.rs | 236 +- misc/allow-block-list/CHANGELOG.md | 5 +- misc/allow-block-list/Cargo.toml | 9 +- misc/allow-block-list/src/lib.rs | 200 +- misc/connection-limits/CHANGELOG.md | 24 +- misc/connection-limits/Cargo.toml | 9 +- misc/connection-limits/src/lib.rs | 232 +- misc/futures-bounded/CHANGELOG.md | 23 + 
misc/futures-bounded/Cargo.toml | 24 + misc/futures-bounded/src/futures_map.rs | 319 ++ misc/futures-bounded/src/futures_set.rs | 65 + misc/futures-bounded/src/futures_tuple_set.rs | 94 + misc/futures-bounded/src/lib.rs | 46 + misc/futures-bounded/src/stream_map.rs | 362 ++ misc/futures-bounded/src/stream_set.rs | 64 + misc/keygen/Cargo.toml | 14 +- misc/memory-connection-limits/CHANGELOG.md | 6 + misc/memory-connection-limits/Cargo.toml | 29 + misc/memory-connection-limits/src/lib.rs | 225 + .../tests/max_bytes.rs | 96 + .../tests/max_percentage.rs | 98 + misc/memory-connection-limits/tests/util.rs | 124 + misc/metrics/CHANGELOG.md | 49 +- misc/metrics/Cargo.toml | 23 +- misc/metrics/src/bandwidth.rs | 312 ++ misc/metrics/src/dcutr.rs | 19 +- misc/metrics/src/identify.rs | 265 +- misc/metrics/src/kad.rs | 30 +- misc/metrics/src/lib.rs | 16 +- misc/metrics/src/ping.rs | 19 +- misc/metrics/src/relay.rs | 9 +- misc/metrics/src/swarm.rs | 235 +- misc/multiaddr/README.md | 1 - misc/multistream-select/CHANGELOG.md | 7 +- misc/multistream-select/Cargo.toml | 23 +- misc/multistream-select/src/dialer_select.rs | 213 +- .../src/length_delimited.rs | 16 +- misc/multistream-select/src/lib.rs | 20 +- .../multistream-select/src/listener_select.rs | 22 +- misc/multistream-select/src/negotiated.rs | 14 +- misc/multistream-select/src/protocol.rs | 2 +- .../multistream-select/tests/dialer_select.rs | 178 - misc/multistream-select/tests/transport.rs | 108 - misc/quick-protobuf-codec/CHANGELOG.md | 12 +- misc/quick-protobuf-codec/Cargo.toml | 18 +- misc/quick-protobuf-codec/benches/codec.rs | 28 + .../src/generated}/mod.rs | 2 +- .../src/generated/test.proto | 7 + .../src/generated/test.rs | 47 + misc/quick-protobuf-codec/src/lib.rs | 244 +- .../tests/large_message.rs | 16 + misc/quickcheck-ext/Cargo.toml | 6 + misc/rw-stream-sink/CHANGELOG.md | 2 +- misc/rw-stream-sink/Cargo.toml | 7 +- misc/server/CHANGELOG.md | 80 + misc/server/Cargo.toml | 30 + misc/server/Dockerfile | 21 + 
misc/server/README.md | 41 + misc/server/src/behaviour.rs | 78 + misc/server/src/config.rs | 39 + misc/server/src/http_service.rs | 132 + misc/server/src/main.rs | 193 + misc/webrtc-utils/CHANGELOG.md | 11 + misc/webrtc-utils/Cargo.toml | 34 + misc/webrtc-utils/src/fingerprint.rs | 109 + .../webrtc-utils}/src/generated/message.proto | 0 .../webrtc-utils}/src/generated/mod.rs | 0 .../webrtc-utils}/src/generated/webrtc/mod.rs | 0 .../webrtc-utils}/src/generated/webrtc/pb.rs | 0 misc/webrtc-utils/src/lib.rs | 15 + .../webrtc-utils/src}/noise.rs | 12 +- misc/webrtc-utils/src/sdp.rs | 157 + .../webrtc-utils/src/stream.rs | 79 +- .../webrtc-utils/src/stream}/drop_listener.rs | 47 +- .../webrtc-utils/src/stream}/framed_dc.rs | 27 +- .../webrtc-utils/src/stream}/state.rs | 2 +- misc/webrtc-utils/src/transport.rs | 101 + muxers/mplex/CHANGELOG.md | 7 +- muxers/mplex/Cargo.toml | 22 +- muxers/mplex/benches/split_send_size.rs | 26 +- muxers/mplex/src/codec.rs | 4 +- muxers/mplex/src/io.rs | 269 +- muxers/mplex/src/lib.rs | 8 +- muxers/test-harness/Cargo.toml | 12 +- muxers/test-harness/src/lib.rs | 19 +- muxers/yamux/CHANGELOG.md | 36 +- muxers/yamux/Cargo.toml | 15 +- muxers/yamux/src/lib.rs | 461 +- protocols/autonat/CHANGELOG.md | 8 +- protocols/autonat/Cargo.toml | 15 +- protocols/autonat/src/behaviour.rs | 141 +- protocols/autonat/src/behaviour/as_client.rs | 70 +- protocols/autonat/src/behaviour/as_server.rs | 83 +- protocols/autonat/src/protocol.rs | 134 +- protocols/autonat/tests/test_client.rs | 60 +- protocols/autonat/tests/test_server.rs | 47 +- protocols/dcutr/CHANGELOG.md | 16 +- protocols/dcutr/Cargo.toml | 23 +- protocols/dcutr/src/behaviour.rs | 379 ++ protocols/dcutr/src/behaviour_impl.rs | 452 -- protocols/dcutr/src/handler.rs | 1 - protocols/dcutr/src/handler/direct.rs | 98 - protocols/dcutr/src/handler/relayed.rs | 371 +- protocols/dcutr/src/lib.rs | 10 +- protocols/dcutr/src/protocol/inbound.rs | 155 +- protocols/dcutr/src/protocol/outbound.rs | 155 +- 
protocols/dcutr/tests/lib.rs | 170 +- protocols/floodsub/CHANGELOG.md | 7 +- protocols/floodsub/Cargo.toml | 16 +- protocols/floodsub/src/layer.rs | 63 +- protocols/floodsub/src/protocol.rs | 7 +- protocols/gossipsub/CHANGELOG.md | 44 +- protocols/gossipsub/Cargo.toml | 53 +- protocols/gossipsub/src/backoff.rs | 9 +- protocols/gossipsub/src/behaviour.rs | 1294 ++--- protocols/gossipsub/src/behaviour/tests.rs | 331 +- protocols/gossipsub/src/config.rs | 73 +- protocols/gossipsub/src/error.rs | 36 +- protocols/gossipsub/src/gossip_promises.rs | 12 +- protocols/gossipsub/src/handler.rs | 251 +- protocols/gossipsub/src/lib.rs | 58 +- protocols/gossipsub/src/mcache.rs | 11 +- protocols/gossipsub/src/peer_score.rs | 107 +- protocols/gossipsub/src/protocol.rs | 89 +- .../gossipsub/src/subscription_filter.rs | 5 +- protocols/gossipsub/src/time_cache.rs | 13 +- protocols/gossipsub/src/types.rs | 199 +- protocols/gossipsub/tests/smoke.rs | 11 +- protocols/identify/CHANGELOG.md | 44 +- protocols/identify/Cargo.toml | 27 +- protocols/identify/src/behaviour.rs | 541 +- protocols/identify/src/handler.rs | 420 +- protocols/identify/src/protocol.rs | 384 +- protocols/identify/src/structs.rs | 67 - protocols/identify/tests/smoke.rs | 302 ++ protocols/kad/CHANGELOG.md | 108 +- protocols/kad/Cargo.toml | 34 +- protocols/kad/src/addresses.rs | 76 +- protocols/kad/src/behaviour.rs | 1102 ++-- protocols/kad/src/behaviour/test.rs | 212 +- .../kad/src/{handler_priv.rs => handler.rs} | 900 ++-- protocols/kad/src/jobs.rs | 18 +- .../kad/src/{kbucket_priv.rs => kbucket.rs} | 28 +- .../src/{kbucket_priv => kbucket}/bucket.rs | 28 +- .../src/{kbucket_priv => kbucket}/entry.rs | 60 +- .../kad/src/{kbucket_priv => kbucket}/key.rs | 19 +- protocols/kad/src/lib.rs | 57 +- .../kad/src/{protocol_priv.rs => protocol.rs} | 275 +- protocols/kad/src/query.rs | 25 +- protocols/kad/src/query/peers/closest.rs | 20 +- .../kad/src/query/peers/closest/disjoint.rs | 14 +- protocols/kad/src/query/peers/fixed.rs 
| 4 - .../kad/src/{record_priv.rs => record.rs} | 6 +- .../kad/src/{record_priv => record}/store.rs | 0 .../{record_priv => record}/store/memory.rs | 20 +- protocols/kad/tests/client_mode.rs | 186 + protocols/mdns/CHANGELOG.md | 18 +- protocols/mdns/Cargo.toml | 32 +- protocols/mdns/src/behaviour.rs | 157 +- protocols/mdns/src/behaviour/iface.rs | 190 +- protocols/mdns/src/behaviour/iface/dns.rs | 4 +- protocols/mdns/src/behaviour/iface/query.rs | 56 +- protocols/mdns/tests/use-async-std.rs | 32 +- protocols/mdns/tests/use-tokio.rs | 30 +- protocols/perf/CHANGELOG.md | 8 +- protocols/perf/Cargo.toml | 36 +- protocols/perf/Dockerfile | 10 +- protocols/perf/src/bin/perf-client.rs | 140 - protocols/perf/src/bin/perf-server.rs | 128 - protocols/perf/src/bin/perf.rs | 294 ++ protocols/perf/src/client.rs | 33 +- protocols/perf/src/client/behaviour.rs | 49 +- protocols/perf/src/client/handler.rs | 90 +- protocols/perf/src/lib.rs | 126 + protocols/perf/src/protocol.rs | 229 +- protocols/perf/src/server.rs | 24 - protocols/perf/src/server/behaviour.rs | 34 +- protocols/perf/src/server/handler.rs | 99 +- protocols/perf/tests/lib.rs | 17 +- protocols/ping/CHANGELOG.md | 22 +- protocols/ping/Cargo.toml | 17 +- protocols/ping/src/handler.rs | 211 +- protocols/ping/src/lib.rs | 102 +- protocols/ping/src/protocol.rs | 7 +- protocols/ping/tests/ping.rs | 101 +- protocols/relay/CHANGELOG.md | 56 +- protocols/relay/Cargo.toml | 25 +- protocols/relay/src/behaviour.rs | 345 +- protocols/relay/src/behaviour/handler.rs | 640 ++- protocols/relay/src/behaviour/rate_limiter.rs | 12 +- protocols/relay/src/lib.rs | 15 +- protocols/relay/src/priv_client.rs | 182 +- protocols/relay/src/priv_client/handler.rs | 787 ++- protocols/relay/src/priv_client/transport.rs | 60 +- protocols/relay/src/protocol.rs | 12 +- protocols/relay/src/protocol/inbound_hop.rs | 165 +- protocols/relay/src/protocol/inbound_stop.rs | 119 +- protocols/relay/src/protocol/outbound_hop.rs | 443 +- 
protocols/relay/src/protocol/outbound_stop.rs | 234 +- protocols/relay/src/v2.rs | 163 - protocols/relay/tests/lib.rs | 245 +- protocols/rendezvous/CHANGELOG.md | 20 +- protocols/rendezvous/Cargo.toml | 23 +- protocols/rendezvous/src/client.rs | 487 +- protocols/rendezvous/src/codec.rs | 130 +- protocols/rendezvous/src/handler.rs | 50 - protocols/rendezvous/src/handler/inbound.rs | 192 - protocols/rendezvous/src/handler/outbound.rs | 134 - protocols/rendezvous/src/lib.rs | 10 +- protocols/rendezvous/src/server.rs | 365 +- protocols/rendezvous/src/substream_handler.rs | 559 -- protocols/rendezvous/tests/rendezvous.rs | 163 +- protocols/request-response/CHANGELOG.md | 54 +- protocols/request-response/Cargo.toml | 29 +- protocols/request-response/src/cbor.rs | 228 + protocols/request-response/src/handler.rs | 428 +- .../request-response/src/handler/protocol.rs | 127 +- protocols/request-response/src/json.rs | 202 + protocols/request-response/src/lib.rs | 417 +- .../request-response/tests/error_reporting.rs | 568 ++ protocols/request-response/tests/ping.rs | 137 +- protocols/upnp/CHANGELOG.md | 14 + .../wasm-ext => protocols/upnp}/Cargo.toml | 29 +- protocols/upnp/src/behaviour.rs | 547 ++ protocols/upnp/src/lib.rs | 37 + protocols/upnp/src/tokio.rs | 169 + scripts/add-changelog-header.sh | 10 + scripts/build-interop-image.sh | 18 + scripts/ensure-version-bump-and-changelog.sh | 28 + scripts/list-external-contributors.sh | 11 + swarm-derive/CHANGELOG.md | 23 +- swarm-derive/Cargo.toml | 8 +- swarm-derive/src/lib.rs | 551 +- swarm-derive/src/syn_ext.rs | 16 + swarm-test/CHANGELOG.md | 5 +- swarm-test/Cargo.toml | 17 +- swarm-test/src/lib.rs | 243 +- swarm/CHANGELOG.md | 173 +- swarm/Cargo.toml | 47 +- swarm/src/behaviour.rs | 408 +- swarm/src/behaviour/either.rs | 23 +- swarm/src/behaviour/external_addresses.rs | 158 +- swarm/src/behaviour/listen_addresses.rs | 19 +- swarm/src/behaviour/toggle.rs | 93 +- swarm/src/connection.rs | 746 ++- swarm/src/connection/error.rs | 
40 +- swarm/src/connection/pool.rs | 316 +- swarm/src/connection/pool/task.rs | 39 +- swarm/src/connection/supported_protocols.rs | 88 + swarm/src/dial_opts.rs | 66 +- swarm/src/dummy.rs | 63 +- swarm/src/handler.rs | 494 +- swarm/src/handler/either.rs | 167 +- swarm/src/handler/map_in.rs | 23 +- swarm/src/handler/map_out.rs | 38 +- swarm/src/handler/multi.rs | 267 +- swarm/src/handler/one_shot.rs | 74 +- swarm/src/handler/pending.rs | 24 +- swarm/src/handler/select.rs | 268 +- swarm/src/keep_alive.rs | 142 - swarm/src/lib.rs | 1598 ++---- swarm/src/listen_opts.rs | 33 + swarm/src/registry.rs | 504 -- swarm/src/stream.rs | 98 + swarm/src/stream_protocol.rs | 30 +- swarm/src/test.rs | 126 +- swarm/src/upgrade.rs | 22 +- swarm/tests/connection_close.rs | 146 + swarm/tests/listener.rs | 139 + swarm/tests/swarm_derive.rs | 165 +- swarm/tests/ui/fail/prelude_not_string.rs | 11 + swarm/tests/ui/fail/prelude_not_string.stderr | 5 + swarm/tests/ui/fail/to_swarm_not_string.rs | 19 + .../tests/ui/fail/to_swarm_not_string.stderr | 5 + transports/deflate/CHANGELOG.md | 106 - transports/deflate/Cargo.toml | 30 - transports/deflate/src/lib.rs | 275 - transports/deflate/tests/test.rs | 80 - transports/dns/CHANGELOG.md | 26 +- transports/dns/Cargo.toml | 28 +- transports/dns/src/lib.rs | 368 +- transports/noise/CHANGELOG.md | 22 +- transports/noise/Cargo.toml | 31 +- transports/noise/src/generated/payload.proto | 8 +- .../noise/src/generated/payload/proto.rs | 44 +- transports/noise/src/io.rs | 20 +- transports/noise/src/io/framed.rs | 552 +- transports/noise/src/io/handshake.rs | 293 +- transports/noise/src/lib.rs | 698 +-- transports/noise/src/protocol.rs | 289 +- transports/noise/src/protocol/x25519.rs | 296 -- transports/noise/src/protocol/x25519_spec.rs | 200 - transports/noise/tests/smoke.rs | 17 +- .../noise/tests/webtransport_certhashes.rs | 156 + transports/plaintext/CHANGELOG.md | 19 +- transports/plaintext/Cargo.toml | 20 +- transports/plaintext/src/error.rs | 58 +- 
transports/plaintext/src/handshake.rs | 124 +- transports/plaintext/src/lib.rs | 113 +- transports/plaintext/tests/smoke.rs | 29 +- transports/pnet/CHANGELOG.md | 13 +- transports/pnet/Cargo.toml | 17 +- transports/pnet/src/crypt_writer.rs | 3 +- transports/pnet/src/lib.rs | 10 +- transports/pnet/tests/smoke.rs | 10 +- transports/quic/CHANGELOG.md | 58 +- transports/quic/Cargo.toml | 34 +- transports/quic/src/config.rs | 165 + transports/quic/src/connection.rs | 422 +- transports/quic/src/connection/connecting.rs | 81 +- transports/quic/src/connection/stream.rs | 86 + transports/quic/src/connection/substream.rs | 257 - transports/quic/src/endpoint.rs | 667 --- transports/quic/src/hole_punching.rs | 44 + transports/quic/src/lib.rs | 31 +- transports/quic/src/provider.rs | 55 +- transports/quic/src/provider/async_std.rs | 119 +- transports/quic/src/provider/tokio.rs | 74 +- transports/quic/src/transport.rs | 668 +-- transports/quic/tests/smoke.rs | 165 +- transports/quic/tests/stream_compliance.rs | 6 +- transports/tcp/CHANGELOG.md | 13 +- transports/tcp/Cargo.toml | 25 +- transports/tcp/src/lib.rs | 167 +- transports/tcp/src/provider/async_io.rs | 9 +- transports/tcp/src/provider/tokio.rs | 4 +- transports/tls/CHANGELOG.md | 16 +- transports/tls/Cargo.toml | 25 +- transports/tls/src/certificate.rs | 6 +- transports/tls/src/lib.rs | 2 +- transports/tls/src/upgrade.rs | 15 +- transports/tls/src/verifier.rs | 25 +- transports/tls/tests/smoke.rs | 11 +- transports/uds/CHANGELOG.md | 5 +- transports/uds/Cargo.toml | 13 +- transports/uds/src/lib.rs | 19 +- transports/wasm-ext/CHANGELOG.md | 118 - transports/wasm-ext/src/lib.rs | 652 --- transports/wasm-ext/src/websockets.js | 174 - transports/webrtc-websys/CHANGELOG.md | 18 + transports/webrtc-websys/Cargo.toml | 31 + transports/webrtc-websys/README.md | 9 + transports/webrtc-websys/src/connection.rs | 307 ++ transports/webrtc-websys/src/error.rs | 62 + transports/webrtc-websys/src/lib.rs | 13 + 
transports/webrtc-websys/src/sdp.rs | 55 + transports/webrtc-websys/src/stream.rs | 61 + .../src/stream/poll_data_channel.rs | 242 + transports/webrtc-websys/src/transport.rs | 140 + transports/webrtc-websys/src/upgrade.rs | 59 + transports/webrtc/CHANGELOG.md | 21 +- transports/webrtc/Cargo.toml | 45 +- transports/webrtc/examples/listen_ping.rs | 66 - transports/webrtc/src/lib.rs | 6 +- transports/webrtc/src/tokio/certificate.rs | 18 +- transports/webrtc/src/tokio/connection.rs | 50 +- transports/webrtc/src/tokio/fingerprint.rs | 61 +- transports/webrtc/src/tokio/mod.rs | 2 +- transports/webrtc/src/tokio/sdp.rs | 134 +- transports/webrtc/src/tokio/stream.rs | 80 + transports/webrtc/src/tokio/transport.rs | 145 +- transports/webrtc/src/tokio/udp_mux.rs | 38 +- transports/webrtc/src/tokio/upgrade.rs | 45 +- transports/webrtc/tests/smoke.rs | 33 +- transports/websocket-websys/CHANGELOG.md | 16 + transports/websocket-websys/Cargo.toml | 35 + transports/websocket-websys/src/lib.rs | 450 ++ .../websocket-websys/src/web_context.rs | 57 + transports/websocket/CHANGELOG.md | 19 +- transports/websocket/Cargo.toml | 24 +- transports/websocket/src/framed.rs | 101 +- transports/websocket/src/lib.rs | 39 +- transports/websocket/src/quicksink.rs | 350 ++ transports/websocket/src/tls.rs | 2 +- transports/webtransport-websys/CHANGELOG.md | 8 + transports/webtransport-websys/Cargo.toml | 51 + .../webtransport-websys/src/bindings.rs | 141 + .../webtransport-websys/src/connection.rs | 209 + .../webtransport-websys/src/endpoint.rs | 227 + transports/webtransport-websys/src/error.rs | 36 + .../src/fused_js_promise.rs | 58 + transports/webtransport-websys/src/lib.rs | 15 + transports/webtransport-websys/src/stream.rs | 228 + .../webtransport-websys/src/transport.rs | 103 + transports/webtransport-websys/src/utils.rs | 76 + wasm-tests/README.md | 11 + wasm-tests/run-all.sh | 7 + wasm-tests/webtransport-tests/Cargo.toml | 26 + wasm-tests/webtransport-tests/README.md | 27 + 
.../webtransport-tests/echo-server/.gitignore | 1 + .../webtransport-tests/echo-server/Dockerfile | 9 + .../webtransport-tests/echo-server/go.mod | 64 + .../webtransport-tests/echo-server/go.sum | 344 ++ .../webtransport-tests/echo-server/main.go | 99 + wasm-tests/webtransport-tests/run.sh | 28 + wasm-tests/webtransport-tests/src/lib.rs | 384 ++ 560 files changed, 37159 insertions(+), 27659 deletions(-) delete mode 100644 .cargo/config.toml delete mode 100644 .github/ISSUE_TEMPLATE/bug_report.md create mode 100644 .github/ISSUE_TEMPLATE/bug_report.yml delete mode 100644 .github/ISSUE_TEMPLATE/enhancement.md create mode 100644 .github/ISSUE_TEMPLATE/enhancement.yml delete mode 100644 .github/ISSUE_TEMPLATE/feature_request.md create mode 100644 .github/ISSUE_TEMPLATE/feature_request.yml delete mode 100644 .github/actions/cargo-semver-checks/action.yml delete mode 100644 .github/workflows/cargo-deny-pr.yml create mode 100644 .github/workflows/docker-image.yml create mode 100644 CONTRIBUTING.md delete mode 100644 core/src/muxing/singleton.rs create mode 100644 core/src/transport/global_only.rs delete mode 100644 core/src/upgrade/transfer.rs create mode 100644 docs/maintainer-handbook.md create mode 100644 examples/autonat/README.md create mode 100644 examples/browser-webrtc/Cargo.toml create mode 100644 examples/browser-webrtc/README.md create mode 100644 examples/browser-webrtc/src/lib.rs create mode 100644 examples/browser-webrtc/src/main.rs create mode 100644 examples/browser-webrtc/static/index.html delete mode 100644 examples/chat-example/Cargo.toml delete mode 100644 examples/chat-example/src/main.rs create mode 100644 examples/chat/Cargo.toml create mode 100644 examples/chat/README.md create mode 100644 examples/chat/src/main.rs create mode 100644 examples/dcutr/README.md create mode 100644 examples/distributed-key-value-store/README.md create mode 100644 examples/file-sharing/README.md create mode 100644 examples/identify/README.md create mode 100644 
examples/ipfs-kad/README.md create mode 100644 examples/ipfs-private/README.md create mode 100644 examples/metrics/README.md create mode 100644 examples/metrics/docker-compose.yml create mode 100644 examples/metrics/otel-collector-config.yaml delete mode 100644 examples/ping-example/Cargo.toml create mode 100644 examples/ping/Cargo.toml create mode 100644 examples/ping/README.md rename examples/{ping-example => ping}/src/main.rs (54%) create mode 100644 examples/relay-server/README.md create mode 100644 examples/rendezvous/README.md create mode 100644 examples/upnp/Cargo.toml create mode 100644 examples/upnp/README.md create mode 100644 examples/upnp/src/main.rs create mode 100644 hole-punching-tests/Cargo.toml create mode 100644 hole-punching-tests/Dockerfile create mode 100644 hole-punching-tests/src/main.rs create mode 100644 hole-punching-tests/version.json delete mode 100644 identity/src/keypair_dummy.rs delete mode 100644 interop-tests/Dockerfile create mode 100644 interop-tests/Dockerfile.chromium create mode 100644 interop-tests/Dockerfile.native create mode 100644 interop-tests/chromium-ping-version.json rename interop-tests/{ping-version.json => native-ping-version.json} (67%) create mode 100644 interop-tests/pkg/readme.md create mode 100644 interop-tests/src/arch.rs create mode 100644 interop-tests/src/bin/config/mod.rs create mode 100644 interop-tests/src/bin/native_ping.rs delete mode 100644 interop-tests/src/bin/ping.rs create mode 100644 interop-tests/src/bin/wasm_ping.rs create mode 100644 interop-tests/src/lib.rs create mode 100644 libp2p/src/builder.rs create mode 100644 libp2p/src/builder/phase.rs create mode 100644 libp2p/src/builder/phase/bandwidth_logging.rs create mode 100644 libp2p/src/builder/phase/bandwidth_metrics.rs create mode 100644 libp2p/src/builder/phase/behaviour.rs create mode 100644 libp2p/src/builder/phase/build.rs create mode 100644 libp2p/src/builder/phase/dns.rs create mode 100644 libp2p/src/builder/phase/identity.rs create 
mode 100644 libp2p/src/builder/phase/other_transport.rs create mode 100644 libp2p/src/builder/phase/provider.rs create mode 100644 libp2p/src/builder/phase/quic.rs create mode 100644 libp2p/src/builder/phase/relay.rs create mode 100644 libp2p/src/builder/phase/swarm.rs create mode 100644 libp2p/src/builder/phase/tcp.rs create mode 100644 libp2p/src/builder/phase/websocket.rs create mode 100644 libp2p/src/builder/select_muxer.rs create mode 100644 libp2p/src/builder/select_security.rs create mode 100644 misc/futures-bounded/CHANGELOG.md create mode 100644 misc/futures-bounded/Cargo.toml create mode 100644 misc/futures-bounded/src/futures_map.rs create mode 100644 misc/futures-bounded/src/futures_set.rs create mode 100644 misc/futures-bounded/src/futures_tuple_set.rs create mode 100644 misc/futures-bounded/src/lib.rs create mode 100644 misc/futures-bounded/src/stream_map.rs create mode 100644 misc/futures-bounded/src/stream_set.rs create mode 100644 misc/memory-connection-limits/CHANGELOG.md create mode 100644 misc/memory-connection-limits/Cargo.toml create mode 100644 misc/memory-connection-limits/src/lib.rs create mode 100644 misc/memory-connection-limits/tests/max_bytes.rs create mode 100644 misc/memory-connection-limits/tests/max_percentage.rs create mode 100644 misc/memory-connection-limits/tests/util.rs create mode 100644 misc/metrics/src/bandwidth.rs delete mode 100644 misc/multiaddr/README.md delete mode 100644 misc/multistream-select/tests/dialer_select.rs delete mode 100644 misc/multistream-select/tests/transport.rs create mode 100644 misc/quick-protobuf-codec/benches/codec.rs rename {protocols/identify/src => misc/quick-protobuf-codec/src/generated}/mod.rs (66%) create mode 100644 misc/quick-protobuf-codec/src/generated/test.proto create mode 100644 misc/quick-protobuf-codec/src/generated/test.rs create mode 100644 misc/quick-protobuf-codec/tests/large_message.rs create mode 100644 misc/server/CHANGELOG.md create mode 100644 misc/server/Cargo.toml create 
mode 100644 misc/server/Dockerfile create mode 100644 misc/server/README.md create mode 100644 misc/server/src/behaviour.rs create mode 100644 misc/server/src/config.rs create mode 100644 misc/server/src/http_service.rs create mode 100644 misc/server/src/main.rs create mode 100644 misc/webrtc-utils/CHANGELOG.md create mode 100644 misc/webrtc-utils/Cargo.toml create mode 100644 misc/webrtc-utils/src/fingerprint.rs rename {transports/webrtc => misc/webrtc-utils}/src/generated/message.proto (100%) rename {transports/webrtc => misc/webrtc-utils}/src/generated/mod.rs (100%) rename {transports/webrtc => misc/webrtc-utils}/src/generated/webrtc/mod.rs (100%) rename {transports/webrtc => misc/webrtc-utils}/src/generated/webrtc/pb.rs (100%) create mode 100644 misc/webrtc-utils/src/lib.rs rename {transports/webrtc/src/tokio/upgrade => misc/webrtc-utils/src}/noise.rs (94%) create mode 100644 misc/webrtc-utils/src/sdp.rs rename transports/webrtc/src/tokio/substream.rs => misc/webrtc-utils/src/stream.rs (82%) rename {transports/webrtc/src/tokio/substream => misc/webrtc-utils/src/stream}/drop_listener.rs (77%) rename {transports/webrtc/src/tokio/substream => misc/webrtc-utils/src/stream}/framed_dc.rs (69%) rename {transports/webrtc/src/tokio/substream => misc/webrtc-utils/src/stream}/state.rs (99%) create mode 100644 misc/webrtc-utils/src/transport.rs create mode 100644 protocols/dcutr/src/behaviour.rs delete mode 100644 protocols/dcutr/src/behaviour_impl.rs delete mode 100644 protocols/dcutr/src/handler/direct.rs delete mode 100644 protocols/identify/src/structs.rs create mode 100644 protocols/identify/tests/smoke.rs rename protocols/kad/src/{handler_priv.rs => handler.rs} (53%) rename protocols/kad/src/{kbucket_priv.rs => kbucket.rs} (96%) rename protocols/kad/src/{kbucket_priv => kbucket}/bucket.rs (97%) rename protocols/kad/src/{kbucket_priv => kbucket}/entry.rs (81%) rename protocols/kad/src/{kbucket_priv => kbucket}/key.rs (94%) rename protocols/kad/src/{protocol_priv.rs => 
protocol.rs} (77%) rename protocols/kad/src/{record_priv.rs => record.rs} (97%) rename protocols/kad/src/{record_priv => record}/store.rs (100%) rename protocols/kad/src/{record_priv => record}/store/memory.rs (94%) create mode 100644 protocols/kad/tests/client_mode.rs delete mode 100644 protocols/perf/src/bin/perf-client.rs delete mode 100644 protocols/perf/src/bin/perf-server.rs create mode 100644 protocols/perf/src/bin/perf.rs delete mode 100644 protocols/relay/src/v2.rs delete mode 100644 protocols/rendezvous/src/handler.rs delete mode 100644 protocols/rendezvous/src/handler/inbound.rs delete mode 100644 protocols/rendezvous/src/handler/outbound.rs delete mode 100644 protocols/rendezvous/src/substream_handler.rs create mode 100644 protocols/request-response/src/cbor.rs create mode 100644 protocols/request-response/src/json.rs create mode 100644 protocols/request-response/tests/error_reporting.rs create mode 100644 protocols/upnp/CHANGELOG.md rename {transports/wasm-ext => protocols/upnp}/Cargo.toml (53%) create mode 100644 protocols/upnp/src/behaviour.rs create mode 100644 protocols/upnp/src/lib.rs create mode 100644 protocols/upnp/src/tokio.rs create mode 100755 scripts/add-changelog-header.sh create mode 100755 scripts/build-interop-image.sh create mode 100755 scripts/ensure-version-bump-and-changelog.sh create mode 100755 scripts/list-external-contributors.sh create mode 100644 swarm-derive/src/syn_ext.rs create mode 100644 swarm/src/connection/supported_protocols.rs delete mode 100644 swarm/src/keep_alive.rs create mode 100644 swarm/src/listen_opts.rs delete mode 100644 swarm/src/registry.rs create mode 100644 swarm/src/stream.rs create mode 100644 swarm/tests/connection_close.rs create mode 100644 swarm/tests/listener.rs create mode 100644 swarm/tests/ui/fail/prelude_not_string.rs create mode 100644 swarm/tests/ui/fail/prelude_not_string.stderr create mode 100644 swarm/tests/ui/fail/to_swarm_not_string.rs create mode 100644 
swarm/tests/ui/fail/to_swarm_not_string.stderr delete mode 100644 transports/deflate/CHANGELOG.md delete mode 100644 transports/deflate/Cargo.toml delete mode 100644 transports/deflate/src/lib.rs delete mode 100644 transports/deflate/tests/test.rs delete mode 100644 transports/noise/src/protocol/x25519.rs delete mode 100644 transports/noise/src/protocol/x25519_spec.rs create mode 100644 transports/noise/tests/webtransport_certhashes.rs create mode 100644 transports/quic/src/config.rs create mode 100644 transports/quic/src/connection/stream.rs delete mode 100644 transports/quic/src/connection/substream.rs delete mode 100644 transports/quic/src/endpoint.rs create mode 100644 transports/quic/src/hole_punching.rs delete mode 100644 transports/wasm-ext/CHANGELOG.md delete mode 100644 transports/wasm-ext/src/lib.rs delete mode 100644 transports/wasm-ext/src/websockets.js create mode 100644 transports/webrtc-websys/CHANGELOG.md create mode 100644 transports/webrtc-websys/Cargo.toml create mode 100644 transports/webrtc-websys/README.md create mode 100644 transports/webrtc-websys/src/connection.rs create mode 100644 transports/webrtc-websys/src/error.rs create mode 100644 transports/webrtc-websys/src/lib.rs create mode 100644 transports/webrtc-websys/src/sdp.rs create mode 100644 transports/webrtc-websys/src/stream.rs create mode 100644 transports/webrtc-websys/src/stream/poll_data_channel.rs create mode 100644 transports/webrtc-websys/src/transport.rs create mode 100644 transports/webrtc-websys/src/upgrade.rs delete mode 100644 transports/webrtc/examples/listen_ping.rs create mode 100644 transports/webrtc/src/tokio/stream.rs create mode 100644 transports/websocket-websys/CHANGELOG.md create mode 100644 transports/websocket-websys/Cargo.toml create mode 100644 transports/websocket-websys/src/lib.rs create mode 100644 transports/websocket-websys/src/web_context.rs create mode 100644 transports/websocket/src/quicksink.rs create mode 100644 
transports/webtransport-websys/CHANGELOG.md create mode 100644 transports/webtransport-websys/Cargo.toml create mode 100644 transports/webtransport-websys/src/bindings.rs create mode 100644 transports/webtransport-websys/src/connection.rs create mode 100644 transports/webtransport-websys/src/endpoint.rs create mode 100644 transports/webtransport-websys/src/error.rs create mode 100644 transports/webtransport-websys/src/fused_js_promise.rs create mode 100644 transports/webtransport-websys/src/lib.rs create mode 100644 transports/webtransport-websys/src/stream.rs create mode 100644 transports/webtransport-websys/src/transport.rs create mode 100644 transports/webtransport-websys/src/utils.rs create mode 100644 wasm-tests/README.md create mode 100755 wasm-tests/run-all.sh create mode 100644 wasm-tests/webtransport-tests/Cargo.toml create mode 100644 wasm-tests/webtransport-tests/README.md create mode 100644 wasm-tests/webtransport-tests/echo-server/.gitignore create mode 100644 wasm-tests/webtransport-tests/echo-server/Dockerfile create mode 100644 wasm-tests/webtransport-tests/echo-server/go.mod create mode 100644 wasm-tests/webtransport-tests/echo-server/go.sum create mode 100644 wasm-tests/webtransport-tests/echo-server/main.go create mode 100755 wasm-tests/webtransport-tests/run.sh create mode 100644 wasm-tests/webtransport-tests/src/lib.rs diff --git a/.cargo/config.toml b/.cargo/config.toml deleted file mode 100644 index e55adcc9ed1..00000000000 --- a/.cargo/config.toml +++ /dev/null @@ -1,3 +0,0 @@ -[alias] -# Temporary solution to have clippy config in a single place until https://github.com/rust-lang/rust-clippy/blob/master/doc/roadmap-2021.md#lintstoml-configuration is shipped. 
-custom-clippy = "clippy --workspace --all-features --all-targets -- -A clippy::type_complexity -A clippy::pedantic -W clippy::used_underscore_binding -W unreachable_pub -D warnings" diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md deleted file mode 100644 index b225bdd9a1c..00000000000 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -name: Bug Report -about: Create a bug report for rust-libp2p. ---- - - - - - -## Summary - - - -## Expected behaviour - - - -## Actual behaviour - - - - -
Debug Output -

- -``` - -``` -

-
- -## Possible Solution - - -## Version - - -- libp2p version (version number, commit, or branch): - - -## Would you like to work on fixing this bug? - - - -Yes / No / Maybe. diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml new file mode 100644 index 00000000000..1a531e3646c --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -0,0 +1,55 @@ +name: Bug Report +description: Create a bug report for rust-libp2p + +body: + - type: markdown + attributes: + value: | + Thank you for filing a bug report! + - type: textarea + attributes: + label: Summary + description: Please provide a short summary of the bug, along with any information you feel relevant to replicate the bug. + validations: + required: true + - type: textarea + attributes: + label: Expected behavior + description: Describe what you expect to happen. + validations: + required: true + - type: textarea + attributes: + label: Actual behavior + description: Describe what actually happens. + validations: + required: true + - type: textarea + attributes: + label: Relevant log output + description: Please copy and paste any relevant log output. This will be automatically formatted into code, so no need for backticks. + render: shell + validations: + required: false + - type: textarea + attributes: + label: Possible Solution + description: Suggest a fix/reason for the bug, or ideas how to implement the addition or change. + validations: + required: false + - type: textarea + attributes: + label: Version + description: Which version of libp2p are you using? libp2p version (version number, commit, or branch) + validations: + required: false + - type: dropdown + attributes: + label: Would you like to work on fixing this bug ? + description: Any contribution towards fixing the bug is greatly appreciated. We are more than happy to provide help on the process. 
+ options: + - "Yes" + - "No" + - Maybe + validations: + required: true \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index 5fdf7ed9da4..29c54c335a4 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -1,8 +1,5 @@ blank_issues_enabled: true contact_links: - - name: Report a vulnerability - url: https://github.com/libp2p/rust-libp2p/security/advisories/new - about: For security related issues please file a private security vulnerability report. - name: Question url: https://github.com/libp2p/rust-libp2p/discussions/new?category=q-a about: Please ask questions in the rust-libp2p GitHub Discussions forum. diff --git a/.github/ISSUE_TEMPLATE/enhancement.md b/.github/ISSUE_TEMPLATE/enhancement.md deleted file mode 100644 index b3a3dd30115..00000000000 --- a/.github/ISSUE_TEMPLATE/enhancement.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -name: Enhancement -about: Suggest an improvement to an existing rust-libp2p feature. ---- - -## Description - - - -## Motivation - - - -## Current Implementation - - - -## Are you planning to do it yourself in a pull request? - - - -Yes / No / Maybe. diff --git a/.github/ISSUE_TEMPLATE/enhancement.yml b/.github/ISSUE_TEMPLATE/enhancement.yml new file mode 100644 index 00000000000..ed7aeb644b3 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/enhancement.yml @@ -0,0 +1,31 @@ +name: Enhancement +description: Suggest an improvement to an existing rust-libp2p feature. +body: + - type: textarea + attributes: + label: Description + description: Describe the enhancement that you are proposing. + validations: + required: true + - type: textarea + attributes: + label: Motivation + description: Explain why this enhancement is beneficial. + validations: + required: true + - type: textarea + attributes: + label: Current Implementation + description: Describe the current implementation. 
+ validations: + required: true + - type: dropdown + attributes: + label: Are you planning to do it yourself in a pull request ? + description: Any contribution is greatly appreciated. We are more than happy to provide help on the process. + options: + - "Yes" + - "No" + - Maybe + validations: + required: true \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md deleted file mode 100644 index 822aa063147..00000000000 --- a/.github/ISSUE_TEMPLATE/feature_request.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -name: Feature request -about: Suggest a new feature in rust-libp2p. ---- - - - -## Description - - - -## Motivation - - - -## Requirements - - - -1. -2. -3. - -## Open questions - - - -## Are you planning to do it yourself in a pull request? - - - -Yes / No / Maybe. diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml new file mode 100644 index 00000000000..6fa3e638be8 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.yml @@ -0,0 +1,42 @@ +name: Feature request +description: Suggest a new feature in rust-libp2p +body: + - type: markdown + attributes: + value: | + If you'd like to suggest a feature related to libp2p but not specifically related to the rust implementation, please file an issue at https://github.com/libp2p/specs instead. + - type: textarea + attributes: + label: Description + description: Briefly describe the feature that you are requesting. + validations: + required: true + - type: textarea + attributes: + label: Motivation + description: Explain why this feature is needed. + validations: + required: true + - type: textarea + attributes: + label: Requirements + description: Write a list of what you want this feature to do. + placeholder: "1." + validations: + required: true + - type: textarea + attributes: + label: Open questions + description: Use this section to ask any questions that are related to the feature. 
+ validations: + required: false + - type: dropdown + attributes: + label: Are you planning to do it yourself in a pull request ? + description: Any contribution is greatly appreciated. We are more than happy to provide help on the process. + options: + - "Yes" + - "No" + - Maybe + validations: + required: true \ No newline at end of file diff --git a/.github/actions/cargo-semver-checks/action.yml b/.github/actions/cargo-semver-checks/action.yml deleted file mode 100644 index 79151e6e98c..00000000000 --- a/.github/actions/cargo-semver-checks/action.yml +++ /dev/null @@ -1,36 +0,0 @@ -name: "Run cargo semver-checks" -description: "Install and run the cargo semver-checks tool" -inputs: - crate: - required: true - description: "The crate to run `cargo semver-checks` on." -runs: - using: "composite" - steps: - - run: wget -q -O- https://github.com/obi1kenobi/cargo-semver-checks/releases/download/v0.20.0/cargo-semver-checks-x86_64-unknown-linux-gnu.tar.gz | tar -xz -C ~/.cargo/bin - shell: bash - - - name: Get released version - shell: bash - id: get-released-version - run: | - MAX_STABLE_VERSION=$(curl https://crates.io/api/v1/crates/${{ inputs.crate }} --silent | jq '.crate.max_stable_version') - echo "version=${MAX_STABLE_VERSION}" >> $GITHUB_OUTPUT - - - shell: bash - run: | - rustc --version | tee .rustc-version - cargo semver-checks --version | tee .semver-checks-version - - - uses: actions/cache@v3 - with: - path: ${{ github.workspace }}/target/semver-checks/cache - key: semver-checks-cache-${{ hashFiles('.rustc-version') }}-${{ hashFiles('.semver-checks-version') }}-${{ inputs.crate }}-${{ steps.get-released-version.outputs.version }} - - - run: cargo semver-checks check-release --package ${{ inputs.crate }} --verbose - shell: bash - env: - CARGO_TERM_VERBOSE: "true" - # debugging https://github.com/libp2p/rust-libp2p/pull/3782#issuecomment-1523346255 - CARGO_HTTP_DEBUG: "true" - CARGO_LOG: "cargo::ops::registry=debug" diff --git a/.github/dependabot.yml 
b/.github/dependabot.yml index 0bfe5069ae0..a0656e6162c 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -8,6 +8,20 @@ updates: commit-message: prefix: "deps" rebase-strategy: "disabled" + groups: + hickory-dns: + patterns: + - "hickory-*" + - "async-std-resolver" + opentelemetry: + patterns: + - "opentelemetry*" + - "tracing-opentelemetry" + axum: + patterns: + - "axum" + - "tower" + - "tower-http" - package-ecosystem: "github-actions" directory: "/" schedule: diff --git a/.github/mergify.yml b/.github/mergify.yml index eafca0cf87b..c630d23cea0 100644 --- a/.github/mergify.yml +++ b/.github/mergify.yml @@ -4,11 +4,13 @@ defaults: method: squash commit_message_template: | {{ title }} - + {{ body | get_section("## Description", "") }} - + Pull-Request: #{{ number }}. + {{ body | get_section("## Attributions", "") }} + pull_request_rules: - name: Ask to resolve conflict conditions: @@ -27,6 +29,15 @@ pull_request_rules: conditions: # All branch protection rules are implicit: https://docs.mergify.com/conditions/#about-branch-protection - label=send-it + - base=master + actions: + queue: + + - name: Add approved dependabot PRs to merge queue + conditions: + # All branch protection rules are implicit: https://docs.mergify.com/conditions/#about-branch-protection + - author=dependabot[bot] + - base=master actions: queue: @@ -49,6 +60,15 @@ pull_request_rules: actions: review: + - name: Approve dependabot PRs of semver-compatible updates + conditions: + - author=dependabot[bot] + - or: + - title~=bump [^\s]+ from ([1-9]+)\..+ to \1\. # For major >= 1 versions, only approve updates with the same major version. + - title~=bump [^\s]+ from 0\.([\d]+)\..+ to 0\.\1\. # For major == 0 versions, only approve updates with the same minor version. 
+ actions: + review: + queue_rules: - name: default conditions: [] diff --git a/.github/workflows/cache-factory.yml b/.github/workflows/cache-factory.yml index 2f4e807f7d9..07e2bb1f7cf 100644 --- a/.github/workflows/cache-factory.yml +++ b/.github/workflows/cache-factory.yml @@ -18,11 +18,11 @@ jobs: make_stable_rust_cache: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: dtolnay/rust-toolchain@stable - - uses: Swatinem/rust-cache@6fd3edff6979b79f87531400ad694fb7f2c84b1f # v2.2.1 + - uses: Swatinem/rust-cache@3cf7f8cc28d1b4e7d01e3783be10a97d55d483c8 # v2.7.1 with: shared-key: stable-cache diff --git a/.github/workflows/cargo-audit.yml b/.github/workflows/cargo-audit.yml index 2b5abe19292..65c5de03f22 100644 --- a/.github/workflows/cargo-audit.yml +++ b/.github/workflows/cargo-audit.yml @@ -7,7 +7,7 @@ jobs: audit: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: actions-rs/audit-check@v1 with: token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/cargo-deny-pr.yml b/.github/workflows/cargo-deny-pr.yml deleted file mode 100644 index c9c0e7d447b..00000000000 --- a/.github/workflows/cargo-deny-pr.yml +++ /dev/null @@ -1,28 +0,0 @@ -name: cargo deny - -on: - push: - paths: - - '**/Cargo.toml' - pull_request: - paths: - - '**/Cargo.toml' - -concurrency: - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: true - -jobs: - cargo-deny: - runs-on: ubuntu-latest - strategy: - matrix: - checks: - - advisories - - bans licenses sources - - steps: - - uses: actions/checkout@v3 - - uses: EmbarkStudios/cargo-deny-action@v1 - with: - command: check ${{ matrix.checks }} diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 594f1c38d2a..f8770a29bc9 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -12,6 +12,7 @@ concurrency: env: SEGMENT_DOWNLOAD_TIMEOUT_MINS: 2 # Fail cache download after 2 minutes. 
+ RUSTFLAGS: '-Dwarnings' # Never tolerate warnings. jobs: test: @@ -31,16 +32,15 @@ jobs: env: CRATE: ${{ matrix.crate }} steps: - - name: Install Protoc - run: sudo apt-get install -y protobuf-compiler - - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 + with: + fetch-depth: 0 - - uses: r7kamura/rust-problem-matchers@d58b70c4a13c4866d96436315da451d8106f8f08 #v1.3.0 + - uses: r7kamura/rust-problem-matchers@2c2f1016021a7455a6b5b4bbae31145f3b3cd83a #v1.4.0 - uses: dtolnay/rust-toolchain@stable - - uses: Swatinem/rust-cache@6fd3edff6979b79f87531400ad694fb7f2c84b1f # v2.2.1 + - uses: Swatinem/rust-cache@3cf7f8cc28d1b4e7d01e3783be10a97d55d483c8 # v2.7.1 with: shared-key: stable-cache save-if: false @@ -51,39 +51,87 @@ jobs: - name: Check if we compile without any features activated run: cargo build --package "$CRATE" --no-default-features - - run: cargo clean - - - name: Check if crate has been released - id: check-released - run: | - RESPONSE_CODE=$(curl https://crates.io/api/v1/crates/"$CRATE" --silent --write-out "%{http_code}" --output /dev/null) - echo "code=${RESPONSE_CODE}" - echo "code=${RESPONSE_CODE}" >> $GITHUB_OUTPUT - - - uses: ./.github/actions/cargo-semver-checks - if: steps.check-released.outputs.code == 200 # Workaround until https://github.com/obi1kenobi/cargo-semver-check/issues/146 is shipped. 
- with: - crate: env.CRATE - - name: Enforce no dependency on meta crate + if: env.CRATE != 'libp2p-server' && env.CRATE != 'libp2p-perf' run: | cargo metadata --format-version=1 --no-deps | \ jq -e -r '.packages[] | select(.name == "'"$CRATE"'") | .dependencies | all(.name != "libp2p")' - - uses: taiki-e/cache-cargo-install-action@7dd0cff2732612ac642812bcec4ada5a279239ed # v1 + - uses: taiki-e/cache-cargo-install-action@924d49e0af41f449f0ad549559bc608ee4653562 # v1 with: tool: tomlq + - name: Extract version from manifest + run: | + CRATE_VERSION=$(cargo metadata --format-version=1 --no-deps | jq -e -r '.packages[] | select(.name == "'"$CRATE"'") | .version') + + echo "CRATE_VERSION=$CRATE_VERSION" >> $GITHUB_ENV + - name: Enforce version in `workspace.dependencies` matches latest version if: env.CRATE != 'libp2p' run: | - PACKAGE_VERSION=$(cargo metadata --format-version=1 --no-deps | jq -e -r '.packages[] | select(.name == "'"$CRATE"'") | .version') SPECIFIED_VERSION=$(tomlq "workspace.dependencies.$CRATE.version" --file ./Cargo.toml) - - echo "Package version: $PACKAGE_VERSION"; + + echo "Package version: $CRATE_VERSION"; echo "Specified version: $SPECIFIED_VERSION"; - test "$PACKAGE_VERSION" = "$SPECIFIED_VERSION" + test "$CRATE_VERSION" = "$SPECIFIED_VERSION" || test "=$CRATE_VERSION" = "$SPECIFIED_VERSION" + + - name: Enforce version in CHANGELOG.md matches version in manifest + run: | + MANIFEST_PATH=$(cargo metadata --format-version=1 --no-deps | jq -e -r '.packages[] | select(.name == "'"$CRATE"'") | .manifest_path') + DIR_TO_CRATE=$(dirname "$MANIFEST_PATH") + VERSION_IN_CHANGELOG=$(awk -F' ' '/^## [0-9]+\.[0-9]+\.[0-9]+/{print $2; exit}' "$DIR_TO_CRATE/CHANGELOG.md") + + echo "Package version: $CRATE_VERSION"; + echo "Changelog version: $VERSION_IN_CHANGELOG"; + + test "$CRATE_VERSION" = "$VERSION_IN_CHANGELOG" + + - name: Ensure manifest and CHANGELOG are properly updated + if: > + github.event_name == 'pull_request' && + 
!startsWith(github.event.pull_request.title, 'chore') && + !startsWith(github.event.pull_request.title, 'refactor') && + !startsWith(github.event.pull_request.title, 'deps') && + !startsWith(github.event.pull_request.title, 'docs') && + !contains(github.event.pull_request.labels.*.name, 'internal-change') + run: | + git fetch origin master:master + git fetch origin ${{ github.event.pull_request.base.ref }}:${{ github.event.pull_request.base.ref }} + ./scripts/ensure-version-bump-and-changelog.sh + env: + HEAD_SHA: ${{ github.event.pull_request.head.sha }} + PR_BASE: ${{ github.event.pull_request.base.ref }} + + wasm_tests: + name: Run all WASM tests + runs-on: ubuntu-latest + env: + CHROMEDRIVER_VERSION: '114.0.5735.90' + steps: + - uses: actions/checkout@v4 + + - uses: dtolnay/rust-toolchain@stable + with: + target: wasm32-unknown-unknown + + - uses: taiki-e/cache-cargo-install-action@v1 + with: + tool: wasm-pack@0.12.0 + + - name: Install Google Chrome + run: | + curl -o /tmp/google-chrome-stable_amd64.deb https://dl.google.com/linux/chrome/deb/pool/main/g/google-chrome-stable/google-chrome-stable_${CHROMEDRIVER_VERSION}-1_amd64.deb + sudo dpkg -i /tmp/google-chrome-stable_amd64.deb + + - name: Install chromedriver + uses: nanasess/setup-chromedriver@v2 + with: + chromedriver-version: ${{ env.CHROMEDRIVER_VERSION }} + + - name: Run all tests + run: ./wasm-tests/run-all.sh cross: name: Compile on ${{ matrix.target }} @@ -102,15 +150,15 @@ jobs: os: windows-latest runs-on: ${{ matrix.os }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: dtolnay/rust-toolchain@stable with: target: ${{ matrix.target }} - - uses: r7kamura/rust-problem-matchers@d58b70c4a13c4866d96436315da451d8106f8f08 #v1.3.0 + - uses: r7kamura/rust-problem-matchers@2c2f1016021a7455a6b5b4bbae31145f3b3cd83a #v1.4.0 - - uses: Swatinem/rust-cache@6fd3edff6979b79f87531400ad694fb7f2c84b1f # v2.2.1 + - uses: Swatinem/rust-cache@3cf7f8cc28d1b4e7d01e3783be10a97d55d483c8 # v2.7.1 
with: key: ${{ matrix.target }} save-if: ${{ github.ref == 'refs/heads/master' }} @@ -121,7 +169,7 @@ jobs: name: Compile with MSRV runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Extract MSRV from workspace manifest shell: bash @@ -133,9 +181,9 @@ jobs: with: toolchain: ${{ env.MSRV }} - - uses: r7kamura/rust-problem-matchers@d58b70c4a13c4866d96436315da451d8106f8f08 #v1.3.0 + - uses: r7kamura/rust-problem-matchers@2c2f1016021a7455a6b5b4bbae31145f3b3cd83a #v1.4.0 - - uses: Swatinem/rust-cache@6fd3edff6979b79f87531400ad694fb7f2c84b1f # v2.2.1 + - uses: Swatinem/rust-cache@3cf7f8cc28d1b4e7d01e3783be10a97d55d483c8 # v2.7.1 with: save-if: ${{ github.ref == 'refs/heads/master' }} @@ -150,13 +198,13 @@ jobs: - features: "mdns tcp dns tokio" - features: "mdns tcp dns async-std" steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: dtolnay/rust-toolchain@stable - - uses: r7kamura/rust-problem-matchers@d58b70c4a13c4866d96436315da451d8106f8f08 #v1.3.0 + - uses: r7kamura/rust-problem-matchers@2c2f1016021a7455a6b5b4bbae31145f3b3cd83a #v1.4.0 - - uses: Swatinem/rust-cache@6fd3edff6979b79f87531400ad694fb7f2c84b1f # v2.2.1 + - uses: Swatinem/rust-cache@3cf7f8cc28d1b4e7d01e3783be10a97d55d483c8 # v2.7.1 with: key: ${{ matrix.features }} save-if: ${{ github.ref == 'refs/heads/master' }} @@ -167,13 +215,13 @@ jobs: name: Check rustdoc intra-doc links runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: dtolnay/rust-toolchain@stable - - uses: r7kamura/rust-problem-matchers@d58b70c4a13c4866d96436315da451d8106f8f08 #v1.3.0 + - uses: r7kamura/rust-problem-matchers@2c2f1016021a7455a6b5b4bbae31145f3b3cd83a #v1.4.0 - - uses: Swatinem/rust-cache@6fd3edff6979b79f87531400ad694fb7f2c84b1f # v2.2.1 + - uses: Swatinem/rust-cache@3cf7f8cc28d1b4e7d01e3783be10a97d55d483c8 # v2.7.1 with: save-if: ${{ github.ref == 'refs/heads/master' }} @@ -186,53 +234,56 @@ jobs: fail-fast: false 
matrix: rust-version: [ - 1.69.0, # current stable - beta + # 1.72.0, # current stable + # beta, + nightly-2023-09-10 ] steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: dtolnay/rust-toolchain@master with: toolchain: ${{ matrix.rust-version }} components: clippy - - uses: r7kamura/rust-problem-matchers@d58b70c4a13c4866d96436315da451d8106f8f08 #v1.3.0 + - uses: r7kamura/rust-problem-matchers@2c2f1016021a7455a6b5b4bbae31145f3b3cd83a #v1.4.0 - - uses: Swatinem/rust-cache@6fd3edff6979b79f87531400ad694fb7f2c84b1f # v2.2.1 + - uses: Swatinem/rust-cache@3cf7f8cc28d1b4e7d01e3783be10a97d55d483c8 # v2.7.1 with: save-if: ${{ github.ref == 'refs/heads/master' }} - - name: Run cargo clippy - run: cargo custom-clippy # cargo alias to allow reuse of config locally + - run: cargo clippy --all-targets --all-features ipfs-integration-test: name: IPFS Integration tests runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: dtolnay/rust-toolchain@stable - - uses: r7kamura/rust-problem-matchers@d58b70c4a13c4866d96436315da451d8106f8f08 #v1.3.0 + - uses: r7kamura/rust-problem-matchers@2c2f1016021a7455a6b5b4bbae31145f3b3cd83a #v1.4.0 - - uses: Swatinem/rust-cache@6fd3edff6979b79f87531400ad694fb7f2c84b1f # v2.2.1 + - uses: Swatinem/rust-cache@3cf7f8cc28d1b4e7d01e3783be10a97d55d483c8 # v2.7.1 with: save-if: ${{ github.ref == 'refs/heads/master' }} - - name: Run ipfs-kad example - run: cd ./examples/ipfs-kad/ && RUST_LOG=libp2p_swarm=debug,libp2p_kad=trace,libp2p_tcp=debug cargo run + - name: Run ipfs-kad example - get peers + run: cd ./examples/ipfs-kad/ && RUST_LOG=libp2p_swarm=debug,libp2p_kad=trace,libp2p_tcp=debug cargo run -- get-peers + + - name: Run ipfs-kad example - put PK record + run: cd ./examples/ipfs-kad/ && RUST_LOG=libp2p_swarm=debug,libp2p_kad=trace,libp2p_tcp=debug cargo run -- put-pk-record examples: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: 
dtolnay/rust-toolchain@stable - - uses: r7kamura/rust-problem-matchers@d58b70c4a13c4866d96436315da451d8106f8f08 #v1.3.0 + - uses: r7kamura/rust-problem-matchers@2c2f1016021a7455a6b5b4bbae31145f3b3cd83a #v1.4.0 - - uses: Swatinem/rust-cache@6fd3edff6979b79f87531400ad694fb7f2c84b1f # v2.2.1 + - uses: Swatinem/rust-cache@3cf7f8cc28d1b4e7d01e3783be10a97d55d483c8 # v2.7.1 with: shared-key: stable-cache save-if: false @@ -245,16 +296,42 @@ jobs: cargo check --manifest-path "$toml"; done + - uses: taiki-e/cache-cargo-install-action@v1 + with: + tool: wasm-pack@0.12.0 + + - name: Build webrtc-browser example + run: | + cd examples/browser-webrtc + wasm-pack build --target web --out-dir static + + semver: + runs-on: ubuntu-latest + env: + # Unset the global `RUSTFLAGS` env to allow warnings. + # cargo-semver-checks intentionally re-locks dependency versions + # before checking, and we shouldn't fail here if a dep has a warning. + # + # More context: + # https://github.com/libp2p/rust-libp2p/pull/4932#issuecomment-1829014527 + # https://github.com/obi1kenobi/cargo-semver-checks/issues/589 + RUSTFLAGS: '' + steps: + - uses: actions/checkout@v4 + - run: wget -q -O- https://github.com/obi1kenobi/cargo-semver-checks/releases/download/v0.25.0/cargo-semver-checks-x86_64-unknown-linux-gnu.tar.gz | tar -xz -C ~/.cargo/bin + shell: bash + - uses: obi1kenobi/cargo-semver-checks-action@e275dda72e250d4df5b564e969e1348d67fefa52 # v2 + rustfmt: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: dtolnay/rust-toolchain@stable with: components: rustfmt - - uses: r7kamura/rust-problem-matchers@d58b70c4a13c4866d96436315da451d8106f8f08 #v1.3.0 + - uses: r7kamura/rust-problem-matchers@2c2f1016021a7455a6b5b4bbae31145f3b3cd83a #v1.4.0 - name: Check formatting run: cargo fmt -- --check @@ -262,19 +339,17 @@ jobs: manifest_lint: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: 
dtolnay/rust-toolchain@stable - - uses: r7kamura/rust-problem-matchers@d58b70c4a13c4866d96436315da451d8106f8f08 #v1.3.0 + - uses: r7kamura/rust-problem-matchers@2c2f1016021a7455a6b5b4bbae31145f3b3cd83a #v1.4.0 - name: Ensure `full` feature contains all features run: | ALL_FEATURES=$(cargo metadata --format-version=1 --no-deps | jq -r '.packages[] | select(.name == "libp2p") | .features | keys | map(select(. != "full")) | sort | join(" ")') FULL_FEATURE=$(cargo metadata --format-version=1 --no-deps | jq -r '.packages[] | select(.name == "libp2p") | .features["full"] | sort | join(" ")') - test "$ALL_FEATURES = $FULL_FEATURE" - echo "$ALL_FEATURES"; echo "$FULL_FEATURE"; @@ -285,7 +360,9 @@ jobs: outputs: members: ${{ steps.cargo-metadata.outputs.members }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 + + - uses: dtolnay/rust-toolchain@stable - id: cargo-metadata run: | @@ -296,9 +373,9 @@ jobs: name: Check for changes in proto files runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - - uses: Swatinem/rust-cache@6fd3edff6979b79f87531400ad694fb7f2c84b1f # v2.2.1 + - uses: Swatinem/rust-cache@3cf7f8cc28d1b4e7d01e3783be10a97d55d483c8 # v2.7.1 - run: cargo install --version 0.10.0 pb-rs --locked @@ -323,6 +400,14 @@ jobs: name: Ensure that `Cargo.lock` is up-to-date runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 - - uses: Swatinem/rust-cache@6fd3edff6979b79f87531400ad694fb7f2c84b1f # v2.2.1 + - uses: actions/checkout@v4 + - uses: Swatinem/rust-cache@3cf7f8cc28d1b4e7d01e3783be10a97d55d483c8 # v2.7.1 - run: cargo metadata --locked --format-version=1 > /dev/null + + cargo-deny: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: EmbarkStudios/cargo-deny-action@v1 + with: + command: check advisories bans licenses sources diff --git a/.github/workflows/docker-image.yml b/.github/workflows/docker-image.yml new file mode 100644 index 00000000000..b9cd82897c2 --- /dev/null +++ 
b/.github/workflows/docker-image.yml @@ -0,0 +1,44 @@ +name: Publish docker images + +on: + push: + branches: + - 'master' + tags: + - 'libp2p-server-**' + pull_request: + +jobs: + server: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ github.token }} + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Docker meta + id: meta + uses: docker/metadata-action@v5 + with: + images: ghcr.io/${{ github.repository }}-server + + - name: Build and push + uses: docker/build-push-action@v5 + with: + context: . + file: ./misc/server/Dockerfile + push: ${{ ! github.event.pull_request.head.repo.fork && github.actor != 'dependabot[bot]' }} # Only push image if we have the required permissions, i.e. not running from a fork + cache-from: ${{ ! github.event.pull_request.head.repo.fork && github.actor != 'dependabot[bot]' && type=s3,mode=max,bucket=libp2p-by-tf-aws-bootstrap,region=us-east-1,prefix=buildCache,name=rust-libp2p-server }} + cache-to: ${{ ! 
github.event.pull_request.head.repo.fork && github.actor != 'dependabot[bot]' && type=s3,mode=max,bucket=libp2p-by-tf-aws-bootstrap,region=us-east-1,prefix=buildCache,name=rust-libp2p-server }} + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + env: + AWS_ACCESS_KEY_ID: ${{ vars.TEST_PLANS_BUILD_CACHE_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.TEST_PLANS_BUILD_CACHE_KEY }} diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index f2fc90c04f3..196b389fd2e 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -10,7 +10,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Install nightly toolchain run: rustup toolchain install nightly - name: Build Documentation @@ -24,7 +24,7 @@ jobs: echo "" > target/doc/index.html cp -r target/doc/* ./host-docs - name: Upload documentation - uses: actions/upload-pages-artifact@v1.0.8 + uses: actions/upload-pages-artifact@v2.0.0 with: path: "host-docs/" @@ -42,5 +42,5 @@ jobs: steps: - name: Deploy to GitHub Pages id: deployment - uses: actions/deploy-pages@v2 + uses: actions/deploy-pages@v4 diff --git a/.github/workflows/interop-test.yml b/.github/workflows/interop-test.yml index e6527709aa3..f3950897089 100644 --- a/.github/workflows/interop-test.yml +++ b/.github/workflows/interop-test.yml @@ -10,18 +10,47 @@ concurrency: cancel-in-progress: true jobs: - run-multidim-interop: - name: Run multidimensional interoperability tests - runs-on: ${{ fromJSON(github.repository == 'libp2p/rust-libp2p' && '["self-hosted", "linux", "x64", "xlarge"]' || '"ubuntu-latest"') }} + run-transport-interop: + name: Run transport interoperability tests + runs-on: ${{ fromJSON(github.repository == 'libp2p/rust-libp2p' && '["self-hosted", "linux", "x64", "4xlarge"]' || '"ubuntu-latest"') }} + strategy: + matrix: + flavour: [chromium, native] steps: - - uses: actions/checkout@v3 - - uses: 
docker/setup-buildx-action@v2 + - uses: actions/checkout@v4 + + - uses: docker/setup-buildx-action@v3 + + - name: Build ${{ matrix.flavour }} image + run: ./scripts/build-interop-image.sh + env: + AWS_ACCESS_KEY_ID: ${{ vars.TEST_PLANS_BUILD_CACHE_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.TEST_PLANS_BUILD_CACHE_KEY }} + FLAVOUR: ${{ matrix.flavour }} + + - name: Run ${{ matrix.flavour }} tests + uses: libp2p/test-plans/.github/actions/run-transport-interop-test@master + with: + test-filter: ${{ matrix.flavour }}-rust-libp2p-head + extra-versions: ${{ github.workspace }}/interop-tests/${{ matrix.flavour }}-ping-version.json + s3-cache-bucket: libp2p-by-tf-aws-bootstrap + s3-access-key-id: ${{ vars.TEST_PLANS_BUILD_CACHE_KEY_ID }} + s3-secret-access-key: ${{ secrets.TEST_PLANS_BUILD_CACHE_KEY }} + worker-count: 16 + run-holepunching-interop: + name: Run hole-punch interoperability tests + runs-on: ${{ fromJSON(github.repository == 'libp2p/rust-libp2p' && '["self-hosted", "linux", "x64", "4xlarge"]' || '"ubuntu-latest"') }} + steps: + - uses: actions/checkout@v4 + - uses: docker/setup-buildx-action@v3 - name: Build image - run: docker buildx build --load -t rust-libp2p-head . -f interop-tests/Dockerfile - - uses: libp2p/test-plans/.github/actions/run-interop-ping-test@master + run: docker buildx build --load -t rust-libp2p-head . 
-f hole-punching-tests/Dockerfile + - name: Run tests + uses: libp2p/test-plans/.github/actions/run-interop-hole-punch-test@master with: test-filter: rust-libp2p-head - extra-versions: ${{ github.workspace }}/interop-tests/ping-version.json + extra-versions: ${{ github.workspace }}/hole-punching-tests/version.json s3-cache-bucket: libp2p-by-tf-aws-bootstrap s3-access-key-id: ${{ vars.TEST_PLANS_BUILD_CACHE_KEY_ID }} s3-secret-access-key: ${{ secrets.TEST_PLANS_BUILD_CACHE_KEY }} + worker-count: 16 diff --git a/.github/workflows/semantic-pull-request.yml b/.github/workflows/semantic-pull-request.yml index 4f9798c2d5a..bd00f090c1a 100644 --- a/.github/workflows/semantic-pull-request.yml +++ b/.github/workflows/semantic-pull-request.yml @@ -1,4 +1,4 @@ -name: "Semantic PR" +name: Semantic PR on: pull_request_target: @@ -9,32 +9,4 @@ on: jobs: main: - name: Validate PR title - runs-on: ubuntu-latest - steps: - - uses: amannn/action-semantic-pull-request@b6bca70dcd3e56e896605356ce09b76f7e1e0d39 # v5.1.0 - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - # Configure which types are allowed (newline delimited). 
- types: | - feat - fix - chore - docs - deps - test - refactor - ci - requireScope: false - - - name: Check PR title length - env: - TITLE: ${{ github.event.pull_request.title }} - run: | - title_length=${#TITLE} - if [ $title_length -gt 72 ] - then - echo "PR title is too long (greater than 72 characters)" - exit 1 - fi + uses: pl-strflt/.github/.github/workflows/reusable-semantic-pull-request.yml@v0.3 diff --git a/CHANGELOG.md b/CHANGELOG.md index 1cf9f7c9559..3c3466708c9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -20,10 +20,10 @@ - [`libp2p-relay` CHANGELOG](protocols/relay/CHANGELOG.md) - [`libp2p-request-response` CHANGELOG](protocols/request-response/CHANGELOG.md) - [`libp2p-rendezvous` CHANGELOG](protocols/rendezvous/CHANGELOG.md) +- [`libp2p-upnp` CHANGELOG](protocols/upnp/CHANGELOG.md) ## Transport Protocols & Upgrades -- [`libp2p-deflate` CHANGELOG](transports/deflate/CHANGELOG.md) - [`libp2p-dns` CHANGELOG](transports/dns/CHANGELOG.md) - [`libp2p-noise` CHANGELOG](transports/noise/CHANGELOG.md) - [`libp2p-perf` CHANGELOG](transports/perf/CHANGELOG.md) @@ -31,10 +31,11 @@ - [`libp2p-pnet` CHANGELOG](transports/pnet/CHANGELOG.md) - [`libp2p-quic` CHANGELOG](transports/quic/CHANGELOG.md) - [`libp2p-tcp` CHANGELOG](transports/tcp/CHANGELOG.md) +- [`libp2p-tls` CHANGELOG](transports/tls/CHANGELOG.md) - [`libp2p-uds` CHANGELOG](transports/uds/CHANGELOG.md) - [`libp2p-wasm-ext` CHANGELOG](transports/wasm-ext/CHANGELOG.md) - [`libp2p-websocket` CHANGELOG](transports/websocket/CHANGELOG.md) -- [`libp2p-tls` CHANGELOG](transports/tls/CHANGELOG.md) +- [`libp2p-websocket-websys` CHANGELOG](transports/websocket-websys/CHANGELOG.md) ## Multiplexers diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 00000000000..f69e44476af --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,40 @@ +# Contributing Guidelines + +[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](http://ipn.io) 
+[![](https://img.shields.io/badge/project-libp2p-blue.svg?style=flat-square)](https://libp2p.io/) + +Welcome to the rust-libp2p contribution guide! We appreciate your interest in improving our library. + +## Looking for ways to contribute? + +There are several ways you can contribute to rust-libp2p: +- Start contributing immediately via the opened [help wanted](https://github.com/libp2p/rust-libp2p/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22) or [difficulty:easy](https://github.com/libp2p/rust-libp2p/issues?q=is%3Aissue+is%3Aopen+label%3Adifficulty%3Aeasy) issues on GitHub. + These issues are suitable for newcomers and provide an excellent starting point. +- Reporting issues, bugs, mistakes, or inconsistencies. + As many open source projects, we are short-staffed, we thus kindly ask you to be open to contribute a fix for discovered issues. + +### We squash-merge pull Requests + +We always squash merge submitted pull requests. +This means that we discourage force pushes, in order to make the diff between pushes easier for us to review. +Squash merging allows us to maintain a clean and organized commit history. + +The PR title, which will become the commit message after the squashing process, should follow [conventional commit spec](https://www.conventionalcommits.org/en/v1.0.0/). + +### Write changelog entries for user-facing changes + +When making user-facing changes, it is important to include corresponding entries in the changelog, providing a comprehensive summary for the users. +For detailed instructions on how to write changelog entries, please refer to the documentation in [`docs/release.md`](https://github.com/libp2p/rust-libp2p/blob/master/docs/release.md). + + +### Merging of PRs is automated + +To streamline our workflow, we utilize Mergify and the "send-it" label. +Mergify automates merging actions and helps us manage pull requests more efficiently. +The "send-it" label indicates that a pull request is ready to be merged. 
+Please refrain from making further commits after the "send-it" label has been applied otherwise your PR will be dequeued from merging automatically. + +### Treat CI as a self-service platform + +We have a lot of automated CI checks for common errors. +Please treat our CI as a self-service platform and try to fix any issues before requesting a review. diff --git a/Cargo.lock b/Cargo.lock index ed3da67f673..b22fab2d0d3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3,101 +3,55 @@ version = 3 [[package]] -name = "adler" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" - -[[package]] -name = "aead" -version = "0.3.2" +name = "addr2line" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fc95d1bdb8e6666b2b217308eeeb09f2d6728d104be3e31916cc74d15420331" +checksum = "f4fa78e18c64fce05e902adecd7a5eed15a5e0a3439f7b0e169f0252214865e3" dependencies = [ - "generic-array", + "gimli", ] [[package]] -name = "aead" -version = "0.4.3" +name = "adler" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b613b8e1e3cf911a086f53f03bf286f52fd7a7258e4fa606f0ef220d39d8877" -dependencies = [ - "generic-array", - "rand_core 0.6.4", -] +checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" [[package]] -name = "aes" -version = "0.6.0" +name = "aead" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "884391ef1066acaa41e766ba8f596341b96e93ce34f9a43e7d24bf0a0eaf0561" +checksum = "d122413f284cf2d62fb1b7db97e02edb8cda96d769b16e443a4f6195e35662b0" dependencies = [ - "aes-soft", - "aesni", - "cipher 0.2.5", + "crypto-common", + "generic-array", ] [[package]] name = "aes" -version = "0.7.5" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9e8b47f52ea9bae42228d07ec09eb676433d7c4ed1ebdf0f1d1c29ed446f1ab8" +checksum = "ac1f845298e95f983ff1944b728ae08b8cebab80d684f0a832ed0fc74dfa27e2" dependencies = [ "cfg-if", - "cipher 0.3.0", + "cipher", "cpufeatures", - "opaque-debug", -] - -[[package]] -name = "aes-gcm" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5278b5fabbb9bd46e24aa69b2fdea62c99088e0a950a9be40e3e0101298f88da" -dependencies = [ - "aead 0.3.2", - "aes 0.6.0", - "cipher 0.2.5", - "ctr 0.6.0", - "ghash 0.3.1", - "subtle", ] [[package]] name = "aes-gcm" -version = "0.9.4" +version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df5f85a83a7d8b0442b6aa7b504b8212c1733da07b98aae43d4bc21b2cb3cdf6" +checksum = "831010a0f742e1209b3bcea8fab6a8e149051ba6099432c8cb2cc117dec3ead1" dependencies = [ - "aead 0.4.3", - "aes 0.7.5", - "cipher 0.3.0", - "ctr 0.8.0", - "ghash 0.4.4", + "aead", + "aes", + "cipher", + "ctr", + "ghash", "subtle", ] -[[package]] -name = "aes-soft" -version = "0.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be14c7498ea50828a38d0e24a765ed2effe92a705885b57d029cd67d45744072" -dependencies = [ - "cipher 0.2.5", - "opaque-debug", -] - -[[package]] -name = "aesni" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea2e11f5e94c2f7d386164cc2aa1f97823fed6f259e486940a71c174dd01b0ce" -dependencies = [ - "cipher 0.2.5", - "opaque-debug", -] - [[package]] name = "ahash" version = "0.8.3" @@ -111,13 +65,28 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "1.0.1" +version = "0.7.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67fc08ce920c31afb70f013dcce1bfc3a3195de6a228474e45e1f145b36f8d04" +checksum = "cc936419f96fa211c1b9166887b38e5e40b19958e5b895be7c1f93adec7071ac" dependencies = [ "memchr", ] +[[package]] +name = "aho-corasick" +version = "1.0.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "43f6cb1bf222025340178f382c426f13757b2960e89779dfcb319c32542a5a41" +dependencies = [ + "memchr", +] + +[[package]] +name = "allocator-api2" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5" + [[package]] name = "anes" version = "0.1.6" @@ -126,30 +95,29 @@ checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" [[package]] name = "anstream" -version = "0.3.0" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e579a7752471abc2a8268df8b20005e3eadd975f585398f17efcfd8d4927371" +checksum = "2ab91ebe16eb252986481c5b62f6098f3b698a45e34b5b98200cf20dd2484a44" dependencies = [ "anstyle", "anstyle-parse", "anstyle-query", "anstyle-wincon", "colorchoice", - "is-terminal", "utf8parse", ] [[package]] name = "anstyle" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41ed9a86bf92ae6580e0a31281f65a1b1d867c0cc68d5346e2ae128dddfa6a7d" +checksum = "3a30da5c5f2d5e72842e00bcb57657162cdabef0931f40e2deb9b4140440cecd" [[package]] name = "anstyle-parse" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e765fd216e48e067936442276d1d57399e37bce53c264d6fefbe298080cb57ee" +checksum = "938874ff5980b03a87c5524b3ae5b59cf99b1d6bc836848df7bc5ada9643c333" dependencies = [ "utf8parse", ] @@ -165,9 +133,9 @@ dependencies = [ [[package]] name = "anstyle-wincon" -version = "1.0.0" +version = "3.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bcd8291a340dd8ac70e18878bc4501dd7b4ff970cfa21c207d36ece51ea88fd" +checksum = "f0699d10d2f4d628a98ee7b57b289abbc98ff3bad977cb3152709d4bf2330628" dependencies = [ "anstyle", "windows-sys 0.48.0", @@ -175,15 +143,15 @@ dependencies = [ [[package]] name = 
"anyhow" -version = "1.0.71" +version = "1.0.76" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c7d0618f0e0b7e8ff11427422b64564d5fb0be1940354bfe2e0529b18a9d9b8" +checksum = "59d2a3357dde987206219e78ecfbbb6e8dad06cbb65292758d3270e6254f7355" [[package]] name = "arbitrary" -version = "1.2.3" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e90af4de65aa7b293ef2d09daff88501eb254f58edde2e1ac02c82d873eadad" +checksum = "e2d098ff73c1ca148721f37baad5ea6a465a13f9573aba8641fbbbae8164a54e" [[package]] name = "arc-swap" @@ -193,39 +161,23 @@ checksum = "bddcadddf5e9015d310179a59bb28c4d4b9920ad0f11e8e14dbadf654890c9a6" [[package]] name = "arrayref" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544" +checksum = "6b4930d2cb77ce62f89ee5d5289b4ac049559b1c45539271f5ed4fdc7db34545" [[package]] name = "arrayvec" -version = "0.7.2" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6" +checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" [[package]] name = "asn1-rs" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30ff05a702273012438132f449575dbc804e27b2f3cbe3069aa237d26c98fa33" -dependencies = [ - "asn1-rs-derive 0.1.0", - "asn1-rs-impl", - "displaydoc", - "nom", - "num-traits", - "rusticata-macros", - "thiserror", - "time", -] - -[[package]] -name = "asn1-rs" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf6690c370453db30743b373a60ba498fc0d6d83b11f4abfd87a84a075db5dd4" +checksum = "7f6fd5ddaf0351dff5b8da21b2fb4ff8e08ddd02857f0bf69c47639106c0fff0" dependencies = [ - "asn1-rs-derive 0.4.0", + "asn1-rs-derive", "asn1-rs-impl", 
"displaydoc", "nom", @@ -235,18 +187,6 @@ dependencies = [ "time", ] -[[package]] -name = "asn1-rs-derive" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db8b7511298d5b7784b40b092d9e9dcd3a627a5707e4b5e507931ab0d44eeebf" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", - "synstructure", -] - [[package]] name = "asn1-rs-derive" version = "0.4.0" @@ -288,26 +228,26 @@ dependencies = [ [[package]] name = "async-channel" -version = "1.8.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf46fee83e5ccffc220104713af3292ff9bc7c64c7de289f66dae8e38d826833" +checksum = "81953c529336010edd6d8e358f886d9581267795c61b19475b71314bffa46d35" dependencies = [ "concurrent-queue", - "event-listener", + "event-listener 2.5.3", "futures-core", ] [[package]] name = "async-executor" -version = "1.5.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17adb73da160dfb475c183343c8cccd80721ea5a605d3eb57125f0a7b7a92d0b" +checksum = "6fa3dc5f2a8564f07759c008b9109dc0d39de92a88d5588b8a5036d286383afb" dependencies = [ - "async-lock", + "async-lock 2.7.0", "async-task", "concurrent-queue", - "fastrand", - "futures-lite", + "fastrand 1.9.0", + "futures-lite 1.13.0", "slab", ] @@ -317,10 +257,10 @@ version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "279cf904654eeebfa37ac9bb1598880884924aab82e290aa65c9e77a0e142e06" dependencies = [ - "async-lock", + "async-lock 2.7.0", "autocfg", "blocking", - "futures-lite", + "futures-lite 1.13.0", ] [[package]] @@ -331,10 +271,10 @@ checksum = "f1b6f5d7df27bd294849f8eec66ecfc63d11814df7a4f5d74168a2394467b776" dependencies = [ "async-channel", "async-executor", - "async-io", - "async-lock", + "async-io 1.13.0", + "async-lock 2.7.0", "blocking", - "futures-lite", + "futures-lite 1.13.0", "once_cell", ] @@ -344,28 +284,57 @@ version = "1.13.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "0fc5b45d93ef0529756f812ca52e44c221b35341892d3dcc34132ac02f3dd2af" dependencies = [ - "async-lock", + "async-lock 2.7.0", "autocfg", "cfg-if", "concurrent-queue", - "futures-lite", + "futures-lite 1.13.0", "log", "parking", - "polling", - "rustix", + "polling 2.8.0", + "rustix 0.37.25", "slab", "socket2 0.4.9", "waker-fn", ] +[[package]] +name = "async-io" +version = "2.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6afaa937395a620e33dc6a742c593c01aced20aa376ffb0f628121198578ccc7" +dependencies = [ + "async-lock 3.1.0", + "cfg-if", + "concurrent-queue", + "futures-io", + "futures-lite 2.0.1", + "parking", + "polling 3.3.0", + "rustix 0.38.21", + "slab", + "tracing", + "windows-sys 0.52.0", +] + [[package]] name = "async-lock" -version = "2.6.0" +version = "2.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa24f727524730b077666307f2734b4a1a1c57acb79193127dcc8914d5242dd7" +dependencies = [ + "event-listener 2.5.3", +] + +[[package]] +name = "async-lock" +version = "3.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8101efe8695a6c17e02911402145357e718ac92d3ff88ae8419e84b1707b685" +checksum = "deb2ab2aa8a746e221ab826c73f48bc6ba41be6763f0855cb249eb6d154cf1d7" dependencies = [ - "event-listener", - "futures-lite", + "event-listener 3.1.0", + "event-listener-strategy", + "pin-project-lite", ] [[package]] @@ -374,28 +343,28 @@ version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4051e67316bc7eff608fe723df5d32ed639946adcd69e07df41fd42a7b411f1f" dependencies = [ - "async-io", + "async-io 1.13.0", "autocfg", "blocking", - "futures-lite", + "futures-lite 1.13.0", ] [[package]] name = "async-process" -version = "1.6.0" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"6381ead98388605d0d9ff86371043b5aa922a3905824244de40dc263a14fcba4" +checksum = "7a9d28b1d97e08915212e2e45310d47854eafa69600756fc735fb788f75199c9" dependencies = [ - "async-io", - "async-lock", + "async-io 1.13.0", + "async-lock 2.7.0", "autocfg", "blocking", "cfg-if", - "event-listener", - "futures-lite", - "libc", + "event-listener 2.5.3", + "futures-lite 1.13.0", + "rustix 0.37.25", "signal-hook", - "windows-sys 0.42.0", + "windows-sys 0.48.0", ] [[package]] @@ -407,20 +376,20 @@ dependencies = [ "async-attributes", "async-channel", "async-global-executor", - "async-io", - "async-lock", + "async-io 1.13.0", + "async-lock 2.7.0", "async-process", "crossbeam-utils", "futures-channel", "futures-core", "futures-io", - "futures-lite", + "futures-lite 1.13.0", "gloo-timers", "kv-log-macro", "log", "memchr", "once_cell", - "pin-project-lite 0.2.9", + "pin-project-lite", "pin-utils", "slab", "wasm-bindgen-futures", @@ -428,64 +397,64 @@ dependencies = [ [[package]] name = "async-std-resolver" -version = "0.22.0" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ba50e24d9ee0a8950d3d03fc6d0dd10aa14b5de3b101949b4e160f7fee7c723" +checksum = "3c0ed2b6671c13d2c28756c5a64e04759c1e0b5d3d7ac031f521c3561e21fbcb" dependencies = [ "async-std", "async-trait", "futures-io", "futures-util", + "hickory-resolver", "pin-utils", - "socket2 0.4.9", - "trust-dns-resolver", + "socket2 0.5.5", ] [[package]] name = "async-task" -version = "4.3.0" +version = "4.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a40729d2133846d9ed0ea60a8b9541bccddab49cd30f0715a1da672fe9a2524" +checksum = "ecc7ab41815b3c653ccd2978ec3255c81349336702dfdf62ee6f7069b12a3aae" [[package]] name = "async-trait" -version = "0.1.68" +version = "0.1.75" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9ccdd8f2a161be9bd5c023df56f1b2a0bd1d83872ae53b71a84a12c9bf6e842" +checksum = 
"fdf6721fb0140e4f897002dd086c06f6c27775df19cfe1fccb21181a48fd2c98" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.43", ] [[package]] name = "asynchronous-codec" -version = "0.6.1" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06a0daa378f5fd10634e44b0a29b2a87b890657658e072a30d6f26e57ddee182" +checksum = "a860072022177f903e59730004fb5dc13db9275b79bb2aef7ba8ce831956c233" dependencies = [ "bytes", "futures-sink", "futures-util", "memchr", - "pin-project-lite 0.2.9", + "pin-project-lite", ] [[package]] name = "atomic-waker" -version = "1.1.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "debc29dde2e69f9e47506b525f639ed42300fc014a3e007832592448fa8e4599" +checksum = "1181e1e0d1fce796a03db1ae795d67167da795f9cf4a39c37589e85ef57f26d3" [[package]] -name = "atty" -version = "0.2.14" +name = "attohttpc" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" +checksum = "8d9a9bf8b79a749ee0b911b91b671cc2b6c670bdbc7e3dfd537576ddc94bb2a2" dependencies = [ - "hermit-abi 0.1.19", - "libc", - "winapi", + "http 0.2.9", + "log", + "url", ] [[package]] @@ -498,24 +467,132 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" name = "autonat-example" version = "0.1.0" dependencies = [ - "async-std", - "clap 4.2.7", - "env_logger 0.10.0", + "clap", "futures", "libp2p", + "tokio", + "tracing", + "tracing-subscriber", ] [[package]] -name = "base-x" -version = "0.2.11" +name = "axum" +version = "0.6.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4cbbc9d0964165b47557570cce6c952866c2678457aca742aafc9fb771d30270" +checksum = "3b829e4e32b91e643de6eafe82b1d90675f5874230191a4ffbc1b336dec4d6bf" +dependencies = [ + "async-trait", + "axum-core 0.3.4", + "bitflags 1.3.2", + "bytes", + "futures-util", + "http 
0.2.9", + "http-body 0.4.5", + "hyper 0.14.27", + "itoa", + "matchit", + "memchr", + "mime", + "percent-encoding", + "pin-project-lite", + "rustversion", + "serde", + "sync_wrapper", + "tower", + "tower-layer", + "tower-service", +] [[package]] -name = "base16ct" -version = "0.1.1" +name = "axum" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "810a80b128d70e6ed2bdf3fe8ed72c0ae56f5f5948d01c2753282dd92a84fce8" +dependencies = [ + "async-trait", + "axum-core 0.4.0", + "bytes", + "futures-util", + "http 1.0.0", + "http-body 1.0.0", + "http-body-util", + "hyper 1.0.1", + "hyper-util", + "itoa", + "matchit", + "memchr", + "mime", + "percent-encoding", + "pin-project-lite", + "rustversion", + "serde", + "serde_json", + "serde_path_to_error", + "serde_urlencoded", + "sync_wrapper", + "tokio", + "tower", + "tower-layer", + "tower-service", +] + +[[package]] +name = "axum-core" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "759fa577a247914fd3f7f76d62972792636412fbfd634cd452f6a385a74d2d2c" +dependencies = [ + "async-trait", + "bytes", + "futures-util", + "http 0.2.9", + "http-body 0.4.5", + "mime", + "rustversion", + "tower-layer", + "tower-service", +] + +[[package]] +name = "axum-core" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "de0ddc355eab88f4955090a823715df47acf0b7660aab7a69ad5ce6301ee3b73" +dependencies = [ + "async-trait", + "bytes", + "futures-util", + "http 1.0.0", + "http-body 1.0.0", + "http-body-util", + "mime", + "pin-project-lite", + "rustversion", + "sync_wrapper", + "tower-layer", + "tower-service", +] + +[[package]] +name = "backtrace" +version = "0.3.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "349a06037c7bf932dd7e7d1f653678b2038b9ad46a74102f1fc7bd7872678cce" +checksum = "4319208da049c43661739c5fade2ba182f09d1dc2299b32298d3a31692b17e12" +dependencies = [ + "addr2line", 
+ "cc", + "cfg-if", + "libc", + "miniz_oxide", + "object", + "rustc-demangle", +] + +[[package]] +name = "base-x" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cbbc9d0964165b47557570cce6c952866c2678457aca742aafc9fb771d30270" [[package]] name = "base16ct" @@ -531,15 +608,24 @@ checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "base64" -version = "0.21.0" +version = "0.21.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4a4ddaa51a5bc52a6948f74c06d20aaaddb71924eab79b8c97a8c556e942d6a" +checksum = "35636a1494ede3b646cc98f74f8e62c773a38a659ebc777a2cf26b9b74171df9" [[package]] name = "base64ct" -version = "1.5.3" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b645a089122eccb6111b4f81cbc1a49f5900ac4666bb93ac027feaecf15607bf" +checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" + +[[package]] +name = "basic-toml" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7bfc506e7a2370ec239e1d072507b2a80c833083699d3c6fa176fbb4de8448c6" +dependencies = [ + "serde", +] [[package]] name = "bimap" @@ -562,13 +648,19 @@ version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" +[[package]] +name = "bitflags" +version = "2.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07" + [[package]] name = "blake2" version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "46502ad458c9a52b69d4d4d32775c788b7a1b85e8bc9d482d92250fc0e3f8efe" dependencies = [ - "digest 0.10.6", + "digest 0.10.7", ] [[package]] @@ -582,96 +674,145 @@ dependencies = [ [[package]] name = "block-buffer" -version = "0.10.3" +version = 
"0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69cce20737498f97b993470a6e536b8523f0af7892a4f928cceb1ac5e52ebe7e" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" dependencies = [ "generic-array", ] [[package]] -name = "block-modes" -version = "0.7.0" +name = "block-padding" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57a0e8073e8baa88212fb5823574c02ebccb395136ba9a164ab89379ec6072f0" +checksum = "a8894febbff9f758034a5b8e12d87918f56dfc64a8e1fe757d65e29041538d93" dependencies = [ - "block-padding", - "cipher 0.2.5", + "generic-array", ] -[[package]] -name = "block-padding" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae" - [[package]] name = "blocking" -version = "1.3.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c67b173a56acffd6d2326fb7ab938ba0b00a71480e14902b2591c87bc5741e8" +checksum = "77231a1c8f801696fc0123ec6150ce92cffb8e164a02afb9c8ddee0e9b65ad65" dependencies = [ "async-channel", - "async-lock", + "async-lock 2.7.0", "async-task", "atomic-waker", - "fastrand", - "futures-lite", + "fastrand 1.9.0", + "futures-lite 1.13.0", + "log", +] + +[[package]] +name = "browser-webrtc-example" +version = "0.1.0" +dependencies = [ + "anyhow", + "axum 0.7.1", + "futures", + "js-sys", + "libp2p", + "libp2p-webrtc", + "libp2p-webrtc-websys", + "mime_guess", + "rand 0.8.5", + "rust-embed", + "tokio", + "tokio-util", + "tower", + "tower-http", + "tracing", + "tracing-subscriber", + "tracing-wasm", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", ] [[package]] name = "bs58" -version = "0.4.0" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f5353f36341f7451062466f0b755b96ac3a9547e4d7f6b70d603fc721a7d7896" +dependencies = [ + "tinyvec", 
+] + +[[package]] +name = "bstr" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "771fe0050b883fcc3ea2359b1a96bcfbc090b7116eae7c3c512c7a083fdf23d3" +checksum = "6798148dccfbff0fae41c7574d2fa8f1ef3492fba0face179de5d8d447d67b05" +dependencies = [ + "memchr", + "serde", +] [[package]] name = "bumpalo" -version = "3.12.0" +version = "3.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d261e256854913907f67ed06efbc3338dfe6179796deefc1ff763fc1aee5535" +checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1" [[package]] name = "byteorder" -version = "1.4.3" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89b2fd2a0dcf38d7971e2194b6b6eebab45ae01067456a7fd93d5547a61b70be" +checksum = "a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223" dependencies = [ "serde", ] -[[package]] -name = "cache-padded" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1db59621ec70f09c5e9b597b220c7a2b43611f4710dc03ceb8748637775692c" - [[package]] name = "cast" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" +[[package]] +name = "cbc" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26b52a9543ae338f279b96b0b9fed9c8093744685043739079ce85cd58f289a6" +dependencies = [ + "cipher", +] + +[[package]] +name = "cbor4ii" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"59b4c883b9cc4757b061600d39001d4d0232bece4a3174696cf8f58a14db107d" +dependencies = [ + "serde", +] + [[package]] name = "cc" -version = "1.0.79" +version = "1.0.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f" +checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" +dependencies = [ + "libc", +] [[package]] name = "ccm" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aca1a8fbc20b50ac9673ff014abfb2b5f4085ee1a850d408f14a159c5853ac7" +checksum = "9ae3c82e4355234767756212c570e29833699ab63e6ffd161887314cc5b43847" dependencies = [ - "aead 0.3.2", - "cipher 0.2.5", + "aead", + "cipher", + "ctr", "subtle", ] @@ -683,25 +824,24 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chacha20" -version = "0.8.2" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c80e5460aa66fe3b91d40bcbdab953a597b60053e34d684ac6903f863b680a6" +checksum = "c3613f74bd2eac03dad61bd53dbe620703d4371614fe0bc3b9f04dd36fe4e818" dependencies = [ "cfg-if", - "cipher 0.3.0", + "cipher", "cpufeatures", - "zeroize", ] [[package]] name = "chacha20poly1305" -version = "0.9.1" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a18446b09be63d457bbec447509e85f662f32952b035ce892290396bc0b0cff5" +checksum = "10cd79432192d1c0f4e1a0fef9527696cc039165d729fb41b3f4f4f354c2dc35" dependencies = [ - "aead 0.4.3", + "aead", "chacha20", - "cipher 0.3.0", + "cipher", "poly1305", "zeroize", ] @@ -710,19 +850,19 @@ dependencies = [ name = "chat-example" version = "0.1.0" dependencies = [ - "async-std", "async-trait", - "env_logger 0.10.0", "futures", "libp2p", - "libp2p-quic", + "tokio", + "tracing", + "tracing-subscriber", ] [[package]] name = "ciborium" -version = "0.2.0" +version = "0.2.1" source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0c137568cc60b904a7724001b35ce2630fd00d5d84805fbb608ab89509d788f" +checksum = "effd91f6c78e5a4ace8a5d3c0b6bfaec9e2baaef55f3efc00e45fb2e477ee926" dependencies = [ "ciborium-io", "ciborium-ll", @@ -731,15 +871,15 @@ dependencies = [ [[package]] name = "ciborium-io" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "346de753af073cc87b52b2083a506b38ac176a44cfb05497b622e27be899b369" +checksum = "cdf919175532b369853f5d5e20b26b43112613fd6fe7aee757e35f7a44642656" [[package]] name = "ciborium-ll" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "213030a2b5a4e0c0892b6652260cf6ccac84827b83a85a534e178e3906c4cf1b" +checksum = "defaa24ecc093c77630e6c15e17c51f5e187bf35ee514f4e2d67baaa96dae22b" dependencies = [ "ciborium-io", "half", @@ -747,94 +887,54 @@ dependencies = [ [[package]] name = "cipher" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12f8e7987cbd042a63249497f41aed09f8e65add917ea6566effbc56578d6801" -dependencies = [ - "generic-array", -] - -[[package]] -name = "cipher" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ee52072ec15386f770805afd189a01c8841be8696bed250fa2f13c4c0d6dfb7" -dependencies = [ - "generic-array", -] - -[[package]] -name = "cipher" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1873270f8f7942c191139cb8a40fd228da6c3fd2fc376d7e92d47aa14aeb59e" +checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" dependencies = [ "crypto-common", "inout", + "zeroize", ] [[package]] name = "clap" -version = "3.2.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71655c45cb9845d3270c9d6df84ebe72b4dad3c2ba3f7023ad47c144e4e473a5" -dependencies = [ - 
"bitflags", - "clap_lex 0.2.4", - "indexmap", - "textwrap", -] - -[[package]] -name = "clap" -version = "4.2.7" +version = "4.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34d21f9bf1b425d2968943631ec91202fe5e837264063503708b83013f8fc938" +checksum = "bfaff671f6b22ca62406885ece523383b9b64022e341e53e009a62ebc47a45f2" dependencies = [ "clap_builder", "clap_derive", - "once_cell", ] [[package]] name = "clap_builder" -version = "4.2.7" +version = "4.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "914c8c79fb560f238ef6429439a30023c862f7a28e688c58f7203f12b29970bd" +checksum = "a216b506622bb1d316cd51328dce24e07bdff4a6128a47c7e7fad11878d5adbb" dependencies = [ "anstream", "anstyle", - "bitflags", - "clap_lex 0.4.1", - "strsim 0.10.0", + "clap_lex", + "strsim", ] [[package]] name = "clap_derive" -version = "4.2.0" +version = "4.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f9644cd56d6b87dbe899ef8b053e331c0637664e9e21a33dfcdc36093f5c5c4" +checksum = "cf9804afaaf59a91e75b022a30fb7229a7901f60c755489cc61c9b423b836442" dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.15", -] - -[[package]] -name = "clap_lex" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2850f2f5a82cbf437dd5af4d49848fbdfc27c157c3d010345776f952765261c5" -dependencies = [ - "os_str_bytes", + "syn 2.0.43", ] [[package]] name = "clap_lex" -version = "0.4.1" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a2dd5a6fe8c6e3502f568a6353e5273bbb15193ad9a89e457b9970798efbea1" +checksum = "702fc72eb24e5a1e48ce58027a675bc24edd52096d5397d4aea7c6dd9eca0bd1" [[package]] name = "colorchoice" @@ -851,25 +951,46 @@ dependencies = [ "bytes", "futures-core", "memchr", - "pin-project-lite 0.2.9", + "pin-project-lite", "tokio", "tokio-util", ] [[package]] name = "concurrent-queue" -version = "2.1.0" +version = "2.2.0" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c278839b831783b70278b14df4d45e1beb1aad306c07bb796637de9a0e323e8e" +checksum = "62ec6771ecfa0762d24683ee5a32ad78487a3d3afdc0fb8cae19d2c5deb50b7c" dependencies = [ "crossbeam-utils", ] +[[package]] +name = "console_error_panic_hook" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a06aeb73f470f66dcdbf7223caeebb85984942f22f1adb2a088cf9668146bbbc" +dependencies = [ + "cfg-if", + "wasm-bindgen", +] + [[package]] name = "const-oid" -version = "0.9.2" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "795bc6e66a8e340f075fcf6227e417a2dc976b92b91f3cdc778bb858778b6747" + +[[package]] +name = "cookie" +version = "0.16.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "520fbf3c07483f94e3e3ca9d0cfd913d7718ef2483d2cfd91c0d9e91474ab913" +checksum = "e859cd57d0710d9e06c381b550c06e76992472a8c6d527aecd2fc673dcc231fb" +dependencies = [ + "percent-encoding", + "time", + "version_check", +] [[package]] name = "core-foundation" @@ -883,9 +1004,9 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc" +checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" [[package]] name = "core2" @@ -898,19 +1019,13 @@ dependencies = [ [[package]] name = "cpufeatures" -version = "0.2.5" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28d997bd5e24a5928dd43e46dc529867e207907fe0b239c3477d924f7f2ca320" +checksum = "a17b76ff3a4162b0b27f354a0c87015ddad39d35f9c0c36607a3bdd175dde1f1" dependencies = [ "libc", ] -[[package]] -name = "cpuid-bool" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"dcb25d077389e53838a8158c8e99174c5a9d902dee4904320db714f3c653ffba" - [[package]] name = "crc" version = "3.0.1" @@ -926,30 +1041,21 @@ version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9cace84e55f07e7301bae1c519df89cdad8cc3cd868413d3fdbdeca9ff3db484" -[[package]] -name = "crc32fast" -version = "1.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d" -dependencies = [ - "cfg-if", -] - [[package]] name = "criterion" -version = "0.4.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7c76e09c1aae2bc52b3d2f29e13c6572553b30c4aa1b8a49fd70de6412654cb" +checksum = "f2b12d017a929603d80db1831cd3a24082f8137ce19c69e6447f54f5fc8d692f" dependencies = [ "anes", - "atty", "cast", "ciborium", - "clap 3.2.23", + "clap", "criterion-plot", + "is-terminal", "itertools", - "lazy_static", "num-traits", + "once_cell", "oorandom", "plotters", "rayon", @@ -973,9 +1079,9 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.6" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2dd04ddaf88237dc3b8d8f9a3c1004b506b54b3313403944054d23c0870c521" +checksum = "a33c2bf77f2df06183c3aa30d1e96c0695a313d4f9c453cc3762a6db39f99200" dependencies = [ "cfg-if", "crossbeam-utils", @@ -983,9 +1089,9 @@ dependencies = [ [[package]] name = "crossbeam-deque" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "715e8152b692bba2d374b53d4875445368fdf21a94751410af607a5ac677d1fc" +checksum = "ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef" dependencies = [ "cfg-if", "crossbeam-epoch", @@ -994,22 +1100,22 @@ dependencies = [ [[package]] name = "crossbeam-epoch" -version = "0.9.13" +version = "0.9.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"01a9af1f4c2ef74bb8aa1f7e19706bc72d03598c8a570bb5de72243c7a9d9d5a" +checksum = "ae211234986c545741a7dc064309f67ee1e5ad243d0e48335adc0484d960bcc7" dependencies = [ "autocfg", "cfg-if", "crossbeam-utils", - "memoffset 0.7.1", + "memoffset 0.9.0", "scopeguard", ] [[package]] name = "crossbeam-utils" -version = "0.8.14" +version = "0.8.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fb766fa798726286dbbb842f174001dab8abc7b627a1dd86e0b7222a95d929f" +checksum = "5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294" dependencies = [ "cfg-if", ] @@ -1020,18 +1126,6 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" -[[package]] -name = "crypto-bigint" -version = "0.4.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef2b4b23cddf68b89b8f8069890e8c270d54e2d5fe1b143820234805e4cb17ef" -dependencies = [ - "generic-array", - "rand_core 0.6.4", - "subtle", - "zeroize", -] - [[package]] name = "crypto-bigint" version = "0.5.2" @@ -1051,6 +1145,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ "generic-array", + "rand_core 0.6.4", "typenum", ] @@ -1064,52 +1159,13 @@ dependencies = [ "subtle", ] -[[package]] -name = "crypto-mac" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bff07008ec701e8028e2ceb8f83f0e4274ee62bd2dbdc4fefff2e9a91824081a" -dependencies = [ - "generic-array", - "subtle", -] - -[[package]] -name = "crypto-mac" -version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1d1a86f49236c215f271d40892d5fc950490551400b02ef360692c29815c714" -dependencies = [ - "generic-array", - "subtle", -] - -[[package]] -name = "ctor" -version = "0.1.26" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d2301688392eb071b0bf1a37be05c469d3cc4dbbd95df672fe28ab021e6a096" -dependencies = [ - "quote", - "syn 1.0.109", -] - [[package]] name = "ctr" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb4a30d54f7443bf3d6191dcd486aca19e67cb3c49fa7a06a319966346707e7f" -dependencies = [ - "cipher 0.2.5", -] - -[[package]] -name = "ctr" -version = "0.8.0" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "049bb91fb4aaf0e3c7efa6cd5ef877dbbbd15b39dad06d9948de4ec8a75761ea" +checksum = "0369ee1ad671834580515889b80f2ea915f23b8be8d0daa4bbaf2ac5c7590835" dependencies = [ - "cipher 0.3.0", + "cipher", ] [[package]] @@ -1125,112 +1181,43 @@ dependencies = [ [[package]] name = "curve25519-dalek" -version = "3.2.0" +version = "4.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b9fdf9972b2bd6af2d913799d9ebc165ea4d2e65878e329d9c6b372c4491b61" -dependencies = [ - "byteorder", - "digest 0.9.0", - "rand_core 0.5.1", - "subtle", - "zeroize", -] - -[[package]] -name = "curve25519-dalek" -version = "4.0.0-rc.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d4ba9852b42210c7538b75484f9daa0655e9a3ac04f693747bb0f02cf3cfe16" +checksum = "e89b8c6a2e4b1f45971ad09761aafb85514a84744b67a95e32c3cc1352d1f65c" dependencies = [ "cfg-if", + "cpufeatures", + "curve25519-dalek-derive", + "digest 0.10.7", "fiat-crypto", - "packed_simd_2", "platforms", + "rustc_version", "subtle", "zeroize", ] [[package]] -name = "darling" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d706e75d87e35569db781a9b5e2416cff1236a47ed380831f959382ccd5f858" -dependencies = [ - "darling_core 0.10.2", - "darling_macro 0.10.2", -] - -[[package]] -name = "darling" -version = "0.14.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b0dd3cd20dc6b5a876612a6e5accfe7f3dd883db6d07acfbf14c128f61550dfa" -dependencies = [ - "darling_core 0.14.2", - "darling_macro 0.14.2", -] - -[[package]] -name = "darling_core" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0c960ae2da4de88a91b2d920c2a7233b400bc33cb28453a2987822d8392519b" -dependencies = [ - "fnv", - "ident_case", - "proc-macro2", - "quote", - "strsim 0.9.3", - "syn 1.0.109", -] - -[[package]] -name = "darling_core" -version = "0.14.2" +name = "curve25519-dalek-derive" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a784d2ccaf7c98501746bf0be29b2022ba41fd62a2e622af997a03e9f972859f" +checksum = "83fdaf97f4804dcebfa5862639bc9ce4121e82140bec2a987ac5140294865b5b" dependencies = [ - "fnv", - "ident_case", "proc-macro2", "quote", - "strsim 0.10.0", - "syn 1.0.109", -] - -[[package]] -name = "darling_macro" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9b5a2f4ac4969822c62224815d069952656cadc7084fdca9751e6d959189b72" -dependencies = [ - "darling_core 0.10.2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "darling_macro" -version = "0.14.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7618812407e9402654622dd402b0a89dff9ba93badd6540781526117b92aab7e" -dependencies = [ - "darling_core 0.14.2", - "quote", - "syn 1.0.109", + "syn 2.0.43", ] [[package]] name = "data-encoding" -version = "2.3.3" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23d8666cb01533c39dde32bcbab8e227b4ed6679b2c925eba05feabea39508fb" +checksum = "7e962a19be5cfc3f3bf6dd8f61eb50107f356ad6270fbb3ed41476571db78be5" [[package]] name = "data-encoding-macro" -version = "0.1.12" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86927b7cd2fe88fa698b87404b287ab98d1a0063a34071d92e575b72d3029aca" +checksum = 
"c904b33cc60130e1aeea4956ab803d08a3f4a0ca82d64ed757afac3891f2bb99" dependencies = [ "data-encoding", "data-encoding-macro-internal", @@ -1238,69 +1225,46 @@ dependencies = [ [[package]] name = "data-encoding-macro-internal" -version = "0.1.10" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5bbed42daaa95e780b60a50546aa345b8413a1e46f9a40a12907d3598f038db" +checksum = "8fdf3fce3ce863539ec1d7fd1b6dcc3c645663376b43ed376bbf887733e4f772" dependencies = [ "data-encoding", "syn 1.0.109", ] [[package]] -name = "dcutr" +name = "dcutr-example" version = "0.1.0" dependencies = [ - "clap 4.2.7", - "env_logger 0.10.0", + "clap", "futures", - "futures-timer", - "libp2p", - "log", -] - -[[package]] -name = "der" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1a467a65c5e759bce6e65eaf91cc29f466cdc57cb65777bd646872a8a1fd4de" -dependencies = [ - "const-oid", - "pem-rfc7468 0.6.0", - "zeroize", + "futures-timer", + "libp2p", + "log", + "tokio", + "tracing", + "tracing-subscriber", ] [[package]] name = "der" -version = "0.7.5" +version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05e58dffcdcc8ee7b22f0c1f71a69243d7c2d9ad87b5a14361f2424a1565c219" +checksum = "0c7ed52955ce76b1554f509074bb357d3fb8ac9b51288a65a3fd480d1dfba946" dependencies = [ "const-oid", - "pem-rfc7468 0.7.0", + "pem-rfc7468", "zeroize", ] [[package]] name = "der-parser" -version = "7.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe398ac75057914d7d07307bf67dc7f3f574a26783b4fc7805a20ffa9f506e82" -dependencies = [ - "asn1-rs 0.3.1", - "displaydoc", - "nom", - "num-bigint", - "num-traits", - "rusticata-macros", -] - -[[package]] -name = "der-parser" -version = "8.1.0" +version = "8.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42d4bc9b0db0a0df9ae64634ac5bdefb7afcb534e182275ca0beadbe486701c1" +checksum = 
"dbd676fbbab537128ef0278adb5576cf363cff6aa22a7b24effe97347cfab61e" dependencies = [ - "asn1-rs 0.5.1", + "asn1-rs", "displaydoc", "nom", "num-bigint", @@ -1309,192 +1273,160 @@ dependencies = [ ] [[package]] -name = "derive_builder" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d07adf7be193b71cc36b193d0f5fe60b918a3a9db4dad0449f57bcfd519704a3" -dependencies = [ - "derive_builder_macro", -] - -[[package]] -name = "derive_builder_core" -version = "0.11.2" +name = "digest" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f91d4cfa921f1c05904dc3c57b4a32c38aed3340cce209f3a6fd1478babafc4" +checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" dependencies = [ - "darling 0.14.2", - "proc-macro2", - "quote", - "syn 1.0.109", + "generic-array", ] [[package]] -name = "derive_builder_macro" -version = "0.11.2" +name = "digest" +version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f0314b72bed045f3a68671b3c86328386762c93f82d98c65c3cb5e5f573dd68" +checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ - "derive_builder_core", - "syn 1.0.109", + "block-buffer 0.10.4", + "const-oid", + "crypto-common", + "subtle", ] [[package]] -name = "digest" -version = "0.9.0" +name = "dirs" +version = "4.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" +checksum = "ca3aa72a6f96ea37bbc5aa912f6788242832f75369bdfdadcb0e38423f100059" dependencies = [ - "generic-array", + "dirs-sys", ] [[package]] -name = "digest" -version = "0.10.6" +name = "dirs-sys" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8168378f4e5023e7218c89c891c0fd8ecdb5e5e4f18cb78f38cf245dd021e76f" +checksum = "1b1d1d91c932ef41c0f2663aa8b0ca0342d444d842c06914aa0a7e352d0bada6" 
dependencies = [ - "block-buffer 0.10.3", - "const-oid", - "crypto-common", - "subtle", + "libc", + "redox_users", + "winapi", ] [[package]] name = "displaydoc" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3bf95dc3f046b9da4f2d51833c0d3547d8564ef6910f5c1ed130306a75b92886" +checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.43", ] [[package]] -name = "distributed-key-value-store" +name = "distributed-key-value-store-example" version = "0.1.0" dependencies = [ "async-std", "async-trait", - "env_logger 0.10.0", "futures", "libp2p", - "multiaddr", + "tracing", + "tracing-subscriber", ] [[package]] name = "dtoa" -version = "1.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c00704156a7de8df8da0911424e30c2049957b0a714542a44e05fe693dd85313" - -[[package]] -name = "ecdsa" -version = "0.14.8" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "413301934810f597c1d19ca71c8710e99a3f1ba28a0d2ebc01551a2daeea3c5c" -dependencies = [ - "der 0.6.1", - "elliptic-curve 0.12.3", - "rfc6979 0.3.1", - "signature 1.6.4", -] +checksum = "dcbb2bf8e87535c23f7a8a321e364ce21462d0ff10cb6407820e8e96dfff6653" [[package]] name = "ecdsa" -version = "0.16.6" +version = "0.16.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a48e5d537b8a30c0b023116d981b16334be1485af7ca68db3a2b7024cbc957fd" +checksum = "a4b1e0c257a9e9f25f90ff76d7a68360ed497ee519c8e428d1825ef0000799d4" dependencies = [ - "der 0.7.5", - "digest 0.10.6", - "elliptic-curve 0.13.4", - "rfc6979 0.4.0", - "signature 2.0.0", + "der", + "digest 0.10.7", + "elliptic-curve", + "rfc6979", + "signature", + "spki", ] [[package]] name = "ed25519" -version = "1.5.3" +version = "2.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"91cff35c70bba8a626e3185d8cd48cc11b5437e1a5bcd15b9b5fa3c64b6dfee7" +checksum = "60f6d271ca33075c88028be6f04d502853d63a5ece419d269c15315d4fc1cf1d" dependencies = [ - "signature 1.6.4", + "pkcs8", + "signature", ] [[package]] name = "ed25519-dalek" -version = "1.0.1" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c762bae6dcaf24c4c84667b8579785430908723d5c889f469d76a41d59cc7a9d" +checksum = "1f628eaec48bfd21b865dc2950cfa014450c01d2fa2b69a86c2fd5844ec523c0" dependencies = [ - "curve25519-dalek 3.2.0", + "curve25519-dalek", "ed25519", - "rand 0.7.3", + "rand_core 0.6.4", "serde", - "sha2 0.9.9", + "sha2 0.10.8", + "subtle", "zeroize", ] [[package]] name = "either" -version = "1.8.1" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fcaabb2fef8c910e7f4c7ce9f67a1283a1715879a7c230ca9d6d1ae31f16d91" +checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07" [[package]] name = "elliptic-curve" -version = "0.12.3" +version = "0.13.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7bb888ab5300a19b8e5bceef25ac745ad065f3c9f7efc6de1b91958110891d3" +checksum = "968405c8fdc9b3bf4df0a6638858cc0b52462836ab6b1c87377785dd09cf1c0b" dependencies = [ - "base16ct 0.1.1", - "crypto-bigint 0.4.9", - "der 0.6.1", - "digest 0.10.6", - "ff 0.12.1", + "base16ct", + "crypto-bigint", + "digest 0.10.7", + "ff", "generic-array", - "group 0.12.1", + "group", "hkdf", - "pem-rfc7468 0.6.0", - "pkcs8 0.9.0", + "pem-rfc7468", + "pkcs8", "rand_core 0.6.4", - "sec1 0.3.0", + "sec1", "subtle", "zeroize", ] [[package]] -name = "elliptic-curve" -version = "0.13.4" +name = "encoding_rs" +version = "0.8.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75c71eaa367f2e5d556414a8eea812bc62985c879748d6403edabd9cb03f16e7" +checksum = "071a31f4ee85403370b58aca746f01041ede6f0da2730960ad001edc2b71b394" dependencies = [ - "base16ct 0.2.0", - 
"crypto-bigint 0.5.2", - "digest 0.10.6", - "ff 0.13.0", - "generic-array", - "group 0.13.0", - "pem-rfc7468 0.7.0", - "pkcs8 0.10.2", - "rand_core 0.6.4", - "sec1 0.7.1", - "subtle", - "zeroize", + "cfg-if", ] [[package]] name = "enum-as-inner" -version = "0.5.1" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9720bba047d567ffc8a3cba48bf19126600e249ab7f128e9233e6376976a116" +checksum = "5ffccbb6966c05b32ef8fbac435df276c4ae4d3dc55a8cd0eb9745e6c12f546a" dependencies = [ "heck", "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.43", ] [[package]] @@ -1509,9 +1441,9 @@ dependencies = [ [[package]] name = "env_logger" -version = "0.10.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85cdab6a89accf66733ad5a1693a4dcced6aeff64602b634530dd73c1f3ee9f0" +checksum = "95b3f3e67048839cb0d0781f445682a35113da7121f7c949db0e2be96a4fbece" dependencies = [ "humantime", "is-terminal", @@ -1520,15 +1452,21 @@ dependencies = [ "termcolor", ] +[[package]] +name = "equivalent" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" + [[package]] name = "errno" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50d6a0976c999d473fe89ad888d5a284e55366d9dc9038b1ba2aa15128c4afa0" +checksum = "4bcfec3a70f97c962c307b2d2c56e358cf1d00b558d74262b5f929ee8cc7e73a" dependencies = [ "errno-dragonfly", "libc", - "windows-sys 0.45.0", + "windows-sys 0.48.0", ] [[package]] @@ -1548,24 +1486,63 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" [[package]] -name = "fastrand" -version = "1.8.0" +name = "event-listener" +version = "3.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a7a407cfaa3385c4ae6b23e84623d48c2798d06e3e6a1878f7f59f17b3f86499" +checksum = "d93877bcde0eb80ca09131a08d23f0a5c18a620b01db137dba666d18cd9b30c2" dependencies = [ - "instant", + "concurrent-queue", + "parking", + "pin-project-lite", ] [[package]] -name = "ff" -version = "0.12.1" +name = "event-listener-strategy" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d013fc25338cc558c5c2cfbad646908fb23591e2404481826742b651c9af7160" +checksum = "d96b852f1345da36d551b9473fa1e2b1eb5c5195585c6c018118bc92a8d91160" dependencies = [ - "rand_core 0.6.4", - "subtle", + "event-listener 3.1.0", + "pin-project-lite", +] + +[[package]] +name = "fantoccini" +version = "0.20.0-rc.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f5eb32b0001134a1d3b9e16010eb4b119451edf68446963a30a8130a0d056e98" +dependencies = [ + "base64 0.13.1", + "cookie", + "futures-core", + "futures-util", + "http 0.2.9", + "hyper 0.14.27", + "hyper-rustls", + "mime", + "serde", + "serde_json", + "time", + "tokio", + "url", + "webdriver", +] + +[[package]] +name = "fastrand" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" +dependencies = [ + "instant", ] +[[package]] +name = "fastrand" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6999dc1837253364c2ebb0704ba97994bd874e8f195d665c50b7548f6ea92764" + [[package]] name = "ff" version = "0.13.0" @@ -1578,55 +1555,60 @@ dependencies = [ [[package]] name = "fiat-crypto" -version = "0.1.17" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a214f5bb88731d436478f3ae1f8a277b62124089ba9fb67f4f93fb100ef73c90" +checksum = "d0870c84016d4b481be5c9f323c24f65e31e901ae618f0e80f4308fb00de1d2d" [[package]] -name = "file-sharing" +name = "file-sharing-example" version = "0.1.0" dependencies = [ 
"async-std", - "async-trait", - "clap 4.2.7", + "clap", "either", - "env_logger 0.10.0", "futures", "libp2p", - "multiaddr", + "serde", + "tracing", + "tracing-subscriber", + "void", ] [[package]] -name = "flate2" -version = "1.0.26" +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + +[[package]] +name = "foreign-types" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b9429470923de8e8cbd4d2dc513535400b4b3fef0319fb5c4e1f520a7bef743" +checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" dependencies = [ - "crc32fast", - "libz-sys", - "miniz_oxide", + "foreign-types-shared", ] [[package]] -name = "fnv" -version = "1.0.7" +name = "foreign-types-shared" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" +checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" [[package]] name = "form_urlencoded" -version = "1.1.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9c384f161156f5260c24a097c56119f9be8c798586aecc13afbcbe7b7e26bf8" +checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" dependencies = [ "percent-encoding", ] [[package]] name = "futures" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23342abe12aba583913b2e62f22225ff9c950774065e4bfb61a19cd9770fec40" +checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" dependencies = [ "futures-channel", "futures-core", @@ -1637,11 +1619,21 @@ dependencies = [ "futures-util", ] +[[package]] +name = "futures-bounded" +version = "0.2.3" +dependencies = [ + "futures", + "futures-timer", + "futures-util", + "tokio", +] + [[package]] 
name = "futures-channel" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "955518d47e09b25bbebc7a18df10b81f0c766eaf4c4f1cccef2fca5f2a4fb5f2" +checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" dependencies = [ "futures-core", "futures-sink", @@ -1649,15 +1641,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bca583b7e26f571124fe5b7561d49cb2868d79116cfa0eefce955557c6fee8c" +checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" [[package]] name = "futures-executor" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccecee823288125bd88b4d7f565c9e58e41858e47ab72e8ea2d64e93624386e0" +checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" dependencies = [ "futures-core", "futures-task", @@ -1667,58 +1659,78 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fff74096e71ed47f8e023204cfd0aa1289cd54ae5430a9523be060cdb849964" +checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" [[package]] name = "futures-lite" -version = "1.12.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7694489acd39452c77daa48516b894c153f192c3578d5a839b62c58099fcbf48" +checksum = "49a9d51ce47660b1e808d3c990b4709f2f415d928835a17dfd16991515c46bce" dependencies = [ - "fastrand", + "fastrand 1.9.0", "futures-core", "futures-io", "memchr", "parking", - "pin-project-lite 0.2.9", + "pin-project-lite", "waker-fn", ] +[[package]] +name = "futures-lite" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"d3831c2651acb5177cbd83943f3d9c8912c5ad03c76afcc0e9511ba568ec5ebb" +dependencies = [ + "futures-core", + "pin-project-lite", +] + [[package]] name = "futures-macro" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" +checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.43", ] [[package]] name = "futures-rustls" -version = "0.22.2" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2411eed028cdf8c8034eaf21f9915f956b6c3abec4d4c7949ee67f0721127bd" +checksum = "35bd3cf68c183738046838e300353e4716c674dc5e56890de4826801a6622a28" dependencies = [ "futures-io", - "rustls 0.20.8", - "webpki 0.22.0", + "rustls 0.21.9", ] [[package]] name = "futures-sink" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f43be4fe21a13b9781a69afa4985b0f6ee0e1afab2c6f454a8cf30e2b2237b6e" +checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" [[package]] name = "futures-task" -version = "0.3.28" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" + +[[package]] +name = "futures-ticker" +version = "0.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76d3d132be6c0e6aa1534069c705a74a5997a356c0dc2f86a47765e5617c5b65" +checksum = "9763058047f713632a52e916cc7f6a4b3fc6e9fc1ff8c5b1dc49e5a89041682e" +dependencies = [ + "futures", + "futures-timer", + "instant", +] [[package]] name = "futures-timer" @@ -1727,14 +1739,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" dependencies = [ "gloo-timers", - 
"send_wrapper", + "send_wrapper 0.4.0", ] [[package]] name = "futures-util" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26b01e40b772d54cf6c6d721c1d1abd0647a0106a12ecaa1c186273392a69533" +checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" dependencies = [ "futures-channel", "futures-core", @@ -1743,29 +1755,28 @@ dependencies = [ "futures-sink", "futures-task", "memchr", - "pin-project-lite 0.2.9", + "pin-project-lite", "pin-utils", "slab", ] [[package]] name = "futures_ringbuf" -version = "0.3.1" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b905098b5519bd63b2a1f9f4615198b0e38a473ce201ffdbd4dea6eb63087ddc" +checksum = "6628abb6eb1fc74beaeb20cd0670c43d158b0150f7689b38c3eaf663f99bdec7" dependencies = [ "futures", "log", - "log-derive", "ringbuf", "rustc_version", ] [[package]] name = "generic-array" -version = "0.14.6" +version = "0.14.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bff49e947297f3312447abdca79f45f4738097cc82b06e72054d2223f601f1b9" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" dependencies = [ "typenum", "version_check", @@ -1785,9 +1796,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.9" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c85e1d9ab2eadba7e5040d4e09cbd6d072b76a557ad64e797c2cb9d4da21d7e4" +checksum = "fe9006bed769170c11f845cf00c7c1e9092aeb3f268e007c3e760ac68008070f" dependencies = [ "cfg-if", "js-sys", @@ -1798,22 +1809,37 @@ dependencies = [ [[package]] name = "ghash" -version = "0.3.1" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97304e4cd182c3846f7575ced3890c53012ce534ad9114046b0a9e00bb30a375" +checksum = "d930750de5717d2dd0b8c0d42c076c0e884c81a73e6cab859bbd2339c71e3e40" dependencies = [ "opaque-debug", 
- "polyval 0.4.5", + "polyval", ] [[package]] -name = "ghash" -version = "0.4.4" +name = "gimli" +version = "0.27.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6c80984affa11d98d1b88b66ac8853f143217b399d3c74116778ff8fdb4ed2e" + +[[package]] +name = "glob" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" + +[[package]] +name = "globset" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1583cc1656d7839fd3732b80cf4f38850336cdb9b8ded1cd399ca62958de3c99" +checksum = "029d74589adefde59de1a0c4f4732695c32805624aec7b68d91503d4dba79afc" dependencies = [ - "opaque-debug", - "polyval 0.5.3", + "aho-corasick 0.7.20", + "bstr", + "fnv", + "log", + "regex", ] [[package]] @@ -1830,39 +1856,47 @@ dependencies = [ [[package]] name = "group" -version = "0.12.1" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dfbfb3a6cfbd390d5c9564ab283a0349b9b9fcd46a706c1eb10e0db70bfbac7" +checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" dependencies = [ - "ff 0.12.1", + "ff", "rand_core 0.6.4", "subtle", ] [[package]] -name = "group" -version = "0.13.0" +name = "h2" +version = "0.3.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" +checksum = "97ec8491ebaf99c8eaa73058b045fe58073cd6be7f596ac993ced0b0a0c01049" dependencies = [ - "ff 0.13.0", - "rand_core 0.6.4", - "subtle", + "bytes", + "fnv", + "futures-core", + "futures-sink", + "futures-util", + "http 0.2.9", + "indexmap 1.9.3", + "slab", + "tokio", + "tokio-util", + "tracing", ] [[package]] name = "h2" -version = "0.3.17" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"66b91535aa35fea1523ad1b86cb6b53c28e0ae566ba4a460f4457e936cad7c6f" +checksum = "e1d308f63daf4181410c242d34c11f928dcb3aa105852019e043c9d1f4e4368a" dependencies = [ "bytes", "fnv", "futures-core", "futures-sink", "futures-util", - "http", - "indexmap", + "http 1.0.0", + "indexmap 2.0.0", "slab", "tokio", "tokio-util", @@ -1883,11 +1917,12 @@ checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" [[package]] name = "hashbrown" -version = "0.13.2" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" +checksum = "2c6201b9ff9fd90a5a3bac2e56a830d0caa509576f0e503818ee82c181b3437a" dependencies = [ "ahash", + "allocator-api2", ] [[package]] @@ -1898,27 +1933,9 @@ checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" [[package]] name = "hermit-abi" -version = "0.1.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" -dependencies = [ - "libc", -] - -[[package]] -name = "hermit-abi" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee512640fe35acbfb4bb779db6f0d80704c2cacfa2e39b601ef3e3f47d1ae4c7" -dependencies = [ - "libc", -] - -[[package]] -name = "hermit-abi" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fed44880c466736ef9a5c5b5facefb5ed0785676d0c02d612db14e54f0d84286" +checksum = "443144c8cdadd93ebf52ddb4056d257f5b52c04d3c804e657d19eb73fc33668b" [[package]] name = "hex" @@ -1939,41 +1956,67 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b07f60793ff0a4d9cef0f18e63b5357e06209987153a64648c972c1e5aff336f" [[package]] -name = "hkdf" -version = "0.12.3" +name = "hickory-proto" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"791a029f6b9fc27657f6f188ec6e5e43f6911f6f878e0dc5501396e09809d437" +checksum = "091a6fbccf4860009355e3efc52ff4acf37a63489aad7435372d44ceeb6fbbcf" dependencies = [ - "hmac 0.12.1", + "async-trait", + "cfg-if", + "data-encoding", + "enum-as-inner", + "futures-channel", + "futures-io", + "futures-util", + "idna 0.4.0", + "ipnet", + "once_cell", + "rand 0.8.5", + "socket2 0.5.5", + "thiserror", + "tinyvec", + "tokio", + "tracing", + "url", ] [[package]] -name = "hmac" -version = "0.8.1" +name = "hickory-resolver" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "126888268dcc288495a26bf004b38c5fdbb31682f992c84ceb046a1f0fe38840" +checksum = "35b8f021164e6a984c9030023544c57789c51760065cd510572fedcfb04164e8" dependencies = [ - "crypto-mac 0.8.0", - "digest 0.9.0", + "cfg-if", + "futures-util", + "hickory-proto", + "ipconfig", + "lru-cache", + "once_cell", + "parking_lot", + "rand 0.8.5", + "resolv-conf", + "smallvec", + "thiserror", + "tokio", + "tracing", ] [[package]] -name = "hmac" -version = "0.10.1" +name = "hkdf" +version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1441c6b1e930e2817404b5046f1f989899143a12bf92de603b69f4e0aee1e15" +checksum = "7b5f8eb2ad728638ea2c7d47a21db23b7b58a72ed6a38256b8a1849f15fbbdf7" dependencies = [ - "crypto-mac 0.10.1", - "digest 0.9.0", + "hmac 0.12.1", ] [[package]] name = "hmac" -version = "0.11.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a2a2320eb7ec0ebe8da8f744d7812d9fc4cb4d09344ac01898dbcb6a20ae69b" +checksum = "126888268dcc288495a26bf004b38c5fdbb31682f992c84ceb046a1f0fe38840" dependencies = [ - "crypto-mac 0.11.1", + "crypto-mac", "digest 0.9.0", ] @@ -1983,7 +2026,7 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" dependencies = [ - "digest 0.10.6", + "digest 0.10.7", ] 
[[package]] @@ -1997,6 +2040,22 @@ dependencies = [ "hmac 0.8.1", ] +[[package]] +name = "hole-punching-tests" +version = "0.1.0" +dependencies = [ + "anyhow", + "either", + "env_logger 0.10.1", + "futures", + "libp2p", + "redis", + "serde", + "serde_json", + "tokio", + "tracing", +] + [[package]] name = "hostname" version = "0.3.1" @@ -2010,9 +2069,20 @@ dependencies = [ [[package]] name = "http" -version = "0.2.8" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd6effc99afb63425aff9b05836f029929e345a6148a14b7ecd5ab67af944482" +dependencies = [ + "bytes", + "fnv", + "itoa", +] + +[[package]] +name = "http" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75f43d41e26995c17e71ee126451dd3941010b0514a81a9d11f3b341debc2399" +checksum = "b32afd38673a8016f7c9ae69e5af41a58f81b1d31689040f2f1959594ce194ea" dependencies = [ "bytes", "fnv", @@ -2026,10 +2096,39 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" dependencies = [ "bytes", - "http", - "pin-project-lite 0.2.9", + "http 0.2.9", + "pin-project-lite", +] + +[[package]] +name = "http-body" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1cac85db508abc24a2e48553ba12a996e87244a0395ce011e62b37158745d643" +dependencies = [ + "bytes", + "http 1.0.0", +] + +[[package]] +name = "http-body-util" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41cb79eb393015dadd30fc252023adb0b2400a0caee0fa2a077e6e21a551e840" +dependencies = [ + "bytes", + "futures-util", + "http 1.0.0", + "http-body 1.0.0", + "pin-project-lite", ] +[[package]] +name = "http-range-header" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ce4ef31cda248bbdb6e6820603b82dfcd9e833db65a43e997a0ccec777d11fe" + [[package]] name = 
"httparse" version = "1.8.0" @@ -2050,20 +2149,21 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.26" +version = "0.14.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab302d72a6f11a3b910431ff93aae7e773078c769f0a3ef15fb9ec692ed147d4" +checksum = "ffb1cfd654a8219eaef89881fdb3bb3b1cdc5fa75ded05d6933b2b382e395468" dependencies = [ "bytes", "futures-channel", "futures-core", "futures-util", - "http", - "http-body", + "h2 0.3.20", + "http 0.2.9", + "http-body 0.4.5", "httparse", "httpdate", "itoa", - "pin-project-lite 0.2.9", + "pin-project-lite", "socket2 0.4.9", "tokio", "tower-service", @@ -2072,37 +2172,111 @@ dependencies = [ ] [[package]] -name = "ident_case" +name = "hyper" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" +checksum = "403f9214f3e703236b221f1a9cd88ec8b4adfa5296de01ab96216361f4692f56" +dependencies = [ + "bytes", + "futures-channel", + "futures-util", + "h2 0.4.0", + "http 1.0.0", + "http-body 1.0.0", + "httparse", + "httpdate", + "itoa", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "hyper-rustls" +version = "0.23.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1788965e61b367cd03a62950836d5cd41560c3577d90e40e0819373194d1661c" +dependencies = [ + "http 0.2.9", + "hyper 0.14.27", + "log", + "rustls 0.20.8", + "rustls-native-certs", + "tokio", + "tokio-rustls", +] + +[[package]] +name = "hyper-timeout" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1" +dependencies = [ + "hyper 0.14.27", + "pin-project-lite", + "tokio", + "tokio-io-timeout", +] + +[[package]] +name = "hyper-tls" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" +dependencies = [ + "bytes", + "hyper 0.14.27", + "native-tls", + "tokio", + "tokio-native-tls", +] + +[[package]] +name = "hyper-util" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ca339002caeb0d159cc6e023dff48e199f081e42fa039895c7c6f38b37f2e9d" +dependencies = [ + "bytes", + "futures-channel", + "futures-util", + "http 1.0.0", + "http-body 1.0.0", + "hyper 1.0.1", + "pin-project-lite", + "socket2 0.5.5", + "tokio", + "tower", + "tower-service", + "tracing", +] [[package]] -name = "identify" +name = "identify-example" version = "0.1.0" dependencies = [ "async-std", "async-trait", "futures", "libp2p", + "tracing", + "tracing-subscriber", ] [[package]] name = "idna" -version = "0.2.3" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "418a0a6fab821475f634efe3ccc45c013f742efe03d853e8d3355d5cb850ecf8" +checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c" dependencies = [ - "matches", "unicode-bidi", "unicode-normalization", ] [[package]] name = "idna" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e14ddfc70884202db2244c223200c204c2bda1bc6e0998d11b5e024d657209e6" +checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" dependencies = [ "unicode-bidi", "unicode-normalization", @@ -2110,21 +2284,21 @@ dependencies = [ [[package]] name = "if-addrs" -version = "0.7.0" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cbc0fa01ffc752e9dbc72818cdb072cd028b86be5e09dd04c5a643704fe101a9" +checksum = "cabb0019d51a643781ff15c9c8a3e5dedc365c47211270f4e8f82812fedd8f0a" dependencies = [ "libc", - "winapi", + "windows-sys 0.48.0", ] [[package]] name = "if-watch" -version = "3.0.1" +version = "3.2.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9465340214b296cd17a0009acdb890d6160010b8adf8f78a00d0d7ab270f79f" +checksum = "d6b0422c86d7ce0e97169cc42e04ae643caf278874a7a3c87b8150a220dc7e1e" dependencies = [ - "async-io", + "async-io 2.2.2", "core-foundation", "fnv", "futures", @@ -2138,22 +2312,52 @@ dependencies = [ "windows", ] +[[package]] +name = "igd-next" +version = "0.14.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57e065e90a518ab5fedf79aa1e4b784e10f8e484a834f6bda85c42633a2cb7af" +dependencies = [ + "async-trait", + "attohttpc", + "bytes", + "futures", + "http 0.2.9", + "hyper 0.14.27", + "log", + "rand 0.8.5", + "tokio", + "url", + "xmltree", +] + [[package]] name = "indexmap" -version = "1.9.2" +version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1885e79c1fc4b10f0e172c475f458b7f7b93061064d98c3293e98c5ba0c8b399" +checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" dependencies = [ "autocfg", "hashbrown 0.12.3", ] +[[package]] +name = "indexmap" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d5477fe2230a79769d8dc68e0eabf5437907c0457a5614a9e8dddb67f65eb65d" +dependencies = [ + "equivalent", + "hashbrown 0.14.0", +] + [[package]] name = "inout" version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a0c10553d664a4d0bcff9f4215d0aac67a639cc68ef660840afe309b807bc9f5" dependencies = [ + "block-padding", "generic-array", ] @@ -2171,9 +2375,9 @@ dependencies = [ [[package]] name = "interceptor" -version = "0.8.2" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e8a11ae2da61704edada656798b61c94b35ecac2c58eb955156987d5e6be90b" +checksum = "5927883184e6a819b22d5e4f5f7bc7ca134fde9b2026fbddd8d95249746ba21e" dependencies = [ "async-trait", "bytes", @@ -2193,81 +2397,101 @@ name = "interop-tests" version = 
"0.1.0" dependencies = [ "anyhow", + "axum 0.7.1", + "console_error_panic_hook", "either", - "env_logger 0.10.0", "futures", + "futures-timer", + "instant", "libp2p", "libp2p-mplex", - "libp2p-quic", + "libp2p-noise", + "libp2p-tls", "libp2p-webrtc", - "log", + "libp2p-webrtc-websys", + "mime_guess", "rand 0.8.5", "redis", + "reqwest", + "rust-embed", + "serde", + "serde_json", + "thirtyfour", "tokio", + "tower-http", + "tracing", + "tracing-subscriber", + "wasm-bindgen", + "wasm-bindgen-futures", + "wasm-logger", ] [[package]] name = "io-lifetimes" -version = "1.0.4" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7d6c6f8c91b4b9ed43484ad1a938e393caf35960fce7f82a040497207bd8e9e" +checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" dependencies = [ + "hermit-abi", "libc", - "windows-sys 0.42.0", + "windows-sys 0.48.0", ] [[package]] name = "ipconfig" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd302af1b90f2463a98fa5ad469fc212c8e3175a41c3068601bfa2727591c5be" +checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" dependencies = [ - "socket2 0.4.9", + "socket2 0.5.5", "widestring", - "winapi", + "windows-sys 0.48.0", "winreg", ] [[package]] -name = "ipfs-kad" +name = "ipfs-kad-example" version = "0.1.0" dependencies = [ - "async-std", + "anyhow", "async-trait", - "env_logger 0.10.0", + "clap", + "env_logger 0.10.1", "futures", "libp2p", + "tokio", + "tracing", + "tracing-subscriber", ] [[package]] -name = "ipfs-private" +name = "ipfs-private-example" version = "0.1.0" dependencies = [ - "async-std", "async-trait", "either", - "env_logger 0.10.0", "futures", "libp2p", - "multiaddr", + "tokio", + "tracing", + "tracing-subscriber", ] [[package]] name = "ipnet" -version = "2.7.1" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"30e22bd8629359895450b59ea7a776c850561b96a3b1d31321c1949d9e6c9146" +checksum = "28b29a3cd74f0f4598934efe3aeba42bae0eb4680554128851ebbecb02af14e6" [[package]] name = "is-terminal" -version = "0.4.6" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "256017f749ab3117e93acb91063009e1f1bb56d03965b14c2c8df4eb02c524d8" +checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" dependencies = [ - "hermit-abi 0.3.1", - "io-lifetimes", - "rustix", - "windows-sys 0.45.0", + "hermit-abi", + "rustix 0.38.21", + "windows-sys 0.48.0", ] [[package]] @@ -2281,24 +2505,24 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.5" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fad582f4b9e86b6caa621cabeb0963332d92eea04729ab12892c2533951e6440" +checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38" [[package]] name = "js-sys" -version = "0.3.61" +version = "0.3.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "445dde2150c55e483f3d8416706b97ec8e8237c307e5b7b4b8dd15e6af2a0730" +checksum = "cee9c64da59eae3b50095c18d3e74f8b73c0b86d2792824ff01bbce68ba229ca" dependencies = [ "wasm-bindgen", ] [[package]] name = "keccak" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3afef3b6eff9ce9d8ff9b3601125eec7f0c8cbac7abd14f355d053fa56c98768" +checksum = "8f6d5ed8676d904364de097082f4e7d240b571b67989ced0240f08b7f966f940" dependencies = [ "cpufeatures", ] @@ -2307,8 +2531,8 @@ dependencies = [ name = "keygen" version = "0.1.0" dependencies = [ - "base64 0.21.0", - "clap 4.2.7", + "base64 0.21.5", + "clap", "libp2p-core", "libp2p-identity", "serde", @@ -2333,36 +2557,28 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.142" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "6a987beff54b60ffa6d51982e1aa1146bc42f19bd26be28b0586f252fccf5317" - -[[package]] -name = "libm" -version = "0.1.4" +version = "0.2.151" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fc7aa29613bd6a620df431842069224d8bc9011086b1db4c0e0cd47fa03ec9a" +checksum = "302d7ab3130588088d277783b1e2d2e10c9e9e4a16dd9050e6ec93fb3e7048f4" [[package]] name = "libp2p" -version = "0.52.0" +version = "0.53.2" dependencies = [ "async-std", "async-trait", "bytes", - "clap 4.2.7", + "clap", "either", - "env_logger 0.10.0", "futures", "futures-timer", - "getrandom 0.2.9", + "getrandom 0.2.11", "instant", "libp2p-allow-block-list", "libp2p-autonat", "libp2p-connection-limits", "libp2p-core", "libp2p-dcutr", - "libp2p-deflate", "libp2p-dns", "libp2p-floodsub", "libp2p-gossipsub", @@ -2370,10 +2586,10 @@ dependencies = [ "libp2p-identity", "libp2p-kad", "libp2p-mdns", + "libp2p-memory-connection-limits", "libp2p-metrics", "libp2p-mplex", "libp2p-noise", - "libp2p-perf", "libp2p-ping", "libp2p-plaintext", "libp2p-pnet", @@ -2385,18 +2601,22 @@ dependencies = [ "libp2p-tcp", "libp2p-tls", "libp2p-uds", - "libp2p-wasm-ext", - "libp2p-webrtc", + "libp2p-upnp", "libp2p-websocket", + "libp2p-websocket-websys", + "libp2p-webtransport-websys", "libp2p-yamux", "multiaddr", "pin-project", + "rw-stream-sink", + "thiserror", "tokio", + "tracing-subscriber", ] [[package]] name = "libp2p-allow-block-list" -version = "0.2.0" +version = "0.3.0" dependencies = [ "async-std", "libp2p-core", @@ -2409,11 +2629,11 @@ dependencies = [ [[package]] name = "libp2p-autonat" -version = "0.11.0" +version = "0.12.0" dependencies = [ "async-std", "async-trait", - "env_logger 0.10.0", + "asynchronous-codec", "futures", "futures-timer", "instant", @@ -2422,14 +2642,16 @@ dependencies = [ "libp2p-request-response", "libp2p-swarm", "libp2p-swarm-test", - "log", "quick-protobuf", + "quick-protobuf-codec", "rand 0.8.5", + "tracing", + "tracing-subscriber", ] [[package]] name = 
"libp2p-connection-limits" -version = "0.2.0" +version = "0.3.1" dependencies = [ "async-std", "libp2p-core", @@ -2446,7 +2668,7 @@ dependencies = [ [[package]] name = "libp2p-core" -version = "0.40.0" +version = "0.41.2" dependencies = [ "async-std", "either", @@ -2457,12 +2679,11 @@ dependencies = [ "libp2p-identity", "libp2p-mplex", "libp2p-noise", - "log", "multiaddr", "multihash", "multistream-select", "once_cell", - "parking_lot 0.12.1", + "parking_lot", "pin-project", "quick-protobuf", "quickcheck-ext", @@ -2471,20 +2692,21 @@ dependencies = [ "serde", "smallvec", "thiserror", - "unsigned-varint", + "tracing", + "unsigned-varint 0.8.0", "void", ] [[package]] name = "libp2p-dcutr" -version = "0.10.0" +version = "0.11.0" dependencies = [ "async-std", "asynchronous-codec", - "clap 4.2.7", + "clap", "either", - "env_logger 0.10.0", "futures", + "futures-bounded", "futures-timer", "instant", "libp2p-core", @@ -2499,87 +2721,77 @@ dependencies = [ "libp2p-swarm-test", "libp2p-tcp", "libp2p-yamux", - "log", + "lru", "quick-protobuf", "quick-protobuf-codec", "rand 0.8.5", "thiserror", + "tracing", + "tracing-subscriber", "void", ] -[[package]] -name = "libp2p-deflate" -version = "0.40.0" -dependencies = [ - "async-std", - "flate2", - "futures", - "futures_ringbuf", - "libp2p-core", - "libp2p-tcp", - "quickcheck-ext", - "rand 0.8.5", -] - [[package]] name = "libp2p-dns" -version = "0.40.0" +version = "0.41.1" dependencies = [ "async-std", "async-std-resolver", - "env_logger 0.10.0", + "async-trait", "futures", + "hickory-resolver", "libp2p-core", "libp2p-identity", - "log", - "parking_lot 0.12.1", + "parking_lot", "smallvec", "tokio", - "trust-dns-resolver", + "tracing", + "tracing-subscriber", ] [[package]] name = "libp2p-floodsub" -version = "0.43.0" +version = "0.44.0" dependencies = [ "asynchronous-codec", + "bytes", "cuckoofilter", "fnv", "futures", "libp2p-core", "libp2p-identity", "libp2p-swarm", - "log", "quick-protobuf", "quick-protobuf-codec", "rand 0.8.5", 
"smallvec", "thiserror", + "tracing", ] [[package]] name = "libp2p-gossipsub" -version = "0.45.0" +version = "0.46.1" dependencies = [ "async-std", "asynchronous-codec", - "base64 0.21.0", + "base64 0.21.5", "byteorder", "bytes", "either", - "env_logger 0.10.0", "fnv", "futures", + "futures-ticker", + "getrandom 0.2.11", "hex", "hex_fmt", "instant", "libp2p-core", "libp2p-identity", - "libp2p-mplex", "libp2p-noise", "libp2p-swarm", "libp2p-swarm-test", - "log", + "libp2p-yamux", "prometheus-client", "quick-protobuf", "quick-protobuf-codec", @@ -2587,108 +2799,110 @@ dependencies = [ "rand 0.8.5", "regex", "serde", - "sha2 0.10.6", + "sha2 0.10.8", "smallvec", - "unsigned-varint", + "tracing", + "tracing-subscriber", "void", - "wasm-timer", ] [[package]] name = "libp2p-identify" -version = "0.43.0" +version = "0.44.1" dependencies = [ "async-std", "asynchronous-codec", "either", - "env_logger 0.10.0", "futures", + "futures-bounded", "futures-timer", "libp2p-core", "libp2p-identity", - "libp2p-mplex", - "libp2p-noise", "libp2p-swarm", - "libp2p-tcp", - "libp2p-yamux", - "log", + "libp2p-swarm-test", "lru", "quick-protobuf", "quick-protobuf-codec", "smallvec", "thiserror", + "tracing", + "tracing-subscriber", "void", ] [[package]] name = "libp2p-identity" -version = "0.2.0" +version = "0.2.8" dependencies = [ "asn1_der", - "base64 0.21.0", + "base64 0.21.5", "bs58", "criterion", "ed25519-dalek", "hex-literal", + "hkdf", "libsecp256k1", - "log", - "multiaddr", "multihash", - "p256 0.13.2", + "p256", "quick-protobuf", "quickcheck-ext", "rand 0.8.5", - "ring", + "ring 0.17.5", "rmp-serde", - "sec1 0.7.1", + "sec1", "serde", "serde_json", - "sha2 0.10.6", + "sha2 0.10.8", "thiserror", + "tracing", "void", "zeroize", ] [[package]] name = "libp2p-kad" -version = "0.44.0" +version = "0.45.3" dependencies = [ "arrayvec", + "async-std", "asynchronous-codec", "bytes", "either", - "env_logger 0.10.0", "fnv", "futures", + "futures-bounded", "futures-timer", "instant", 
"libp2p-core", + "libp2p-identify", "libp2p-identity", "libp2p-noise", "libp2p-swarm", + "libp2p-swarm-test", "libp2p-yamux", - "log", "quick-protobuf", + "quick-protobuf-codec", "quickcheck-ext", "rand 0.8.5", "serde", - "sha2 0.10.6", + "sha2 0.10.8", "smallvec", "thiserror", + "tracing", + "tracing-subscriber", "uint", - "unsigned-varint", "void", ] [[package]] name = "libp2p-mdns" -version = "0.44.0" +version = "0.45.1" dependencies = [ - "async-io", + "async-io 2.2.2", "async-std", "data-encoding", - "env_logger 0.10.0", "futures", + "hickory-proto", "if-watch", "libp2p-core", "libp2p-identity", @@ -2697,19 +2911,39 @@ dependencies = [ "libp2p-swarm-test", "libp2p-tcp", "libp2p-yamux", - "log", "rand 0.8.5", - "smallvec", - "socket2 0.5.2", - "tokio", - "trust-dns-proto", + "smallvec", + "socket2 0.5.5", + "tokio", + "tracing", + "tracing-subscriber", + "void", +] + +[[package]] +name = "libp2p-memory-connection-limits" +version = "0.2.0" +dependencies = [ + "async-std", + "libp2p-core", + "libp2p-identify", + "libp2p-identity", + "libp2p-swarm", + "libp2p-swarm-derive", + "libp2p-swarm-test", + "memory-stats", + "rand 0.8.5", + "sysinfo", + "tracing", "void", ] [[package]] name = "libp2p-metrics" -version = "0.13.0" +version = "0.14.1" dependencies = [ + "futures", + "instant", "libp2p-core", "libp2p-dcutr", "libp2p-gossipsub", @@ -2719,31 +2953,32 @@ dependencies = [ "libp2p-ping", "libp2p-relay", "libp2p-swarm", + "pin-project", "prometheus-client", ] [[package]] name = "libp2p-mplex" -version = "0.40.0" +version = "0.41.0" dependencies = [ "async-std", "asynchronous-codec", "bytes", "criterion", - "env_logger 0.10.0", "futures", "libp2p-core", "libp2p-identity", "libp2p-muxer-test-harness", "libp2p-plaintext", "libp2p-tcp", - "log", "nohash-hasher", - "parking_lot 0.12.1", + "parking_lot", "quickcheck-ext", "rand 0.8.5", "smallvec", - "unsigned-varint", + "tracing", + "tracing-subscriber", + "unsigned-varint 0.8.0", ] [[package]] @@ -2754,65 +2989,72 @@ 
dependencies = [ "futures-timer", "futures_ringbuf", "libp2p-core", - "log", + "tracing", ] [[package]] name = "libp2p-noise" -version = "0.43.0" +version = "0.44.0" dependencies = [ + "asynchronous-codec", "bytes", - "curve25519-dalek 3.2.0", - "env_logger 0.10.0", + "curve25519-dalek", "futures", "futures_ringbuf", "libp2p-core", "libp2p-identity", - "log", + "multiaddr", + "multihash", "once_cell", "quick-protobuf", "quickcheck-ext", "rand 0.8.5", - "sha2 0.10.6", + "sha2 0.10.8", "snow", "static_assertions", "thiserror", - "x25519-dalek 1.1.1", + "tracing", + "tracing-subscriber", + "x25519-dalek", "zeroize", ] [[package]] name = "libp2p-perf" -version = "0.2.0" +version = "0.3.0" dependencies = [ "anyhow", - "async-std", - "clap 4.2.7", - "env_logger 0.10.0", + "clap", "futures", + "futures-bounded", + "futures-timer", "instant", + "libp2p", "libp2p-core", "libp2p-dns", "libp2p-identity", - "libp2p-noise", "libp2p-quic", "libp2p-swarm", "libp2p-swarm-test", "libp2p-tcp", + "libp2p-tls", "libp2p-yamux", - "log", "rand 0.8.5", + "serde", + "serde_json", "thiserror", + "tokio", + "tracing", + "tracing-subscriber", "void", ] [[package]] name = "libp2p-ping" -version = "0.43.0" +version = "0.44.0" dependencies = [ "async-std", "either", - "env_logger 0.10.0", "futures", "futures-timer", "instant", @@ -2820,34 +3062,34 @@ dependencies = [ "libp2p-identity", "libp2p-swarm", "libp2p-swarm-test", - "log", "quickcheck-ext", "rand 0.8.5", + "tracing", + "tracing-subscriber", "void", ] [[package]] name = "libp2p-plaintext" -version = "0.40.0" +version = "0.41.0" dependencies = [ "asynchronous-codec", "bytes", - "env_logger 0.10.0", "futures", "futures_ringbuf", "libp2p-core", "libp2p-identity", - "log", "quick-protobuf", + "quick-protobuf-codec", "quickcheck-ext", "rand 0.8.5", - "unsigned-varint", - "void", + "tracing", + "tracing-subscriber", ] [[package]] name = "libp2p-pnet" -version = "0.23.0" +version = "0.24.0" dependencies = [ "futures", "libp2p-core", @@ -2857,22 
+3099,21 @@ dependencies = [ "libp2p-tcp", "libp2p-websocket", "libp2p-yamux", - "log", "pin-project", "quickcheck-ext", "rand 0.8.5", "salsa20", "sha3", "tokio", + "tracing", ] [[package]] name = "libp2p-quic" -version = "0.8.0-alpha" +version = "0.10.2" dependencies = [ "async-std", "bytes", - "env_logger 0.10.0", "futures", "futures-timer", "if-watch", @@ -2883,25 +3124,28 @@ dependencies = [ "libp2p-tcp", "libp2p-tls", "libp2p-yamux", - "log", - "parking_lot 0.12.1", + "parking_lot", "quickcheck", - "quinn-proto", + "quinn", "rand 0.8.5", - "rustls 0.20.8", + "ring 0.16.20", + "rustls 0.21.9", + "socket2 0.5.5", "thiserror", "tokio", + "tracing", + "tracing-subscriber", ] [[package]] name = "libp2p-relay" -version = "0.16.0" +version = "0.17.1" dependencies = [ "asynchronous-codec", "bytes", "either", - "env_logger 0.10.0", "futures", + "futures-bounded", "futures-timer", "instant", "libp2p-core", @@ -2909,55 +3153,61 @@ dependencies = [ "libp2p-ping", "libp2p-plaintext", "libp2p-swarm", + "libp2p-swarm-test", "libp2p-yamux", - "log", "quick-protobuf", "quick-protobuf-codec", "quickcheck-ext", "rand 0.8.5", "static_assertions", "thiserror", + "tracing", + "tracing-subscriber", "void", ] [[package]] name = "libp2p-rendezvous" -version = "0.13.0" +version = "0.14.0" dependencies = [ "async-trait", "asynchronous-codec", "bimap", - "env_logger 0.10.0", "futures", "futures-timer", "instant", "libp2p-core", "libp2p-identify", "libp2p-identity", - "libp2p-mplex", "libp2p-noise", "libp2p-ping", + "libp2p-request-response", "libp2p-swarm", "libp2p-swarm-test", "libp2p-tcp", "libp2p-yamux", - "log", "quick-protobuf", "quick-protobuf-codec", "rand 0.8.5", "thiserror", "tokio", + "tracing", + "tracing-subscriber", "void", ] [[package]] name = "libp2p-request-response" -version = "0.25.0" +version = "0.26.1" dependencies = [ + "anyhow", "async-std", "async-trait", - "env_logger 0.10.0", + "cbor4ii", "futures", + "futures-bounded", + "futures-timer", + "futures_ringbuf", 
"instant", "libp2p-core", "libp2p-identity", @@ -2967,20 +3217,44 @@ dependencies = [ "libp2p-tcp", "libp2p-yamux", "rand 0.8.5", + "serde", + "serde_json", "smallvec", + "tracing", + "tracing-subscriber", + "void", +] + +[[package]] +name = "libp2p-server" +version = "0.12.5" +dependencies = [ + "base64 0.21.5", + "clap", + "futures", + "futures-timer", + "hyper 0.14.27", + "libp2p", + "prometheus-client", + "serde", + "serde_derive", + "serde_json", + "tokio", + "tracing", + "tracing-subscriber", + "zeroize", ] [[package]] name = "libp2p-swarm" -version = "0.43.0" +version = "0.44.1" dependencies = [ "async-std", "either", - "env_logger 0.10.0", "fnv", "futures", "futures-timer", - "getrandom 0.2.9", + "getrandom 0.2.11", "instant", "libp2p-core", "libp2p-identify", @@ -2991,28 +3265,32 @@ dependencies = [ "libp2p-swarm-derive", "libp2p-swarm-test", "libp2p-yamux", - "log", + "multistream-select", "once_cell", "quickcheck-ext", "rand 0.8.5", "smallvec", "tokio", + "tracing", + "tracing-subscriber", + "trybuild", "void", "wasm-bindgen-futures", ] [[package]] name = "libp2p-swarm-derive" -version = "0.33.0" +version = "0.34.2" dependencies = [ "heck", + "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.43", ] [[package]] name = "libp2p-swarm-test" -version = "0.2.0" +version = "0.3.0" dependencies = [ "async-trait", "futures", @@ -3023,31 +3301,31 @@ dependencies = [ "libp2p-swarm", "libp2p-tcp", "libp2p-yamux", - "log", "rand 0.8.5", + "tracing", ] [[package]] name = "libp2p-tcp" -version = "0.40.0" +version = "0.41.0" dependencies = [ - "async-io", + "async-io 2.2.2", "async-std", - "env_logger 0.10.0", "futures", "futures-timer", "if-watch", "libc", "libp2p-core", "libp2p-identity", - "log", - "socket2 0.5.2", + "socket2 0.5.5", "tokio", + "tracing", + "tracing-subscriber", ] [[package]] name = "libp2p-tls" -version = "0.2.0" +version = "0.3.0" dependencies = [ "futures", "futures-rustls", @@ -3057,81 +3335,116 @@ dependencies = [ "libp2p-identity", 
"libp2p-swarm", "libp2p-yamux", - "rcgen 0.10.0", - "ring", - "rustls 0.20.8", + "rcgen", + "ring 0.16.20", + "rustls 0.21.9", + "rustls-webpki", "thiserror", "tokio", - "webpki 0.22.0", - "x509-parser 0.15.0", + "x509-parser", "yasna", ] [[package]] name = "libp2p-uds" -version = "0.39.0" +version = "0.40.0" dependencies = [ "async-std", "futures", "libp2p-core", - "log", "tempfile", "tokio", + "tracing", ] [[package]] -name = "libp2p-wasm-ext" -version = "0.40.0" +name = "libp2p-upnp" +version = "0.2.0" dependencies = [ "futures", - "js-sys", + "futures-timer", + "igd-next", "libp2p-core", - "parity-send-wrapper", - "wasm-bindgen", - "wasm-bindgen-futures", + "libp2p-swarm", + "tokio", + "tracing", + "void", ] [[package]] name = "libp2p-webrtc" -version = "0.5.0-alpha" +version = "0.7.0-alpha" dependencies = [ - "anyhow", "async-trait", - "asynchronous-codec", "bytes", - "env_logger 0.10.0", "futures", "futures-timer", "hex", - "hex-literal", "if-watch", "libp2p-core", "libp2p-identity", "libp2p-noise", - "libp2p-ping", - "libp2p-swarm", - "log", + "libp2p-webrtc-utils", "multihash", - "quick-protobuf", - "quick-protobuf-codec", "quickcheck", "rand 0.8.5", - "rcgen 0.9.3", + "rcgen", "serde", - "sha2 0.10.6", "stun", "thiserror", "tinytemplate", "tokio", "tokio-util", - "unsigned-varint", - "void", + "tracing", + "tracing-subscriber", "webrtc", ] +[[package]] +name = "libp2p-webrtc-utils" +version = "0.2.0" +dependencies = [ + "asynchronous-codec", + "bytes", + "futures", + "hex", + "hex-literal", + "libp2p-core", + "libp2p-identity", + "libp2p-noise", + "quick-protobuf", + "quick-protobuf-codec", + "rand 0.8.5", + "serde", + "sha2 0.10.8", + "thiserror", + "tinytemplate", + "tracing", +] + +[[package]] +name = "libp2p-webrtc-websys" +version = "0.3.0-alpha" +dependencies = [ + "bytes", + "futures", + "getrandom 0.2.11", + "hex", + "js-sys", + "libp2p-core", + "libp2p-identity", + "libp2p-webrtc-utils", + "send_wrapper 0.6.0", + "thiserror", + "tracing", + 
"wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", +] + [[package]] name = "libp2p-websocket" -version = "0.42.0" +version = "0.43.0" dependencies = [ "async-std", "either", @@ -3141,27 +3454,68 @@ dependencies = [ "libp2p-dns", "libp2p-identity", "libp2p-tcp", - "log", - "parking_lot 0.12.1", - "quicksink", - "rcgen 0.9.3", + "parking_lot", + "pin-project-lite", + "rcgen", "rw-stream-sink", "soketto", + "tracing", "url", - "webpki-roots 0.23.0", + "webpki-roots", +] + +[[package]] +name = "libp2p-websocket-websys" +version = "0.3.1" +dependencies = [ + "bytes", + "futures", + "js-sys", + "libp2p-core", + "libp2p-identity", + "libp2p-noise", + "libp2p-yamux", + "parking_lot", + "send_wrapper 0.6.0", + "thiserror", + "tracing", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "libp2p-webtransport-websys" +version = "0.2.0" +dependencies = [ + "futures", + "js-sys", + "libp2p-core", + "libp2p-identity", + "libp2p-noise", + "multiaddr", + "multibase", + "multihash", + "send_wrapper 0.6.0", + "thiserror", + "tracing", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", ] [[package]] name = "libp2p-yamux" -version = "0.44.0" +version = "0.45.1" dependencies = [ "async-std", + "either", "futures", "libp2p-core", "libp2p-muxer-test-harness", - "log", "thiserror", - "yamux", + "tracing", + "yamux 0.12.1", + "yamux 0.13.1", ] [[package]] @@ -3212,17 +3566,6 @@ dependencies = [ "libsecp256k1-core", ] -[[package]] -name = "libz-sys" -version = "1.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9702761c3935f8cc2f101793272e202c72b99da8f4224a19ddcf1279a6450bbf" -dependencies = [ - "cc", - "pkg-config", - "vcpkg", -] - [[package]] name = "linked-hash-map" version = "0.5.6" @@ -3231,15 +3574,21 @@ checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" [[package]] name = "linux-raw-sys" -version = "0.3.0" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" + +[[package]] +name = "linux-raw-sys" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd550e73688e6d578f0ac2119e32b797a327631a42f9433e59d02e139c8df60d" +checksum = "da2479e8c062e40bf0066ffa0bc823de0a9368974af99c9f6df941d2c231e03f" [[package]] name = "lock_api" -version = "0.4.9" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "435011366fe56583b16cf956f9df0095b405b82d76425bc8981c0e22e60ec4df" +checksum = "c1cc9717a20b1bb222f333e6a92fd32f7d8a18ddc5a3191a11af45dcbf4dcd16" dependencies = [ "autocfg", "scopeguard", @@ -3247,33 +3596,20 @@ dependencies = [ [[package]] name = "log" -version = "0.4.17" +version = "0.4.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" +checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" dependencies = [ - "cfg-if", "value-bag", ] -[[package]] -name = "log-derive" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a42526bb432bcd1b43571d5f163984effa25409a29f1a3242a54d0577d55bcf" -dependencies = [ - "darling 0.10.2", - "proc-macro2", - "quote", - "syn 1.0.109", -] - [[package]] name = "lru" -version = "0.10.0" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03f1160296536f10c833a82dca22267d5486734230d47bf00bf435885814ba1e" +checksum = "2994eeba8ed550fd9b47a0b38f0242bc3344e496483c6180b69139cc2fa5d1d7" dependencies = [ - "hashbrown 0.13.2", + "hashbrown 0.14.0", ] [[package]] @@ -3292,10 +3628,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4" [[package]] -name = "matches" -version = "0.1.10" +name = "matchers" +version = "0.1.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" +dependencies = [ + "regex-automata 0.1.10", +] + +[[package]] +name = "matchit" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5" +checksum = "67827e6ea8ee8a7c4a72227ef4fc08957040acffdb5f122733b24fa12daff41b" [[package]] name = "md-5" @@ -3303,44 +3648,74 @@ version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6365506850d44bff6e2fbcb5176cf63650e48bd45ef2fe2665ae1570e0f4b9ca" dependencies = [ - "digest 0.10.6", + "digest 0.10.7", ] [[package]] name = "memchr" -version = "2.5.0" +version = "2.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" +checksum = "8f232d6ef707e1956a43342693d2a31e72989554d58299d7a88738cc95b0d35c" [[package]] name = "memoffset" -version = "0.6.5" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce" +checksum = "5de893c32cde5f383baa4c04c5d6dbdd735cfd4a794b0debdb2bb1b421da5ff4" dependencies = [ "autocfg", ] [[package]] name = "memoffset" -version = "0.7.1" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5de893c32cde5f383baa4c04c5d6dbdd735cfd4a794b0debdb2bb1b421da5ff4" +checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c" dependencies = [ "autocfg", ] [[package]] -name = "metrics-example" -version = "0.1.0" +name = "memory-stats" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34f79cf9964c5c9545493acda1263f1912f8d2c56c8a2ffee2606cb960acaacc" +dependencies = [ + "libc", + "winapi", +] + +[[package]] +name = "metrics-example" +version = 
"0.1.0" +dependencies = [ + "futures", + "hyper 0.14.27", + "libp2p", + "opentelemetry", + "opentelemetry-otlp", + "opentelemetry_api", + "prometheus-client", + "tokio", + "tracing", + "tracing-opentelemetry", + "tracing-subscriber", +] + +[[package]] +name = "mime" +version = "0.3.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" + +[[package]] +name = "mime_guess" +version = "2.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4192263c238a5f0d0c6bfd21f336a313a4ce1c450542449ca191bb657b4642ef" dependencies = [ - "env_logger 0.10.0", - "futures", - "hyper", - "libp2p", - "log", - "prometheus-client", - "tokio", + "mime", + "unicase", ] [[package]] @@ -3360,32 +3735,31 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.5" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5d732bc30207a6423068df043e3d02e0735b155ad7ce1a6f76fe2baa5b158de" +checksum = "3dce281c5e46beae905d4de1870d8b1509a9142b62eedf18b443b011ca8343d0" dependencies = [ "libc", - "log", "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys 0.42.0", + "windows-sys 0.48.0", ] [[package]] name = "multiaddr" -version = "0.17.1" +version = "0.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b36f567c7099511fa8612bbbb52dda2419ce0bdbacf31714e3a5ffdb766d3bd" +checksum = "8b852bc02a2da5feed68cd14fa50d0774b92790a5bdbfa932a813926c8472070" dependencies = [ "arrayref", "byteorder", "data-encoding", - "log", + "libp2p-identity", "multibase", "multihash", "percent-encoding", "serde", "static_assertions", - "unsigned-varint", + "unsigned-varint 0.7.2", "url", ] @@ -3402,32 +3776,16 @@ dependencies = [ [[package]] name = "multihash" -version = "0.17.0" +version = "0.19.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"835d6ff01d610179fbce3de1694d007e500bf33a7f29689838941d6bf783ae40" +checksum = "076d548d76a0e2a0d4ab471d0b1c36c577786dfc4471242035d97a12a735c492" dependencies = [ "arbitrary", "core2", - "multihash-derive", "quickcheck", "rand 0.8.5", "serde", - "serde-big-array", - "unsigned-varint", -] - -[[package]] -name = "multihash-derive" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d6d4752e6230d8ef7adf7bd5d8c4b1f6561c1014c5ba9a37445ccefe18aa1db" -dependencies = [ - "proc-macro-crate", - "proc-macro-error", - "proc-macro2", - "quote", - "syn 1.0.109", - "synstructure", + "unsigned-varint 0.7.2", ] [[package]] @@ -3436,20 +3794,34 @@ version = "0.13.0" dependencies = [ "async-std", "bytes", - "env_logger 0.10.0", "futures", - "libp2p-core", - "libp2p-identity", - "libp2p-mplex", - "libp2p-plaintext", - "libp2p-swarm", - "log", + "futures_ringbuf", "pin-project", "quickcheck-ext", "rand 0.8.5", "rw-stream-sink", "smallvec", - "unsigned-varint", + "tracing", + "tracing-subscriber", + "unsigned-varint 0.8.0", +] + +[[package]] +name = "native-tls" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07226173c32f2926027b63cce4bcd8076c3552846cbe7925f3aaffeac0a3b92e" +dependencies = [ + "lazy_static", + "libc", + "log", + "openssl", + "openssl-probe", + "openssl-sys", + "schannel", + "security-framework", + "security-framework-sys", + "tempfile", ] [[package]] @@ -3471,7 +3843,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d9ea4302b9759a7a88242299225ea3688e63c85ea136371bb6cf94fd674efaab" dependencies = [ "anyhow", - "bitflags", + "bitflags 1.3.2", "byteorder", "libc", "netlink-packet-core", @@ -3507,11 +3879,11 @@ dependencies = [ [[package]] name = "netlink-sys" -version = "0.8.4" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "260e21fbb6f3d253a14df90eb0000a6066780a15dd901a7519ce02d77a94985b" 
+checksum = "6471bf08e7ac0135876a9581bf3217ef0333c191c128d34878079f42ee150411" dependencies = [ - "async-io", + "async-io 1.13.0", "bytes", "futures", "libc", @@ -3525,10 +3897,22 @@ version = "0.24.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fa52e972a9a719cecb6864fb88568781eb706bac2cd1d4f04a648542dbf78069" dependencies = [ - "bitflags", + "bitflags 1.3.2", + "cfg-if", + "libc", +] + +[[package]] +name = "nix" +version = "0.26.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "598beaf3cc6fdd9a5dfb1630c2800c7acd31df7aaf0f565796fba2b53ca1af1b" +dependencies = [ + "bitflags 1.3.2", "cfg-if", "libc", - "memoffset 0.6.5", + "memoffset 0.7.1", + "pin-utils", ] [[package]] @@ -3547,6 +3931,25 @@ dependencies = [ "minimal-lexical", ] +[[package]] +name = "ntapi" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8a3895c6391c39d7fe7ebc444a87eb2991b2a0bc718fdabd071eec617fc68e4" +dependencies = [ + "winapi", +] + +[[package]] +name = "nu-ansi-term" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" +dependencies = [ + "overload", + "winapi", +] + [[package]] name = "num-bigint" version = "0.4.3" @@ -3570,30 +3973,30 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.15" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" +checksum = "39e3200413f237f41ab11ad6d161bc7239c84dcb631773ccd7de3dfe4b5c267c" dependencies = [ "autocfg", ] [[package]] name = "num_cpus" -version = "1.15.0" +version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fac9e2da13b5eb447a6ce3d392f23a29d8694bff781bf03a16cd9ac8697593b" +checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" 
dependencies = [ - "hermit-abi 0.2.6", + "hermit-abi", "libc", ] [[package]] -name = "oid-registry" -version = "0.4.0" +name = "object" +version = "0.31.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38e20717fa0541f39bd146692035c37bedfa532b3e5071b35761082407546b2a" +checksum = "8bda667d9f2b5051b8833f59f3bf748b28ef54f850f4fcb389a252aa383866d1" dependencies = [ - "asn1-rs 0.3.1", + "memchr", ] [[package]] @@ -3602,14 +4005,14 @@ version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9bedf36ffb6ba96c2eb7144ef6270557b52e54b20c0a8e1eb2ff99a6c6959bff" dependencies = [ - "asn1-rs 0.5.1", + "asn1-rs", ] [[package]] name = "once_cell" -version = "1.17.1" +version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3" +checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" [[package]] name = "oorandom" @@ -3624,137 +4027,220 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] -name = "os_str_bytes" -version = "6.4.1" +name = "openssl" +version = "0.10.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b7820b9daea5457c9f21c69448905d723fbd21136ccf521748f23fd49e723ee" +checksum = "79a4c6c3a2b158f7f8f2a2fc5a969fa3a068df6fc9dbb4a43845436e3af7c800" +dependencies = [ + "bitflags 2.4.1", + "cfg-if", + "foreign-types", + "libc", + "once_cell", + "openssl-macros", + "openssl-sys", +] [[package]] -name = "p256" -version = "0.11.1" +name = "openssl-macros" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51f44edd08f51e2ade572f141051021c5af22677e42b7dd28a88155151c33594" +checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ - "ecdsa 0.14.8", - "elliptic-curve 0.12.3", - 
"sha2 0.10.6", + "proc-macro2", + "quote", + "syn 2.0.43", ] [[package]] -name = "p256" -version = "0.13.2" +name = "openssl-probe" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9863ad85fa8f4460f9c48cb909d38a0d689dba1f6f6988a5e3e0d31071bcd4b" +checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" + +[[package]] +name = "openssl-sys" +version = "0.9.96" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3812c071ba60da8b5677cc12bcb1d42989a65553772897a7e0355545a819838f" dependencies = [ - "ecdsa 0.16.6", - "elliptic-curve 0.13.4", - "primeorder", - "sha2 0.10.6", + "cc", + "libc", + "pkg-config", + "vcpkg", ] [[package]] -name = "p384" -version = "0.11.2" +name = "opentelemetry" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfc8c5bf642dde52bb9e87c0ecd8ca5a76faac2eeed98dedb7c717997e1080aa" +checksum = "9591d937bc0e6d2feb6f71a559540ab300ea49955229c347a517a28d27784c54" dependencies = [ - "ecdsa 0.14.8", - "elliptic-curve 0.12.3", - "sha2 0.10.6", + "opentelemetry_api", + "opentelemetry_sdk", ] [[package]] -name = "packed_simd_2" -version = "0.3.8" +name = "opentelemetry-otlp" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1914cd452d8fccd6f9db48147b29fd4ae05bea9dc5d9ad578509f72415de282" +checksum = "7e5e5a5c4135864099f3faafbe939eb4d7f9b80ebf68a8448da961b32a7c1275" dependencies = [ - "cfg-if", - "libm", + "async-trait", + "futures-core", + "http 0.2.9", + "opentelemetry-proto", + "opentelemetry-semantic-conventions", + "opentelemetry_api", + "opentelemetry_sdk", + "prost", + "thiserror", + "tokio", + "tonic", ] [[package]] -name = "parity-send-wrapper" -version = "0.1.0" +name = "opentelemetry-proto" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa9777aa91b8ad9dd5aaa04a9b6bcb02c7f1deb952fca5a66034d5e63afc5c6f" 
+checksum = "b1e3f814aa9f8c905d0ee4bde026afd3b2577a97c10e1699912e3e44f0c4cbeb" +dependencies = [ + "opentelemetry_api", + "opentelemetry_sdk", + "prost", + "tonic", +] [[package]] -name = "parking" -version = "2.0.0" +name = "opentelemetry-semantic-conventions" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "427c3892f9e783d91cc128285287e70a59e206ca452770ece88a76f7a3eddd72" +checksum = "73c9f9340ad135068800e7f1b24e9e09ed9e7143f5bf8518ded3d3ec69789269" +dependencies = [ + "opentelemetry", +] [[package]] -name = "parking_lot" -version = "0.11.2" +name = "opentelemetry_api" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99" +checksum = "8a81f725323db1b1206ca3da8bb19874bbd3f57c3bcd59471bfb04525b265b9b" dependencies = [ - "instant", - "lock_api", - "parking_lot_core 0.8.6", + "futures-channel", + "futures-util", + "indexmap 1.9.3", + "js-sys", + "once_cell", + "pin-project-lite", + "thiserror", + "urlencoding", ] [[package]] -name = "parking_lot" -version = "0.12.1" +name = "opentelemetry_sdk" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" +checksum = "fa8e705a0612d48139799fcbaba0d4a90f06277153e43dd2bdc16c6f0edd8026" dependencies = [ - "lock_api", - "parking_lot_core 0.9.6", + "async-trait", + "crossbeam-channel", + "futures-channel", + "futures-executor", + "futures-util", + "once_cell", + "opentelemetry_api", + "ordered-float", + "percent-encoding", + "rand 0.8.5", + "regex", + "serde_json", + "thiserror", + "tokio", + "tokio-stream", ] [[package]] -name = "parking_lot_core" -version = "0.8.6" +name = "ordered-float" +version = "3.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60a2cfe6f0ad2bfc16aefa463b497d5c7a5ecd44a23efa72aa342d90177356dc" +checksum = 
"f1e1c390732d15f1d48471625cd92d154e66db2c56645e29a9cd26f4699f72dc" dependencies = [ - "cfg-if", - "instant", - "libc", - "redox_syscall 0.2.16", - "smallvec", - "winapi", + "num-traits", +] + +[[package]] +name = "overload" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" + +[[package]] +name = "p256" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c9863ad85fa8f4460f9c48cb909d38a0d689dba1f6f6988a5e3e0d31071bcd4b" +dependencies = [ + "ecdsa", + "elliptic-curve", + "primeorder", + "sha2 0.10.8", +] + +[[package]] +name = "p384" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70786f51bcc69f6a4c0360e063a4cac5419ef7c5cd5b3c99ad70f3be5ba79209" +dependencies = [ + "ecdsa", + "elliptic-curve", + "primeorder", + "sha2 0.10.8", +] + +[[package]] +name = "parking" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "14f2252c834a40ed9bb5422029649578e63aa341ac401f74e719dd1afda8394e" + +[[package]] +name = "parking_lot" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" +dependencies = [ + "lock_api", + "parking_lot_core", ] [[package]] name = "parking_lot_core" -version = "0.9.6" +version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba1ef8814b5c993410bb3adfad7a5ed269563e4a2f90c41f5d85be7fb47133bf" +checksum = "93f00c865fe7cabf650081affecd3871070f26767e7b2070a3ffae14c654b447" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.2.16", + "redox_syscall 0.3.5", "smallvec", - "windows-sys 0.42.0", + "windows-targets 0.48.5", ] [[package]] name = "paste" -version = "1.0.11" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d01a5bd0424d00070b0098dd17ebca6f961a959dead1dbcbbbc1d1cd8d3deeba" +checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c" [[package]] name = "pem" -version = "1.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8835c273a76a90455d7344889b0964598e3316e2a79ede8e36f16bdcf2228b8" -dependencies = [ - "base64 0.13.1", -] - -[[package]] -name = "pem-rfc7468" -version = "0.6.0" +version = "3.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24d159833a9105500e0398934e205e0773f0b27529557134ecfc51c27646adac" +checksum = "3163d2912b7c3b52d651a055f2c7eec9ba5cd22d26ef75b8dd3a59980b185923" dependencies = [ - "base64ct", + "base64 0.21.5", + "serde", ] [[package]] @@ -3768,41 +4254,35 @@ dependencies = [ [[package]] name = "percent-encoding" -version = "2.2.0" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" +checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pin-project" -version = "1.0.12" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad29a609b6bcd67fee905812e544992d216af9d755757c05ed2d0e15a74c6ecc" +checksum = "fda4ed1c6c173e3fc7a83629421152e01d7b1f9b7f65fb301e490e8cfc656422" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.0.12" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55" +checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.43", ] [[package]] name = "pin-project-lite" -version = "0.1.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"257b64915a082f7811703966789728173279bdebb956b143dbcd23f6f970a777" - -[[package]] -name = "pin-project-lite" -version = "0.2.9" +version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0a7ae3ac2f1173085d398531c705756c94a4c56843785df85a60c1a0afac116" +checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58" [[package]] name = "pin-utils" @@ -3814,21 +4294,11 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" name = "ping-example" version = "0.1.0" dependencies = [ - "async-std", - "async-trait", "futures", "libp2p", - "multiaddr", -] - -[[package]] -name = "pkcs8" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9eca2c590a5f85da82668fa685c09ce2888b9430e83299debf1f34b65fd4a4ba" -dependencies = [ - "der 0.6.1", - "spki 0.6.0", + "tokio", + "tracing", + "tracing-subscriber", ] [[package]] @@ -3837,15 +4307,15 @@ version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" dependencies = [ - "der 0.7.5", - "spki 0.7.2", + "der", + "spki", ] [[package]] name = "pkg-config" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ac9a59f73473f1b8d852421e59e64809f025994837ef743615c6d0c5b305160" +checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" [[package]] name = "platforms" @@ -3855,9 +4325,9 @@ checksum = "e3d7ddaed09e0eb771a79ab0fd64609ba0afb0a8366421957936ad14cbd13630" [[package]] name = "plotters" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2538b639e642295546c50fcd545198c9d64ee2a38620a628724a3b266d5fbf97" +checksum = "d2c224ba00d7cadd4d5c660deaf2098e5e80e07846537c51f9cfa4be50c1fd45" dependencies = [ "num-traits", "plotters-backend", @@ -3868,60 +4338,65 @@ dependencies = 
[ [[package]] name = "plotters-backend" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "193228616381fecdc1224c62e96946dfbc73ff4384fba576e052ff8c1bea8142" +checksum = "9e76628b4d3a7581389a35d5b6e2139607ad7c75b17aed325f210aa91f4a9609" [[package]] name = "plotters-svg" -version = "0.3.3" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9a81d2759aae1dae668f783c308bc5c8ebd191ff4184aaa1b37f65a6ae5a56f" +checksum = "38f6d39893cca0701371e3c27294f09797214b86f1fb951b89ade8ec04e2abab" dependencies = [ "plotters-backend", ] [[package]] name = "polling" -version = "2.5.2" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22122d5ec4f9fe1b3916419b76be1e80bcb93f618d071d2edf841b137b2a2bd6" +checksum = "4b2d323e8ca7996b3e23126511a523f7e62924d93ecd5ae73b333815b0eb3dce" dependencies = [ "autocfg", + "bitflags 1.3.2", "cfg-if", + "concurrent-queue", "libc", "log", - "wepoll-ffi", - "windows-sys 0.42.0", + "pin-project-lite", + "windows-sys 0.48.0", ] [[package]] -name = "poly1305" -version = "0.7.2" +name = "polling" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "048aeb476be11a4b6ca432ca569e375810de9294ae78f4774e78ea98a9246ede" +checksum = "e53b6af1f60f36f8c2ac2aad5459d75a5a9b4be1e8cdd40264f315d78193e531" dependencies = [ - "cpufeatures", - "opaque-debug", - "universal-hash", + "cfg-if", + "concurrent-queue", + "pin-project-lite", + "rustix 0.38.21", + "tracing", + "windows-sys 0.48.0", ] [[package]] -name = "polyval" -version = "0.4.5" +name = "poly1305" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eebcc4aa140b9abd2bc40d9c3f7ccec842679cd79045ac3a7ac698c1a064b7cd" +checksum = "8159bd90725d2df49889a078b54f4f79e87f1f8a8444194cdca81d38f5393abf" dependencies = [ - "cpuid-bool", + "cpufeatures", "opaque-debug", "universal-hash", 
] [[package]] name = "polyval" -version = "0.5.3" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8419d2b623c7c0896ff2d5d96e2cb4ede590fed28fcc34934f4c33c036e620a1" +checksum = "d52cff9d1d4dee5fe6d03729099f4a310a41179e0a10dbf542039873f2e826fb" dependencies = [ "cfg-if", "cpufeatures", @@ -3937,21 +4412,11 @@ checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" [[package]] name = "primeorder" -version = "0.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf8d3875361e28f7753baefef104386e7aa47642c93023356d97fdef4003bfb5" -dependencies = [ - "elliptic-curve 0.13.4", -] - -[[package]] -name = "proc-macro-crate" -version = "1.1.3" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e17d47ce914bf4de440332250b0edd23ce48c005f59fab39d3335866b114f11a" +checksum = "3c2fcef82c0ec6eefcc179b978446c399b3cdf73c392c35604e399eee6df1ee3" dependencies = [ - "thiserror", - "toml", + "elliptic-curve", ] [[package]] @@ -3980,31 +4445,54 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.56" +version = "1.0.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b63bdb0cd06f1f4dedf69b254734f9b45af66e4a031e42a7480257d9898b435" +checksum = "75cb1540fadbd5b8fbccc4dddad2734eba435053f725621c070711a14bb5f4b8" dependencies = [ "unicode-ident", ] [[package]] name = "prometheus-client" -version = "0.20.0" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e227aeb6c2cfec819e999c4773b35f8c7fa37298a203ff46420095458eee567e" +checksum = "510c4f1c9d81d556458f94c98f857748130ea9737bbd6053da497503b26ea63c" dependencies = [ "dtoa", "itoa", - "parking_lot 0.12.1", + "parking_lot", "prometheus-client-derive-encode", ] [[package]] name = "prometheus-client-derive-encode" -version = "0.4.1" +version = "0.4.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.43", +] + +[[package]] +name = "prost" +version = "0.11.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b82eaa1d779e9a4bc1c3217db8ffbeabaae1dca241bf70183242128d48681cd" +dependencies = [ + "bytes", + "prost-derive", +] + +[[package]] +name = "prost-derive" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72b6a5217beb0ad503ee7fa752d451c905113d70721b937126158f3106a48cc1" +checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4" dependencies = [ + "anyhow", + "itertools", "proc-macro2", "quote", "syn 1.0.109", @@ -4027,13 +4515,16 @@ dependencies = [ [[package]] name = "quick-protobuf-codec" -version = "0.2.0" +version = "0.3.1" dependencies = [ "asynchronous-codec", "bytes", + "criterion", + "futures", "quick-protobuf", + "quickcheck-ext", "thiserror", - "unsigned-varint", + "unsigned-varint 0.8.0", ] [[package]] @@ -4056,39 +4547,60 @@ dependencies = [ ] [[package]] -name = "quicksink" -version = "0.1.2" +name = "quinn" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77de3c815e5a160b1539c6592796801df2043ae35e123b46d73380cfa57af858" +checksum = "8cc2c5017e4b43d5995dcea317bc46c1e09404c0a9664d2908f7f02dfe943d75" dependencies = [ - "futures-core", - "futures-sink", - "pin-project-lite 0.1.12", + "async-io 1.13.0", + "async-std", + "bytes", + "futures-io", + "pin-project-lite", + "quinn-proto", + "quinn-udp", + "rustc-hash", + "rustls 0.21.9", + "thiserror", + "tokio", + "tracing", ] [[package]] name = "quinn-proto" -version = "0.9.3" +version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67c10f662eee9c94ddd7135043e544f3c82fa839a1e7b865911331961b53186c" +checksum = 
"2c78e758510582acc40acb90458401172d41f1016f8c9dde89e49677afb7eec1" dependencies = [ "bytes", "rand 0.8.5", - "ring", + "ring 0.16.20", "rustc-hash", - "rustls 0.20.8", + "rustls 0.21.9", "slab", "thiserror", "tinyvec", "tracing", - "webpki 0.22.0", +] + +[[package]] +name = "quinn-udp" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6df19e284d93757a9fb91d63672f7741b129246a669db09d1c0063071debc0c0" +dependencies = [ + "bytes", + "libc", + "socket2 0.5.5", + "tracing", + "windows-sys 0.48.0", ] [[package]] name = "quote" -version = "1.0.26" +version = "1.0.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4424af4bf778aae2051a77b60283332f386554255d722233d09fbfc7e30da2fc" +checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" dependencies = [ "proc-macro2", ] @@ -4152,7 +4664,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.9", + "getrandom 0.2.11", ] [[package]] @@ -4166,9 +4678,9 @@ dependencies = [ [[package]] name = "rayon" -version = "1.6.1" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6db3a213adf02b3bcfd2d3846bb41cb22857d131789e01df434fb7e7bc0759b7" +checksum = "1d2df5196e37bcc87abebc0053e20787d73847bb33134a69841207dd0a47f03b" dependencies = [ "either", "rayon-core", @@ -4176,9 +4688,9 @@ dependencies = [ [[package]] name = "rayon-core" -version = "1.10.2" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "356a0625f1954f730c0201cdab48611198dc6ce21f4acff55089b5a78e6e835b" +checksum = "4b8f95bd6966f5c87776639160a66bd8ab9895d9d4ab01ddba9fc60661aebe8d" dependencies = [ "crossbeam-channel", "crossbeam-deque", @@ -4188,34 +4700,22 @@ dependencies = [ [[package]] name = "rcgen" -version = "0.9.3" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "6413f3de1edee53342e6138e75b56d32e7bc6e332b3bd62d497b1929d4cfbcdd" -dependencies = [ - "pem", - "ring", - "time", - "x509-parser 0.13.2", - "yasna", -] - -[[package]] -name = "rcgen" -version = "0.10.0" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffbe84efe2f38dea12e9bfc1f65377fdf03e53a18cb3b995faedf7934c7e785b" +checksum = "52c4f3084aa3bc7dfbba4eff4fab2a54db4324965d8872ab933565e6fbd83bc6" dependencies = [ "pem", - "ring", + "ring 0.16.20", "time", + "x509-parser", "yasna", ] [[package]] name = "redis" -version = "0.23.0" +version = "0.23.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ea8c51b5dc1d8e5fd3350ec8167f464ec0995e79f2e90a075b63371500d557f" +checksum = "4f49cdc0bb3f412bf8e7d1bd90fe1d9eb10bc5c399ba90973c14662a27b3f8ba" dependencies = [ "async-trait", "bytes", @@ -4223,7 +4723,7 @@ dependencies = [ "futures-util", "itoa", "percent-encoding", - "pin-project-lite 0.2.9", + "pin-project-lite", "ryu", "tokio", "tokio-util", @@ -4236,7 +4736,7 @@ version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" dependencies = [ - "bitflags", + "bitflags 1.3.2", ] [[package]] @@ -4245,25 +4745,72 @@ version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29" dependencies = [ - "bitflags", + "bitflags 1.3.2", +] + +[[package]] +name = "redox_syscall" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" +dependencies = [ + "bitflags 1.3.2", +] + +[[package]] +name = "redox_users" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" +dependencies = [ + "getrandom 0.2.11", + "redox_syscall 0.2.16", + "thiserror", ] [[package]] name = "regex" -version = "1.8.1" +version = "1.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "380b951a9c5e80ddfd6136919eef32310721aa4aacd4889a8d39124b026ab343" +dependencies = [ + "aho-corasick 1.0.2", + "memchr", + "regex-automata 0.4.3", + "regex-syntax 0.8.2", +] + +[[package]] +name = "regex-automata" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" +dependencies = [ + "regex-syntax 0.6.29", +] + +[[package]] +name = "regex-automata" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af83e617f331cc6ae2da5443c602dfa5af81e517212d9d611a5b3ba1777b5370" +checksum = "5f804c7828047e88b2d32e2d7fe5a105da8ee3264f01902f796c8e067dc2483f" dependencies = [ - "aho-corasick", + "aho-corasick 1.0.2", "memchr", - "regex-syntax", + "regex-syntax 0.8.2", ] [[package]] name = "regex-syntax" -version = "0.7.1" +version = "0.6.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" + +[[package]] +name = "regex-syntax" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5996294f19bd3aae0453a862ad728f60e6600695733dd5df01da90c54363a3c" +checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" [[package]] name = "relay-server-example" @@ -4271,10 +4818,11 @@ version = "0.1.0" dependencies = [ "async-std", "async-trait", - "clap 4.2.7", - "env_logger 0.10.0", + "clap", "futures", "libp2p", + "tracing", + "tracing-subscriber", ] [[package]] @@ -4283,11 +4831,49 @@ version = "0.1.0" dependencies = [ "async-std", "async-trait", - "env_logger 0.10.0", "futures", "libp2p", + 
"tokio", + "tracing", + "tracing-subscriber", +] + +[[package]] +name = "reqwest" +version = "0.11.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37b1ae8d9ac08420c66222fb9096fc5de435c3c48542bc5336c51892cffafb41" +dependencies = [ + "base64 0.21.5", + "bytes", + "encoding_rs", + "futures-core", + "futures-util", + "h2 0.3.20", + "http 0.2.9", + "http-body 0.4.5", + "hyper 0.14.27", + "hyper-tls", + "ipnet", + "js-sys", "log", + "mime", + "native-tls", + "once_cell", + "percent-encoding", + "pin-project-lite", + "serde", + "serde_json", + "serde_urlencoded", + "system-configuration", "tokio", + "tokio-native-tls", + "tower-service", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", + "winreg", ] [[package]] @@ -4300,17 +4886,6 @@ dependencies = [ "quick-error", ] -[[package]] -name = "rfc6979" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7743f17af12fa0b03b803ba12cd6a8d9483a587e89c69445e3909655c0b9fabb" -dependencies = [ - "crypto-bigint 0.4.9", - "hmac 0.12.1", - "zeroize", -] - [[package]] name = "rfc6979" version = "0.4.0" @@ -4330,26 +4905,40 @@ dependencies = [ "cc", "libc", "once_cell", - "spin", - "untrusted", + "spin 0.5.2", + "untrusted 0.7.1", "web-sys", "winapi", ] +[[package]] +name = "ring" +version = "0.17.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb0205304757e5d899b9c2e448b867ffd03ae7f988002e47cd24954391394d0b" +dependencies = [ + "cc", + "getrandom 0.2.11", + "libc", + "spin 0.9.8", + "untrusted 0.9.0", + "windows-sys 0.48.0", +] + [[package]] name = "ringbuf" -version = "0.2.8" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f65af18d50f789e74aaf23bbb3f65dcd22a3cb6e029b5bced149f6bd57c5c2a2" +checksum = "79abed428d1fd2a128201cec72c5f6938e2da607c6f3745f769fabea399d950a" dependencies = [ - "cache-padded", + "crossbeam-utils", ] [[package]] name = "rmp" -version = 
"0.8.11" +version = "0.8.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44519172358fd6d58656c86ab8e7fbc9e1490c3e8f14d35ed78ca0dd07403c9f" +checksum = "7f9860a6cc38ed1da53456442089b4dfa35e7cedaa326df63017af88385e6b20" dependencies = [ "byteorder", "num-traits", @@ -4358,9 +4947,9 @@ dependencies = [ [[package]] name = "rmp-serde" -version = "1.1.1" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5b13be192e0220b8afb7222aa5813cb62cc269ebb5cac346ca6487681d2913e" +checksum = "bffea85eea980d8a74453e5d02a8d93028f3c34725de143085a844ebe953258a" dependencies = [ "byteorder", "rmp", @@ -4369,9 +4958,9 @@ dependencies = [ [[package]] name = "rtcp" -version = "0.7.2" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1919efd6d4a6a85d13388f9487549bb8e359f17198cc03ffd72f79b553873691" +checksum = "3677908cadfbecb4cc1da9a56a32524fae4ebdfa7c2ea93886e1b1e846488cb9" dependencies = [ "bytes", "thiserror", @@ -4389,18 +4978,17 @@ dependencies = [ "log", "netlink-packet-route", "netlink-proto", - "nix", + "nix 0.24.3", "thiserror", "tokio", ] [[package]] name = "rtp" -version = "0.6.8" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2a095411ff00eed7b12e4c6a118ba984d113e1079582570d56a5ee723f11f80" +checksum = "e60482acbe8afb31edf6b1413103b7bca7a65004c423b3c3993749a083994fbe" dependencies = [ - "async-trait", "bytes", "rand 0.8.5", "serde", @@ -4408,6 +4996,48 @@ dependencies = [ "webrtc-util", ] +[[package]] +name = "rust-embed" +version = "8.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "810294a8a4a0853d4118e3b94bb079905f2107c7fe979d8f0faae98765eb6378" +dependencies = [ + "rust-embed-impl", + "rust-embed-utils", + "walkdir", +] + +[[package]] +name = "rust-embed-impl" +version = "8.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"bfc144a1273124a67b8c1d7cd19f5695d1878b31569c0512f6086f0f4676604e" +dependencies = [ + "proc-macro2", + "quote", + "rust-embed-utils", + "shellexpand", + "syn 2.0.43", + "walkdir", +] + +[[package]] +name = "rust-embed-utils" +version = "8.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "816ccd4875431253d6bb54b804bcff4369cbde9bae33defde25fdf6c2ef91d40" +dependencies = [ + "globset", + "sha2 0.10.8", + "walkdir", +] + +[[package]] +name = "rustc-demangle" +version = "0.1.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" + [[package]] name = "rustc-hash" version = "1.1.0" @@ -4434,29 +5064,29 @@ dependencies = [ [[package]] name = "rustix" -version = "0.37.3" +version = "0.37.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62b24138615de35e32031d041a09032ef3487a616d901ca4db224e7d557efae2" +checksum = "d4eb579851244c2c03e7c24f501c3432bed80b8f720af1d6e5b0e0f01555a035" dependencies = [ - "bitflags", + "bitflags 1.3.2", "errno", "io-lifetimes", "libc", - "linux-raw-sys", - "windows-sys 0.45.0", + "linux-raw-sys 0.3.8", + "windows-sys 0.48.0", ] [[package]] -name = "rustls" -version = "0.19.1" +name = "rustix" +version = "0.38.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35edb675feee39aec9c99fa5ff985081995a06d594114ae14cbe797ad7b7a6d7" +checksum = "2b426b0506e5d50a7d8dafcf2e81471400deb602392c7dd110815afb4eaf02a3" dependencies = [ - "base64 0.13.1", - "log", - "ring", - "sct 0.6.1", - "webpki 0.21.4", + "bitflags 2.4.1", + "errno", + "libc", + "linux-raw-sys 0.4.10", + "windows-sys 0.48.0", ] [[package]] @@ -4466,30 +5096,60 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fff78fc74d175294f4e83b28343315ffcfb114b156f0185e9741cb5570f50e2f" dependencies = [ "log", - "ring", - "sct 0.7.0", - "webpki 0.22.0", + "ring 0.16.20", + "sct", + "webpki", 
+] + +[[package]] +name = "rustls" +version = "0.21.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "629648aced5775d558af50b2b4c7b02983a04b312126d45eeead26e7caa498b9" +dependencies = [ + "log", + "ring 0.17.5", + "rustls-webpki", + "sct", +] + +[[package]] +name = "rustls-native-certs" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9aace74cb666635c918e9c12bc0d348266037aa8eb599b5cba565709a8dff00" +dependencies = [ + "openssl-probe", + "rustls-pemfile", + "schannel", + "security-framework", ] [[package]] name = "rustls-pemfile" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d194b56d58803a43635bdc398cd17e383d6f71f9182b9a192c127ca42494a59b" +checksum = "2d3987094b1d07b653b7dfdc3f70ce9a1da9c51ac18c1b06b662e4f9a0e9f4b2" dependencies = [ - "base64 0.21.0", + "base64 0.21.5", ] [[package]] name = "rustls-webpki" -version = "0.100.1" +version = "0.101.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6207cd5ed3d8dca7816f8f3725513a34609c0c765bf652b8c3cb4cfd87db46b" +checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" dependencies = [ - "ring", - "untrusted", + "ring 0.17.5", + "untrusted 0.9.0", ] +[[package]] +name = "rustversion" +version = "1.0.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" + [[package]] name = "rw-stream-sink" version = "0.4.0" @@ -4502,9 +5162,9 @@ dependencies = [ [[package]] name = "ryu" -version = "1.0.12" +version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b4b9743ed687d4b4bcedf9ff5eaa7398495ae14e61cba0a295704edbc7decde" +checksum = "1ad4cc8da4ef723ed60bced201181d83791ad433213d8c24efffda1eec85d741" [[package]] name = "salsa20" @@ -4512,7 +5172,7 @@ version = "0.10.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "97a22f5af31f73a954c10289c93e8a50cc23d971e80ee446f1f6f7137a088213" dependencies = [ - "cipher 0.4.3", + "cipher", ] [[package]] @@ -4525,20 +5185,25 @@ dependencies = [ ] [[package]] -name = "scopeguard" -version = "1.1.0" +name = "schannel" +version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" +checksum = "0c3733bf4cf7ea0880754e19cb5a462007c4a8c1914bff372ccc95b464f1df88" +dependencies = [ + "windows-sys 0.48.0", +] [[package]] -name = "sct" -version = "0.6.1" +name = "scoped-tls" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b362b83898e0e69f38515b82ee15aa80636befe47c3b6d3d89a911e78fc228ce" -dependencies = [ - "ring", - "untrusted", -] +checksum = "e1cf6437eb19a8f4a6cc0f7dca544973b0b78843adbfeb3683d1a94a0024a294" + +[[package]] +name = "scopeguard" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] name = "sct" @@ -4546,15 +5211,15 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4" dependencies = [ - "ring", - "untrusted", + "ring 0.16.20", + "untrusted 0.7.1", ] [[package]] name = "sdp" -version = "0.5.3" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d22a5ef407871893fd72b4562ee15e4742269b173959db4b8df6f538c414e13" +checksum = "4653054c30ebce63658762eb0d64e27673868a95564474811ae6c220cf767640" dependencies = [ "rand 0.8.5", "substring", @@ -4564,79 +5229,122 @@ dependencies = [ [[package]] name = "sec1" -version = "0.3.0" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"3be24c1842290c45df0a7bf069e0c268a747ad05a192f2fd7dcfdbc1cba40928" +checksum = "d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc" dependencies = [ - "base16ct 0.1.1", - "der 0.6.1", + "base16ct", + "der", "generic-array", - "pkcs8 0.9.0", + "pkcs8", "subtle", "zeroize", ] [[package]] -name = "sec1" -version = "0.7.1" +name = "security-framework" +version = "2.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48518a2b5775ba8ca5b46596aae011caa431e6ce7e4a67ead66d92f08884220e" +checksum = "05b64fb303737d99b81884b2c63433e9ae28abebe5eb5045dcdd175dc2ecf4de" dependencies = [ - "base16ct 0.2.0", - "der 0.7.5", - "generic-array", - "pkcs8 0.10.2", - "subtle", - "zeroize", + "bitflags 1.3.2", + "core-foundation", + "core-foundation-sys", + "libc", + "security-framework-sys", +] + +[[package]] +name = "security-framework-sys" +version = "2.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e932934257d3b408ed8f30db49d85ea163bfe74961f017f405b025af298f0c7a" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "semver" +version = "1.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0293b4b29daaf487284529cc2f5675b8e57c61f70167ba415a463651fd6a918" + +[[package]] +name = "send_wrapper" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f638d531eccd6e23b980caf34876660d38e265409d8e99b397ab71eb3612fad0" + +[[package]] +name = "send_wrapper" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd0b0ec5f1c1ca621c432a25813d8d60c88abe6d3e08a3eb9cf37d97a0fe3d73" +dependencies = [ + "futures-core", ] [[package]] -name = "semver" -version = "1.0.16" +name = "serde" +version = "1.0.193" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58bc9567378fc7690d6b2addae4e60ac2eeea07becb2c64b9f218b53865cba2a" +checksum = 
"25dd9975e68d0cb5aa1120c288333fc98731bd1dd12f561e468ea4728c042b89" +dependencies = [ + "serde_derive", +] [[package]] -name = "send_wrapper" -version = "0.4.0" +name = "serde_derive" +version = "1.0.193" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f638d531eccd6e23b980caf34876660d38e265409d8e99b397ab71eb3612fad0" +checksum = "43576ca501357b9b071ac53cdc7da8ef0cbd9493d8df094cd821777ea6e894d3" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.43", +] [[package]] -name = "serde" -version = "1.0.160" +name = "serde_json" +version = "1.0.108" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb2f3770c8bce3bcda7e149193a069a0f4365bda1fa5cd88e03bca26afc1216c" +checksum = "3d1c7e3eac408d115102c4c24ad393e0821bb3a5df4d506a80f85f7a742a526b" dependencies = [ - "serde_derive", + "indexmap 2.0.0", + "itoa", + "ryu", + "serde", ] [[package]] -name = "serde-big-array" -version = "0.3.3" +name = "serde_path_to_error" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd31f59f6fe2b0c055371bb2f16d7f0aa7d8881676c04a55b1596d1a17cd10a4" +checksum = "4beec8bce849d58d06238cb50db2e1c417cfeafa4c63f692b15c82b7c80f8335" dependencies = [ + "itoa", "serde", ] [[package]] -name = "serde_derive" -version = "1.0.160" +name = "serde_repr" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "291a097c63d8497e00160b166a967a4a79c64f3facdd01cbd7502231688d77df" +checksum = "8725e1dfadb3a50f7e5ce0b1a540466f6ed3fe7a0fca2ac2b8b831d31316bd00" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.43", ] [[package]] -name = "serde_json" -version = "1.0.96" +name = "serde_urlencoded" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "057d394a50403bcac12672b2b18fb387ab6d289d957dab67dd201875391e52f1" +checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" dependencies = [ + 
"form_urlencoded", "itoa", "ryu", "serde", @@ -4655,6 +5363,17 @@ dependencies = [ "opaque-debug", ] +[[package]] +name = "sha1" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest 0.10.7", +] + [[package]] name = "sha2" version = "0.9.9" @@ -4670,78 +5389,86 @@ dependencies = [ [[package]] name = "sha2" -version = "0.10.6" +version = "0.10.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82e6b795fe2e3b1e845bafcb27aa35405c4d47cdfc92af5fc8d3002f76cebdc0" +checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" dependencies = [ "cfg-if", "cpufeatures", - "digest 0.10.6", + "digest 0.10.7", ] [[package]] name = "sha3" -version = "0.10.7" +version = "0.10.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54c2bb1a323307527314a36bfb73f24febb08ce2b8a554bf4ffd6f51ad15198c" +checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60" dependencies = [ - "digest 0.10.6", + "digest 0.10.7", "keccak", ] [[package]] -name = "signal-hook" -version = "0.3.14" +name = "sharded-slab" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a253b5e89e2698464fc26b545c9edceb338e18a89effeeecfea192c3025be29d" +checksum = "900fba806f70c630b0a382d0d825e17a0f19fcd059a2ade1ff237bcddf446b31" dependencies = [ - "libc", - "signal-hook-registry", + "lazy_static", ] [[package]] -name = "signal-hook-registry" -version = "1.4.0" +name = "shellexpand" +version = "3.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da03fa3b94cc19e3ebfc88c4229c49d8f08cdbd1228870a45f0ffdf84988e14b" +dependencies = [ + "dirs", +] + +[[package]] +name = "signal-hook" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e51e73328dc4ac0c7ccbda3a494dfa03df1de2f46018127f60c693f2648455b0" +checksum = "8621587d4798caf8eb44879d42e56b9a93ea5dcd315a6487c357130095b62801" dependencies = [ "libc", + "signal-hook-registry", ] [[package]] -name = "signature" -version = "1.6.4" +name = "signal-hook-registry" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" +checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" dependencies = [ - "digest 0.10.6", - "rand_core 0.6.4", + "libc", ] [[package]] name = "signature" -version = "2.0.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fe458c98333f9c8152221191a77e2a44e8325d0193484af2e9421a53019e57d" +checksum = "5e1788eed21689f9cf370582dfc467ef36ed9c707f073528ddafa8d83e3b8500" dependencies = [ - "digest 0.10.6", + "digest 0.10.7", "rand_core 0.6.4", ] [[package]] name = "slab" -version = "0.4.7" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4614a76b2a8be0058caa9dbbaf66d988527d86d003c11a94fbd335d7661edcef" +checksum = "6528351c9bc8ab22353f9d776db39a20288e8d6c37ef8cfe3317cf875eecfc2d" dependencies = [ "autocfg", ] [[package]] name = "smallvec" -version = "1.10.0" +version = "1.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0" +checksum = "4dccd0940a2dcdf68d092b8cbab7dc0ad8fa938bf95787e1b916b0e3d0e8e970" [[package]] name = "smol" @@ -4752,28 +5479,37 @@ dependencies = [ "async-channel", "async-executor", "async-fs", - "async-io", - "async-lock", + "async-io 1.13.0", + "async-lock 2.7.0", "async-net", "async-process", "blocking", - "futures-lite", + "futures-lite 1.13.0", +] + +[[package]] +name = "smol_str" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"74212e6bbe9a4352329b2f68ba3130c15a3f26fe88ff22dbdc6cdd58fa85e99c" +dependencies = [ + "serde", ] [[package]] name = "snow" -version = "0.9.2" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ccba027ba85743e09d15c03296797cad56395089b832b48b5a5217880f57733" +checksum = "58021967fd0a5eeeb23b08df6cc244a4d4a5b4aec1d27c9e02fad1a58b4cd74e" dependencies = [ - "aes-gcm 0.9.4", + "aes-gcm", "blake2", "chacha20poly1305", - "curve25519-dalek 4.0.0-rc.1", + "curve25519-dalek", "rand_core 0.6.4", - "ring", + "ring 0.17.5", "rustc_version", - "sha2 0.10.6", + "sha2 0.10.8", "subtle", ] @@ -4789,9 +5525,9 @@ dependencies = [ [[package]] name = "socket2" -version = "0.5.2" +version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d283f86695ae989d1e18440a943880967156325ba025f05049946bff47bcc2b" +checksum = "7b5fac59a5cb5dd637972e5fca70daf0523c9067fcdc4842f053dae04a18f8e9" dependencies = [ "libc", "windows-sys 0.48.0", @@ -4805,7 +5541,6 @@ checksum = "41d1c5305e39e09653383c2c7244f2f78b3bcae37cf50c64cb4789c9f5096ec2" dependencies = [ "base64 0.13.1", "bytes", - "flate2", "futures", "httparse", "log", @@ -4820,14 +5555,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" [[package]] -name = "spki" -version = "0.6.0" +name = "spin" +version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67cf02bbac7a337dc36e4f5a693db6c21e7863f45070f7064577eb4367a3212b" -dependencies = [ - "base64ct", - "der 0.6.1", -] +checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" [[package]] name = "spki" @@ -4836,7 +5567,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d1e996ef02c474957d681f1b05213dfb0abab947b446a62d37770b23500184a" dependencies = [ "base64ct", - "der 0.7.5", + "der", ] [[package]] @@ -4846,10 +5577,13 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" [[package]] -name = "strsim" -version = "0.9.3" +name = "stringmatch" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6446ced80d6c486436db5c078dde11a9f73d42b57fb273121e160b84f63d894c" +checksum = "6aadc0801d92f0cdc26127c67c4b8766284f52a5ba22894f285e3101fa57d05d" +dependencies = [ + "regex", +] [[package]] name = "strsim" @@ -4859,16 +5593,16 @@ checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" [[package]] name = "stun" -version = "0.4.4" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7e94b1ec00bad60e6410e058b52f1c66de3dc5fe4d62d09b3e52bb7d3b73e25" +checksum = "7beb1624a3ea34778d58d30e2b8606b4d29fe65e87c4d50b87ed30afd5c3830c" dependencies = [ - "base64 0.13.1", + "base64 0.21.5", "crc", "lazy_static", "md-5", "rand 0.8.5", - "ring", + "ring 0.16.20", "subtle", "thiserror", "tokio", @@ -4887,9 +5621,9 @@ dependencies = [ [[package]] name = "subtle" -version = "2.4.1" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" +checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" [[package]] name = "syn" @@ -4904,15 +5638,21 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.15" +version = "2.0.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a34fcf3e8b60f57e6a14301a2e916d323af98b0ea63c599441eec8558660c822" +checksum = "ee659fb5f3d355364e1f3e5bc10fb82068efbf824a1e9d1c9504244a6469ad53" dependencies = [ "proc-macro2", "quote", "unicode-ident", ] +[[package]] +name = "sync_wrapper" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" + 
[[package]] name = "synstructure" version = "0.12.6" @@ -4925,13 +5665,28 @@ dependencies = [ "unicode-xid", ] +[[package]] +name = "sysinfo" +version = "0.29.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd727fc423c2060f6c92d9534cef765c65a6ed3f428a03d7def74a8c4348e666" +dependencies = [ + "cfg-if", + "core-foundation-sys", + "libc", + "ntapi", + "once_cell", + "rayon", + "winapi", +] + [[package]] name = "system-configuration" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d75182f12f490e953596550b65ee31bda7c8e043d9386174b353bda50838c3fd" +checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" dependencies = [ - "bitflags", + "bitflags 1.3.2", "core-foundation", "system-configuration-sys", ] @@ -4948,15 +5703,15 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.5.0" +version = "3.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9fbec84f381d5795b08656e4912bec604d162bff9291d6189a78f4c8ab87998" +checksum = "7ef1adac450ad7f4b3c28589471ade84f25f731a7a0fe30d71dfa9f60fd808e5" dependencies = [ "cfg-if", - "fastrand", - "redox_syscall 0.3.5", - "rustix", - "windows-sys 0.45.0", + "fastrand 2.0.0", + "redox_syscall 0.4.1", + "rustix 0.38.21", + "windows-sys 0.48.0", ] [[package]] @@ -4969,36 +5724,78 @@ dependencies = [ ] [[package]] -name = "textwrap" -version = "0.16.0" +name = "thirtyfour" +version = "0.32.0-rc.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf0fe180d5f1f7dd32bb5f1a8d19231bb63dc9bbb1985e1dbb6f07163b6a8578" +dependencies = [ + "async-trait", + "base64 0.21.5", + "cookie", + "fantoccini", + "futures", + "http 0.2.9", + "indexmap 1.9.3", + "log", + "parking_lot", + "paste", + "serde", + "serde_json", + "serde_repr", + "stringmatch", + "thirtyfour-macros", + "thiserror", + "tokio", + "url", +] + +[[package]] +name = "thirtyfour-macros" +version = 
"0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d" +checksum = "9cae91d1c7c61ec65817f1064954640ee350a50ae6548ff9a1bdd2489d6ffbb0" +dependencies = [ + "proc-macro-error", + "proc-macro2", + "quote", + "syn 1.0.109", +] [[package]] name = "thiserror" -version = "1.0.40" +version = "1.0.51" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "978c9a314bd8dc99be594bc3c175faaa9794be04a5a5e153caba6915336cebac" +checksum = "f11c217e1416d6f036b870f14e0413d480dbf28edbee1f877abaf0206af43bb7" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.40" +version = "1.0.51" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f" +checksum = "01742297787513b79cf8e29d1056ede1313e2420b7b3b15d0a768b4921f549df" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.43", +] + +[[package]] +name = "thread_local" +version = "1.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152" +dependencies = [ + "cfg-if", + "once_cell", ] [[package]] name = "time" -version = "0.3.17" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a561bf4617eebd33bca6434b988f39ed798e527f51a1e797d0ee4f61c0a38376" +checksum = "59e399c068f43a5d116fedaf73b203fa4f9c519f17e2b34f63221d3792f81446" dependencies = [ "itoa", "serde", @@ -5008,15 +5805,15 @@ dependencies = [ [[package]] name = "time-core" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e153e1f1acaef8acc537e68b44906d2db6436e2b35ac2c6b42640fff91f00fd" +checksum = "7300fbefb4dadc1af235a9cef3737cea692a9d97e1b9cbcd4ebdae6f8868e6fb" [[package]] name = "time-macros" -version = "0.2.6" +version = 
"0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d967f99f534ca7e495c575c62638eebc2898a8c84c119b89e250477bc4ba16b2" +checksum = "96ba15a897f3c86766b757e5ac7221554c6750054d74d5b28844fce5fb36a6c4" dependencies = [ "time-core", ] @@ -5042,38 +5839,58 @@ dependencies = [ [[package]] name = "tinyvec_macros" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" +checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.28.0" +version = "1.35.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3c786bf8134e5a3a166db9b29ab8f48134739014a3eca7bc6bfa95d673b136f" +checksum = "c89b4efa943be685f629b149f53829423f8f5531ea21249408e8e2f8671ec104" dependencies = [ - "autocfg", + "backtrace", "bytes", "libc", "mio", "num_cpus", - "parking_lot 0.12.1", - "pin-project-lite 0.2.9", + "parking_lot", + "pin-project-lite", "signal-hook-registry", - "socket2 0.4.9", + "socket2 0.5.5", "tokio-macros", "windows-sys 0.48.0", ] +[[package]] +name = "tokio-io-timeout" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30b74022ada614a1b4834de765f9bb43877f910cc8ce4be40e89042c9223a8bf" +dependencies = [ + "pin-project-lite", + "tokio", +] + [[package]] name = "tokio-macros" -version = "2.1.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" +checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.43", +] + +[[package]] +name = "tokio-native-tls" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" +dependencies = [ + "native-tls", + "tokio", ] [[package]] @@ -5084,33 +5901,114 @@ checksum = "c43ee83903113e03984cb9e5cebe6c04a5116269e900e3ddba8f068a62adda59" dependencies = [ "rustls 0.20.8", "tokio", - "webpki 0.22.0", + "webpki", +] + +[[package]] +name = "tokio-stream" +version = "0.1.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" +dependencies = [ + "futures-core", + "pin-project-lite", + "tokio", ] [[package]] name = "tokio-util" -version = "0.7.8" +version = "0.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "806fe8c2c87eccc8b3267cbae29ed3ab2d0bd37fca70ab622e46aaa9375ddb7d" +checksum = "5419f34732d9eb6ee4c3578b7989078579b7f039cbbb9ca2c4da015749371e15" dependencies = [ "bytes", "futures-core", "futures-io", "futures-sink", - "pin-project-lite 0.2.9", + "pin-project-lite", "tokio", "tracing", ] [[package]] -name = "toml" -version = "0.5.11" +name = "tonic" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4f7f0dd8d50a853a531c426359045b1998f04219d88799810762cd4ad314234" +checksum = "3082666a3a6433f7f511c7192923fa1fe07c69332d3c6a2e6bb040b569199d5a" dependencies = [ - "serde", + "async-trait", + "axum 0.6.20", + "base64 0.21.5", + "bytes", + "futures-core", + "futures-util", + "h2 0.3.20", + "http 0.2.9", + "http-body 0.4.5", + "hyper 0.14.27", + "hyper-timeout", + "percent-encoding", + "pin-project", + "prost", + "tokio", + "tokio-stream", + "tower", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" +dependencies = [ + "futures-core", + "futures-util", + "indexmap 1.9.3", + "pin-project", + "pin-project-lite", + "rand 
0.8.5", + "slab", + "tokio", + "tokio-util", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower-http" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09e12e6351354851911bdf8c2b8f2ab15050c567d70a8b9a37ae7b8301a4080d" +dependencies = [ + "bitflags 2.4.1", + "bytes", + "futures-util", + "http 1.0.0", + "http-body 1.0.0", + "http-body-util", + "http-range-header", + "httpdate", + "mime", + "mime_guess", + "percent-encoding", + "pin-project-lite", + "tokio", + "tokio-util", + "tower-layer", + "tower-service", + "tracing", ] +[[package]] +name = "tower-layer" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" + [[package]] name = "tower-service" version = "0.3.2" @@ -5119,91 +6017,102 @@ checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" [[package]] name = "tracing" -version = "0.1.37" +version = "0.1.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8" +checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" dependencies = [ - "cfg-if", - "pin-project-lite 0.2.9", + "log", + "pin-project-lite", "tracing-attributes", "tracing-core", ] [[package]] name = "tracing-attributes" -version = "0.1.23" +version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4017f8f45139870ca7e672686113917c71c7a6e02d4924eda67186083c03081a" +checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.43", ] [[package]] name = "tracing-core" -version = "0.1.30" +version = "0.1.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24eb03ba0eab1fd845050058ce5e616558e8f8d8fca633e6b163fe25c797213a" +checksum = 
"c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" dependencies = [ "once_cell", + "valuable", ] [[package]] -name = "trust-dns-proto" -version = "0.22.0" +name = "tracing-log" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f7f83d1e4a0e4358ac54c5c3681e5d7da5efc5a7a632c90bb6d6669ddd9bc26" +checksum = "78ddad33d2d10b1ed7eb9d1f518a5674713876e97e5bb9b7345a7984fbb4f922" dependencies = [ - "async-trait", - "bytes", - "cfg-if", - "data-encoding", - "enum-as-inner", - "futures-channel", - "futures-io", - "futures-util", - "h2", - "http", - "idna 0.2.3", - "ipnet", "lazy_static", - "rand 0.8.5", - "rustls 0.20.8", - "rustls-pemfile", + "log", + "tracing-core", +] + +[[package]] +name = "tracing-log" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" +dependencies = [ + "log", + "once_cell", + "tracing-core", +] + +[[package]] +name = "tracing-opentelemetry" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75327c6b667828ddc28f5e3f169036cb793c3f588d83bf0f262a7f062ffed3c8" +dependencies = [ + "once_cell", + "opentelemetry", + "opentelemetry_sdk", "smallvec", - "socket2 0.4.9", - "thiserror", - "tinyvec", - "tokio", - "tokio-rustls", "tracing", - "url", - "webpki 0.22.0", - "webpki-roots 0.22.6", + "tracing-core", + "tracing-log 0.1.3", + "tracing-subscriber", +] + +[[package]] +name = "tracing-subscriber" +version = "0.3.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b" +dependencies = [ + "matchers", + "nu-ansi-term", + "once_cell", + "regex", + "sharded-slab", + "smallvec", + "thread_local", + "tracing", + "tracing-core", + "tracing-log 0.2.0", ] [[package]] -name = "trust-dns-resolver" -version = "0.22.0" +name = "tracing-wasm" +version = "0.2.1" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aff21aa4dcefb0a1afbfac26deb0adc93888c7d295fb63ab273ef276ba2b7cfe" +checksum = "4575c663a174420fa2d78f4108ff68f65bf2fbb7dd89f33749b6e826b3626e07" dependencies = [ - "cfg-if", - "futures-util", - "ipconfig", - "lazy_static", - "lru-cache", - "parking_lot 0.12.1", - "resolv-conf", - "rustls 0.20.8", - "smallvec", - "thiserror", - "tokio", - "tokio-rustls", "tracing", - "trust-dns-proto", - "webpki-roots 0.22.6", + "tracing-subscriber", + "wasm-bindgen", ] [[package]] @@ -5212,19 +6121,34 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" +[[package]] +name = "trybuild" +version = "1.0.86" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8419ecd263363827c5730386f418715766f584e2f874d32c23c5b00bd9727e7e" +dependencies = [ + "basic-toml", + "glob", + "once_cell", + "serde", + "serde_derive", + "serde_json", + "termcolor", +] + [[package]] name = "turn" -version = "0.6.1" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4712ee30d123ec7ae26d1e1b218395a16c87cdbaf4b3925d170d684af62ea5e8" +checksum = "58f4fcb97da0426e8146fe0e9b78cc13120161087256198701d12d9df77f7701" dependencies = [ "async-trait", - "base64 0.13.1", + "base64 0.21.5", "futures", "log", "md-5", "rand 0.8.5", - "ring", + "ring 0.16.20", "stun", "thiserror", "tokio", @@ -5249,17 +6173,26 @@ dependencies = [ "static_assertions", ] +[[package]] +name = "unicase" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" +dependencies = [ + "version_check", +] + [[package]] name = "unicode-bidi" -version = "0.3.10" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d54675592c1dbefd78cbd98db9bacd89886e1ca50692a0692baefffdeb92dd58" +checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" [[package]] name = "unicode-ident" -version = "1.0.6" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84a22b9f218b40614adcb3f4ff08b703773ad44fa9423e4e0d346d5db86e4ebc" +checksum = "301abaae475aa91687eb82514b328ab47a211a533026cb25fc3e519b86adfc3c" [[package]] name = "unicode-normalization" @@ -5270,6 +6203,12 @@ dependencies = [ "tinyvec", ] +[[package]] +name = "unicode-segmentation" +version = "1.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1dd624098567895118886609431a7c3b8f516e41d30e0643f03d94592a147e36" + [[package]] name = "unicode-xid" version = "0.2.4" @@ -5278,19 +6217,25 @@ checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c" [[package]] name = "universal-hash" -version = "0.4.1" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f214e8f697e925001e66ec2c6e37a4ef93f0f78c2eed7814394e10c62025b05" +checksum = "fc1de2c688dc15305988b563c3854064043356019f97a4b46276fe734c4f07ea" dependencies = [ - "generic-array", + "crypto-common", "subtle", ] [[package]] name = "unsigned-varint" -version = "0.7.1" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6889a77d49f1f013504cec6bf97a2c730394adedaeb1deb5ea08949a50541105" + +[[package]] +name = "unsigned-varint" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d86a8dc7f45e4c1b0d30e43038c38f274e77af056aa5f74b93c2cf9eb3c1c836" +checksum = "eb066959b24b5196ae73cb057f45598450d2c5f71460e98c49b738086eff9c06" dependencies = [ "asynchronous-codec", "bytes", @@ -5302,17 +6247,39 @@ version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" +[[package]] +name = "untrusted" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" + +[[package]] +name = "upnp-example" +version = "0.1.0" +dependencies = [ + "futures", + "libp2p", + "tokio", + "tracing-subscriber", +] + [[package]] name = "url" -version = "2.3.1" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d68c799ae75762b8c3fe375feb6600ef5602c883c5d21eb51c09f22b83c4643" +checksum = "31e6302e3bb753d46e83516cae55ae196fc0c309407cf11ab35cc51a4c2a4633" dependencies = [ "form_urlencoded", - "idna 0.3.0", + "idna 0.5.0", "percent-encoding", ] +[[package]] +name = "urlencoding" +version = "2.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "daf8dba3b7eb870caf1ddeed7bc9d2a049f3cfdfae7cb521b087cc33ae4c49da" + [[package]] name = "utf8parse" version = "0.2.1" @@ -5321,22 +6288,24 @@ checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" [[package]] name = "uuid" -version = "1.2.2" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "422ee0de9031b5b948b97a8fc04e3aa35230001a722ddd27943e0be31564ce4c" +checksum = "79daa5ed5740825c40b389c5e50312b9c86df53fccd33f281df655642b43869d" dependencies = [ - "getrandom 0.2.9", + "getrandom 0.2.11", ] +[[package]] +name = "valuable" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" + [[package]] name = "value-bag" -version = "1.0.0-alpha.9" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2209b78d1249f7e6f3293657c9779fe31ced465df091bbd433a1cf88e916ec55" -dependencies = [ - "ctor", - "version_check", -] +checksum = 
"d92ccd67fb88503048c01b59152a04effd0782d035a83a6d256ce6085f08f4a3" [[package]] name = "vcpkg" @@ -5373,22 +6342,20 @@ checksum = "9d5b2c62b4012a3e1eca5a7e077d13b3bf498c4073e33ccd58626607748ceeca" [[package]] name = "walkdir" -version = "2.3.2" +version = "2.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "808cf2735cd4b6866113f648b791c6adc5714537bc222d9347bb203386ffda56" +checksum = "36df944cda56c7d8d8b7496af378e6b16de9284591917d307c9b4d313c44e698" dependencies = [ "same-file", - "winapi", "winapi-util", ] [[package]] name = "want" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ce8a968cb1cd110d136ff8b819a556d6fb6d919363c61534f6860c7eb172ba0" +checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" dependencies = [ - "log", "try-lock", ] @@ -5406,9 +6373,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.84" +version = "0.2.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31f8dcbc21f30d9b8f2ea926ecb58f6b91192c17e9d33594b3df58b2007ca53b" +checksum = "0ed0d4f68a3015cc185aff4db9506a015f4b96f95303897bfa23f846db54064e" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -5416,24 +6383,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.84" +version = "0.2.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95ce90fd5bcc06af55a641a86428ee4229e44e07033963a2290a8e241607ccb9" +checksum = "1b56f625e64f3a1084ded111c4d5f477df9f8c92df113852fa5a374dbda78826" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.43", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.34" +version = "0.4.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f219e0d211ba40266969f6dbdd90636da12f75bee4fc9d6c23d1260dadb51454" +checksum = "ac36a15a220124ac510204aec1c3e5db8a22ab06fd6706d881dc6149f8ed9a12" dependencies = [ "cfg-if", "js-sys", @@ -5443,9 +6410,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.84" +version = "0.2.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c21f77c0bedc37fd5dc21f897894a5ca01e7bb159884559461862ae90c0b4c5" +checksum = "0162dbf37223cd2afce98f3d0785506dcb8d266223983e4b5b525859e6e182b2" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -5453,111 +6420,131 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.84" +version = "0.2.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2aff81306fcac3c7515ad4e177f521b5c9a15f2b08f4e32d823066102f35a5f6" +checksum = "f0eb82fcb7930ae6219a7ecfd55b217f5f0893484b7a13022ebb2b2bf20b5283" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.43", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.84" +version = "0.2.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0046fef7e28c3804e5e38bfa31ea2a0f73905319b677e57ebe37e49358989b5d" +checksum = "7ab9b36309365056cd639da3134bf87fa8f3d86008abf99e612384a6eecd459f" [[package]] -name = "wasm-timer" -version = "0.2.5" +name = "wasm-bindgen-test" +version = "0.3.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be0ecb0db480561e9a7642b5d3e4187c128914e58aa84330b9493e3eb68c5e7f" +checksum = "2cf9242c0d27999b831eae4767b2a146feb0b27d332d553e605864acd2afd403" dependencies = [ - "futures", + "console_error_panic_hook", "js-sys", - "parking_lot 0.11.2", - "pin-utils", + "scoped-tls", "wasm-bindgen", "wasm-bindgen-futures", - "web-sys", + "wasm-bindgen-test-macro", ] [[package]] -name = "web-sys" -version = "0.3.60" +name = "wasm-bindgen-test-macro" +version = "0.3.39" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcda906d8be16e728fd5adc5b729afad4e444e106ab28cd1c7256e54fa61510f" +checksum = "794645f5408c9a039fd09f4d113cdfb2e7eba5ff1956b07bcf701cf4b394fe89" dependencies = [ - "js-sys", - "wasm-bindgen", + "proc-macro2", + "quote", + "syn 2.0.43", ] [[package]] -name = "webpki" -version = "0.21.4" +name = "wasm-logger" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8e38c0608262c46d4a56202ebabdeb094cef7e560ca7a226c6bf055188aa4ea" +checksum = "074649a66bb306c8f2068c9016395fa65d8e08d2affcbf95acf3c24c3ab19718" dependencies = [ - "ring", - "untrusted", + "log", + "wasm-bindgen", + "web-sys", ] [[package]] -name = "webpki" -version = "0.22.0" +name = "web-sys" +version = "0.3.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f095d78192e208183081cc07bc5515ef55216397af48b873e5edcd72637fa1bd" +checksum = "50c24a44ec86bb68fbecd1b3efed7e85ea5621b39b35ef2766b66cd984f8010f" dependencies = [ - "ring", - "untrusted", + "js-sys", + "wasm-bindgen", ] [[package]] -name = "webpki-roots" -version = "0.22.6" +name = "webdriver" +version = "0.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6c71e40d7d2c34a5106301fb632274ca37242cd0c9d3e64dbece371a40a2d87" +checksum = "9973cb72c8587d5ad5efdb91e663d36177dc37725e6c90ca86c626b0cc45c93f" dependencies = [ - "webpki 0.22.0", + "base64 0.13.1", + "bytes", + "cookie", + "http 0.2.9", + "log", + "serde", + "serde_derive", + "serde_json", + "time", + "unicode-segmentation", + "url", ] [[package]] -name = "webpki-roots" -version = "0.23.0" +name = "webpki" +version = "0.22.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa54963694b65584e170cf5dc46aeb4dcaa5584e652ff5f3952e56d66aff0125" +checksum = "07ecc0cd7cac091bf682ec5efa18b1cff79d617b84181f38b3951dbe135f607f" dependencies = [ - "rustls-webpki", + "ring 0.16.20", + "untrusted 0.7.1", 
] +[[package]] +name = "webpki-roots" +version = "0.25.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "14247bb57be4f377dfb94c72830b8ce8fc6beac03cf4bf7b9732eadd414123fc" + [[package]] name = "webrtc" -version = "0.6.0" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d3bc9049bdb2cea52f5fd4f6f728184225bdb867ed0dc2410eab6df5bdd67bb" +checksum = "d91e7cf018f7185552bf6a5dd839f4ed9827aea33b746763c9a215f84a0d0b34" dependencies = [ "arc-swap", "async-trait", "bytes", + "cfg-if", "hex", "interceptor", "lazy_static", "log", "pem", "rand 0.8.5", - "rcgen 0.9.3", + "rcgen", "regex", - "ring", + "ring 0.16.20", "rtcp", "rtp", - "rustls 0.19.1", + "rustls 0.21.9", "sdp", "serde", "serde_json", - "sha2 0.10.6", + "sha2 0.10.8", + "smol_str", "stun", "thiserror", "time", @@ -5577,12 +6564,11 @@ dependencies = [ [[package]] name = "webrtc-data" -version = "0.6.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ef36a4d12baa6e842582fe9ec16a57184ba35e1a09308307b67d43ec8883100" +checksum = "a45d2461d0e0bf93f181e30eb0b40df32b8bf3efb89c53cebb1990e603e2067d" dependencies = [ "bytes", - "derive_builder", "log", "thiserror", "tokio", @@ -5592,51 +6578,46 @@ dependencies = [ [[package]] name = "webrtc-dtls" -version = "0.7.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7021987ae0a2ed6c8cd33f68e98e49bb6e74ffe9543310267b48a1bbe3900e5f" +checksum = "32b140b953f986e97828aa33ec6318186b05d862bee689efbc57af04a243e832" dependencies = [ - "aes 0.6.0", - "aes-gcm 0.8.0", + "aes", + "aes-gcm", "async-trait", "bincode", - "block-modes", "byteorder", + "cbc", "ccm", - "curve25519-dalek 3.2.0", - "der-parser 8.1.0", - "elliptic-curve 0.12.3", + "der-parser", "hkdf", - "hmac 0.10.1", + "hmac 0.12.1", "log", - "oid-registry 0.6.1", - "p256 0.11.1", + "p256", "p384", "pem", "rand 0.8.5", "rand_core 0.6.4", - "rcgen 0.9.3", - 
"ring", - "rustls 0.19.1", - "sec1 0.3.0", + "rcgen", + "ring 0.16.20", + "rustls 0.21.9", + "sec1", "serde", - "sha-1", - "sha2 0.9.9", - "signature 1.6.4", + "sha1", + "sha2 0.10.8", "subtle", "thiserror", "tokio", - "webpki 0.21.4", "webrtc-util", - "x25519-dalek 2.0.0-pre.1", - "x509-parser 0.13.2", + "x25519-dalek", + "x509-parser", ] [[package]] name = "webrtc-ice" -version = "0.9.0" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "494483fbb2f5492620871fdc78b084aed8807377f6e3fe88b2e49f0a9c9c41d7" +checksum = "66eb4b85646f1c52225779db3e1e7e873dede6db68cc9be080b648f1713083a3" dependencies = [ "arc-swap", "async-trait", @@ -5658,12 +6639,12 @@ dependencies = [ [[package]] name = "webrtc-mdns" -version = "0.5.2" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f08dfd7a6e3987e255c4dbe710dde5d94d0f0574f8a21afa95d171376c143106" +checksum = "62bebbd40e7f8b630a0f1a74783dbfff1edfc0ccaae891c4689891156a8c4d8c" dependencies = [ "log", - "socket2 0.4.9", + "socket2 0.5.5", "thiserror", "tokio", "webrtc-util", @@ -5671,25 +6652,22 @@ dependencies = [ [[package]] name = "webrtc-media" -version = "0.5.0" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee2a3c157a040324e5049bcbd644ffc9079e6738fa2cfab2bcff64e5cc4c00d7" +checksum = "1cfde3c7b9450b67d466bb2f02c6d9ff9514d33535eb9994942afd1f828839d1" dependencies = [ "byteorder", "bytes", - "derive_builder", - "displaydoc", "rand 0.8.5", "rtp", "thiserror", - "webrtc-util", ] [[package]] name = "webrtc-sctp" -version = "0.7.0" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d47adcd9427eb3ede33d5a7f3424038f63c965491beafcc20bc650a2f6679c0" +checksum = "1af6116b7f9703560c3ad0b32f67220b171bb1b59633b03563db8404d0e482ea" dependencies = [ "arc-swap", "async-trait", @@ -5704,22 +6682,21 @@ dependencies = [ [[package]] name = "webrtc-srtp" -version 
= "0.9.1" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6183edc4c1c6c0175f8812eefdce84dfa0aea9c3ece71c2bf6ddd3c964de3da5" +checksum = "c1db1f36c1c81e4b1e531c0b9678ba0c93809e196ce62122d87259bb71c03b9f" dependencies = [ - "aead 0.4.3", - "aes 0.7.5", - "aes-gcm 0.9.4", - "async-trait", + "aead", + "aes", + "aes-gcm", "byteorder", "bytes", - "ctr 0.8.0", - "hmac 0.11.0", + "ctr", + "hmac 0.12.1", "log", "rtcp", "rtp", - "sha-1", + "sha1", "subtle", "thiserror", "tokio", @@ -5728,19 +6705,19 @@ dependencies = [ [[package]] name = "webrtc-util" -version = "0.7.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93f1db1727772c05cf7a2cfece52c3aca8045ca1e176cd517d323489aa3c6d87" +checksum = "1adc96bee68417e1f4d19dd7698124a7f859db55ae2fd3eedbbb7e732f614735" dependencies = [ "async-trait", - "bitflags", + "bitflags 1.3.2", "bytes", "cc", "ipnet", "lazy_static", "libc", "log", - "nix", + "nix 0.26.4", "rand 0.8.5", "thiserror", "tokio", @@ -5748,19 +6725,28 @@ dependencies = [ ] [[package]] -name = "wepoll-ffi" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d743fdedc5c64377b5fc2bc036b01c7fd642205a0d96356034ae3404d49eb7fb" +name = "webtransport-tests" +version = "0.1.0" dependencies = [ - "cc", + "futures", + "getrandom 0.2.11", + "libp2p-core", + "libp2p-identity", + "libp2p-noise", + "libp2p-webtransport-websys", + "multiaddr", + "multihash", + "wasm-bindgen", + "wasm-bindgen-futures", + "wasm-bindgen-test", + "web-sys", ] [[package]] name = "widestring" -version = "0.5.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17882f045410753661207383517a6f62ec3dbeb6a4ed2acce01f0728238d1983" +checksum = "653f141f39ec16bba3c5abe400a0c60da7468261cc2cbf36805022876bc721a8" [[package]] name = "winapi" @@ -5795,271 +6781,237 @@ checksum = 
"712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows" -version = "0.34.0" +version = "0.51.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45296b64204227616fdbf2614cefa4c236b98ee64dfaaaa435207ed99fe7829f" +checksum = "ca229916c5ee38c2f2bc1e9d8f04df975b4bd93f9955dc69fabb5d91270045c9" dependencies = [ - "windows_aarch64_msvc 0.34.0", - "windows_i686_gnu 0.34.0", - "windows_i686_msvc 0.34.0", - "windows_x86_64_gnu 0.34.0", - "windows_x86_64_msvc 0.34.0", + "windows-core", + "windows-targets 0.48.5", ] [[package]] -name = "windows-sys" -version = "0.42.0" +name = "windows-core" +version = "0.51.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" +checksum = "f1f8cf84f35d2db49a46868f947758c7a1138116f7fac3bc844f43ade1292e64" dependencies = [ - "windows_aarch64_gnullvm 0.42.1", - "windows_aarch64_msvc 0.42.1", - "windows_i686_gnu 0.42.1", - "windows_i686_msvc 0.42.1", - "windows_x86_64_gnu 0.42.1", - "windows_x86_64_gnullvm 0.42.1", - "windows_x86_64_msvc 0.42.1", + "windows-targets 0.48.5", ] [[package]] name = "windows-sys" -version = "0.45.0" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" dependencies = [ - "windows-targets 0.42.1", + "windows-targets 0.48.5", ] [[package]] name = "windows-sys" -version = "0.48.0" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets 0.48.0", + "windows-targets 0.52.0", ] [[package]] name = "windows-targets" -version = "0.42.1" +version = "0.48.5" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e2522491fbfcd58cc84d47aeb2958948c4b8982e9a2d8a2a35bbaed431390e7" +checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" dependencies = [ - "windows_aarch64_gnullvm 0.42.1", - "windows_aarch64_msvc 0.42.1", - "windows_i686_gnu 0.42.1", - "windows_i686_msvc 0.42.1", - "windows_x86_64_gnu 0.42.1", - "windows_x86_64_gnullvm 0.42.1", - "windows_x86_64_msvc 0.42.1", + "windows_aarch64_gnullvm 0.48.5", + "windows_aarch64_msvc 0.48.5", + "windows_i686_gnu 0.48.5", + "windows_i686_msvc 0.48.5", + "windows_x86_64_gnu 0.48.5", + "windows_x86_64_gnullvm 0.48.5", + "windows_x86_64_msvc 0.48.5", ] [[package]] name = "windows-targets" -version = "0.48.0" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b1eb6f0cd7c80c79759c929114ef071b87354ce476d9d94271031c0497adfd5" +checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd" dependencies = [ - "windows_aarch64_gnullvm 0.48.0", - "windows_aarch64_msvc 0.48.0", - "windows_i686_gnu 0.48.0", - "windows_i686_msvc 0.48.0", - "windows_x86_64_gnu 0.48.0", - "windows_x86_64_gnullvm 0.48.0", - "windows_x86_64_msvc 0.48.0", + "windows_aarch64_gnullvm 0.52.0", + "windows_aarch64_msvc 0.52.0", + "windows_i686_gnu 0.52.0", + "windows_i686_msvc 0.52.0", + "windows_x86_64_gnu 0.52.0", + "windows_x86_64_gnullvm 0.52.0", + "windows_x86_64_msvc 0.52.0", ] [[package]] name = "windows_aarch64_gnullvm" -version = "0.42.1" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c9864e83243fdec7fc9c5444389dcbbfd258f745e7853198f365e3c4968a608" +checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_gnullvm" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc" - -[[package]] -name = 
"windows_aarch64_msvc" -version = "0.34.0" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17cffbe740121affb56fad0fc0e421804adf0ae00891205213b5cecd30db881d" +checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea" [[package]] name = "windows_aarch64_msvc" -version = "0.42.1" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c8b1b673ffc16c47a9ff48570a9d85e25d265735c503681332589af6253c6c7" +checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_aarch64_msvc" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3" - -[[package]] -name = "windows_i686_gnu" -version = "0.34.0" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2564fde759adb79129d9b4f54be42b32c89970c18ebf93124ca8870a498688ed" +checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef" [[package]] name = "windows_i686_gnu" -version = "0.42.1" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de3887528ad530ba7bdbb1faa8275ec7a1155a45ffa57c37993960277145d640" +checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_gnu" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241" - -[[package]] -name = "windows_i686_msvc" -version = "0.34.0" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cd9d32ba70453522332c14d38814bceeb747d80b3958676007acadd7e166956" +checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313" [[package]] name = "windows_i686_msvc" -version = "0.42.1" +version = "0.48.5" source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf4d1122317eddd6ff351aa852118a2418ad4214e6613a50e0191f7004372605" +checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_i686_msvc" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00" - -[[package]] -name = "windows_x86_64_gnu" -version = "0.34.0" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfce6deae227ee8d356d19effc141a509cc503dfd1f850622ec4b0f84428e1f4" +checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a" [[package]] name = "windows_x86_64_gnu" -version = "0.42.1" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1040f221285e17ebccbc2591ffdc2d44ee1f9186324dd3e84e99ac68d699c45" +checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnu" -version = "0.48.0" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1" +checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd" [[package]] name = "windows_x86_64_gnullvm" -version = "0.42.1" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "628bfdf232daa22b0d64fdb62b09fcc36bb01f05a3939e20ab73aaf9470d0463" +checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_gnullvm" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953" - -[[package]] -name = "windows_x86_64_msvc" -version = "0.34.0" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "d19538ccc21819d01deaf88d6a17eae6596a12e9aafdbb97916fb49896d89de9" +checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e" [[package]] name = "windows_x86_64_msvc" -version = "0.42.1" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "447660ad36a13288b1db4d4248e857b510e8c3a225c822ba4fb748c0aafecffd" +checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "windows_x86_64_msvc" -version = "0.48.0" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" +checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" [[package]] name = "winreg" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80d0f4e272c85def139476380b12f9ac60926689dd2e01d4923222f40580869d" -dependencies = [ - "winapi", -] - -[[package]] -name = "x25519-dalek" -version = "1.1.1" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a0c105152107e3b96f6a00a65e86ce82d9b125230e1c4302940eca58ff71f4f" +checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" dependencies = [ - "curve25519-dalek 3.2.0", - "rand_core 0.5.1", - "zeroize", + "cfg-if", + "windows-sys 0.48.0", ] [[package]] name = "x25519-dalek" -version = "2.0.0-pre.1" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5da623d8af10a62342bcbbb230e33e58a63255a58012f8653c578e54bab48df" +checksum = "fb66477291e7e8d2b0ff1bcb900bf29489a9692816d79874bea351e7a8b6de96" dependencies = [ - "curve25519-dalek 3.2.0", + "curve25519-dalek", "rand_core 0.6.4", + "serde", "zeroize", ] [[package]] name = "x509-parser" -version = "0.13.2" +version = "0.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9fb9bace5b5589ffead1afb76e43e34cff39cd0f3ce7e170ae0c29e53b88eb1c" +checksum = "7069fba5b66b9193bd2c5d3d4ff12b839118f6bcbef5328efafafb5395cf63da" dependencies = [ - "asn1-rs 0.3.1", - "base64 0.13.1", + "asn1-rs", "data-encoding", - "der-parser 7.0.0", + "der-parser", "lazy_static", "nom", - "oid-registry 0.4.0", - "ring", + "oid-registry", + "ring 0.16.20", "rusticata-macros", "thiserror", "time", ] [[package]] -name = "x509-parser" -version = "0.15.0" +name = "xml-rs" +version = "0.8.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bab0c2f54ae1d92f4fcb99c0b7ccf0b1e3451cbd395e5f115ccbdbcb18d4f634" +checksum = "1eee6bf5926be7cf998d7381a9a23d833fd493f6a8034658a9505a4dc4b20444" + +[[package]] +name = "xmltree" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7d8a75eaf6557bb84a65ace8609883db44a29951042ada9b393151532e41fcb" dependencies = [ - "asn1-rs 0.5.1", - "data-encoding", - "der-parser 8.1.0", - "lazy_static", - "nom", - "oid-registry 0.6.1", - "rusticata-macros", - "thiserror", - "time", + "xml-rs", ] [[package]] name = "yamux" -version = "0.10.2" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ed0164ae619f2dc144909a9f082187ebb5893693d8c0196e8085283ccd4b776" +dependencies = [ + "futures", + "log", + "nohash-hasher", + "parking_lot", + "pin-project", + "rand 0.8.5", + "static_assertions", +] + +[[package]] +name = "yamux" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5d9ba232399af1783a58d8eb26f6b5006fbefe2dc9ef36bd283324792d03ea5" +checksum = "ad1d0148b89300047e72994bee99ecdabd15a9166a7b70c8b8c37c314dcc9002" dependencies = [ "futures", + "instant", "log", "nohash-hasher", - "parking_lot 0.12.1", + "parking_lot", + "pin-project", "rand 0.8.5", "static_assertions", ] @@ -6075,21 +7027,20 @@ dependencies = [ [[package]] name = "zeroize" -version = "1.6.0" +version = "1.7.0" source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9" +checksum = "525b4ec142c6b68a2d10f01f7bbf6755599ca3f81ea53b8431b7dd348f5fdb2d" dependencies = [ "zeroize_derive", ] [[package]] name = "zeroize_derive" -version = "1.3.3" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44bf07cb3e50ea2003396695d58bf46bc9887a1f362260446fad6bc4e79bd36c" +checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", - "synstructure", + "syn 2.0.43", ] diff --git a/Cargo.toml b/Cargo.toml index 8400238368d..d10ed7e3bbf 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,8 +1,9 @@ [workspace] members = [ "core", - "examples/chat-example", "examples/autonat", + "examples/browser-webrtc", + "examples/chat", "examples/dcutr", "examples/distributed-key-value-store", "examples/file-sharing", @@ -10,19 +11,25 @@ members = [ "examples/ipfs-kad", "examples/ipfs-private", "examples/metrics", - "examples/ping-example", + "examples/ping", "examples/relay-server", "examples/rendezvous", + "examples/upnp", + "hole-punching-tests", "identity", "interop-tests", "misc/allow-block-list", "misc/connection-limits", + "misc/futures-bounded", "misc/keygen", + "misc/memory-connection-limits", "misc/metrics", "misc/multistream-select", "misc/quick-protobuf-codec", "misc/quickcheck-ext", "misc/rw-stream-sink", + "misc/server", + "misc/webrtc-utils", "muxers/mplex", "muxers/test-harness", "muxers/yamux", @@ -38,10 +45,10 @@ members = [ "protocols/relay", "protocols/rendezvous", "protocols/request-response", + "protocols/upnp", "swarm", "swarm-derive", "swarm-test", - "transports/deflate", "transports/dns", "transports/noise", "transports/plaintext", @@ -50,52 +57,88 @@ members = [ "transports/tcp", "transports/tls", "transports/uds", - "transports/wasm-ext", "transports/webrtc", + "transports/webrtc-websys", 
"transports/websocket", + "transports/webtransport-websys", + "transports/websocket-websys", + "wasm-tests/webtransport-tests", ] resolver = "2" [workspace.package] -rust-version = "1.65.0" +rust-version = "1.73.0" [workspace.dependencies] -libp2p-allow-block-list = { version = "0.2.0", path = "misc/allow-block-list" } -libp2p-autonat = { version = "0.11.0", path = "protocols/autonat" } -libp2p-connection-limits = { version = "0.2.0", path = "misc/connection-limits" } -libp2p-core = { version = "0.40.0", path = "core" } -libp2p-dcutr = { version = "0.10.0", path = "protocols/dcutr" } -libp2p-deflate = { version = "0.40.0", path = "transports/deflate" } -libp2p-dns = { version = "0.40.0", path = "transports/dns" } -libp2p-floodsub = { version = "0.43.0", path = "protocols/floodsub" } -libp2p-gossipsub = { version = "0.45.0", path = "protocols/gossipsub" } -libp2p-identify = { version = "0.43.0", path = "protocols/identify" } -libp2p-identity = { version = "0.2.0", path = "identity" } -libp2p-kad = { version = "0.44.0", path = "protocols/kad" } -libp2p-mdns = { version = "0.44.0", path = "protocols/mdns" } -libp2p-metrics = { version = "0.13.0", path = "misc/metrics" } -libp2p-mplex = { version = "0.40.0", path = "muxers/mplex" } -libp2p-muxer-test-harness = { version = "0.1.0", path = "muxers/test-harness" } -libp2p-noise = { version = "0.43.0", path = "transports/noise" } -libp2p-perf = { version = "0.2.0", path = "protocols/perf" } -libp2p-ping = { version = "0.43.0", path = "protocols/ping" } -libp2p-plaintext = { version = "0.40.0", path = "transports/plaintext" } -libp2p-pnet = { version = "0.23.0", path = "transports/pnet" } -libp2p-quic = { version = "0.8.0-alpha", path = "transports/quic" } -libp2p-relay = { version = "0.16.0", path = "protocols/relay" } -libp2p-rendezvous = { version = "0.13.0", path = "protocols/rendezvous" } -libp2p-request-response = { version = "0.25.0", path = "protocols/request-response" } -libp2p-swarm = { version = "0.43.0", path = 
"swarm" } -libp2p-swarm-derive = { version = "0.33.0", path = "swarm-derive" } -libp2p-swarm-test = { version = "0.2.0", path = "swarm-test" } -libp2p-tcp = { version = "0.40.0", path = "transports/tcp" } -libp2p-tls = { version = "0.2.0", path = "transports/tls" } -libp2p-uds = { version = "0.39.0", path = "transports/uds" } -libp2p-wasm-ext = { version = "0.40.0", path = "transports/wasm-ext" } -libp2p-webrtc = { version = "0.5.0-alpha", path = "transports/webrtc" } -libp2p-websocket = { version = "0.42.0", path = "transports/websocket" } -libp2p-yamux = { version = "0.44.0", path = "muxers/yamux" } +asynchronous-codec = { version = "0.7.0" } +futures-bounded = { version = "0.2.3", path = "misc/futures-bounded" } +libp2p = { version = "0.53.2", path = "libp2p" } +libp2p-allow-block-list = { version = "0.3.0", path = "misc/allow-block-list" } +libp2p-autonat = { version = "0.12.0", path = "protocols/autonat" } +libp2p-connection-limits = { version = "0.3.1", path = "misc/connection-limits" } +libp2p-core = { version = "0.41.2", path = "core" } +libp2p-dcutr = { version = "0.11.0", path = "protocols/dcutr" } +libp2p-dns = { version = "0.41.1", path = "transports/dns" } +libp2p-floodsub = { version = "0.44.0", path = "protocols/floodsub" } +libp2p-gossipsub = { version = "0.46.1", path = "protocols/gossipsub" } +libp2p-identify = { version = "0.44.1", path = "protocols/identify" } +libp2p-identity = { version = "0.2.8" } +libp2p-kad = { version = "0.45.3", path = "protocols/kad" } +libp2p-mdns = { version = "0.45.1", path = "protocols/mdns" } +libp2p-memory-connection-limits = { version = "0.2.0", path = "misc/memory-connection-limits" } +libp2p-metrics = { version = "0.14.1", path = "misc/metrics" } +libp2p-mplex = { version = "0.41.0", path = "muxers/mplex" } +libp2p-muxer-test-harness = { path = "muxers/test-harness" } +libp2p-noise = { version = "0.44.0", path = "transports/noise" } +libp2p-perf = { version = "0.3.0", path = "protocols/perf" } +libp2p-ping = { 
version = "0.44.0", path = "protocols/ping" } +libp2p-plaintext = { version = "0.41.0", path = "transports/plaintext" } +libp2p-pnet = { version = "0.24.0", path = "transports/pnet" } +libp2p-quic = { version = "0.10.2", path = "transports/quic" } +libp2p-relay = { version = "0.17.1", path = "protocols/relay" } +libp2p-rendezvous = { version = "0.14.0", path = "protocols/rendezvous" } +libp2p-request-response = { version = "0.26.1", path = "protocols/request-response" } +libp2p-server = { version = "0.12.5", path = "misc/server" } +libp2p-swarm = { version = "0.44.1", path = "swarm" } +libp2p-swarm-derive = { version = "=0.34.2", path = "swarm-derive" } # `libp2p-swarm-derive` may not be compatible with different `libp2p-swarm` non-breaking releases. E.g. `libp2p-swarm` might introduce a new enum variant `FromSwarm` (which is `#[non-exhaustive]`) in a non-breaking release. Older versions of `libp2p-swarm-derive` would not forward this enum variant within the `NetworkBehaviour` hierarchy. Thus the version pinning is required. 
+libp2p-swarm-test = { version = "0.3.0", path = "swarm-test" } +libp2p-tcp = { version = "0.41.0", path = "transports/tcp" } +libp2p-tls = { version = "0.3.0", path = "transports/tls" } +libp2p-uds = { version = "0.40.0", path = "transports/uds" } +libp2p-upnp = { version = "0.2.0", path = "protocols/upnp" } +libp2p-webrtc = { version = "0.7.0-alpha", path = "transports/webrtc" } +libp2p-webrtc-utils = { version = "0.2.0", path = "misc/webrtc-utils" } +libp2p-webrtc-websys = { version = "0.3.0-alpha", path = "transports/webrtc-websys" } +libp2p-websocket = { version = "0.43.0", path = "transports/websocket" } +libp2p-websocket-websys = { version = "0.3.1", path = "transports/websocket-websys" } +libp2p-webtransport-websys = { version = "0.2.0", path = "transports/webtransport-websys" } +libp2p-yamux = { version = "0.45.1", path = "muxers/yamux" } +multiaddr = "0.18.1" +multihash = "0.19.1" multistream-select = { version = "0.13.0", path = "misc/multistream-select" } -quick-protobuf-codec = { version = "0.2.0", path = "misc/quick-protobuf-codec" } +prometheus-client = "0.22.0" +quick-protobuf-codec = { version = "0.3.1", path = "misc/quick-protobuf-codec" } quickcheck = { package = "quickcheck-ext", path = "misc/quickcheck-ext" } rw-stream-sink = { version = "0.4.0", path = "misc/rw-stream-sink" } +unsigned-varint = { version = "0.8.0" } + +[patch.crates-io] + +# Patch away `libp2p-identity` in our dependency tree with the workspace version. +# `libp2p-identity` is a leaf dependency and used within `rust-multiaddr` which is **not** part of the workspace. +# As a result, we cannot just reference the workspace version in our crates because the types would mismatch with what +# we import via `rust-multiaddr`. +# This is expected to stay here until we move `libp2p-identity` to a separate repository which makes the dependency relationship more obvious. 
+libp2p-identity = { path = "identity" } + +[workspace.lints] +rust.unreachable_pub = "warn" +clippy.used_underscore_binding = "warn" +clippy.pedantic = "allow" +clippy.type_complexity = "allow" +clippy.unnecessary_wraps = "warn" +clippy.manual_let_else = "warn" +clippy.dbg_macro = "warn" + +[workspace.metadata.release] +pre-release-hook = ["/bin/sh", '-c', '/bin/sh $WORKSPACE_ROOT/scripts/add-changelog-header.sh'] # Nested use of shell to expand variables. diff --git a/README.md b/README.md index c5db804204e..48fa976635a 100644 --- a/README.md +++ b/README.md @@ -16,9 +16,8 @@ This repository is the central place for Rust development of the [libp2p](https: many protocols in this repository. - For **security related issues** please [file a private security vulnerability - report](https://github.com/libp2p/rust-libp2p/security/advisories/new) - or reach out to [security@libp2p.io](mailto:security@libp2p.io). Please do not - file a public issue on GitHub. + report](https://github.com/libp2p/rust-libp2p/security/advisories/new) . Please do not file a + public issue on GitHub. - To **report bugs, suggest improvements or request new features** please open a GitHub issue on this repository. @@ -29,8 +28,9 @@ This repository is the central place for Rust development of the [libp2p](https: - For **discussions and questions related to multiple libp2p implementations** please use the libp2p _Discourse_ forum https://discuss.libp2p.io. -- For general project updates and discussions join the [biweekly libp2p Community - Calls](https://discuss.libp2p.io/t/libp2p-community-calls/1157). +- For synchronous discussions join the [open rust-libp2p maintainer + calls](https://github.com/libp2p/rust-libp2p/discussions?discussions_q=open+maintainers+call+) + or the [biweekly libp2p community calls](https://discuss.libp2p.io/t/libp2p-community-calls/1157). 
## Repository Structure @@ -41,7 +41,7 @@ The main components of this repository are structured as follows: * `transports/`: Implementations of transport protocols (e.g. TCP) and protocol upgrades (e.g. for authenticated encryption, compression, ...) based on the `libp2p-core` `Transport` - API . + API. * `muxers/`: Implementations of the `StreamMuxer` interface of `libp2p-core`, e.g. (sub)stream multiplexing protocols on top of (typically TCP) connections. @@ -90,14 +90,16 @@ Conduct](https://github.com/ipfs/community/blob/master/code-of-conduct.md). - [Forest](https://github.com/ChainSafe/forest) - An implementation of Filecoin written in Rust. - [fuel-core](https://github.com/FuelLabs/fuel-core) - A Rust implementation of the Fuel protocol. - [HotShot](https://github.com/EspressoSystems/HotShot) - Decentralized sequencer in Rust developed by [Espresso Systems](https://www.espressosys.com/). -- [ipfs-embed](https://github.com/ipfs-rust/ipfs-embed) - A small embeddable ipfs implementation -used and maintained by [Actyx](https://www.actyx.com). -- [iroh](https://github.com/n0-computer/iroh) - Next-generation implementation of IPFS for Cloud & Mobile platforms. +- [ipfs-embed](https://github.com/ipfs-rust/ipfs-embed) - A small embeddable ipfs implementation used and maintained by [Actyx](https://www.actyx.com). +- [Homestar](https://github.com/ipvm-wg/homestar) - An InterPlanetary Virtual Machine (IPVM) implementation used and maintained by Fission. +- [beetle](https://github.com/n0-computer/beetle) - Next-generation implementation of IPFS for Cloud & Mobile platforms. - [Lighthouse](https://github.com/sigp/lighthouse) - Ethereum consensus client in Rust. - [Locutus](https://github.com/freenet/locutus) - Global, observable, decentralized key-value store. +- [OpenMina](https://github.com/openmina/openmina) - In-browser Mina Rust implementation. - [rust-ipfs](https://github.com/rs-ipfs/rust-ipfs) - IPFS implementation in Rust. 
+- [Safe Network](https://github.com/maidsafe/safe_network) - Safe Network implementation in Rust. - [Starcoin](https://github.com/starcoinorg/starcoin) - A smart contract blockchain network that scales by layering. - [Subspace](https://github.com/subspace/subspace) - Subspace Network reference implementation - [Substrate](https://github.com/paritytech/substrate) - Framework for blockchain innovation, used by [Polkadot](https://www.parity.io/technologies/polkadot/). -- [Ursa](https://github.com/fleek-network/ursa) - Decentralized Content Delivery & Edge Compute. +- [Taple](https://github.com/opencanarias/taple-core) - Sustainable DLT for asset and process traceability by [OpenCanarias](https://www.opencanarias.com/en/). diff --git a/ROADMAP.md b/ROADMAP.md index a2ba7cea3ee..0d422a6d385 100644 --- a/ROADMAP.md +++ b/ROADMAP.md @@ -1,94 +1,84 @@ # rust-libp2p Roadmap -Below is a high level roadmap for the rust-libp2p project. Items are ordered by priority (high to -low). +Below is a high level roadmap for the rust-libp2p project. +Items are ordered by priority (high to low). -This is a living document. Input is always welcome e.g. via GitHub issues or pull requests. +This is a living document. +Input is always welcome e.g. via GitHub issues or pull requests. -This is the roadmap of the Rust implementation of libp2p. See also the [general libp2p project -roadmap](https://github.com/libp2p/specs/blob/master/ROADMAP.md). +This is the roadmap of the Rust implementation of libp2p. +See also the [general libp2p project roadmap](https://github.com/libp2p/specs/blob/master/ROADMAP.md). 
-## Cross Behaviour communication +## In the works -| Category | Status | Target Completion | Tracking | Dependencies | Dependents | -|----------------------|--------|-------------------|---------------------------------------------------|---------------------------------------------------|-----------------------------------------------| -| Developer ergonomics | todo | Q1/2023 | https://github.com/libp2p/rust-libp2p/issues/2680 | https://github.com/libp2p/rust-libp2p/issues/2832 | [Kademlia client mode](#kademlia-client-mode) | +### WebTransport -Today `NetworkBehaviour` implementations like Kademlia, GossipSub or Circuit Relay v2 can not -communicate with each other, i.e. cannot make use of information known by another -`NetworkBehaviour` implementation. Users need to write the wiring code by hand to e.g. enable -Kademlia to learn protocols supported by a remote peer from Identify. - -This roadmap item contains exchanging standard information about remote peers (e.g. supported -protocols) between `NetworkBehaviour` implementations. +| Category | Status | Target Completion | Tracking | Dependencies | Dependents | +|-----------------------------|--------|-------------------|---------------------------------------------------|------------------------------------|------------| +| Connectivity / optimization | todo | | https://github.com/libp2p/rust-libp2p/issues/2993 | [QUIC](#experimental-quic-support) | | -Long term we might consider a generic approach for `NetworkBehaviours` to exchange data. Though that -would deserve its own roadmap item. +A WebTransport implementation in rust-libp2p will enable browsers to connect to rust-libp2p server nodes where the latter only have a self-signed TLS certificate. +Compared to WebRTC, this would likely be more stable and performant. 
-## Kademlia client mode +### AutoNATv2 -| Category | Status | Target Completion | Tracking | Dependencies | Dependents | -|--------------|--------|-------------------|---------------------------------------------------|-----------------------------------------------------------------|------------| -| Optimization | todo | Q1/2023 | https://github.com/libp2p/rust-libp2p/issues/2032 | [Cross behaviour communication](#cross-behaviour-communication) | | +| Category | Status | Target Completion | Tracking | Dependencies | Dependents | +|--------------|--------|-------------------|----------|------------------|------------------| +| Connectivity | todo | Q4/2023 | | Address pipeline | Address pipeline | -Kademlia client mode will enhance routing table health and thus have a positive impact on all -Kademlia operations. +Implement the new AutoNAT v2 specification. +See https://github.com/libp2p/specs/pull/538. -## QUIC - evaluate and move to quinn +### Address pipeline -| Category | Status | Target Completion | Tracking | Dependencies | Dependents | -|--------------|--------|-------------------|---------------------------------------------------|--------------|------------| -| Connectivity | todo | Q2/2023 | https://github.com/libp2p/rust-libp2p/issues/2883 | | | +| Category | Status | Target Completion | Tracking | Dependencies | Dependents | +|--------------|--------|-------------------|----------|--------------|------------| +| Connectivity | todo | Q4/2023 | | AutoNATv2 | AutoNATv2 | -We added alpha support for QUIC in Q4/2022 wrapping `quinn-proto`. Evaluate using `quinn` directly, replacing the wrapper. +Be smart on address prioritization. +go-libp2p made a lot of progress here. +Lots to learn. +See https://github.com/libp2p/go-libp2p/issues/2229 and https://github.com/libp2p/rust-libp2p/issues/1896#issuecomment-1537774383. 
-## Optimize Hole punching +### Optimize Hole punching | Category | Status | Target Completion | Tracking | Dependencies | Dependents | |--------------|--------|-------------------|----------|--------------|------------| -| Optimization | todo | Q2/2023 | | | | +| Optimization | todo | | | | | -We released hole punching support with [rust-libp2p -`v0.43.0`](https://github.com/libp2p/rust-libp2p/releases/tag/v0.43.0), see also -https://github.com/libp2p/rust-libp2p/issues/2052. We are currently collecting data via the -[punchr](https://github.com/dennis-tra/punchr) project on the hole punching success rate. See also -[call for -action](https://discuss.libp2p.io/t/decentralized-nat-hole-punching-measurement-campaign/1616) in -case you want to help. Based on this data we will likely find many optimizations we can do to our -hole punching stack. +We released hole punching support with [rust-libp2p `v0.43.0`](https://github.com/libp2p/rust-libp2p/releases/tag/v0.43.0), see also https://github.com/libp2p/rust-libp2p/issues/2052. +We are currently collecting data via the [punchr](https://github.com/dennis-tra/punchr) project on the hole punching success rate. +See also [call for action](https://discuss.libp2p.io/t/decentralized-nat-hole-punching-measurement-campaign/1616) in case you want to help. +Based on this data we will likely find many optimizations we can do to our hole punching stack. 
-## Improved Wasm support +### Improved Wasm support | Category | Status | Target Completion | Tracking | Dependencies | Dependents | |----------------------|--------|-------------------|---------------------------------------------------|--------------|----------------------------------------------| -| Developer ergonomics | todo | Q3/2023 | https://github.com/libp2p/rust-libp2p/issues/2617 | | [WebRTC](#webrtc-support-browser-to-browser) | +| Developer ergonomics | todo | | https://github.com/libp2p/rust-libp2p/issues/2617 | | [WebRTC](#webrtc-support-browser-to-browser) | The project supports Wasm already today, though the developer experience is cumbersome at best. -Properly supporting Wasm opens rust-libp2p to a whole new set of use-cases. I would love for this to -happen earlier. Though (a) I think we should prioritize improving existing functionality over new -functionality and (b) we don't have high demand for this feature from the community. (One could -argue that that demand follows this roadmap item and not the other way round.) +Properly supporting Wasm opens rust-libp2p to a whole new set of use-cases. +I would love for this to happen earlier. +Though (a) I think we should prioritize improving existing functionality over new functionality and (b) we don't have high demand for this feature from the community. +(One could argue that that demand follows this roadmap item and not the other way round.) 
-## WebRTC support (browser-to-browser) +### WebRTC in the browser via WASM | Category | Status | Target Completion | Tracking | Dependencies | Dependents | |--------------|--------|-------------------|--------------------------------------------|-------------------------------------------------------------------------------------------|------------| -| Connectivity | todo | Q3/2023 | https://github.com/libp2p/specs/issues/475 | [Improved WASM support](#improved-wasm-support), https://github.com/libp2p/specs/pull/497 | | +| Connectivity | In progress | | https://github.com/libp2p/specs/issues/475 | [Improved WASM support](#improved-wasm-support), https://github.com/libp2p/specs/pull/497 | https://github.com/libp2p/rust-libp2p/pull/4248 | +Use the browser's WebRTC stack to support [`/webrtc`](https://github.com/libp2p/specs/blob/master/webrtc/webrtc.md) and [`/webrtc-direct`](https://github.com/libp2p/specs/blob/master/webrtc/webrtc-direct.md) from within the browser using rust-libp2p compiled to WASM. +This makes rust-libp2p a truly end-to-end solution, enabling users to use rust-libp2p on both the client (browser) and server side. -Once WebRTC for browser-to-server is complete, we can begin work on **browser-to-browser** and complete the WebRTC connectivity story. -We need to improve rust-libp2p's WASM story first. 
+### Attempt to switch from webrtc-rs to str0m -## WebTransport - -| Category | Status | Target Completion | Tracking | Dependencies | Dependents | -|-----------------------------|--------|-------------------|---------------------------------------------------|------------------------------------|------------| -| Connectivity / optimization | todo | | https://github.com/libp2p/rust-libp2p/issues/2993 | [QUIC](#experimental-quic-support) | | +| Category | Status | Target Completion | Tracking | Dependencies | Dependents | +|--------------|--------|-------------------|---------------------------------------------------|--------------|------------| +| Connectivity | todo | | https://github.com/libp2p/rust-libp2p/issues/3659 | | | -A WebTransport implementation in rust-libp2p will enable browsers to connect to rust-libp2p nodes -where the latter only have a self-signed TLS certificate. Compared to WebRTC, this would likely be -more performant. It is dependent on QUIC support in rust-libp2p. Given that we will support WebRTC -(browser-to-server) this is not a high priority. +Reduce maintenance burden and reduce dependency footprint. ## Done @@ -98,24 +88,23 @@ more performant. It is dependent on QUIC support in rust-libp2p. Given that we w |--------------|--------|-------------------|---------------------------------------------------|------------------------------------------------|------------| | Connectivity | Done | Q4/2022 | https://github.com/libp2p/rust-libp2p/issues/2883 | https://github.com/libp2p/test-plans/issues/53 | | -QUIC has been on the roadmap for a long time. It enables various performance improvements as well as -higher hole punching success rates. We are close to finishing a first version with -https://github.com/libp2p/rust-libp2p/pull/2289. +QUIC has been on the roadmap for a long time. +It enables various performance improvements as well as higher hole punching success rates. 
+We are close to finishing a first version with https://github.com/libp2p/rust-libp2p/pull/2289. ### WebRTC support (browser-to-server) -| Category | Status | Target Completion | Tracking | Dependencies | Dependents | -|--------------|--------|-------------------|------------------------------------------|------------------------------------------------|-------------------------------------------------------------------| +| Category | Status | Target Completion | Tracking | Dependencies | Dependents | +|--------------|--------|-------------------|------------------------------------------|-----------------------------------------------|-------------------------------------------------------------------| | Connectivity | Done | Q4/2022 | https://github.com/libp2p/specs/pull/412 | https://github.com/libp2p/test-plans/pull/100 | [WebRTC (browser-to-browser)](#webrtc-support-browser-to-browser) | +We are currently implementing WebRTC for **browser-to-server** connectivity in https://github.com/libp2p/rust-libp2p/pull/2622. +More specifically the server side. +This will enable browser nodes to connect to rust-libp2p nodes where the latter only have self-signed TLS certificates. +See https://github.com/libp2p/specs/pull/412 for in-depth motivation. -We are currently implementing WebRTC for **browser-to-server** connectivity in -https://github.com/libp2p/rust-libp2p/pull/2622. More specifically the server side. This will enable -browser nodes to connect to rust-libp2p nodes where the latter only have self-signed TLS -certificates. See https://github.com/libp2p/specs/pull/412 for in-depth motivation. - -Long term we should enable rust-libp2p running in the browser via Wasm to use the browser's WebRTC -stack. Though that should only happen after improved Wasm support, see below. +Long term we should enable rust-libp2p running in the browser via Wasm to use the browser's WebRTC stack. +Though that should only happen after improved Wasm support, see below. 
### Kademlia efficient querying @@ -123,16 +112,68 @@ stack. Though that should only happen after improved Wasm support, see below. |--------------|-------------|-------------------|-------------------------------------------------|--------------|------------| | Optimization | done | Q1/2023 | https://github.com/libp2p/rust-libp2p/pull/2712 | | | -Users of rust-libp2p like [iroh](https://github.com/n0-computer/iroh) need this for low latency -usage of `libp2p-kad`. The rust-libp2p maintainers can pick this up unless iroh folks finish the +Users of rust-libp2p like [iroh](https://github.com/n0-computer/iroh) need this for low latency usage of `libp2p-kad`. +The rust-libp2p maintainers can pick this up unless iroh folks finish the work before that. ### Generic connection management -| Category | Status | Target Completion | Tracking | Dependencies | Dependents | -|----------------------|-------------|-------------------|---------------------------------------------------|--------------|------------| -| Developer Ergonomics | done | Q1/2023 | https://github.com/libp2p/rust-libp2p/issues/2824 | | | +| Category | Status | Target Completion | Tracking | Dependencies | Dependents | +|----------------------|--------|-------------------|---------------------------------------------------|--------------|------------| +| Developer Ergonomics | done | Q1/2023 | https://github.com/libp2p/rust-libp2p/issues/2824 | | | + +Today connection management functionality in rust-libp2p is limited. +Building abstractions on top is cumbersome and inefficient. +See https://github.com/libp2p/rust-libp2p/issues/2824. +Making connection management generic allows users to build advanced and efficient abstractions on top of rust-libp2p. 
+ +### Cross Behaviour communication + +| Category | Status | Target Completion | Tracking | Dependencies | Dependents | +|----------------------|--------|-------------------|---------------------------------------------------|---------------------------------------------------|-----------------------------------------------| +| Developer ergonomics | Done | Q1/2023 | https://github.com/libp2p/rust-libp2p/issues/2680 | https://github.com/libp2p/rust-libp2p/issues/2832 | [Kademlia client mode](#kademlia-client-mode) | + +Today `NetworkBehaviour` implementations like Kademlia, GossipSub or Circuit Relay v2 can not communicate with each other, i.e. cannot make use of information known by another `NetworkBehaviour` implementation. +Users need to write the wiring code by hand to e.g. enable Kademlia to learn protocols supported by a remote peer from Identify. + +This roadmap item contains exchanging standard information about remote peers (e.g. supported protocols) between `NetworkBehaviour` implementations. + +Long term we might consider a generic approach for `NetworkBehaviours` to exchange data. +Though that would deserve its own roadmap item. + +## QUIC - implement hole punching + +| Category | Status | Target Completion | Tracking | Dependencies | Dependents | +|--------------|--------|-------------------|---------------------------------------------------|--------------|------------| +| Connectivity | done | Q3/2023 | https://github.com/libp2p/rust-libp2p/issues/2883 | | | + +Add hole punching support for QUIC. +See also [DCUtR specification on usage with QUIC](https://github.com/libp2p/specs/blob/master/relay/DCUtR.md#the-protocol). 
+ +## Kademlia client mode + +| Category | Status | Target Completion | Tracking | Dependencies | Dependents | +|--------------|--------|-------------------|---------------------------------------------------|-----------------------------------------------------------------|------------| +| Optimization | Done | Q2/2023 | https://github.com/libp2p/rust-libp2p/issues/2032 | [Cross behaviour communication](#cross-behaviour-communication) | | + +Kademlia client mode will enhance routing table health and thus have a positive impact on all +Kademlia operations. + +## QUIC - evaluate and move to quinn + +| Category | Status | Target Completion | Tracking | Dependencies | Dependents | +|--------------|--------|-------------------|---------------------------------------------------|--------------|------------| +| Connectivity | done | Q3/2023 | https://github.com/libp2p/rust-libp2p/issues/2883 | | | + +We added alpha support for QUIC in Q4/2022 wrapping `quinn-proto`. +Evaluate using `quinn` directly, replacing the wrapper. + +### Automate port-forwarding e.g. via UPnP + +| Category | Status | Target Completion | Tracking | Dependencies | Dependents | +|--------------|--------|-------------------|---------------------------------------------------|--------------|------------| +| Connectivity | done | | https://github.com/libp2p/rust-libp2p/pull/4156 | | | + +Leverage protocols like UPnP to configure port-forwarding on ones router when behind NAT and/or firewall. +Another technique in addition to hole punching increasing the probability for a node to become publicly reachable when behind a firewall and/or NAT. -Today connection management functionality in rust-libp2p is limited. Building abstractions on top is -cumbersome and inefficient. See https://github.com/libp2p/rust-libp2p/issues/2824. Making connection -management generic allows users to build advanced and efficient abstractions on top of rust-libp2p. 
diff --git a/SECURITY.md b/SECURITY.md index 0e5a3f2e55f..f3ae83405e8 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -7,5 +7,3 @@ By default we provide security patches for the latest released version only. On ## Reporting a Vulnerability Please do not file a public issue on GitHub. Instead, please [file a private security vulnerability report](https://github.com/libp2p/rust-libp2p/security/advisories/new). - -If you need further assistance, please reach out to [security@libp2p.io](mailto:security@libp2p.io). diff --git a/clippy.toml b/clippy.toml index f66cc0ac2da..fd38ead0202 100644 --- a/clippy.toml +++ b/clippy.toml @@ -1,3 +1,4 @@ disallowed-methods = [ { path = "futures::channel::mpsc::unbounded", reason = "does not enforce backpressure" }, ] +avoid-breaking-exported-api = false diff --git a/core/CHANGELOG.md b/core/CHANGELOG.md index d3a5ac63f6e..a7cd7fd46b4 100644 --- a/core/CHANGELOG.md +++ b/core/CHANGELOG.md @@ -1,4 +1,34 @@ -## 0.40.0 - unreleased +## 0.41.2 + +- Implement `std::fmt::Display` on `ListenerId`. + See [PR 4936](https://github.com/libp2p/rust-libp2p/pull/4936). + +## 0.41.1 + +- Implement `{In,Out}boundConnectionUpgrade` for `SelectUpgrade`. + See [PR 4812](https://github.com/libp2p/rust-libp2p/pull/4812). + +## 0.41.0 + +- Remove blanket-impl of `{In,Out}boundUpgrade` for `{In,Out}boundConnectionUpgrade`. + See [PR 4695](https://github.com/libp2p/rust-libp2p/pull/4695). +- Remove deprecated functions from `ListenerId`. + See [PR 4736](https://github.com/libp2p/rust-libp2p/pull/4736). +- Remove `upgrade::transfer` module. + See [issue 4011](https://github.com/libp2p/rust-libp2p/issues/4011) for details. + See [PR 4788](https://github.com/libp2p/rust-libp2p/pull/4788). + +## 0.40.1 + +- Implement `Debug` for `StreamMuxerEvent`. + See [PR 4426]. + +[PR 4426]: https://github.com/libp2p/rust-libp2p/pull/4426 + +## 0.40.0 + +- Allow `ListenerId` to be user-controlled, i.e. to be provided on `Transport::listen_on`. + See [PR 3567]. 
- Raise MSRV to 1.65. See [PR 3715]. @@ -6,15 +36,33 @@ - Remove deprecated symbols related to upgrades. See [PR 3867]. -[PR 3715]: https://github.com/libp2p/rust-libp2p/pull/3715 -[PR 3867]: https://github.com/libp2p/rust-libp2p/pull/3867 - - Enforce protocol names to be valid UTF8 strings as required by the [spec]. We delete the `ProtocolName` trait and replace it with a requirement for `AsRef`. See [PR 3746] +- Remove `SingletonMuxer`. + See [PR 3883]. + +- Add `global_only::Transport` that refuses to dial IP addresses from private ranges. + See [PR 3814]. + +- Remove `upgrade::{apply, apply_inbound, apply_outbound, InboundUpgradeApply, OutboundUpgradeApply}` from public API. + These are implementation details that should not be depended on. + See [PR 3915]. + +- Remove deprecated `identity` module. + Depend on `libp2p-identity` directly instead or import it via the `libp2p::identity` re-export. + See [PR 4040]. + [spec]: https://github.com/libp2p/specs/blob/master/connections/README.md#multistream-select +[PR 3567]: https://github.com/libp2p/rust-libp2p/pull/3567 +[PR 3715]: https://github.com/libp2p/rust-libp2p/pull/3715 [PR 3746]: https://github.com/libp2p/rust-libp2p/pull/3746 +[PR 3814]: https://github.com/libp2p/rust-libp2p/pull/3814 +[PR 3867]: https://github.com/libp2p/rust-libp2p/pull/3867 +[PR 3883]: https://github.com/libp2p/rust-libp2p/pull/3883 +[PR 3915]: https://github.com/libp2p/rust-libp2p/pull/3915 +[PR 4040]: https://github.com/libp2p/rust-libp2p/pull/4040 ## 0.39.2 diff --git a/core/Cargo.toml b/core/Cargo.toml index 6812e5f3b34..34b2202c166 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-core" edition = "2021" rust-version = { workspace = true } description = "Core traits and structs of libp2p" -version = "0.40.0" +version = "0.41.2" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -11,39 +11,37 @@ keywords = ["peer-to-peer", "libp2p", "networking"] 
categories = ["network-programming", "asynchronous"] [dependencies] -either = "1.5" +either = "1.9" fnv = "1.0" -futures = { version = "0.3.28", features = ["executor", "thread-pool"] } +futures = { version = "0.3.30", features = ["executor", "thread-pool"] } futures-timer = "3" -instant = "0.1.11" +instant = "0.1.12" libp2p-identity = { workspace = true, features = ["peerid", "ed25519"] } -log = "0.4" -multiaddr = { version = "0.17.1" } -multihash = { version = "0.17.0", default-features = false, features = ["std"] } +multiaddr = { workspace = true } +multihash = { workspace = true } multistream-select = { workspace = true } -once_cell = "1.17.1" +once_cell = "1.19.0" parking_lot = "0.12.0" -pin-project = "1.0.0" +pin-project = "1.1.3" quick-protobuf = "0.8" rand = "0.8" rw-stream-sink = { workspace = true } serde = { version = "1", optional = true, features = ["derive"] } -smallvec = "1.6.1" +smallvec = "1.11.2" thiserror = "1.0" -unsigned-varint = "0.7" +tracing = "0.1.37" +unsigned-varint = { workspace = true } void = "1" [dev-dependencies] async-std = { version = "1.6.2", features = ["attributes"] } -libp2p-mplex = { workspace = true } -libp2p-noise = { workspace = true } -multihash = { version = "0.17.0", default-features = false, features = ["arb"] } +libp2p-mplex = { path = "../muxers/mplex" } # Using `path` here because this is a cyclic dev-dependency which otherwise breaks releasing. +libp2p-noise = { path = "../transports/noise" } # Using `path` here because this is a cyclic dev-dependency which otherwise breaks releasing. +multihash = { workspace = true, features = ["arb"] } quickcheck = { workspace = true } +libp2p-identity = { workspace = true, features = ["ed25519", "rand"] } [features] -secp256k1 = [ "libp2p-identity/secp256k1" ] -ecdsa = [ "libp2p-identity/ecdsa" ] -rsa = [ "libp2p-identity/rsa" ] serde = ["multihash/serde-codec", "dep:serde", "libp2p-identity/serde"] # Passing arguments to the docsrs builder in order to properly document cfg's. 
@@ -52,3 +50,6 @@ serde = ["multihash/serde-codec", "dep:serde", "libp2p-identity/serde"] all-features = true rustdoc-args = ["--cfg", "docsrs"] rustc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true diff --git a/core/src/either.rs b/core/src/either.rs index 32e09ca691c..3f79b2b37a9 100644 --- a/core/src/either.rs +++ b/core/src/either.rs @@ -154,14 +154,18 @@ where } } - fn listen_on(&mut self, addr: Multiaddr) -> Result> { + fn listen_on( + &mut self, + id: ListenerId, + addr: Multiaddr, + ) -> Result<(), TransportError> { use TransportError::*; match self { - Either::Left(a) => a.listen_on(addr).map_err(|e| match e { + Either::Left(a) => a.listen_on(id, addr).map_err(|e| match e { MultiaddrNotSupported(addr) => MultiaddrNotSupported(addr), Other(err) => Other(Either::Left(err)), }), - Either::Right(b) => b.listen_on(addr).map_err(|e| match e { + Either::Right(b) => b.listen_on(id, addr).map_err(|e| match e { MultiaddrNotSupported(addr) => MultiaddrNotSupported(addr), Other(err) => Other(Either::Right(err)), }), diff --git a/core/src/lib.rs b/core/src/lib.rs index c40e64c5d8b..abb83481d6c 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -22,10 +22,6 @@ //! //! The main concepts of libp2p-core are: //! -//! - A [`PeerId`] is a unique global identifier for a node on the network. -//! Each node must have a different [`PeerId`]. Normally, a [`PeerId`] is the -//! hash of the public key used to negotiate encryption on the -//! communication channel, thereby guaranteeing that they cannot be spoofed. //! - The [`Transport`] trait defines how to reach a remote node or listen for //! incoming remote connections. See the [`transport`] module. //! - The [`StreamMuxer`] trait is implemented on structs that hold a connection @@ -47,59 +43,8 @@ mod proto { /// Multi-address re-export. 
pub use multiaddr; -use std::fmt; -use std::fmt::Formatter; pub type Negotiated = multistream_select::Negotiated; -#[deprecated(since = "0.39.0", note = "Depend on `libp2p-identity` instead.")] -pub mod identity { - pub use libp2p_identity::Keypair; - pub use libp2p_identity::PublicKey; - - pub mod ed25519 { - pub use libp2p_identity::ed25519::Keypair; - pub use libp2p_identity::ed25519::PublicKey; - pub use libp2p_identity::ed25519::SecretKey; - } - - #[cfg(feature = "ecdsa")] - #[deprecated( - since = "0.39.0", - note = "The `ecdsa` feature-flag is deprecated and will be removed in favor of `libp2p-identity`." - )] - pub mod ecdsa { - pub use libp2p_identity::ecdsa::Keypair; - pub use libp2p_identity::ecdsa::PublicKey; - pub use libp2p_identity::ecdsa::SecretKey; - } - - #[cfg(feature = "secp256k1")] - #[deprecated( - since = "0.39.0", - note = "The `secp256k1` feature-flag is deprecated and will be removed in favor of `libp2p-identity`." - )] - pub mod secp256k1 { - pub use libp2p_identity::secp256k1::Keypair; - pub use libp2p_identity::secp256k1::PublicKey; - pub use libp2p_identity::secp256k1::SecretKey; - } - - #[cfg(feature = "rsa")] - #[deprecated( - since = "0.39.0", - note = "The `rsa` feature-flag is deprecated and will be removed in favor of `libp2p-identity`." 
- )] - pub mod rsa { - pub use libp2p_identity::rsa::Keypair; - pub use libp2p_identity::rsa::PublicKey; - } - - pub mod error { - pub use libp2p_identity::DecodingError; - pub use libp2p_identity::SigningError; - } -} - mod translation; pub mod connection; @@ -110,15 +55,6 @@ pub mod signed_envelope; pub mod transport; pub mod upgrade; -#[deprecated(since = "0.39.0", note = "Depend on `libp2p-identity` instead.")] -pub type PublicKey = libp2p_identity::PublicKey; - -#[deprecated(since = "0.39.0", note = "Depend on `libp2p-identity` instead.")] -pub type PeerId = libp2p_identity::PeerId; - -#[deprecated(since = "0.39.0", note = "Depend on `libp2p-identity` instead.")] -pub type ParseError = libp2p_identity::ParseError; - pub use connection::{ConnectedPoint, Endpoint}; pub use multiaddr::Multiaddr; pub use multihash; @@ -127,19 +63,8 @@ pub use peer_record::PeerRecord; pub use signed_envelope::SignedEnvelope; pub use translation::address_translation; pub use transport::Transport; -pub use upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeError, UpgradeInfo}; +pub use upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}; #[derive(Debug, thiserror::Error)] -pub struct DecodeError(String); - -impl From for DecodeError { - fn from(e: quick_protobuf::Error) -> Self { - Self(e.to_string()) - } -} - -impl fmt::Display for DecodeError { - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - write!(f, "{}", self.0) - } -} +#[error(transparent)] +pub struct DecodeError(quick_protobuf::Error); diff --git a/core/src/muxing.rs b/core/src/muxing.rs index cb3f79e69d5..477e1608073 100644 --- a/core/src/muxing.rs +++ b/core/src/muxing.rs @@ -57,10 +57,8 @@ use std::pin::Pin; pub use self::boxed::StreamMuxerBox; pub use self::boxed::SubstreamBox; -pub use self::singleton::SingletonMuxer; mod boxed; -mod singleton; /// Provides multiplexing for a connection by allowing users to open substreams. 
/// @@ -114,6 +112,7 @@ pub trait StreamMuxer { } /// An event produced by a [`StreamMuxer`]. +#[derive(Debug)] pub enum StreamMuxerEvent { /// The address of the remote has changed. AddressChange(Multiaddr), diff --git a/core/src/muxing/singleton.rs b/core/src/muxing/singleton.rs deleted file mode 100644 index 3ba2c1cb366..00000000000 --- a/core/src/muxing/singleton.rs +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright 2019 Parity Technologies (UK) Ltd. -// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - -use crate::connection::Endpoint; -use crate::muxing::{StreamMuxer, StreamMuxerEvent}; - -use futures::prelude::*; -use std::cell::Cell; -use std::pin::Pin; -use std::{io, task::Context, task::Poll}; - -/// Implementation of `StreamMuxer` that allows only one substream on top of a connection, -/// yielding the connection itself. -/// -/// Applying this muxer on a connection doesn't read or write any data on the connection itself. 
-/// Most notably, no protocol is negotiated. -pub struct SingletonMuxer { - /// The inner connection. - inner: Cell>, - /// Our local endpoint. Always the same value as was passed to `new`. - endpoint: Endpoint, -} - -impl SingletonMuxer { - /// Creates a new `SingletonMuxer`. - /// - /// If `endpoint` is `Dialer`, then only one outbound substream will be permitted. - /// If `endpoint` is `Listener`, then only one inbound substream will be permitted. - pub fn new(inner: TSocket, endpoint: Endpoint) -> Self { - SingletonMuxer { - inner: Cell::new(Some(inner)), - endpoint, - } - } -} - -impl StreamMuxer for SingletonMuxer -where - TSocket: AsyncRead + AsyncWrite + Unpin, -{ - type Substream = TSocket; - type Error = io::Error; - - fn poll_inbound( - self: Pin<&mut Self>, - _: &mut Context<'_>, - ) -> Poll> { - let this = self.get_mut(); - - match this.endpoint { - Endpoint::Dialer => Poll::Pending, - Endpoint::Listener => match this.inner.replace(None) { - None => Poll::Pending, - Some(stream) => Poll::Ready(Ok(stream)), - }, - } - } - - fn poll_outbound( - self: Pin<&mut Self>, - _: &mut Context<'_>, - ) -> Poll> { - let this = self.get_mut(); - - match this.endpoint { - Endpoint::Listener => Poll::Pending, - Endpoint::Dialer => match this.inner.replace(None) { - None => Poll::Pending, - Some(stream) => Poll::Ready(Ok(stream)), - }, - } - } - - fn poll_close(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } - - fn poll( - self: Pin<&mut Self>, - _: &mut Context<'_>, - ) -> Poll> { - Poll::Pending - } -} diff --git a/core/src/peer_record.rs b/core/src/peer_record.rs index 35433dc245f..2fd6a39ef2b 100644 --- a/core/src/peer_record.rs +++ b/core/src/peer_record.rs @@ -36,8 +36,7 @@ impl PeerRecord { let (payload, signing_key) = envelope.payload_and_signing_key(String::from(DOMAIN_SEP), PAYLOAD_TYPE.as_bytes())?; let mut reader = BytesReader::from_bytes(payload); - let record = - proto::PeerRecord::from_reader(&mut reader, 
payload).map_err(DecodeError::from)?; + let record = proto::PeerRecord::from_reader(&mut reader, payload).map_err(DecodeError)?; let peer_id = PeerId::from_bytes(&record.peer_id)?; @@ -139,7 +138,7 @@ pub enum FromEnvelopeError { InvalidPeerRecord(#[from] DecodeError), /// Failed to decode the peer ID. #[error("Failed to decode bytes as PeerId")] - InvalidPeerId(#[from] multihash::Error), + InvalidPeerId(#[from] libp2p_identity::ParseError), /// The signer of the envelope is different than the peer id in the record. #[error("The signer of the envelope is different than the peer id in the record")] MismatchedSignature, diff --git a/core/src/signed_envelope.rs b/core/src/signed_envelope.rs index a50146f87d1..19a0cac4f82 100644 --- a/core/src/signed_envelope.rs +++ b/core/src/signed_envelope.rs @@ -97,8 +97,7 @@ impl SignedEnvelope { use quick_protobuf::MessageRead; let mut reader = BytesReader::from_bytes(bytes); - let envelope = - proto::Envelope::from_reader(&mut reader, bytes).map_err(DecodeError::from)?; + let envelope = proto::Envelope::from_reader(&mut reader, bytes).map_err(DecodeError)?; Ok(Self { key: PublicKey::try_decode_protobuf(&envelope.public_key)?, diff --git a/core/src/transport.rs b/core/src/transport.rs index 04196efca13..22e7a0532fa 100644 --- a/core/src/transport.rs +++ b/core/src/transport.rs @@ -31,12 +31,14 @@ use std::{ error::Error, fmt, pin::Pin, + sync::atomic::{AtomicUsize, Ordering}, task::{Context, Poll}, }; pub mod and_then; pub mod choice; pub mod dummy; +pub mod global_only; pub mod map; pub mod map_err; pub mod memory; @@ -54,6 +56,8 @@ pub use self::memory::MemoryTransport; pub use self::optional::OptionalTransport; pub use self::upgrade::Upgrade; +static NEXT_LISTENER_ID: AtomicUsize = AtomicUsize::new(1); + /// A transport provides connection-oriented communication between two peers /// through ordered streams of data (i.e. connections). /// @@ -108,8 +112,12 @@ pub trait Transport { /// obtained from [dialing](Transport::dial). 
type Dial: Future>; - /// Listens on the given [`Multiaddr`] for inbound connections. - fn listen_on(&mut self, addr: Multiaddr) -> Result>; + /// Listens on the given [`Multiaddr`] for inbound connections with a provided [`ListenerId`]. + fn listen_on( + &mut self, + id: ListenerId, + addr: Multiaddr, + ) -> Result<(), TransportError>; /// Remove a listener. /// @@ -127,7 +135,7 @@ pub trait Transport { /// /// This option is needed for NAT and firewall hole punching. /// - /// See [`ConnectedPoint::Dialer`](crate::connection::ConnectedPoint::Dialer) for related option. + /// See [`ConnectedPoint::Dialer`] for related option. fn dial_as_listener( &mut self, addr: Multiaddr, @@ -227,8 +235,7 @@ pub trait Transport { and_then::AndThen::new(self, f) } - /// Begins a series of protocol upgrades via an - /// [`upgrade::Builder`](upgrade::Builder). + /// Begins a series of protocol upgrades via an [`upgrade::Builder`]. fn upgrade(self, version: upgrade::Version) -> upgrade::Builder where Self: Sized, @@ -240,18 +247,18 @@ pub trait Transport { /// The ID of a single listener. #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] -pub struct ListenerId(u64); +pub struct ListenerId(usize); impl ListenerId { /// Creates a new `ListenerId`. - pub fn new() -> Self { - ListenerId(rand::random()) + pub fn next() -> Self { + ListenerId(NEXT_LISTENER_ID.fetch_add(1, Ordering::SeqCst)) } } -impl Default for ListenerId { - fn default() -> Self { - Self::new() +impl std::fmt::Display for ListenerId { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.0) } } @@ -400,16 +407,16 @@ impl TransportEvent { /// Returns `None` if the event is not actually an incoming connection, /// otherwise the upgrade and the remote address. pub fn into_incoming(self) -> Option<(TUpgr, Multiaddr)> { - if let TransportEvent::Incoming { + let TransportEvent::Incoming { upgrade, send_back_addr, .. 
} = self - { - Some((upgrade, send_back_addr)) - } else { - None - } + else { + return None; + }; + + Some((upgrade, send_back_addr)) } /// Returns `true` if this is a [`TransportEvent::NewAddress`]. diff --git a/core/src/transport/and_then.rs b/core/src/transport/and_then.rs index fb5280568ea..6e0c7e32067 100644 --- a/core/src/transport/and_then.rs +++ b/core/src/transport/and_then.rs @@ -54,9 +54,13 @@ where type ListenerUpgrade = AndThenFuture; type Dial = AndThenFuture; - fn listen_on(&mut self, addr: Multiaddr) -> Result> { + fn listen_on( + &mut self, + id: ListenerId, + addr: Multiaddr, + ) -> Result<(), TransportError> { self.transport - .listen_on(addr) + .listen_on(id, addr) .map_err(|err| err.map(Either::Left)) } diff --git a/core/src/transport/boxed.rs b/core/src/transport/boxed.rs index a55e4db8466..1cede676c8e 100644 --- a/core/src/transport/boxed.rs +++ b/core/src/transport/boxed.rs @@ -43,7 +43,7 @@ where /// A `Boxed` transport is a `Transport` whose `Dial`, `Listener` /// and `ListenerUpgrade` futures are `Box`ed and only the `Output` -/// and `Error` types are captured in type variables. +/// type is captured in a type variable. 
pub struct Boxed { inner: Box + Send + Unpin>, } @@ -52,7 +52,11 @@ type Dial = Pin> + Send>>; type ListenerUpgrade = Pin> + Send>>; trait Abstract { - fn listen_on(&mut self, addr: Multiaddr) -> Result>; + fn listen_on( + &mut self, + id: ListenerId, + addr: Multiaddr, + ) -> Result<(), TransportError>; fn remove_listener(&mut self, id: ListenerId) -> bool; fn dial(&mut self, addr: Multiaddr) -> Result, TransportError>; fn dial_as_listener(&mut self, addr: Multiaddr) -> Result, TransportError>; @@ -70,8 +74,12 @@ where T::Dial: Send + 'static, T::ListenerUpgrade: Send + 'static, { - fn listen_on(&mut self, addr: Multiaddr) -> Result> { - Transport::listen_on(self, addr).map_err(|e| e.map(box_err)) + fn listen_on( + &mut self, + id: ListenerId, + addr: Multiaddr, + ) -> Result<(), TransportError> { + Transport::listen_on(self, id, addr).map_err(|e| e.map(box_err)) } fn remove_listener(&mut self, id: ListenerId) -> bool { @@ -123,8 +131,12 @@ impl Transport for Boxed { type ListenerUpgrade = ListenerUpgrade; type Dial = Dial; - fn listen_on(&mut self, addr: Multiaddr) -> Result> { - self.inner.listen_on(addr) + fn listen_on( + &mut self, + id: ListenerId, + addr: Multiaddr, + ) -> Result<(), TransportError> { + self.inner.listen_on(id, addr) } fn remove_listener(&mut self, id: ListenerId) -> bool { diff --git a/core/src/transport/choice.rs b/core/src/transport/choice.rs index bb7d542d292..aa3acfc3231 100644 --- a/core/src/transport/choice.rs +++ b/core/src/transport/choice.rs @@ -46,14 +46,42 @@ where type ListenerUpgrade = EitherFuture; type Dial = EitherFuture; - fn listen_on(&mut self, addr: Multiaddr) -> Result> { - let addr = match self.0.listen_on(addr) { - Err(TransportError::MultiaddrNotSupported(addr)) => addr, + fn listen_on( + &mut self, + id: ListenerId, + addr: Multiaddr, + ) -> Result<(), TransportError> { + tracing::trace!( + address=%addr, + "Attempting to listen on address using {}", + std::any::type_name::() + ); + let addr = match 
self.0.listen_on(id, addr) { + Err(TransportError::MultiaddrNotSupported(addr)) => { + tracing::debug!( + address=%addr, + "Failed to listen on address using {}", + std::any::type_name::() + ); + addr + } res => return res.map_err(|err| err.map(Either::Left)), }; - let addr = match self.1.listen_on(addr) { - Err(TransportError::MultiaddrNotSupported(addr)) => addr, + tracing::trace!( + address=%addr, + "Attempting to listen on address using {}", + std::any::type_name::() + ); + let addr = match self.1.listen_on(id, addr) { + Err(TransportError::MultiaddrNotSupported(addr)) => { + tracing::debug!( + address=%addr, + "Failed to listen on address using {}", + std::any::type_name::() + ); + addr + } res => return res.map_err(|err| err.map(Either::Right)), }; @@ -65,17 +93,41 @@ where } fn dial(&mut self, addr: Multiaddr) -> Result> { + tracing::trace!( + address=%addr, + "Attempting to dial address using {}", + std::any::type_name::() + ); let addr = match self.0.dial(addr) { Ok(connec) => return Ok(EitherFuture::First(connec)), - Err(TransportError::MultiaddrNotSupported(addr)) => addr, + Err(TransportError::MultiaddrNotSupported(addr)) => { + tracing::debug!( + address=%addr, + "Failed to dial address using {}", + std::any::type_name::() + ); + addr + } Err(TransportError::Other(err)) => { return Err(TransportError::Other(Either::Left(err))) } }; + tracing::trace!( + address=%addr, + "Attempting to dial address using {}", + std::any::type_name::() + ); let addr = match self.1.dial(addr) { Ok(connec) => return Ok(EitherFuture::Second(connec)), - Err(TransportError::MultiaddrNotSupported(addr)) => addr, + Err(TransportError::MultiaddrNotSupported(addr)) => { + tracing::debug!( + address=%addr, + "Failed to dial address using {}", + std::any::type_name::() + ); + addr + } Err(TransportError::Other(err)) => { return Err(TransportError::Other(Either::Right(err))) } diff --git a/core/src/transport/dummy.rs b/core/src/transport/dummy.rs index a7d1cab9089..951d1039328 100644 
--- a/core/src/transport/dummy.rs +++ b/core/src/transport/dummy.rs @@ -59,7 +59,11 @@ impl Transport for DummyTransport { type ListenerUpgrade = futures::future::Pending>; type Dial = futures::future::Pending>; - fn listen_on(&mut self, addr: Multiaddr) -> Result> { + fn listen_on( + &mut self, + _id: ListenerId, + addr: Multiaddr, + ) -> Result<(), TransportError> { Err(TransportError::MultiaddrNotSupported(addr)) } diff --git a/core/src/transport/global_only.rs b/core/src/transport/global_only.rs new file mode 100644 index 00000000000..0671b0e9984 --- /dev/null +++ b/core/src/transport/global_only.rs @@ -0,0 +1,349 @@ +// Copyright 2023 Protocol Labs +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +use crate::{ + multiaddr::{Multiaddr, Protocol}, + transport::{ListenerId, TransportError, TransportEvent}, +}; +use std::{ + pin::Pin, + task::{Context, Poll}, +}; + +/// Dropping all dial requests to non-global IP addresses. 
+#[derive(Debug, Clone, Default)] +pub struct Transport { + inner: T, +} + +/// This module contains an implementation of the `is_global` IPv4 address space. +/// +/// Credit for this implementation goes to the Rust standard library team. +/// +/// Unstable tracking issue: [#27709](https://github.com/rust-lang/rust/issues/27709) +mod ipv4_global { + use std::net::Ipv4Addr; + + /// Returns [`true`] if this address is reserved by IANA for future use. [IETF RFC 1112] + /// defines the block of reserved addresses as `240.0.0.0/4`. This range normally includes the + /// broadcast address `255.255.255.255`, but this implementation explicitly excludes it, since + /// it is obviously not reserved for future use. + /// + /// [IETF RFC 1112]: https://tools.ietf.org/html/rfc1112 + /// + /// # Warning + /// + /// As IANA assigns new addresses, this method will be + /// updated. This may result in non-reserved addresses being + /// treated as reserved in code that relies on an outdated version + /// of this method. + #[must_use] + #[inline] + const fn is_reserved(a: Ipv4Addr) -> bool { + a.octets()[0] & 240 == 240 && !a.is_broadcast() + } + + /// Returns [`true`] if this address part of the `198.18.0.0/15` range, which is reserved for + /// network devices benchmarking. This range is defined in [IETF RFC 2544] as `192.18.0.0` + /// through `198.19.255.255` but [errata 423] corrects it to `198.18.0.0/15`. + /// + /// [IETF RFC 2544]: https://tools.ietf.org/html/rfc2544 + /// [errata 423]: https://www.rfc-editor.org/errata/eid423 + #[must_use] + #[inline] + const fn is_benchmarking(a: Ipv4Addr) -> bool { + a.octets()[0] == 198 && (a.octets()[1] & 0xfe) == 18 + } + + /// Returns [`true`] if this address is part of the Shared Address Space defined in + /// [IETF RFC 6598] (`100.64.0.0/10`). 
+ /// + /// [IETF RFC 6598]: https://tools.ietf.org/html/rfc6598 + #[must_use] + #[inline] + const fn is_shared(a: Ipv4Addr) -> bool { + a.octets()[0] == 100 && (a.octets()[1] & 0b1100_0000 == 0b0100_0000) + } + + /// Returns [`true`] if this is a private address. + /// + /// The private address ranges are defined in [IETF RFC 1918] and include: + /// + /// - `10.0.0.0/8` + /// - `172.16.0.0/12` + /// - `192.168.0.0/16` + /// + /// [IETF RFC 1918]: https://tools.ietf.org/html/rfc1918 + #[must_use] + #[inline] + const fn is_private(a: Ipv4Addr) -> bool { + match a.octets() { + [10, ..] => true, + [172, b, ..] if b >= 16 && b <= 31 => true, + [192, 168, ..] => true, + _ => false, + } + } + + /// Returns [`true`] if the address appears to be globally reachable + /// as specified by the [IANA IPv4 Special-Purpose Address Registry]. + /// Whether or not an address is practically reachable will depend on your network configuration. + /// + /// Most IPv4 addresses are globally reachable; + /// unless they are specifically defined as *not* globally reachable. 
+ /// + /// Non-exhaustive list of notable addresses that are not globally reachable: + /// + /// - The [unspecified address] ([`is_unspecified`](Ipv4Addr::is_unspecified)) + /// - Addresses reserved for private use ([`is_private`](Ipv4Addr::is_private)) + /// - Addresses in the shared address space ([`is_shared`](Ipv4Addr::is_shared)) + /// - Loopback addresses ([`is_loopback`](Ipv4Addr::is_loopback)) + /// - Link-local addresses ([`is_link_local`](Ipv4Addr::is_link_local)) + /// - Addresses reserved for documentation ([`is_documentation`](Ipv4Addr::is_documentation)) + /// - Addresses reserved for benchmarking ([`is_benchmarking`](Ipv4Addr::is_benchmarking)) + /// - Reserved addresses ([`is_reserved`](Ipv4Addr::is_reserved)) + /// - The [broadcast address] ([`is_broadcast`](Ipv4Addr::is_broadcast)) + /// + /// For the complete overview of which addresses are globally reachable, see the table at the [IANA IPv4 Special-Purpose Address Registry]. + /// + /// [IANA IPv4 Special-Purpose Address Registry]: https://www.iana.org/assignments/iana-ipv4-special-registry/iana-ipv4-special-registry.xhtml + /// [unspecified address]: Ipv4Addr::UNSPECIFIED + /// [broadcast address]: Ipv4Addr::BROADCAST + #[must_use] + #[inline] + pub(crate) const fn is_global(a: Ipv4Addr) -> bool { + !(a.octets()[0] == 0 // "This network" + || is_private(a) + || is_shared(a) + || a.is_loopback() + || a.is_link_local() + // addresses reserved for future protocols (`192.0.0.0/24`) + ||(a.octets()[0] == 192 && a.octets()[1] == 0 && a.octets()[2] == 0) + || a.is_documentation() + || is_benchmarking(a) + || is_reserved(a) + || a.is_broadcast()) + } +} + +/// This module contains an implementation of the `is_global` IPv6 address space. +/// +/// Credit for this implementation goes to the Rust standard library team. 
+/// +/// Unstable tracking issue: [#27709](https://github.com/rust-lang/rust/issues/27709) +mod ipv6_global { + use std::net::Ipv6Addr; + + /// Returns `true` if the address is a unicast address with link-local scope, + /// as defined in [RFC 4291]. + /// + /// A unicast address has link-local scope if it has the prefix `fe80::/10`, as per [RFC 4291 section 2.4]. + /// Note that this encompasses more addresses than those defined in [RFC 4291 section 2.5.6], + /// which describes "Link-Local IPv6 Unicast Addresses" as having the following stricter format: + /// + /// ```text + /// | 10 bits | 54 bits | 64 bits | + /// +----------+-------------------------+----------------------------+ + /// |1111111010| 0 | interface ID | + /// +----------+-------------------------+----------------------------+ + /// ``` + /// So while currently the only addresses with link-local scope an application will encounter are all in `fe80::/64`, + /// this might change in the future with the publication of new standards. More addresses in `fe80::/10` could be allocated, + /// and those addresses will have link-local scope. + /// + /// Also note that while [RFC 4291 section 2.5.3] mentions about the [loopback address] (`::1`) that "it is treated as having Link-Local scope", + /// this does not mean that the loopback address actually has link-local scope and this method will return `false` on it. + /// + /// [RFC 4291]: https://tools.ietf.org/html/rfc4291 + /// [RFC 4291 section 2.4]: https://tools.ietf.org/html/rfc4291#section-2.4 + /// [RFC 4291 section 2.5.3]: https://tools.ietf.org/html/rfc4291#section-2.5.3 + /// [RFC 4291 section 2.5.6]: https://tools.ietf.org/html/rfc4291#section-2.5.6 + /// [loopback address]: Ipv6Addr::LOCALHOST + #[must_use] + #[inline] + const fn is_unicast_link_local(a: Ipv6Addr) -> bool { + (a.segments()[0] & 0xffc0) == 0xfe80 + } + + /// Returns [`true`] if this is a unique local address (`fc00::/7`). + /// + /// This property is defined in [IETF RFC 4193]. 
+ /// + /// [IETF RFC 4193]: https://tools.ietf.org/html/rfc4193 + #[must_use] + #[inline] + const fn is_unique_local(a: Ipv6Addr) -> bool { + (a.segments()[0] & 0xfe00) == 0xfc00 + } + + /// Returns [`true`] if this is an address reserved for documentation + /// (`2001:db8::/32`). + /// + /// This property is defined in [IETF RFC 3849]. + /// + /// [IETF RFC 3849]: https://tools.ietf.org/html/rfc3849 + #[must_use] + #[inline] + const fn is_documentation(a: Ipv6Addr) -> bool { + (a.segments()[0] == 0x2001) && (a.segments()[1] == 0xdb8) + } + + /// Returns [`true`] if the address appears to be globally reachable + /// as specified by the [IANA IPv6 Special-Purpose Address Registry]. + /// Whether or not an address is practically reachable will depend on your network configuration. + /// + /// Most IPv6 addresses are globally reachable; + /// unless they are specifically defined as *not* globally reachable. + /// + /// Non-exhaustive list of notable addresses that are not globally reachable: + /// - The [unspecified address] ([`is_unspecified`](Ipv6Addr::is_unspecified)) + /// - The [loopback address] ([`is_loopback`](Ipv6Addr::is_loopback)) + /// - IPv4-mapped addresses + /// - Addresses reserved for benchmarking + /// - Addresses reserved for documentation ([`is_documentation`](Ipv6Addr::is_documentation)) + /// - Unique local addresses ([`is_unique_local`](Ipv6Addr::is_unique_local)) + /// - Unicast addresses with link-local scope ([`is_unicast_link_local`](Ipv6Addr::is_unicast_link_local)) + /// + /// For the complete overview of which addresses are globally reachable, see the table at the [IANA IPv6 Special-Purpose Address Registry]. 
+ /// + /// Note that an address having global scope is not the same as being globally reachable, + /// and there is no direct relation between the two concepts: There exist addresses with global scope + /// that are not globally reachable (for example unique local addresses), + /// and addresses that are globally reachable without having global scope + /// (multicast addresses with non-global scope). + /// + /// [IANA IPv6 Special-Purpose Address Registry]: https://www.iana.org/assignments/iana-ipv6-special-registry/iana-ipv6-special-registry.xhtml + /// [unspecified address]: Ipv6Addr::UNSPECIFIED + /// [loopback address]: Ipv6Addr::LOCALHOST + #[must_use] + #[inline] + pub(crate) const fn is_global(a: Ipv6Addr) -> bool { + !(a.is_unspecified() + || a.is_loopback() + // IPv4-mapped Address (`::ffff:0:0/96`) + || matches!(a.segments(), [0, 0, 0, 0, 0, 0xffff, _, _]) + // IPv4-IPv6 Translat. (`64:ff9b:1::/48`) + || matches!(a.segments(), [0x64, 0xff9b, 1, _, _, _, _, _]) + // Discard-Only Address Block (`100::/64`) + || matches!(a.segments(), [0x100, 0, 0, 0, _, _, _, _]) + // IETF Protocol Assignments (`2001::/23`) + || (matches!(a.segments(), [0x2001, b, _, _, _, _, _, _] if b < 0x200) + && !( + // Port Control Protocol Anycast (`2001:1::1`) + u128::from_be_bytes(a.octets()) == 0x2001_0001_0000_0000_0000_0000_0000_0001 + // Traversal Using Relays around NAT Anycast (`2001:1::2`) + || u128::from_be_bytes(a.octets()) == 0x2001_0001_0000_0000_0000_0000_0000_0002 + // AMT (`2001:3::/32`) + || matches!(a.segments(), [0x2001, 3, _, _, _, _, _, _]) + // AS112-v6 (`2001:4:112::/48`) + || matches!(a.segments(), [0x2001, 4, 0x112, _, _, _, _, _]) + // ORCHIDv2 (`2001:20::/28`) + || matches!(a.segments(), [0x2001, b, _, _, _, _, _, _] if b >= 0x20 && b <= 0x2F) + )) + || is_documentation(a) + || is_unique_local(a) + || is_unicast_link_local(a)) + } +} + +impl Transport { + pub fn new(transport: T) -> Self { + Transport { inner: transport } + } +} + +impl crate::Transport 
for Transport { + type Output = ::Output; + type Error = ::Error; + type ListenerUpgrade = ::ListenerUpgrade; + type Dial = ::Dial; + + fn listen_on( + &mut self, + id: ListenerId, + addr: Multiaddr, + ) -> Result<(), TransportError> { + self.inner.listen_on(id, addr) + } + + fn remove_listener(&mut self, id: ListenerId) -> bool { + self.inner.remove_listener(id) + } + + fn dial(&mut self, addr: Multiaddr) -> Result> { + match addr.iter().next() { + Some(Protocol::Ip4(a)) => { + if !ipv4_global::is_global(a) { + tracing::debug!(ip=%a, "Not dialing non global IP address"); + return Err(TransportError::MultiaddrNotSupported(addr)); + } + self.inner.dial(addr) + } + Some(Protocol::Ip6(a)) => { + if !ipv6_global::is_global(a) { + tracing::debug!(ip=%a, "Not dialing non global IP address"); + return Err(TransportError::MultiaddrNotSupported(addr)); + } + self.inner.dial(addr) + } + _ => { + tracing::debug!(address=%addr, "Not dialing unsupported Multiaddress"); + Err(TransportError::MultiaddrNotSupported(addr)) + } + } + } + + fn dial_as_listener( + &mut self, + addr: Multiaddr, + ) -> Result> { + match addr.iter().next() { + Some(Protocol::Ip4(a)) => { + if !ipv4_global::is_global(a) { + tracing::debug!(ip=?a, "Not dialing non global IP address"); + return Err(TransportError::MultiaddrNotSupported(addr)); + } + self.inner.dial_as_listener(addr) + } + Some(Protocol::Ip6(a)) => { + if !ipv6_global::is_global(a) { + tracing::debug!(ip=?a, "Not dialing non global IP address"); + return Err(TransportError::MultiaddrNotSupported(addr)); + } + self.inner.dial_as_listener(addr) + } + _ => { + tracing::debug!(address=%addr, "Not dialing unsupported Multiaddress"); + Err(TransportError::MultiaddrNotSupported(addr)) + } + } + } + + fn address_translation(&self, listen: &Multiaddr, observed: &Multiaddr) -> Option { + self.inner.address_translation(listen, observed) + } + + fn poll( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll> { + Pin::new(&mut 
self.inner).poll(cx) + } +} diff --git a/core/src/transport/map.rs b/core/src/transport/map.rs index 50f7b826d36..553f3e6338d 100644 --- a/core/src/transport/map.rs +++ b/core/src/transport/map.rs @@ -61,8 +61,12 @@ where type ListenerUpgrade = MapFuture; type Dial = MapFuture; - fn listen_on(&mut self, addr: Multiaddr) -> Result> { - self.transport.listen_on(addr) + fn listen_on( + &mut self, + id: ListenerId, + addr: Multiaddr, + ) -> Result<(), TransportError> { + self.transport.listen_on(id, addr) } fn remove_listener(&mut self, id: ListenerId) -> bool { diff --git a/core/src/transport/map_err.rs b/core/src/transport/map_err.rs index 99f2912447f..56e1ebf2929 100644 --- a/core/src/transport/map_err.rs +++ b/core/src/transport/map_err.rs @@ -50,9 +50,15 @@ where type ListenerUpgrade = MapErrListenerUpgrade; type Dial = MapErrDial; - fn listen_on(&mut self, addr: Multiaddr) -> Result> { + fn listen_on( + &mut self, + id: ListenerId, + addr: Multiaddr, + ) -> Result<(), TransportError> { let map = self.map.clone(); - self.transport.listen_on(addr).map_err(|err| err.map(map)) + self.transport + .listen_on(id, addr) + .map_err(|err| err.map(map)) } fn remove_listener(&mut self, id: ListenerId) -> bool { diff --git a/core/src/transport/memory.rs b/core/src/transport/memory.rs index 7e079d07fb5..bf88215dd43 100644 --- a/core/src/transport/memory.rs +++ b/core/src/transport/memory.rs @@ -62,9 +62,8 @@ impl Hub { port } else { loop { - let port = match NonZeroU64::new(rand::random()) { - Some(p) => p, - None => continue, + let Some(port) = NonZeroU64::new(rand::random()) else { + continue; }; if !hub.contains_key(&port) { break port; @@ -179,19 +178,18 @@ impl Transport for MemoryTransport { type ListenerUpgrade = Ready>; type Dial = DialFuture; - fn listen_on(&mut self, addr: Multiaddr) -> Result> { - let port = if let Ok(port) = parse_memory_addr(&addr) { - port - } else { - return Err(TransportError::MultiaddrNotSupported(addr)); - }; + fn listen_on( + &mut self, + 
id: ListenerId, + addr: Multiaddr, + ) -> Result<(), TransportError> { + let port = + parse_memory_addr(&addr).map_err(|_| TransportError::MultiaddrNotSupported(addr))?; - let (rx, port) = match HUB.register_port(port) { - Some((rx, port)) => (rx, port), - None => return Err(TransportError::Other(MemoryTransportError::Unreachable)), - }; + let (rx, port) = HUB + .register_port(port) + .ok_or(TransportError::Other(MemoryTransportError::Unreachable))?; - let id = ListenerId::new(); let listener = Listener { id, port, @@ -201,7 +199,7 @@ impl Transport for MemoryTransport { }; self.listeners.push_back(Box::pin(listener)); - Ok(id) + Ok(()) } fn remove_listener(&mut self, id: ListenerId) -> bool { @@ -457,30 +455,40 @@ mod tests { let addr_1: Multiaddr = "/memory/1639174018481".parse().unwrap(); let addr_2: Multiaddr = "/memory/8459375923478".parse().unwrap(); - let listener_id_1 = transport.listen_on(addr_1.clone()).unwrap(); + let listener_id_1 = ListenerId::next(); + + transport.listen_on(listener_id_1, addr_1.clone()).unwrap(); assert!( transport.remove_listener(listener_id_1), "Listener doesn't exist." ); - let listener_id_2 = transport.listen_on(addr_1.clone()).unwrap(); - let listener_id_3 = transport.listen_on(addr_2.clone()).unwrap(); + let listener_id_2 = ListenerId::next(); + transport.listen_on(listener_id_2, addr_1.clone()).unwrap(); + let listener_id_3 = ListenerId::next(); + transport.listen_on(listener_id_3, addr_2.clone()).unwrap(); - assert!(transport.listen_on(addr_1.clone()).is_err()); - assert!(transport.listen_on(addr_2.clone()).is_err()); + assert!(transport + .listen_on(ListenerId::next(), addr_1.clone()) + .is_err()); + assert!(transport + .listen_on(ListenerId::next(), addr_2.clone()) + .is_err()); assert!( transport.remove_listener(listener_id_2), "Listener doesn't exist." 
); - assert!(transport.listen_on(addr_1).is_ok()); - assert!(transport.listen_on(addr_2.clone()).is_err()); + assert!(transport.listen_on(ListenerId::next(), addr_1).is_ok()); + assert!(transport + .listen_on(ListenerId::next(), addr_2.clone()) + .is_err()); assert!( transport.remove_listener(listener_id_3), "Listener doesn't exist." ); - assert!(transport.listen_on(addr_2).is_ok()); + assert!(transport.listen_on(ListenerId::next(), addr_2).is_ok()); } #[test] @@ -489,8 +497,11 @@ mod tests { assert!(transport .dial("/memory/810172461024613".parse().unwrap()) .is_err()); - let _listener = transport - .listen_on("/memory/810172461024613".parse().unwrap()) + transport + .listen_on( + ListenerId::next(), + "/memory/810172461024613".parse().unwrap(), + ) .unwrap(); assert!(transport .dial("/memory/810172461024613".parse().unwrap()) @@ -504,7 +515,8 @@ mod tests { let mut transport = MemoryTransport::default().boxed(); futures::executor::block_on(async { - let listener_id = transport.listen_on(addr.clone()).unwrap(); + let listener_id = ListenerId::next(); + transport.listen_on(listener_id, addr.clone()).unwrap(); let reported_addr = transport .select_next_some() .await @@ -539,7 +551,7 @@ mod tests { let mut t1 = MemoryTransport::default().boxed(); let listener = async move { - t1.listen_on(t1_addr.clone()).unwrap(); + t1.listen_on(ListenerId::next(), t1_addr.clone()).unwrap(); let upgrade = loop { let event = t1.select_next_some().await; if let Some(upgrade) = event.into_incoming() { @@ -577,7 +589,9 @@ mod tests { let mut listener_transport = MemoryTransport::default().boxed(); let listener = async move { - listener_transport.listen_on(listener_addr.clone()).unwrap(); + listener_transport + .listen_on(ListenerId::next(), listener_addr.clone()) + .unwrap(); loop { if let TransportEvent::Incoming { send_back_addr, .. 
} = listener_transport.select_next_some().await @@ -614,7 +628,9 @@ mod tests { let mut listener_transport = MemoryTransport::default().boxed(); let listener = async move { - listener_transport.listen_on(listener_addr.clone()).unwrap(); + listener_transport + .listen_on(ListenerId::next(), listener_addr.clone()) + .unwrap(); loop { if let TransportEvent::Incoming { send_back_addr, .. } = listener_transport.select_next_some().await diff --git a/core/src/transport/optional.rs b/core/src/transport/optional.rs index 2d93077659c..839f55a4000 100644 --- a/core/src/transport/optional.rs +++ b/core/src/transport/optional.rs @@ -60,9 +60,13 @@ where type ListenerUpgrade = T::ListenerUpgrade; type Dial = T::Dial; - fn listen_on(&mut self, addr: Multiaddr) -> Result> { + fn listen_on( + &mut self, + id: ListenerId, + addr: Multiaddr, + ) -> Result<(), TransportError> { if let Some(inner) = self.0.as_mut() { - inner.listen_on(addr) + inner.listen_on(id, addr) } else { Err(TransportError::MultiaddrNotSupported(addr)) } diff --git a/core/src/transport/timeout.rs b/core/src/transport/timeout.rs index c796e6f0775..0e8ab3f5201 100644 --- a/core/src/transport/timeout.rs +++ b/core/src/transport/timeout.rs @@ -85,9 +85,13 @@ where type ListenerUpgrade = Timeout; type Dial = Timeout; - fn listen_on(&mut self, addr: Multiaddr) -> Result> { + fn listen_on( + &mut self, + id: ListenerId, + addr: Multiaddr, + ) -> Result<(), TransportError> { self.inner - .listen_on(addr) + .listen_on(id, addr) .map_err(|err| err.map(TransportTimeoutError::Other)) } diff --git a/core/src/transport/upgrade.rs b/core/src/transport/upgrade.rs index 9f6998d9968..8525ab741ff 100644 --- a/core/src/transport/upgrade.rs +++ b/core/src/transport/upgrade.rs @@ -30,8 +30,8 @@ use crate::{ TransportError, TransportEvent, }, upgrade::{ - self, apply_inbound, apply_outbound, InboundUpgrade, InboundUpgradeApply, OutboundUpgrade, - OutboundUpgradeApply, UpgradeError, + self, apply_inbound, apply_outbound, 
InboundConnectionUpgrade, InboundUpgradeApply, + OutboundConnectionUpgrade, OutboundUpgradeApply, UpgradeError, }, Negotiated, }; @@ -101,8 +101,8 @@ where T: Transport, C: AsyncRead + AsyncWrite + Unpin, D: AsyncRead + AsyncWrite + Unpin, - U: InboundUpgrade, Output = (PeerId, D), Error = E>, - U: OutboundUpgrade, Output = (PeerId, D), Error = E> + Clone, + U: InboundConnectionUpgrade, Output = (PeerId, D), Error = E>, + U: OutboundConnectionUpgrade, Output = (PeerId, D), Error = E> + Clone, E: Error + 'static, { let version = self.version; @@ -123,7 +123,7 @@ where pub struct Authenticate where C: AsyncRead + AsyncWrite + Unpin, - U: InboundUpgrade> + OutboundUpgrade>, + U: InboundConnectionUpgrade> + OutboundConnectionUpgrade>, { #[pin] inner: EitherUpgrade, @@ -132,11 +132,11 @@ where impl Future for Authenticate where C: AsyncRead + AsyncWrite + Unpin, - U: InboundUpgrade> - + OutboundUpgrade< + U: InboundConnectionUpgrade> + + OutboundConnectionUpgrade< Negotiated, - Output = >>::Output, - Error = >>::Error, + Output = >>::Output, + Error = >>::Error, >, { type Output = as Future>::Output; @@ -155,7 +155,7 @@ where pub struct Multiplex where C: AsyncRead + AsyncWrite + Unpin, - U: InboundUpgrade> + OutboundUpgrade>, + U: InboundConnectionUpgrade> + OutboundConnectionUpgrade>, { peer_id: Option, #[pin] @@ -165,8 +165,8 @@ where impl Future for Multiplex where C: AsyncRead + AsyncWrite + Unpin, - U: InboundUpgrade, Output = M, Error = E>, - U: OutboundUpgrade, Output = M, Error = E>, + U: InboundConnectionUpgrade, Output = M, Error = E>, + U: OutboundConnectionUpgrade, Output = M, Error = E>, { type Output = Result<(PeerId, M), UpgradeError>; @@ -208,8 +208,8 @@ where T: Transport, C: AsyncRead + AsyncWrite + Unpin, D: AsyncRead + AsyncWrite + Unpin, - U: InboundUpgrade, Output = D, Error = E>, - U: OutboundUpgrade, Output = D, Error = E> + Clone, + U: InboundConnectionUpgrade, Output = D, Error = E>, + U: OutboundConnectionUpgrade, Output = D, Error = E> + 
Clone, E: Error + 'static, { Authenticated(Builder::new( @@ -236,8 +236,8 @@ where T: Transport, C: AsyncRead + AsyncWrite + Unpin, M: StreamMuxer, - U: InboundUpgrade, Output = M, Error = E>, - U: OutboundUpgrade, Output = M, Error = E> + Clone, + U: InboundConnectionUpgrade, Output = M, Error = E>, + U: OutboundConnectionUpgrade, Output = M, Error = E> + Clone, E: Error + 'static, { let version = self.0.version; @@ -269,8 +269,8 @@ where T: Transport, C: AsyncRead + AsyncWrite + Unpin, M: StreamMuxer, - U: InboundUpgrade, Output = M, Error = E>, - U: OutboundUpgrade, Output = M, Error = E> + Clone, + U: InboundConnectionUpgrade, Output = M, Error = E>, + U: OutboundConnectionUpgrade, Output = M, Error = E> + Clone, E: Error + 'static, F: for<'a> FnOnce(&'a PeerId, &'a ConnectedPoint) -> U + Clone, { @@ -350,8 +350,12 @@ where self.0.dial_as_listener(addr) } - fn listen_on(&mut self, addr: Multiaddr) -> Result> { - self.0.listen_on(addr) + fn listen_on( + &mut self, + id: ListenerId, + addr: Multiaddr, + ) -> Result<(), TransportError> { + self.0.listen_on(id, addr) } fn address_translation(&self, server: &Multiaddr, observed: &Multiaddr) -> Option { @@ -391,8 +395,8 @@ where T: Transport, T::Error: 'static, C: AsyncRead + AsyncWrite + Unpin, - U: InboundUpgrade, Output = D, Error = E>, - U: OutboundUpgrade, Output = D, Error = E> + Clone, + U: InboundConnectionUpgrade, Output = D, Error = E>, + U: OutboundConnectionUpgrade, Output = D, Error = E> + Clone, E: Error + 'static, { type Output = (PeerId, D); @@ -429,9 +433,13 @@ where }) } - fn listen_on(&mut self, addr: Multiaddr) -> Result> { + fn listen_on( + &mut self, + id: ListenerId, + addr: Multiaddr, + ) -> Result<(), TransportError> { self.inner - .listen_on(addr) + .listen_on(id, addr) .map_err(|err| err.map(TransportUpgradeError::Transport)) } @@ -494,7 +502,7 @@ where /// The [`Transport::Dial`] future of an [`Upgrade`]d transport. 
pub struct DialUpgradeFuture where - U: OutboundUpgrade>, + U: OutboundConnectionUpgrade>, C: AsyncRead + AsyncWrite + Unpin, { future: Pin>, @@ -505,7 +513,7 @@ impl Future for DialUpgradeFuture where F: TryFuture, C: AsyncRead + AsyncWrite + Unpin, - U: OutboundUpgrade, Output = D>, + U: OutboundConnectionUpgrade, Output = D>, U::Error: Error, { type Output = Result<(PeerId, D), TransportUpgradeError>; @@ -545,7 +553,7 @@ where impl Unpin for DialUpgradeFuture where - U: OutboundUpgrade>, + U: OutboundConnectionUpgrade>, C: AsyncRead + AsyncWrite + Unpin, { } @@ -554,7 +562,7 @@ where pub struct ListenerUpgradeFuture where C: AsyncRead + AsyncWrite + Unpin, - U: InboundUpgrade>, + U: InboundConnectionUpgrade>, { future: Pin>, upgrade: future::Either, (PeerId, InboundUpgradeApply)>, @@ -564,7 +572,7 @@ impl Future for ListenerUpgradeFuture where F: TryFuture, C: AsyncRead + AsyncWrite + Unpin, - U: InboundUpgrade, Output = D>, + U: InboundConnectionUpgrade, Output = D>, U::Error: Error, { type Output = Result<(PeerId, D), TransportUpgradeError>; @@ -605,6 +613,6 @@ where impl Unpin for ListenerUpgradeFuture where C: AsyncRead + AsyncWrite + Unpin, - U: InboundUpgrade>, + U: InboundConnectionUpgrade>, { } diff --git a/core/src/upgrade.rs b/core/src/upgrade.rs index 3d0b752f4b8..69561fbebd8 100644 --- a/core/src/upgrade.rs +++ b/core/src/upgrade.rs @@ -64,18 +64,15 @@ mod error; mod pending; mod ready; mod select; -mod transfer; +pub(crate) use apply::{ + apply, apply_inbound, apply_outbound, InboundUpgradeApply, OutboundUpgradeApply, +}; +pub(crate) use error::UpgradeError; use futures::future::Future; pub use self::{ - apply::{apply, apply_inbound, apply_outbound, InboundUpgradeApply, OutboundUpgradeApply}, - denied::DeniedUpgrade, - error::UpgradeError, - pending::PendingUpgrade, - ready::ReadyUpgrade, - select::SelectUpgrade, - transfer::{read_length_prefixed, read_varint, write_length_prefixed, write_varint}, + denied::DeniedUpgrade, pending::PendingUpgrade, 
ready::ReadyUpgrade, select::SelectUpgrade, }; pub use crate::Negotiated; pub use multistream_select::{NegotiatedComplete, NegotiationError, ProtocolError, Version}; @@ -123,3 +120,35 @@ pub trait OutboundUpgrade: UpgradeInfo { /// The `info` is the identifier of the protocol, as produced by `protocol_info`. fn upgrade_outbound(self, socket: C, info: Self::Info) -> Self::Future; } + +/// Possible upgrade on an inbound connection +pub trait InboundConnectionUpgrade: UpgradeInfo { + /// Output after the upgrade has been successfully negotiated and the handshake performed. + type Output; + /// Possible error during the handshake. + type Error; + /// Future that performs the handshake with the remote. + type Future: Future>; + + /// After we have determined that the remote supports one of the protocols we support, this + /// method is called to start the handshake. + /// + /// The `info` is the identifier of the protocol, as produced by `protocol_info`. + fn upgrade_inbound(self, socket: T, info: Self::Info) -> Self::Future; +} + +/// Possible upgrade on an outbound connection +pub trait OutboundConnectionUpgrade: UpgradeInfo { + /// Output after the upgrade has been successfully negotiated and the handshake performed. + type Output; + /// Possible error during the handshake. + type Error; + /// Future that performs the handshake with the remote. + type Future: Future>; + + /// After we have determined that the remote supports one of the protocols we support, this + /// method is called to start the handshake. + /// + /// The `info` is the identifier of the protocol, as produced by `protocol_info`. + fn upgrade_outbound(self, socket: T, info: Self::Info) -> Self::Future; +} diff --git a/core/src/upgrade/apply.rs b/core/src/upgrade/apply.rs index 1dca0be4ad5..15cb0348cf3 100644 --- a/core/src/upgrade/apply.rs +++ b/core/src/upgrade/apply.rs @@ -18,10 +18,9 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeError}; +use crate::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade, UpgradeError}; use crate::{connection::ConnectedPoint, Negotiated}; use futures::{future::Either, prelude::*}; -use log::debug; use multistream_select::{self, DialerSelectFuture, ListenerSelectFuture}; use std::{mem, pin::Pin, task::Context, task::Poll}; @@ -29,7 +28,7 @@ pub(crate) use multistream_select::Version; // TODO: Still needed? /// Applies an upgrade to the inbound and outbound direction of a connection or substream. -pub fn apply( +pub(crate) fn apply( conn: C, up: U, cp: ConnectedPoint, @@ -37,7 +36,7 @@ pub fn apply( ) -> Either, OutboundUpgradeApply> where C: AsyncRead + AsyncWrite + Unpin, - U: InboundUpgrade> + OutboundUpgrade>, + U: InboundConnectionUpgrade> + OutboundConnectionUpgrade>, { match cp { ConnectedPoint::Dialer { role_override, .. } if role_override.is_dialer() => { @@ -48,24 +47,24 @@ where } /// Tries to perform an upgrade on an inbound connection or substream. -pub fn apply_inbound(conn: C, up: U) -> InboundUpgradeApply +pub(crate) fn apply_inbound(conn: C, up: U) -> InboundUpgradeApply where C: AsyncRead + AsyncWrite + Unpin, - U: InboundUpgrade>, + U: InboundConnectionUpgrade>, { InboundUpgradeApply { inner: InboundUpgradeApplyState::Init { - future: multistream_select::listener_select_proto(conn, up.protocol_info().into_iter()), + future: multistream_select::listener_select_proto(conn, up.protocol_info()), upgrade: up, }, } } /// Tries to perform an upgrade on an outbound connection or substream. 
-pub fn apply_outbound(conn: C, up: U, v: Version) -> OutboundUpgradeApply +pub(crate) fn apply_outbound(conn: C, up: U, v: Version) -> OutboundUpgradeApply where C: AsyncRead + AsyncWrite + Unpin, - U: OutboundUpgrade>, + U: OutboundConnectionUpgrade>, { OutboundUpgradeApply { inner: OutboundUpgradeApplyState::Init { @@ -79,7 +78,7 @@ where pub struct InboundUpgradeApply where C: AsyncRead + AsyncWrite + Unpin, - U: InboundUpgrade>, + U: InboundConnectionUpgrade>, { inner: InboundUpgradeApplyState, } @@ -88,7 +87,7 @@ where enum InboundUpgradeApplyState where C: AsyncRead + AsyncWrite + Unpin, - U: InboundUpgrade>, + U: InboundConnectionUpgrade>, { Init { future: ListenerSelectFuture, @@ -104,14 +103,14 @@ where impl Unpin for InboundUpgradeApply where C: AsyncRead + AsyncWrite + Unpin, - U: InboundUpgrade>, + U: InboundConnectionUpgrade>, { } impl Future for InboundUpgradeApply where C: AsyncRead + AsyncWrite + Unpin, - U: InboundUpgrade>, + U: InboundConnectionUpgrade>, { type Output = Result>; @@ -141,11 +140,11 @@ where return Poll::Pending; } Poll::Ready(Ok(x)) => { - log::trace!("Upgraded inbound stream to {name}"); + tracing::trace!(upgrade=%name, "Upgraded inbound stream"); return Poll::Ready(Ok(x)); } Poll::Ready(Err(e)) => { - debug!("Failed to upgrade inbound stream to {name}"); + tracing::debug!(upgrade=%name, "Failed to upgrade inbound stream"); return Poll::Ready(Err(UpgradeError::Apply(e))); } } @@ -162,7 +161,7 @@ where pub struct OutboundUpgradeApply where C: AsyncRead + AsyncWrite + Unpin, - U: OutboundUpgrade>, + U: OutboundConnectionUpgrade>, { inner: OutboundUpgradeApplyState, } @@ -170,7 +169,7 @@ where enum OutboundUpgradeApplyState where C: AsyncRead + AsyncWrite + Unpin, - U: OutboundUpgrade>, + U: OutboundConnectionUpgrade>, { Init { future: DialerSelectFuture::IntoIter>, @@ -186,14 +185,14 @@ where impl Unpin for OutboundUpgradeApply where C: AsyncRead + AsyncWrite + Unpin, - U: OutboundUpgrade>, + U: OutboundConnectionUpgrade>, { } impl 
Future for OutboundUpgradeApply where C: AsyncRead + AsyncWrite + Unpin, - U: OutboundUpgrade>, + U: OutboundConnectionUpgrade>, { type Output = Result>; @@ -223,11 +222,11 @@ where return Poll::Pending; } Poll::Ready(Ok(x)) => { - log::trace!("Upgraded outbound stream to {name}",); + tracing::trace!(upgrade=%name, "Upgraded outbound stream"); return Poll::Ready(Ok(x)); } Poll::Ready(Err(e)) => { - debug!("Failed to upgrade outbound stream to {name}",); + tracing::debug!(upgrade=%name, "Failed to upgrade outbound stream",); return Poll::Ready(Err(UpgradeError::Apply(e))); } } diff --git a/core/src/upgrade/select.rs b/core/src/upgrade/select.rs index 19b8b7a93f7..037045a2f29 100644 --- a/core/src/upgrade/select.rs +++ b/core/src/upgrade/select.rs @@ -19,7 +19,10 @@ // DEALINGS IN THE SOFTWARE. use crate::either::EitherFuture; -use crate::upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}; +use crate::upgrade::{ + InboundConnectionUpgrade, InboundUpgrade, OutboundConnectionUpgrade, OutboundUpgrade, + UpgradeInfo, +}; use either::Either; use futures::future; use std::iter::{Chain, Map}; @@ -84,6 +87,23 @@ where } } +impl InboundConnectionUpgrade for SelectUpgrade +where + A: InboundConnectionUpgrade, + B: InboundConnectionUpgrade, +{ + type Output = future::Either; + type Error = Either; + type Future = EitherFuture; + + fn upgrade_inbound(self, sock: C, info: Self::Info) -> Self::Future { + match info { + Either::Left(info) => EitherFuture::First(self.0.upgrade_inbound(sock, info)), + Either::Right(info) => EitherFuture::Second(self.1.upgrade_inbound(sock, info)), + } + } +} + impl OutboundUpgrade for SelectUpgrade where A: OutboundUpgrade, @@ -100,3 +120,20 @@ where } } } + +impl OutboundConnectionUpgrade for SelectUpgrade +where + A: OutboundConnectionUpgrade, + B: OutboundConnectionUpgrade, +{ + type Output = future::Either; + type Error = Either; + type Future = EitherFuture; + + fn upgrade_outbound(self, sock: C, info: Self::Info) -> Self::Future { + match 
info { + Either::Left(info) => EitherFuture::First(self.0.upgrade_outbound(sock, info)), + Either::Right(info) => EitherFuture::Second(self.1.upgrade_outbound(sock, info)), + } + } +} diff --git a/core/src/upgrade/transfer.rs b/core/src/upgrade/transfer.rs deleted file mode 100644 index 93aeb987c8a..00000000000 --- a/core/src/upgrade/transfer.rs +++ /dev/null @@ -1,220 +0,0 @@ -// Copyright 2019 Parity Technologies (UK) Ltd. -// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - -//! Contains some helper futures for creating upgrades. - -use futures::prelude::*; -use std::io; - -// TODO: these methods could be on an Ext trait to AsyncWrite - -/// Writes a message to the given socket with a length prefix appended to it. Also flushes the socket. -/// -/// > **Note**: Prepends a variable-length prefix indicate the length of the message. This is -/// > compatible with what [`read_length_prefixed`] expects. 
-pub async fn write_length_prefixed( - socket: &mut (impl AsyncWrite + Unpin), - data: impl AsRef<[u8]>, -) -> Result<(), io::Error> { - write_varint(socket, data.as_ref().len()).await?; - socket.write_all(data.as_ref()).await?; - socket.flush().await?; - - Ok(()) -} - -/// Writes a variable-length integer to the `socket`. -/// -/// > **Note**: Does **NOT** flush the socket. -pub async fn write_varint( - socket: &mut (impl AsyncWrite + Unpin), - len: usize, -) -> Result<(), io::Error> { - let mut len_data = unsigned_varint::encode::usize_buffer(); - let encoded_len = unsigned_varint::encode::usize(len, &mut len_data).len(); - socket.write_all(&len_data[..encoded_len]).await?; - - Ok(()) -} - -/// Reads a variable-length integer from the `socket`. -/// -/// As a special exception, if the `socket` is empty and EOFs right at the beginning, then we -/// return `Ok(0)`. -/// -/// > **Note**: This function reads bytes one by one from the `socket`. It is therefore encouraged -/// > to use some sort of buffering mechanism. -pub async fn read_varint(socket: &mut (impl AsyncRead + Unpin)) -> Result { - let mut buffer = unsigned_varint::encode::usize_buffer(); - let mut buffer_len = 0; - - loop { - match socket.read(&mut buffer[buffer_len..buffer_len + 1]).await? { - 0 => { - // Reaching EOF before finishing to read the length is an error, unless the EOF is - // at the very beginning of the substream, in which case we assume that the data is - // empty. - if buffer_len == 0 { - return Ok(0); - } else { - return Err(io::ErrorKind::UnexpectedEof.into()); - } - } - n => debug_assert_eq!(n, 1), - } - - buffer_len += 1; - - match unsigned_varint::decode::usize(&buffer[..buffer_len]) { - Ok((len, _)) => return Ok(len), - Err(unsigned_varint::decode::Error::Overflow) => { - return Err(io::Error::new( - io::ErrorKind::InvalidData, - "overflow in variable-length integer", - )); - } - // TODO: why do we have a `__Nonexhaustive` variant in the error? 
I don't know how to process it - // Err(unsigned_varint::decode::Error::Insufficient) => {} - Err(_) => {} - } - } -} - -/// Reads a length-prefixed message from the given socket. -/// -/// The `max_size` parameter is the maximum size in bytes of the message that we accept. This is -/// necessary in order to avoid DoS attacks where the remote sends us a message of several -/// gigabytes. -/// -/// > **Note**: Assumes that a variable-length prefix indicates the length of the message. This is -/// > compatible with what [`write_length_prefixed`] does. -pub async fn read_length_prefixed( - socket: &mut (impl AsyncRead + Unpin), - max_size: usize, -) -> io::Result> { - let len = read_varint(socket).await?; - if len > max_size { - return Err(io::Error::new( - io::ErrorKind::InvalidData, - format!("Received data size ({len} bytes) exceeds maximum ({max_size} bytes)"), - )); - } - - let mut buf = vec![0; len]; - socket.read_exact(&mut buf).await?; - - Ok(buf) -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn write_length_prefixed_works() { - let data = (0..rand::random::() % 10_000) - .map(|_| rand::random::()) - .collect::>(); - let mut out = vec![0; 10_000]; - - futures::executor::block_on(async { - let mut socket = futures::io::Cursor::new(&mut out[..]); - - write_length_prefixed(&mut socket, &data).await.unwrap(); - socket.close().await.unwrap(); - }); - - let (out_len, out_data) = unsigned_varint::decode::usize(&out).unwrap(); - assert_eq!(out_len, data.len()); - assert_eq!(&out_data[..out_len], &data[..]); - } - - // TODO: rewrite these tests - /* - #[test] - fn read_one_works() { - let original_data = (0..rand::random::() % 10_000) - .map(|_| rand::random::()) - .collect::>(); - - let mut len_buf = unsigned_varint::encode::usize_buffer(); - let len_buf = unsigned_varint::encode::usize(original_data.len(), &mut len_buf); - - let mut in_buffer = len_buf.to_vec(); - in_buffer.extend_from_slice(&original_data); - - let future = 
read_one_then(Cursor::new(in_buffer), 10_000, (), move |out, ()| -> Result<_, ReadOneError> { - assert_eq!(out, original_data); - Ok(()) - }); - - futures::executor::block_on(future).unwrap(); - } - - #[test] - fn read_one_zero_len() { - let future = read_one_then(Cursor::new(vec![0]), 10_000, (), move |out, ()| -> Result<_, ReadOneError> { - assert!(out.is_empty()); - Ok(()) - }); - - futures::executor::block_on(future).unwrap(); - } - - #[test] - fn read_checks_length() { - let mut len_buf = unsigned_varint::encode::u64_buffer(); - let len_buf = unsigned_varint::encode::u64(5_000, &mut len_buf); - - let mut in_buffer = len_buf.to_vec(); - in_buffer.extend((0..5000).map(|_| 0)); - - let future = read_one_then(Cursor::new(in_buffer), 100, (), move |_, ()| -> Result<_, ReadOneError> { - Ok(()) - }); - - match futures::executor::block_on(future) { - Err(ReadOneError::TooLarge { .. }) => (), - _ => panic!(), - } - } - - #[test] - fn read_one_accepts_empty() { - let future = read_one_then(Cursor::new([]), 10_000, (), move |out, ()| -> Result<_, ReadOneError> { - assert!(out.is_empty()); - Ok(()) - }); - - futures::executor::block_on(future).unwrap(); - } - - #[test] - fn read_one_eof_before_len() { - let future = read_one_then(Cursor::new([0x80]), 10_000, (), move |_, ()| -> Result<(), ReadOneError> { - unreachable!() - }); - - match futures::executor::block_on(future) { - Err(ReadOneError::Io(ref err)) if err.kind() == io::ErrorKind::UnexpectedEof => (), - _ => panic!() - } - }*/ -} diff --git a/core/tests/transport_upgrade.rs b/core/tests/transport_upgrade.rs index ac724a64ffa..a8872051618 100644 --- a/core/tests/transport_upgrade.rs +++ b/core/tests/transport_upgrade.rs @@ -19,8 +19,10 @@ // DEALINGS IN THE SOFTWARE. 
use futures::prelude::*; -use libp2p_core::transport::{MemoryTransport, Transport}; -use libp2p_core::upgrade::{self, InboundUpgrade, OutboundUpgrade, UpgradeInfo}; +use libp2p_core::transport::{ListenerId, MemoryTransport, Transport}; +use libp2p_core::upgrade::{ + self, InboundConnectionUpgrade, OutboundConnectionUpgrade, UpgradeInfo, +}; use libp2p_identity as identity; use libp2p_mplex::MplexConfig; use libp2p_noise as noise; @@ -40,7 +42,7 @@ impl UpgradeInfo for HelloUpgrade { } } -impl InboundUpgrade for HelloUpgrade +impl InboundConnectionUpgrade for HelloUpgrade where C: AsyncRead + AsyncWrite + Send + Unpin + 'static, { @@ -58,7 +60,7 @@ where } } -impl OutboundUpgrade for HelloUpgrade +impl OutboundConnectionUpgrade for HelloUpgrade where C: AsyncWrite + AsyncRead + Send + Unpin + 'static, { @@ -102,15 +104,17 @@ fn upgrade_pipeline() { let listen_addr1 = Multiaddr::from(Protocol::Memory(random::())); let listen_addr2 = listen_addr1.clone(); - listener_transport.listen_on(listen_addr1).unwrap(); + listener_transport + .listen_on(ListenerId::next(), listen_addr1) + .unwrap(); let server = async move { loop { - let (upgrade, _send_back_addr) = - match listener_transport.select_next_some().await.into_incoming() { - Some(u) => u, - None => continue, - }; + let Some((upgrade, _send_back_addr)) = + listener_transport.select_next_some().await.into_incoming() + else { + continue; + }; let (peer, _mplex) = upgrade.await.unwrap(); assert_eq!(peer, dialer_id); } diff --git a/docs/coding-guidelines.md b/docs/coding-guidelines.md index aef8dd6986a..bacbfe9509e 100644 --- a/docs/coding-guidelines.md +++ b/docs/coding-guidelines.md @@ -28,7 +28,7 @@ Below is a set of coding guidelines followed across the rust-libp2p code base. 
## Hierarchical State Machines -If you sqint, rust-libp2p is just a big hierarchy of [state +If you squint, rust-libp2p is just a big hierarchy of [state machines](https://en.wikipedia.org/wiki/Finite-state_machine) where parents pass events down to their children and children pass events up to their parents. @@ -167,7 +167,7 @@ impl Stream for SomeStateMachine { } ``` -This priotization provides: +This prioritization provides: - Low memory footprint as local queues (here `events_to_return_to_parent`) stay small. - Low latency as accepted local work is not stuck in queues. - DOS defense as a remote does not control the size of the local queue, nor starves local work with its remote work. @@ -195,7 +195,7 @@ through a side-channel. ### Local queues As for channels shared across potentially concurrent actors (e.g. future tasks -or OS threads), the same applies for queues owned by a single actor only. E.g. +or OS threads), the same applies to queues owned by a single actor only. E.g. reading events from a socket into a `Vec` without some mechanism bounding the size of that `Vec` again can lead to unbounded memory growth and high latencies. @@ -241,7 +241,7 @@ shows a speed up when running it concurrently. ## Use `async/await` for sequential execution only Using `async/await` for sequential execution makes things significantly simpler. -Though unfortunately using `async/await` does not allow accesing methods on the +Though unfortunately using `async/await` does not allow accessing methods on the object being `await`ed unless paired with some synchronization mechanism like an `Arc>`. @@ -308,7 +308,7 @@ response and a previous request. For example, if a user requests two new connect peer, they should be able to match each new connection to the corresponding previous connection request without having to guess. 
-When accepting a **command** that eventually results in a response through an event require that +When accepting a **command** that eventually results in a response through an event requires that command to contain a unique ID, which is later on contained in the asynchronous response event. One such example is the `Swarm` accepting a `ToSwarm::Dial` from the `NetworkBehaviour`. diff --git a/docs/maintainer-handbook.md b/docs/maintainer-handbook.md new file mode 100644 index 00000000000..6d36f6fe77c --- /dev/null +++ b/docs/maintainer-handbook.md @@ -0,0 +1,68 @@ +# Maintainer handbook + +This document describes what ever maintainer of the repository should know. + +## GitHub settings + +All settings around GitHub like branch protection settings are managed through https://github.com/libp2p/github-mgmt. +For example, adding, removing or renaming a required CI job will need to be preceded by a PR that changes the configuration. + +To streamline things, it is good to _prepare_ such a PR together with the one that changes the CI workflows. +Take care to not merge the configuration change too early because it will block CI of all other PRs because GitHub now requires the new set of jobs (which will only be valid for the PR that actually changes the CI definition). + +## Mergify + +We utilize mergify as a merge-queue and overall automation bot on the repository. +The configuration file is [.github/mergify.yml](../.github/mergify.yml). + +The main feature is the `send-it` label. +Once a PR fulfills all merge requirements (approvals, passing CI, etc), applying the `send-it` labels activates mergify's merge-queue. + +- All branch protection rules, i.e. minimum number of reviews, green CI, etc are _implicit_ and thus do not need to be listed. +- Changing the mergify configuration file **always** requires the PR to be merged manually. + In other words, applying `send-it` to a PR that changes the mergify configuration has **no** effect. 
+ This is a security feature of mergify to make sure changes to the automation are carefully reviewed. + +In case of a trivial code change, maintainers may choose to apply the `trivial` label. +This will have mergify approve your PR, thus fulfilling all requirements to automatically queue a PR for merging. + +## Changelog entries + +Our CI checks that each crate which is modified gets a changelog entry. +Whilst this is a good default safety-wise, it creates a lot of false-positives for changes that are internal and don't need a changelog entry. + +For PRs that in the categories `chore`, `deps`, `refactor` and `docs`, this check is disabled automatically. +Any other PR needs to explicitly disable this check if desired by applying the `internal-change` label. + +## Dependencies + +We version our `Cargo.lock` file for better visibility into which dependencies are required for a functional build. +Additionally, this makes us resilient to semver-incompatible updates of our dependencies (which would otherwise result in a build error). + +As a consequence, we receive many dependency bump PRs from dependabot. +We have some automation in place to deal with those. + +1. semver-compatible updates (i.e. patch bumps for 0.x dependencies and minor bumps for 1.x dependencies) are approved automatically by mergify. +2. all approved dependabot PRs are queued for merging automatically + +The `send-it` label is not necessary (but also harmless) for dependabot PRs. + +## Issues vs discussions + +We typically use issues to handle bugs, feature-requests and track to-be-done work. +As a rule of thumb, we use issues for things that are fairly clear in nature. + +Broader ideas or brainstorming happens in GitHub's discussions. +Those allow for more fine-granular threading which is likely to happen for ideas that are not yet fleshed out. + +Unless specified otherwise, it is safe to assume that what is documented in issues represents the consensus of the maintainers. 
+ +## Labels + +For the most part, the labels we use on issues are pretty self-explanatory. + +- `decision-pending`: Documents that the issue is blocked. + Maintainers are encouraged to provide their input on issues marked with this label. +- `need/author-input`: Integrates with our [.github/workflows/stale.yml](../.github/workflows/stale.yml) workflow. + Any issue tagged with this label will be auto-closed due to inactivity after a certain time. + diff --git a/docs/release.md b/docs/release.md index 5b4d32aedaf..50b7b0605c7 100644 --- a/docs/release.md +++ b/docs/release.md @@ -17,43 +17,34 @@ Non-breaking changes are typically merged very quickly and often released as pat Every crate that we publish on `crates.io` has a `CHANGELOG.md` file. Substantial PRs should add an entry to each crate they modify. -The next unreleased version is tagged with ` - unreleased`, for example: `0.17.0 - unreleased`. +We have a CI check[^1] that enforces adding a changelog entry if you modify code in a particular crate. +In case the current version is already released (we also check that in CI), you'll have to add a new header at the top. +For example, the top-listed version might be `0.17.3` but it is already released. +In that case, add a new heading `## 0.17.4` with your changelog entry in case it is a non-breaking change. -In case there isn't a version with an ` - unreleased` postfix yet, add one for the next version. -The next version number depends on the impact of your change (breaking vs non-breaking, see above). - -If you are making a non-breaking change, please also bump the version number: - -- in the `Cargo.toml` manifest of the respective crate -- in the `[workspace.dependencies]` section of the workspace `Cargo.toml` manifest - -For breaking changes, a changelog entry itself is sufficient. -Bumping the version in the `Cargo.toml` file would lead to many merge conflicts once we decide to merge them. 
-Hence, we are going to bump those versions once we work through the milestone that collects the breaking changes. +The version in the crate's `Cargo.toml` and the top-most version in the `CHANGELOG.md` file always have to be in sync. +Additionally, we also enforce that all crates always depend on the latest version of other workspace-crates through workspace inheritance. +As a consequence, you'll also have to bump the version in `[workspace.dependencies]` in the workspace `Cargo.toml` manifest. ## Releasing one or more crates +The above changelog-management strategy means `master` is always in a state where we can make a release. + ### Prerequisites - [cargo release](https://github.com/crate-ci/cargo-release/) ### Steps -1. Remove the ` - unreleased` tag for each crate to be released in the respective `CHANGELOG.md`. - Create a pull request with the changes against the rust-libp2p `master` branch. - -2. Once merged, run the two commands below on the (squash-) merged commit on the `master` branch. +1. Run the two commands below on the (squash-) merged commit on the `master` branch. 1. `cargo release publish --execute` 2. `cargo release tag --sign-tag --execute` -3. Confirm that `cargo release` tagged the commit correctly via `git push - $YOUR_ORIGIN --tag --dry-run` and then push the new tags via `git push - $YOUR_ORIGIN --tag`. Make sure not to push unrelated git tags. - - Note that dropping the `--no-push` flag on `cargo release` might as well do - the trick. +2. Confirm that `cargo release` tagged the commit correctly via `git push $YOUR_ORIGIN --tag --dry-run` + Push the new tags via `git push $YOUR_ORIGIN --tag`. + Make sure not to push unrelated git tags. ## Patch release @@ -65,15 +56,17 @@ Hence, we are going to bump those versions once we work through the milestone th ## Dealing with alphas -Unfortunately, `cargo` has a rather uninutitive behaviour when it comes to dealing with pre-releases like `0.1.0-alpha`. 
+Unfortunately, `cargo` has a rather unintuitive behaviour when it comes to dealing with pre-releases like `0.1.0-alpha`. See this internals thread for some context: https://internals.rust-lang.org/t/changing-cargo-semver-compatibility-for-pre-releases In short, cargo will automatically update from `0.1.0-alpha.1` to `0.1.0-alpha.2` UNLESS you pin the version directly with `=0.1.0-alpha.1`. However, from a semver perspective, changes between pre-releases can be breaking. -To avoid accidential breaking changes for our users, we employ the following convention for alpha releases: +To avoid accidental breaking changes for our users, we employ the following convention for alpha releases: - For a breaking change in a crate with an alpha release, bump the "minor" version but retain the "alpha" tag. Example: `0.1.0-alpha` to `0.2.0-alpha`. - For a non-breaking change in a crate with an alpha release, bump or append number to the "alpha" tag. Example: `0.1.0-alpha` to `0.1.0-alpha.1`. + +[^1]: See [ci.yml](../.github/workflows/ci.yml) and look for "Ensure manifest and CHANGELOG are properly updated". diff --git a/examples/README.md b/examples/README.md index a8a07be0cbf..0a3e55aed39 100644 --- a/examples/README.md +++ b/examples/README.md @@ -7,7 +7,7 @@ A set of examples showcasing how to use rust-libp2p. ## Individual libp2p features -- [Chat](./chat-example) A basic chat application demonstrating libp2p and the mDNS and Gossipsub protocols. +- [Chat](./chat) A basic chat application demonstrating libp2p and the mDNS and Gossipsub protocols. - [Distributed key-value store](./distributed-key-value-store) A basic key value store demonstrating libp2p and the mDNS and Kademlia protocol. - [File sharing application](./file-sharing) Basic file sharing application with peers either providing or locating and getting files by name. @@ -20,6 +20,6 @@ A set of examples showcasing how to use rust-libp2p. 
- [IPFS Private](./ipfs-private) Implementation using the gossipsub, ping and identify protocols to implement the ipfs private swarms feature. -- [Ping](./ping-example) Small `ping` clone, sending a ping to a peer, expecting a pong as a response. See [tutorial](../src/tutorials/ping.rs) for a step-by-step guide building the example. +- [Ping](./ping) Small `ping` clone, sending a ping to a peer, expecting a pong as a response. See [tutorial](../libp2p/src/tutorials/ping.rs) for a step-by-step guide building the example. - [Rendezvous](./rendezvous) Rendezvous Protocol. See [specs](https://github.com/libp2p/specs/blob/master/rendezvous/README.md). diff --git a/examples/autonat/Cargo.toml b/examples/autonat/Cargo.toml index 184d6af729f..9dbe1cabd01 100644 --- a/examples/autonat/Cargo.toml +++ b/examples/autonat/Cargo.toml @@ -5,9 +5,16 @@ edition = "2021" publish = false license = "MIT" +[package.metadata.release] +release = false + [dependencies] -async-std = { version = "1.12", features = ["attributes"] } -clap = { version = "4.2.7", features = ["derive"] } -env_logger = "0.10.0" -futures = "0.3.28" -libp2p = { path = "../../libp2p", features = ["async-std", "tcp", "noise", "yamux", "autonat", "identify", "macros"] } +tokio = { version = "1.35", features = ["full"] } +clap = { version = "4.4.11", features = ["derive"] } +futures = "0.3.30" +libp2p = { path = "../../libp2p", features = ["tokio", "tcp", "noise", "yamux", "autonat", "identify", "macros"] } +tracing = "0.1.37" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } + +[lints] +workspace = true diff --git a/examples/autonat/README.md b/examples/autonat/README.md new file mode 100644 index 00000000000..60827bbcf3e --- /dev/null +++ b/examples/autonat/README.md @@ -0,0 +1,41 @@ +## Description + +This example consists of a client and a server, which demonstrate the usage of the AutoNAT and identify protocols in **libp2p**. 
+ +## Usage + +### Client + +The client-side part of the example showcases the combination of the AutoNAT and identify protocols. +The identify protocol allows the local peer to determine its external addresses, which are then included in AutoNAT dial-back requests sent to the server. + +To run the client example, follow these steps: + +1. Start the server by following the instructions provided in the `examples/server` directory. + +2. Open a new terminal. + +3. Run the following command in the terminal: + ```sh + cargo run --bin autonat_client -- --server-address --server-peer-id --listen-port + ``` + Note: The `--listen-port` parameter is optional and allows you to specify a fixed port at which the local client should listen. + +### Server + +The server-side example demonstrates a basic AutoNAT server that supports the autonat and identify protocols. + +To start the server, follow these steps: + +1. Open a terminal. + +2. Run the following command: + ```sh + cargo run --bin autonat_server -- --listen-port + ``` + Note: The `--listen-port` parameter is optional and allows you to set a fixed port at which the local peer should listen. + +## Conclusion + +By combining the AutoNAT and identify protocols, the example showcases the establishment of direct connections between peers and the exchange of external address information. +Users can explore the provided client and server code to gain insights into the implementation details and functionality of **libp2p**. diff --git a/examples/autonat/src/bin/autonat_client.rs b/examples/autonat/src/bin/autonat_client.rs index bc0c0521af8..3fb25aa6222 100644 --- a/examples/autonat/src/bin/autonat_client.rs +++ b/examples/autonat/src/bin/autonat_client.rs @@ -18,26 +18,18 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -//! Basic example that combines the AutoNAT and identify protocols. -//! -//! 
The identify protocol informs the local peer of its external addresses, that are then send in AutoNAT dial-back -//! requests to the server. -//! -//! To run this example, follow the instructions in `examples/server` to start a server, then run in a new terminal: -//! ```sh -//! cargo run --bin autonat_client -- --server-address --server-peer-id --listen-port -//! ``` -//! The `listen-port` parameter is optional and allows to set a fixed port at which the local client should listen. +#![doc = include_str!("../../README.md")] use clap::Parser; -use futures::prelude::*; +use futures::StreamExt; use libp2p::core::multiaddr::Protocol; -use libp2p::core::{upgrade::Version, Multiaddr, Transport}; -use libp2p::swarm::{NetworkBehaviour, SwarmBuilder, SwarmEvent}; +use libp2p::core::Multiaddr; +use libp2p::swarm::{NetworkBehaviour, SwarmEvent}; use libp2p::{autonat, identify, identity, noise, tcp, yamux, PeerId}; use std::error::Error; use std::net::Ipv4Addr; use std::time::Duration; +use tracing_subscriber::EnvFilter; #[derive(Debug, Parser)] #[clap(name = "libp2p autonat")] @@ -52,26 +44,25 @@ struct Opt { server_peer_id: PeerId, } -#[async_std::main] +#[tokio::main] async fn main() -> Result<(), Box> { - env_logger::init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let opt = Opt::parse(); - let local_key = identity::Keypair::generate_ed25519(); - let local_peer_id = PeerId::from(local_key.public()); - println!("Local peer id: {local_peer_id:?}"); + let mut swarm = libp2p::SwarmBuilder::with_new_identity() + .with_tokio() + .with_tcp( + tcp::Config::default(), + noise::Config::new, + yamux::Config::default, + )? + .with_behaviour(|key| Behaviour::new(key.public()))? + .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(60))) + .build(); - let transport = tcp::async_io::Transport::default() - .upgrade(Version::V1Lazy) - .authenticate(noise::Config::new(&local_key)?) 
- .multiplex(yamux::Config::default()) - .boxed(); - - let behaviour = Behaviour::new(local_key.public()); - - let mut swarm = - SwarmBuilder::with_async_std_executor(transport, behaviour, local_peer_id).build(); swarm.listen_on( Multiaddr::empty() .with(Protocol::Ip4(Ipv4Addr::UNSPECIFIED)) diff --git a/examples/autonat/src/bin/autonat_server.rs b/examples/autonat/src/bin/autonat_server.rs index 7177a5bf840..44a53f0d17f 100644 --- a/examples/autonat/src/bin/autonat_server.rs +++ b/examples/autonat/src/bin/autonat_server.rs @@ -18,21 +18,17 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -//! Basic example for a AutoNAT server that supports the /libp2p/autonat/1.0.0 and "/ipfs/0.1.0" protocols. -//! -//! To start the server run: -//! ```sh -//! cargo run --bin autonat_server -- --listen-port -//! ``` -//! The `listen-port` parameter is optional and allows to set a fixed port at which the local peer should listen. +#![doc = include_str!("../../README.md")] use clap::Parser; -use futures::prelude::*; -use libp2p::core::{multiaddr::Protocol, upgrade::Version, Multiaddr, Transport}; -use libp2p::swarm::{NetworkBehaviour, SwarmBuilder, SwarmEvent}; -use libp2p::{autonat, identify, identity, noise, tcp, yamux, PeerId}; +use futures::StreamExt; +use libp2p::core::{multiaddr::Protocol, Multiaddr}; +use libp2p::swarm::{NetworkBehaviour, SwarmEvent}; +use libp2p::{autonat, identify, identity, noise, tcp, yamux}; use std::error::Error; use std::net::Ipv4Addr; +use std::time::Duration; +use tracing_subscriber::EnvFilter; #[derive(Debug, Parser)] #[clap(name = "libp2p autonat")] @@ -41,26 +37,25 @@ struct Opt { listen_port: Option, } -#[async_std::main] +#[tokio::main] async fn main() -> Result<(), Box> { - env_logger::init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let opt = Opt::parse(); - let local_key = identity::Keypair::generate_ed25519(); - let 
local_peer_id = PeerId::from(local_key.public()); - println!("Local peer id: {local_peer_id:?}"); + let mut swarm = libp2p::SwarmBuilder::with_new_identity() + .with_tokio() + .with_tcp( + tcp::Config::default(), + noise::Config::new, + yamux::Config::default, + )? + .with_behaviour(|key| Behaviour::new(key.public()))? + .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(60))) + .build(); - let transport = tcp::async_io::Transport::default() - .upgrade(Version::V1Lazy) - .authenticate(noise::Config::new(&local_key)?) - .multiplex(yamux::Config::default()) - .boxed(); - - let behaviour = Behaviour::new(local_key.public()); - - let mut swarm = - SwarmBuilder::with_async_std_executor(transport, behaviour, local_peer_id).build(); swarm.listen_on( Multiaddr::empty() .with(Protocol::Ip4(Ipv4Addr::UNSPECIFIED)) diff --git a/examples/browser-webrtc/Cargo.toml b/examples/browser-webrtc/Cargo.toml new file mode 100644 index 00000000000..0587f23e234 --- /dev/null +++ b/examples/browser-webrtc/Cargo.toml @@ -0,0 +1,46 @@ +[package] +authors = ["Doug Anderson "] +description = "Example use of the WebRTC transport in a browser wasm environment" +edition = "2021" +license = "MIT" +name = "browser-webrtc-example" +publish = false +repository = "https://github.com/libp2p/rust-libp2p" +rust-version = { workspace = true } +version = "0.1.0" + +[package.metadata.release] +release = false + +[lib] +crate-type = ["cdylib"] + +[dependencies] +anyhow = "1.0.76" +futures = "0.3.30" +rand = "0.8" +tracing = "0.1.37" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } + +[target.'cfg(not(target_arch = "wasm32"))'.dependencies] +axum = "0.7.1" +libp2p = { path = "../../libp2p", features = [ "ed25519", "macros", "ping", "tokio"] } +libp2p-webrtc = { workspace = true, features = ["tokio"] } +rust-embed = { version = "8.1.0", features = ["include-exclude", "interpolate-folder-path"] } +tokio = { version = "1.35", features = ["macros", "net", "rt", 
"signal"] } +tokio-util = { version = "0.7", features = ["compat"] } +tower = "0.4" +tower-http = { version = "0.5.0", features = ["cors"] } +mime_guess = "2.0.4" + +[target.'cfg(target_arch = "wasm32")'.dependencies] +js-sys = "0.3.66" +libp2p = { path = "../../libp2p", features = [ "ed25519", "macros", "ping", "wasm-bindgen"] } +libp2p-webrtc-websys = { workspace = true } +tracing-wasm = "0.2.1" +wasm-bindgen = "0.2.89" +wasm-bindgen-futures = "0.4.39" +web-sys = { version = "0.3", features = ['Document', 'Element', 'HtmlElement', 'Node', 'Response', 'Window'] } + +[lints] +workspace = true diff --git a/examples/browser-webrtc/README.md b/examples/browser-webrtc/README.md new file mode 100644 index 00000000000..eec2c9c0494 --- /dev/null +++ b/examples/browser-webrtc/README.md @@ -0,0 +1,20 @@ +# Rust-libp2p Browser-Server WebRTC Example + +This example demonstrates how to use the `libp2p-webrtc-websys` transport library in a browser to ping the WebRTC Server. +It uses [wasm-pack](https://rustwasm.github.io/docs/wasm-pack/) to build the project for use in the browser. + +## Running the example + +Ensure you have `wasm-pack` [installed](https://rustwasm.github.io/wasm-pack/). + +1. Build the client library: +```shell +wasm-pack build --target web --out-dir static +``` + +2. Start the server: +```shell +cargo run +``` + +3. 
Open the URL printed in the terminal diff --git a/examples/browser-webrtc/src/lib.rs b/examples/browser-webrtc/src/lib.rs new file mode 100644 index 00000000000..9499ccbd158 --- /dev/null +++ b/examples/browser-webrtc/src/lib.rs @@ -0,0 +1,110 @@ +#![cfg(target_arch = "wasm32")] + +use futures::StreamExt; +use js_sys::Date; +use libp2p::core::Multiaddr; +use libp2p::ping; +use libp2p::swarm::SwarmEvent; +use libp2p_webrtc_websys as webrtc_websys; +use std::io; +use std::time::Duration; +use wasm_bindgen::prelude::*; +use web_sys::{Document, HtmlElement}; + +#[wasm_bindgen] +pub async fn run(libp2p_endpoint: String) -> Result<(), JsError> { + tracing_wasm::set_as_global_default(); + + let ping_duration = Duration::from_secs(30); + + let body = Body::from_current_window()?; + body.append_p(&format!( + "Let's ping the rust-libp2p server over WebRTC for {:?}:", + ping_duration + ))?; + + let mut swarm = libp2p::SwarmBuilder::with_new_identity() + .with_wasm_bindgen() + .with_other_transport(|key| { + webrtc_websys::Transport::new(webrtc_websys::Config::new(&key)) + })? + .with_behaviour(|_| ping::Behaviour::new(ping::Config::new()))? + .with_swarm_config(|c| c.with_idle_connection_timeout(ping_duration)) + .build(); + + let addr = libp2p_endpoint.parse::()?; + tracing::info!("Dialing {addr}"); + swarm.dial(addr)?; + + loop { + match swarm.next().await.unwrap() { + SwarmEvent::Behaviour(ping::Event { result: Err(e), .. }) => { + tracing::error!("Ping failed: {:?}", e); + + break; + } + SwarmEvent::Behaviour(ping::Event { + peer, + result: Ok(rtt), + .. + }) => { + tracing::info!("Ping successful: RTT: {rtt:?}, from {peer}"); + body.append_p(&format!("RTT: {rtt:?} at {}", Date::new_0().to_string()))?; + } + SwarmEvent::ConnectionClosed { + cause: Some(cause), .. + } => { + tracing::info!("Swarm event: {:?}", cause); + + if let libp2p::swarm::ConnectionError::KeepAliveTimeout = cause { + body.append_p("All done with pinging! 
")?; + + break; + } + body.append_p(&format!("Connection closed due to: {:?}", cause))?; + } + evt => tracing::info!("Swarm event: {:?}", evt), + } + } + + Ok(()) +} + +/// Convenience wrapper around the current document body +struct Body { + body: HtmlElement, + document: Document, +} + +impl Body { + fn from_current_window() -> Result { + // Use `web_sys`'s global `window` function to get a handle on the global + // window object. + let document = web_sys::window() + .ok_or(js_error("no global `window` exists"))? + .document() + .ok_or(js_error("should have a document on window"))?; + let body = document + .body() + .ok_or(js_error("document should have a body"))?; + + Ok(Self { body, document }) + } + + fn append_p(&self, msg: &str) -> Result<(), JsError> { + let val = self + .document + .create_element("p") + .map_err(|_| js_error("failed to create

"))?; + val.set_text_content(Some(msg)); + self.body + .append_child(&val) + .map_err(|_| js_error("failed to append

"))?; + + Ok(()) + } +} + +fn js_error(msg: &str) -> JsError { + io::Error::new(io::ErrorKind::Other, msg).into() +} diff --git a/examples/browser-webrtc/src/main.rs b/examples/browser-webrtc/src/main.rs new file mode 100644 index 00000000000..7f06b0d0d99 --- /dev/null +++ b/examples/browser-webrtc/src/main.rs @@ -0,0 +1,155 @@ +#![allow(non_upper_case_globals)] + +use anyhow::Result; +use axum::extract::{Path, State}; +use axum::http::header::CONTENT_TYPE; +use axum::http::StatusCode; +use axum::response::{Html, IntoResponse}; +use axum::{http::Method, routing::get, Router}; +use futures::StreamExt; +use libp2p::{ + core::muxing::StreamMuxerBox, + core::Transport, + multiaddr::{Multiaddr, Protocol}, + ping, + swarm::SwarmEvent, +}; +use libp2p_webrtc as webrtc; +use rand::thread_rng; +use std::net::{Ipv4Addr, SocketAddr}; +use std::time::Duration; +use tokio::net::TcpListener; +use tower_http::cors::{Any, CorsLayer}; + +#[tokio::main] +async fn main() -> anyhow::Result<()> { + let _ = tracing_subscriber::fmt() + .with_env_filter("browser_webrtc_example=debug,libp2p_webrtc=info,libp2p_ping=debug") + .try_init(); + + let mut swarm = libp2p::SwarmBuilder::with_new_identity() + .with_tokio() + .with_other_transport(|id_keys| { + Ok(webrtc::tokio::Transport::new( + id_keys.clone(), + webrtc::tokio::Certificate::generate(&mut thread_rng())?, + ) + .map(|(peer_id, conn), _| (peer_id, StreamMuxerBox::new(conn)))) + })? + .with_behaviour(|_| ping::Behaviour::default())? + .with_swarm_config(|cfg| { + cfg.with_idle_connection_timeout( + Duration::from_secs(u64::MAX), // Allows us to observe the pings. + ) + }) + .build(); + + let address_webrtc = Multiaddr::from(Ipv4Addr::UNSPECIFIED) + .with(Protocol::Udp(0)) + .with(Protocol::WebRTCDirect); + + swarm.listen_on(address_webrtc.clone())?; + + let address = loop { + if let SwarmEvent::NewListenAddr { address, .. 
} = swarm.select_next_some().await { + if address + .iter() + .any(|e| e == Protocol::Ip4(Ipv4Addr::LOCALHOST)) + { + tracing::debug!( + "Ignoring localhost address to make sure the example works in Firefox" + ); + continue; + } + + tracing::info!(%address, "Listening"); + + break address; + } + }; + + let addr = address.with(Protocol::P2p(*swarm.local_peer_id())); + + // Serve .wasm, .js and server multiaddress over HTTP on this address. + tokio::spawn(serve(addr)); + + loop { + tokio::select! { + swarm_event = swarm.next() => { + tracing::trace!(?swarm_event) + }, + _ = tokio::signal::ctrl_c() => { + break; + } + } + } + + Ok(()) +} + +#[derive(rust_embed::RustEmbed)] +#[folder = "$CARGO_MANIFEST_DIR/static"] +struct StaticFiles; + +/// Serve the Multiaddr we are listening on and the host files. +pub(crate) async fn serve(libp2p_transport: Multiaddr) { + let Some(Protocol::Ip4(listen_addr)) = libp2p_transport.iter().next() else { + panic!("Expected 1st protocol to be IP4") + }; + + let server = Router::new() + .route("/", get(get_index)) + .route("/index.html", get(get_index)) + .route("/:path", get(get_static_file)) + .with_state(Libp2pEndpoint(libp2p_transport)) + .layer( + // allow cors + CorsLayer::new() + .allow_origin(Any) + .allow_methods([Method::GET]), + ); + + let addr = SocketAddr::new(listen_addr.into(), 8080); + + tracing::info!(url=%format!("http://{addr}"), "Serving client files at url"); + + axum::serve( + TcpListener::bind((listen_addr, 8080)).await.unwrap(), + server.into_make_service(), + ) + .await + .unwrap(); +} + +#[derive(Clone)] +struct Libp2pEndpoint(Multiaddr); + +/// Serves the index.html file for our client. +/// +/// Our server listens on a random UDP port for the WebRTC transport. +/// To allow the client to connect, we replace the `__LIBP2P_ENDPOINT__` placeholder with the actual address. 
+async fn get_index( + State(Libp2pEndpoint(libp2p_endpoint)): State, +) -> Result, StatusCode> { + let content = StaticFiles::get("index.html") + .ok_or(StatusCode::NOT_FOUND)? + .data; + + let html = std::str::from_utf8(&content) + .expect("index.html to be valid utf8") + .replace("__LIBP2P_ENDPOINT__", &libp2p_endpoint.to_string()); + + Ok(Html(html)) +} + +/// Serves the static files generated by `wasm-pack`. +async fn get_static_file(Path(path): Path) -> Result { + tracing::debug!(file_path=%path, "Serving static file"); + + let content = StaticFiles::get(&path).ok_or(StatusCode::NOT_FOUND)?.data; + let content_type = mime_guess::from_path(path) + .first_or_octet_stream() + .to_string(); + + Ok(([(CONTENT_TYPE, content_type)], content)) +} diff --git a/examples/browser-webrtc/static/index.html b/examples/browser-webrtc/static/index.html new file mode 100644 index 00000000000..a5a26310e3f --- /dev/null +++ b/examples/browser-webrtc/static/index.html @@ -0,0 +1,23 @@ + + + + + + + +

+

Rust Libp2p Demo!

+
+ + + + diff --git a/examples/chat-example/Cargo.toml b/examples/chat-example/Cargo.toml deleted file mode 100644 index 1d865711ea8..00000000000 --- a/examples/chat-example/Cargo.toml +++ /dev/null @@ -1,14 +0,0 @@ -[package] -name = "chat-example" -version = "0.1.0" -edition = "2021" -publish = false -license = "MIT" - -[dependencies] -async-std = { version = "1.12", features = ["attributes"] } -async-trait = "0.1" -env_logger = "0.10.0" -futures = "0.3.28" -libp2p = { path = "../../libp2p", features = ["async-std", "gossipsub", "mdns", "noise", "macros", "tcp", "yamux"] } -libp2p-quic = { path = "../../transports/quic", features = ["async-std"] } diff --git a/examples/chat-example/src/main.rs b/examples/chat-example/src/main.rs deleted file mode 100644 index 2c038724c37..00000000000 --- a/examples/chat-example/src/main.rs +++ /dev/null @@ -1,172 +0,0 @@ -// Copyright 2018 Parity Technologies (UK) Ltd. -// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - -//! A basic chat application with logs demonstrating libp2p and the gossipsub protocol -//! combined with mDNS for the discovery of peers to gossip with. -//! -//! Using two terminal windows, start two instances, typing the following in each: -//! -//! ```sh -//! cargo run -//! ``` -//! -//! Mutual mDNS discovery may take a few seconds. When each peer does discover the other -//! it will print a message like: -//! -//! ```sh -//! mDNS discovered a new peer: {peerId} -//! ``` -//! -//! Type a message and hit return: the message is sent and printed in the other terminal. -//! Close with Ctrl-c. -//! -//! You can open more terminal windows and add more peers using the same line above. -//! -//! Once an additional peer is mDNS discovered it can participate in the conversation -//! and all peers will receive messages sent from it. -//! -//! If a participant exits (Control-C or otherwise) the other peers will receive an mDNS expired -//! event and remove the expired peer from the list of known peers. - -use async_std::io; -use futures::{future::Either, prelude::*, select}; -use libp2p::{ - core::{muxing::StreamMuxerBox, transport::OrTransport, upgrade}, - gossipsub, identity, mdns, noise, - swarm::NetworkBehaviour, - swarm::{SwarmBuilder, SwarmEvent}, - tcp, yamux, PeerId, Transport, -}; -use libp2p_quic as quic; -use std::collections::hash_map::DefaultHasher; -use std::error::Error; -use std::hash::{Hash, Hasher}; -use std::time::Duration; - -// We create a custom network behaviour that combines Gossipsub and Mdns. 
-#[derive(NetworkBehaviour)] -struct MyBehaviour { - gossipsub: gossipsub::Behaviour, - mdns: mdns::async_io::Behaviour, -} - -#[async_std::main] -async fn main() -> Result<(), Box> { - // Create a random PeerId - let id_keys = identity::Keypair::generate_ed25519(); - let local_peer_id = PeerId::from(id_keys.public()); - println!("Local peer id: {local_peer_id}"); - - // Set up an encrypted DNS-enabled TCP Transport over the Mplex protocol. - let tcp_transport = tcp::async_io::Transport::new(tcp::Config::default().nodelay(true)) - .upgrade(upgrade::Version::V1Lazy) - .authenticate(noise::Config::new(&id_keys).expect("signing libp2p-noise static keypair")) - .multiplex(yamux::Config::default()) - .timeout(std::time::Duration::from_secs(20)) - .boxed(); - let quic_transport = quic::async_std::Transport::new(quic::Config::new(&id_keys)); - let transport = OrTransport::new(quic_transport, tcp_transport) - .map(|either_output, _| match either_output { - Either::Left((peer_id, muxer)) => (peer_id, StreamMuxerBox::new(muxer)), - Either::Right((peer_id, muxer)) => (peer_id, StreamMuxerBox::new(muxer)), - }) - .boxed(); - - // To content-address message, we can take the hash of message and use it as an ID. - let message_id_fn = |message: &gossipsub::Message| { - let mut s = DefaultHasher::new(); - message.data.hash(&mut s); - gossipsub::MessageId::from(s.finish().to_string()) - }; - - // Set a custom gossipsub configuration - let gossipsub_config = gossipsub::ConfigBuilder::default() - .heartbeat_interval(Duration::from_secs(10)) // This is set to aid debugging by not cluttering the log space - .validation_mode(gossipsub::ValidationMode::Strict) // This sets the kind of message validation. The default is Strict (enforce message signing) - .message_id_fn(message_id_fn) // content-address messages. No two messages of the same content will be propagated. 
- .build() - .expect("Valid config"); - - // build a gossipsub network behaviour - let mut gossipsub = gossipsub::Behaviour::new( - gossipsub::MessageAuthenticity::Signed(id_keys), - gossipsub_config, - ) - .expect("Correct configuration"); - // Create a Gossipsub topic - let topic = gossipsub::IdentTopic::new("test-net"); - // subscribes to our topic - gossipsub.subscribe(&topic)?; - - // Create a Swarm to manage peers and events - let mut swarm = { - let mdns = mdns::async_io::Behaviour::new(mdns::Config::default(), local_peer_id)?; - let behaviour = MyBehaviour { gossipsub, mdns }; - SwarmBuilder::with_async_std_executor(transport, behaviour, local_peer_id).build() - }; - - // Read full lines from stdin - let mut stdin = io::BufReader::new(io::stdin()).lines().fuse(); - - // Listen on all interfaces and whatever port the OS assigns - swarm.listen_on("/ip4/0.0.0.0/udp/0/quic-v1".parse()?)?; - swarm.listen_on("/ip4/0.0.0.0/tcp/0".parse()?)?; - - println!("Enter messages via STDIN and they will be sent to connected peers using Gossipsub"); - - // Kick it off - loop { - select! 
{ - line = stdin.select_next_some() => { - if let Err(e) = swarm - .behaviour_mut().gossipsub - .publish(topic.clone(), line.expect("Stdin not to close").as_bytes()) { - println!("Publish error: {e:?}"); - } - }, - event = swarm.select_next_some() => match event { - SwarmEvent::Behaviour(MyBehaviourEvent::Mdns(mdns::Event::Discovered(list))) => { - for (peer_id, _multiaddr) in list { - println!("mDNS discovered a new peer: {peer_id}"); - swarm.behaviour_mut().gossipsub.add_explicit_peer(&peer_id); - } - }, - SwarmEvent::Behaviour(MyBehaviourEvent::Mdns(mdns::Event::Expired(list))) => { - for (peer_id, _multiaddr) in list { - println!("mDNS discover peer has expired: {peer_id}"); - swarm.behaviour_mut().gossipsub.remove_explicit_peer(&peer_id); - } - }, - SwarmEvent::Behaviour(MyBehaviourEvent::Gossipsub(gossipsub::Event::Message { - propagation_source: peer_id, - message_id: id, - message, - })) => println!( - "Got message: '{}' with id: {id} from peer: {peer_id}", - String::from_utf8_lossy(&message.data), - ), - SwarmEvent::NewListenAddr { address, .. 
} => { - println!("Local node is listening on {address}"); - } - _ => {} - } - } - } -} diff --git a/examples/chat/Cargo.toml b/examples/chat/Cargo.toml new file mode 100644 index 00000000000..a8349344c03 --- /dev/null +++ b/examples/chat/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "chat-example" +version = "0.1.0" +edition = "2021" +publish = false +license = "MIT" + +[package.metadata.release] +release = false + +[dependencies] +tokio = { version = "1.35", features = ["full"] } +async-trait = "0.1" +futures = "0.3.30" +libp2p = { path = "../../libp2p", features = [ "tokio", "gossipsub", "mdns", "noise", "macros", "tcp", "yamux", "quic"] } +tracing = "0.1.37" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } + +[lints] +workspace = true diff --git a/examples/chat/README.md b/examples/chat/README.md new file mode 100644 index 00000000000..96bad137b07 --- /dev/null +++ b/examples/chat/README.md @@ -0,0 +1,30 @@ +## Description + +A basic chat application with logs demonstrating libp2p and the gossipsub protocol combined with mDNS for the discovery of peers to gossip with. +It showcases how peers can connect, discover each other using mDNS, and engage in real-time chat sessions. + +## Usage + +1. Using two terminal windows, start two instances, typing the following in each: + ```sh + cargo run + ``` + +2. Mutual mDNS discovery may take a few seconds. When each peer does discover the other +it will print a message like: + ```sh + mDNS discovered a new peer: {peerId} + ``` + +3. Type a message and hit return: the message is sent and printed in the other terminal. + +4. Close with `Ctrl-c`. You can open more terminal windows and add more peers using the same line above. + +When a new peer is discovered through mDNS, it can join the conversation, and all peers will receive messages sent by that peer. 
+If a participant exits the application using `Ctrl-c` or any other method, the remaining peers will receive an mDNS expired event and remove the expired peer from their list of known peers. + +## Conclusion + +This chat application demonstrates the usage of **libp2p** and the gossipsub protocol for building a decentralized chat system. +By leveraging mDNS for peer discovery, users can easily connect with other peers and engage in real-time conversations. +The example provides a starting point for developing more sophisticated chat applications using **libp2p** and exploring the capabilities of decentralized communication. diff --git a/examples/chat/src/main.rs b/examples/chat/src/main.rs new file mode 100644 index 00000000000..c785d301c2f --- /dev/null +++ b/examples/chat/src/main.rs @@ -0,0 +1,134 @@ +// Copyright 2018 Parity Technologies (UK) Ltd. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. 
+ +#![doc = include_str!("../README.md")] + +use futures::stream::StreamExt; +use libp2p::{gossipsub, mdns, noise, swarm::NetworkBehaviour, swarm::SwarmEvent, tcp, yamux}; +use std::collections::hash_map::DefaultHasher; +use std::error::Error; +use std::hash::{Hash, Hasher}; +use std::time::Duration; +use tokio::{io, io::AsyncBufReadExt, select}; +use tracing_subscriber::EnvFilter; + +// We create a custom network behaviour that combines Gossipsub and Mdns. +#[derive(NetworkBehaviour)] +struct MyBehaviour { + gossipsub: gossipsub::Behaviour, + mdns: mdns::tokio::Behaviour, +} + +#[tokio::main] +async fn main() -> Result<(), Box> { + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); + + let mut swarm = libp2p::SwarmBuilder::with_new_identity() + .with_tokio() + .with_tcp( + tcp::Config::default(), + noise::Config::new, + yamux::Config::default, + )? + .with_quic() + .with_behaviour(|key| { + // To content-address message, we can take the hash of message and use it as an ID. + let message_id_fn = |message: &gossipsub::Message| { + let mut s = DefaultHasher::new(); + message.data.hash(&mut s); + gossipsub::MessageId::from(s.finish().to_string()) + }; + + // Set a custom gossipsub configuration + let gossipsub_config = gossipsub::ConfigBuilder::default() + .heartbeat_interval(Duration::from_secs(10)) // This is set to aid debugging by not cluttering the log space + .validation_mode(gossipsub::ValidationMode::Strict) // This sets the kind of message validation. The default is Strict (enforce message signing) + .message_id_fn(message_id_fn) // content-address messages. No two messages of the same content will be propagated. + .build() + .map_err(|msg| io::Error::new(io::ErrorKind::Other, msg))?; // Temporary hack because `build` does not return a proper `std::error::Error`. 
+ + // build a gossipsub network behaviour + let gossipsub = gossipsub::Behaviour::new( + gossipsub::MessageAuthenticity::Signed(key.clone()), + gossipsub_config, + )?; + + let mdns = + mdns::tokio::Behaviour::new(mdns::Config::default(), key.public().to_peer_id())?; + Ok(MyBehaviour { gossipsub, mdns }) + })? + .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(60))) + .build(); + + // Create a Gossipsub topic + let topic = gossipsub::IdentTopic::new("test-net"); + // subscribes to our topic + swarm.behaviour_mut().gossipsub.subscribe(&topic)?; + + // Read full lines from stdin + let mut stdin = io::BufReader::new(io::stdin()).lines(); + + // Listen on all interfaces and whatever port the OS assigns + swarm.listen_on("/ip4/0.0.0.0/udp/0/quic-v1".parse()?)?; + swarm.listen_on("/ip4/0.0.0.0/tcp/0".parse()?)?; + + println!("Enter messages via STDIN and they will be sent to connected peers using Gossipsub"); + + // Kick it off + loop { + select! { + Ok(Some(line)) = stdin.next_line() => { + if let Err(e) = swarm + .behaviour_mut().gossipsub + .publish(topic.clone(), line.as_bytes()) { + println!("Publish error: {e:?}"); + } + } + event = swarm.select_next_some() => match event { + SwarmEvent::Behaviour(MyBehaviourEvent::Mdns(mdns::Event::Discovered(list))) => { + for (peer_id, _multiaddr) in list { + println!("mDNS discovered a new peer: {peer_id}"); + swarm.behaviour_mut().gossipsub.add_explicit_peer(&peer_id); + } + }, + SwarmEvent::Behaviour(MyBehaviourEvent::Mdns(mdns::Event::Expired(list))) => { + for (peer_id, _multiaddr) in list { + println!("mDNS discover peer has expired: {peer_id}"); + swarm.behaviour_mut().gossipsub.remove_explicit_peer(&peer_id); + } + }, + SwarmEvent::Behaviour(MyBehaviourEvent::Gossipsub(gossipsub::Event::Message { + propagation_source: peer_id, + message_id: id, + message, + })) => println!( + "Got message: '{}' with id: {id} from peer: {peer_id}", + String::from_utf8_lossy(&message.data), + ), + 
SwarmEvent::NewListenAddr { address, .. } => { + println!("Local node is listening on {address}"); + } + _ => {} + } + } + } +} diff --git a/examples/dcutr/Cargo.toml b/examples/dcutr/Cargo.toml index ea3b8fb2a9d..8bbc02debe1 100644 --- a/examples/dcutr/Cargo.toml +++ b/examples/dcutr/Cargo.toml @@ -1,14 +1,22 @@ [package] -name = "dcutr" +name = "dcutr-example" version = "0.1.0" edition = "2021" publish = false license = "MIT" +[package.metadata.release] +release = false + [dependencies] -clap = { version = "4.2.7", features = ["derive"] } -env_logger = "0.10.0" -futures = "0.3.28" +clap = { version = "4.4.11", features = ["derive"] } +futures = "0.3.30" futures-timer = "3.0" -libp2p = { path = "../../libp2p", features = ["async-std", "dns", "dcutr", "identify", "macros", "noise", "ping", "relay", "rendezvous", "tcp", "tokio", "yamux"] } +libp2p = { path = "../../libp2p", features = [ "dns", "dcutr", "identify", "macros", "noise", "ping", "quic", "relay", "rendezvous", "tcp", "tokio", "yamux"] } log = "0.4" +tokio = { version = "1.35", features = ["macros", "net", "rt", "signal"] } +tracing = "0.1.37" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } + +[lints] +workspace = true diff --git a/examples/dcutr/README.md b/examples/dcutr/README.md new file mode 100644 index 00000000000..5c7a9c38f82 --- /dev/null +++ b/examples/dcutr/README.md @@ -0,0 +1,35 @@ +## Description + +The "Direct Connection Upgrade through Relay" (DCUTR) protocol allows peers in a peer-to-peer network to establish direct connections with each other. +In other words, DCUTR is libp2p's version of hole-punching. +This example provides a basic usage of this protocol in **libp2p**. + +## Usage + +To run the example, follow these steps: + +1. Run the example using Cargo: + ```sh + cargo run -- + ``` + Replace `` with specific options (you can use the `--help` command to see the available options). 
+ +### Example usage + +- Example usage in client-listen mode: + ```sh + cargo run -- --mode listen --secret-key-seed 42 --relay-address /ip4/$RELAY_IP/tcp/$PORT/p2p/$RELAY_PEERID + ``` + +- Example usage in client-dial mode: + ```sh + cargo run -- --mode dial --secret-key-seed 42 --relay-address /ip4/$RELAY_IP/tcp/$PORT/p2p/$RELAY_PEERID --remote-peer-id + ``` + +For this example to work, it is also necessary to turn on a relay server (you will find the related instructions in the example in the `examples/relay-server` folder). + +## Conclusion + +The DCUTR protocol offers a solution for achieving direct connectivity between peers in a peer-to-peer network. +By utilizing hole punching and eliminating the need for signaling servers, the protocol allows peers behind NATs to establish direct connections. +This example provides instructions on running an example implementation of the protocol, allowing users to explore its functionality and benefits. diff --git a/examples/dcutr/src/main.rs b/examples/dcutr/src/main.rs index 0a3f24088eb..c6c513208e7 100644 --- a/examples/dcutr/src/main.rs +++ b/examples/dcutr/src/main.rs @@ -18,28 +18,19 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
+#![doc = include_str!("../README.md")] + use clap::Parser; -use futures::{ - executor::{block_on, ThreadPool}, - future::FutureExt, - stream::StreamExt, -}; +use futures::{executor::block_on, future::FutureExt, stream::StreamExt}; use libp2p::{ - core::{ - multiaddr::{Multiaddr, Protocol}, - transport::{OrTransport, Transport}, - upgrade, - }, - dcutr, - dns::DnsConfig, - identify, identity, noise, ping, relay, - swarm::{NetworkBehaviour, SwarmBuilder, SwarmEvent}, + core::multiaddr::{Multiaddr, Protocol}, + dcutr, identify, identity, noise, ping, relay, + swarm::{NetworkBehaviour, SwarmEvent}, tcp, yamux, PeerId, }; -use log::info; -use std::error::Error; -use std::net::Ipv4Addr; use std::str::FromStr; +use std::{error::Error, time::Duration}; +use tracing_subscriber::EnvFilter; #[derive(Debug, Parser)] #[clap(name = "libp2p DCUtR client")] @@ -78,33 +69,15 @@ impl FromStr for Mode { } } -fn main() -> Result<(), Box> { - env_logger::init(); +#[tokio::main] +async fn main() -> Result<(), Box> { + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let opts = Opts::parse(); - let local_key = generate_ed25519(opts.secret_key_seed); - let local_peer_id = PeerId::from(local_key.public()); - info!("Local peer id: {:?}", local_peer_id); - - let (relay_transport, client) = relay::client::new(local_peer_id); - - let transport = OrTransport::new( - relay_transport, - block_on(DnsConfig::system(tcp::async_io::Transport::new( - tcp::Config::default().port_reuse(true), - ))) - .unwrap(), - ) - .upgrade(upgrade::Version::V1Lazy) - .authenticate( - noise::Config::new(&local_key).expect("Signing libp2p-noise static DH keypair failed."), - ) - .multiplex(yamux::Config::default()) - .boxed(); - #[derive(NetworkBehaviour)] - #[behaviour(out_event = "Event", event_process = false)] struct Behaviour { relay_client: relay::client::Behaviour, ping: ping::Behaviour, @@ -112,61 +85,34 @@ fn main() -> Result<(), Box> { dcutr: 
dcutr::Behaviour, } - #[derive(Debug)] - #[allow(clippy::large_enum_variant)] - enum Event { - Ping(ping::Event), - Identify(identify::Event), - Relay(relay::client::Event), - Dcutr(dcutr::Event), - } - - impl From for Event { - fn from(e: ping::Event) -> Self { - Event::Ping(e) - } - } - - impl From for Event { - fn from(e: identify::Event) -> Self { - Event::Identify(e) - } - } - - impl From for Event { - fn from(e: relay::client::Event) -> Self { - Event::Relay(e) - } - } - - impl From for Event { - fn from(e: dcutr::Event) -> Self { - Event::Dcutr(e) - } - } - - let behaviour = Behaviour { - relay_client: client, - ping: ping::Behaviour::new(ping::Config::new()), - identify: identify::Behaviour::new(identify::Config::new( - "/TODO/0.0.1".to_string(), - local_key.public(), - )), - dcutr: dcutr::Behaviour::new(local_peer_id), - }; - - let mut swarm = match ThreadPool::new() { - Ok(tp) => SwarmBuilder::with_executor(transport, behaviour, local_peer_id, tp), - Err(_) => SwarmBuilder::without_executor(transport, behaviour, local_peer_id), - } - .build(); + let mut swarm = + libp2p::SwarmBuilder::with_existing_identity(generate_ed25519(opts.secret_key_seed)) + .with_tokio() + .with_tcp( + tcp::Config::default().port_reuse(true).nodelay(true), + noise::Config::new, + yamux::Config::default, + )? + .with_quic() + .with_dns()? + .with_relay_client(noise::Config::new, yamux::Config::default)? + .with_behaviour(|keypair, relay_behaviour| Behaviour { + relay_client: relay_behaviour, + ping: ping::Behaviour::new(ping::Config::new()), + identify: identify::Behaviour::new(identify::Config::new( + "/TODO/0.0.1".to_string(), + keypair.public(), + )), + dcutr: dcutr::Behaviour::new(keypair.public().to_peer_id()), + })? 
+ .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(60))) + .build(); swarm - .listen_on( - Multiaddr::empty() - .with("0.0.0.0".parse::().unwrap().into()) - .with(Protocol::Tcp(0)), - ) + .listen_on("/ip4/0.0.0.0/udp/0/quic-v1".parse().unwrap()) + .unwrap(); + swarm + .listen_on("/ip4/0.0.0.0/tcp/0".parse().unwrap()) .unwrap(); // Wait to listen on all interfaces. @@ -177,7 +123,7 @@ fn main() -> Result<(), Box> { event = swarm.next() => { match event.unwrap() { SwarmEvent::NewListenAddr { address, .. } => { - info!("Listening on {:?}", address); + tracing::info!(%address, "Listening on address"); } event => panic!("{event:?}"), } @@ -202,16 +148,18 @@ fn main() -> Result<(), Box> { SwarmEvent::NewListenAddr { .. } => {} SwarmEvent::Dialing { .. } => {} SwarmEvent::ConnectionEstablished { .. } => {} - SwarmEvent::Behaviour(Event::Ping(_)) => {} - SwarmEvent::Behaviour(Event::Identify(identify::Event::Sent { .. })) => { - info!("Told relay its public address."); + SwarmEvent::Behaviour(BehaviourEvent::Ping(_)) => {} + SwarmEvent::Behaviour(BehaviourEvent::Identify(identify::Event::Sent { + .. + })) => { + tracing::info!("Told relay its public address"); told_relay_observed_addr = true; } - SwarmEvent::Behaviour(Event::Identify(identify::Event::Received { + SwarmEvent::Behaviour(BehaviourEvent::Identify(identify::Event::Received { info: identify::Info { observed_addr, .. }, .. 
})) => { - info!("Relay told us our public address: {:?}", observed_addr); + tracing::info!(address=%observed_addr, "Relay told us our observed address"); learned_observed_addr = true; } event => panic!("{event:?}"), @@ -229,7 +177,7 @@ fn main() -> Result<(), Box> { .dial( opts.relay_address .with(Protocol::P2pCircuit) - .with(Protocol::P2p(opts.remote_peer_id.unwrap().into())), + .with(Protocol::P2p(opts.remote_peer_id.unwrap())), ) .unwrap(); } @@ -244,31 +192,31 @@ fn main() -> Result<(), Box> { loop { match swarm.next().await.unwrap() { SwarmEvent::NewListenAddr { address, .. } => { - info!("Listening on {:?}", address); + tracing::info!(%address, "Listening on address"); } - SwarmEvent::Behaviour(Event::Relay( + SwarmEvent::Behaviour(BehaviourEvent::RelayClient( relay::client::Event::ReservationReqAccepted { .. }, )) => { assert!(opts.mode == Mode::Listen); - info!("Relay accepted our reservation request."); + tracing::info!("Relay accepted our reservation request"); } - SwarmEvent::Behaviour(Event::Relay(event)) => { - info!("{:?}", event) + SwarmEvent::Behaviour(BehaviourEvent::RelayClient(event)) => { + tracing::info!(?event) } - SwarmEvent::Behaviour(Event::Dcutr(event)) => { - info!("{:?}", event) + SwarmEvent::Behaviour(BehaviourEvent::Dcutr(event)) => { + tracing::info!(?event) } - SwarmEvent::Behaviour(Event::Identify(event)) => { - info!("{:?}", event) + SwarmEvent::Behaviour(BehaviourEvent::Identify(event)) => { + tracing::info!(?event) } - SwarmEvent::Behaviour(Event::Ping(_)) => {} + SwarmEvent::Behaviour(BehaviourEvent::Ping(_)) => {} SwarmEvent::ConnectionEstablished { peer_id, endpoint, .. } => { - info!("Established connection to {:?} via {:?}", peer_id, endpoint); + tracing::info!(peer=%peer_id, ?endpoint, "Established new connection"); } - SwarmEvent::OutgoingConnectionError { peer_id, error } => { - info!("Outgoing connection error to {:?}: {:?}", peer_id, error); + SwarmEvent::OutgoingConnectionError { peer_id, error, .. 
} => { + tracing::info!(peer=?peer_id, "Outgoing connection failed: {error}"); } _ => {} } diff --git a/examples/distributed-key-value-store/Cargo.toml b/examples/distributed-key-value-store/Cargo.toml index b690baac2e8..a7efe3c0697 100644 --- a/examples/distributed-key-value-store/Cargo.toml +++ b/examples/distributed-key-value-store/Cargo.toml @@ -1,14 +1,20 @@ [package] -name = "distributed-key-value-store" +name = "distributed-key-value-store-example" version = "0.1.0" edition = "2021" publish = false license = "MIT" +[package.metadata.release] +release = false + [dependencies] async-std = { version = "1.12", features = ["attributes"] } async-trait = "0.1" -env_logger = "0.10" -futures = "0.3.28" -libp2p = { path = "../../libp2p", features = ["async-std", "dns", "kad", "mdns", "noise", "macros", "tcp", "websocket", "yamux"] } -multiaddr = { version = "0.17.1" } \ No newline at end of file +futures = "0.3.30" +libp2p = { path = "../../libp2p", features = [ "async-std", "dns", "kad", "mdns", "noise", "macros", "tcp", "yamux"] } +tracing = "0.1.37" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } + +[lints] +workspace = true diff --git a/examples/distributed-key-value-store/README.md b/examples/distributed-key-value-store/README.md new file mode 100644 index 00000000000..6065609cfdf --- /dev/null +++ b/examples/distributed-key-value-store/README.md @@ -0,0 +1,42 @@ +## Description + +This example showcases a basic distributed key-value store implemented using **libp2p**, along with the mDNS and Kademlia protocols. + +## Usage + +### Key-Value Store + +1. Open two terminal windows, type `cargo run` and press Enter. + +2. In terminal one, type `PUT my-key my-value` and press Enter. +This command will store the value `my-value` with the key `my-key` in the distributed key-value store. + +3. In terminal two, type `GET my-key` and press Enter. +This command will retrieve the value associated with the key `my-key` from the key-value store. + +4. 
To exit, press `Ctrl-c` in each terminal window to gracefully close the instances. + + +### Provider Records + +You can also use provider records instead of key-value records in the distributed store. + +1. Open two terminal windows and start two instances of the key-value store. +If your local network supports mDNS, the instances will automatically connect. + +2. In terminal one, type `PUT_PROVIDER my-key` and press Enter. +This command will register the peer as a provider for the key `my-key` in the distributed key-value store. + +3. In terminal two, type `GET_PROVIDERS my-key` and press Enter. +This command will retrieve the list of providers for the key `my-key` from the key-value store. + +4. To exit, press `Ctrl-c` in each terminal window to gracefully close the instances. + + +Feel free to explore and experiment with the distributed key-value store example, and observe how the data is distributed and retrieved across the network using **libp2p**, mDNS, and the Kademlia protocol. + +## Conclusion + +This example demonstrates the implementation of a basic distributed key-value store using **libp2p**, mDNS, and the Kademlia protocol. +By leveraging these technologies, peers can connect, store, and retrieve key-value pairs in a decentralized manner. +The example provides a starting point for building more advanced distributed systems and exploring the capabilities of **libp2p** and its associated protocols. diff --git a/examples/distributed-key-value-store/src/main.rs b/examples/distributed-key-value-store/src/main.rs index 952e55ba6e7..404333f3d20 100644 --- a/examples/distributed-key-value-store/src/main.rs +++ b/examples/distributed-key-value-store/src/main.rs @@ -18,92 +18,58 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -//! A basic key value store demonstrating libp2p and the mDNS and Kademlia protocols. -//! -//! 1. Using two terminal windows, start two instances. If you local network -//! 
allows mDNS, they will automatically connect. -//! -//! 2. Type `PUT my-key my-value` in terminal one and hit return. -//! -//! 3. Type `GET my-key` in terminal two and hit return. -//! -//! 4. Close with Ctrl-c. -//! -//! You can also store provider records instead of key value records. -//! -//! 1. Using two terminal windows, start two instances. If you local network -//! allows mDNS, they will automatically connect. -//! -//! 2. Type `PUT_PROVIDER my-key` in terminal one and hit return. -//! -//! 3. Type `GET_PROVIDERS my-key` in terminal two and hit return. -//! -//! 4. Close with Ctrl-c. +#![doc = include_str!("../README.md")] use async_std::io; use futures::{prelude::*, select}; -use libp2p::core::upgrade::Version; -use libp2p::kad::record::store::MemoryStore; -use libp2p::kad::{ - record::Key, AddProviderOk, GetProvidersOk, GetRecordOk, Kademlia, KademliaEvent, PeerRecord, - PutRecordOk, QueryResult, Quorum, Record, -}; +use libp2p::kad; +use libp2p::kad::store::MemoryStore; +use libp2p::kad::Mode; use libp2p::{ - identity, mdns, noise, - swarm::{NetworkBehaviour, SwarmBuilder, SwarmEvent}, - tcp, yamux, PeerId, Transport, + mdns, noise, + swarm::{NetworkBehaviour, SwarmEvent}, + tcp, yamux, }; use std::error::Error; +use std::time::Duration; +use tracing_subscriber::EnvFilter; #[async_std::main] async fn main() -> Result<(), Box> { - env_logger::init(); - - // Create a random key for ourselves. - let local_key = identity::Keypair::generate_ed25519(); - let local_peer_id = PeerId::from(local_key.public()); - - let transport = tcp::async_io::Transport::default() - .upgrade(Version::V1Lazy) - .authenticate(noise::Config::new(&local_key)?) - .multiplex(yamux::Config::default()) - .boxed(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); // We create a custom network behaviour that combines Kademlia and mDNS. 
#[derive(NetworkBehaviour)] - #[behaviour(out_event = "MyBehaviourEvent")] - struct MyBehaviour { - kademlia: Kademlia, + struct Behaviour { + kademlia: kad::Behaviour, mdns: mdns::async_io::Behaviour, } - #[allow(clippy::large_enum_variant)] - enum MyBehaviourEvent { - Kademlia(KademliaEvent), - Mdns(mdns::Event), - } - - impl From for MyBehaviourEvent { - fn from(event: KademliaEvent) -> Self { - MyBehaviourEvent::Kademlia(event) - } - } - - impl From for MyBehaviourEvent { - fn from(event: mdns::Event) -> Self { - MyBehaviourEvent::Mdns(event) - } - } - - // Create a swarm to manage peers and events. - let mut swarm = { - // Create a Kademlia behaviour. - let store = MemoryStore::new(local_peer_id); - let kademlia = Kademlia::new(local_peer_id, store); - let mdns = mdns::async_io::Behaviour::new(mdns::Config::default(), local_peer_id)?; - let behaviour = MyBehaviour { kademlia, mdns }; - SwarmBuilder::with_async_std_executor(transport, behaviour, local_peer_id).build() - }; + let mut swarm = libp2p::SwarmBuilder::with_new_identity() + .with_async_std() + .with_tcp( + tcp::Config::default(), + noise::Config::new, + yamux::Config::default, + )? + .with_behaviour(|key| { + Ok(Behaviour { + kademlia: kad::Behaviour::new( + key.public().to_peer_id(), + MemoryStore::new(key.public().to_peer_id()), + ), + mdns: mdns::async_io::Behaviour::new( + mdns::Config::default(), + key.public().to_peer_id(), + )?, + }) + })? + .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(60))) + .build(); + + swarm.behaviour_mut().kademlia.set_mode(Some(Mode::Server)); // Read full lines from stdin let mut stdin = io::BufReader::new(io::stdin()).lines().fuse(); @@ -119,14 +85,14 @@ async fn main() -> Result<(), Box> { SwarmEvent::NewListenAddr { address, .. 
} => { println!("Listening in {address:?}"); }, - SwarmEvent::Behaviour(MyBehaviourEvent::Mdns(mdns::Event::Discovered(list))) => { + SwarmEvent::Behaviour(BehaviourEvent::Mdns(mdns::Event::Discovered(list))) => { for (peer_id, multiaddr) in list { swarm.behaviour_mut().kademlia.add_address(&peer_id, multiaddr); } } - SwarmEvent::Behaviour(MyBehaviourEvent::Kademlia(KademliaEvent::OutboundQueryProgressed { result, ..})) => { + SwarmEvent::Behaviour(BehaviourEvent::Kademlia(kad::Event::OutboundQueryProgressed { result, ..})) => { match result { - QueryResult::GetProviders(Ok(GetProvidersOk::FoundProviders { key, providers, .. })) => { + kad::QueryResult::GetProviders(Ok(kad::GetProvidersOk::FoundProviders { key, providers, .. })) => { for peer in providers { println!( "Peer {peer:?} provides key {:?}", @@ -134,12 +100,12 @@ async fn main() -> Result<(), Box> { ); } } - QueryResult::GetProviders(Err(err)) => { + kad::QueryResult::GetProviders(Err(err)) => { eprintln!("Failed to get providers: {err:?}"); } - QueryResult::GetRecord(Ok( - GetRecordOk::FoundRecord(PeerRecord { - record: Record { key, value, .. }, + kad::QueryResult::GetRecord(Ok( + kad::GetRecordOk::FoundRecord(kad::PeerRecord { + record: kad::Record { key, value, .. }, .. 
}) )) => { @@ -149,26 +115,26 @@ async fn main() -> Result<(), Box> { std::str::from_utf8(&value).unwrap(), ); } - QueryResult::GetRecord(Ok(_)) => {} - QueryResult::GetRecord(Err(err)) => { + kad::QueryResult::GetRecord(Ok(_)) => {} + kad::QueryResult::GetRecord(Err(err)) => { eprintln!("Failed to get record: {err:?}"); } - QueryResult::PutRecord(Ok(PutRecordOk { key })) => { + kad::QueryResult::PutRecord(Ok(kad::PutRecordOk { key })) => { println!( "Successfully put record {:?}", std::str::from_utf8(key.as_ref()).unwrap() ); } - QueryResult::PutRecord(Err(err)) => { + kad::QueryResult::PutRecord(Err(err)) => { eprintln!("Failed to put record: {err:?}"); } - QueryResult::StartProviding(Ok(AddProviderOk { key })) => { + kad::QueryResult::StartProviding(Ok(kad::AddProviderOk { key })) => { println!( "Successfully put provider record {:?}", std::str::from_utf8(key.as_ref()).unwrap() ); } - QueryResult::StartProviding(Err(err)) => { + kad::QueryResult::StartProviding(Err(err)) => { eprintln!("Failed to put provider record: {err:?}"); } _ => {} @@ -180,14 +146,14 @@ async fn main() -> Result<(), Box> { } } -fn handle_input_line(kademlia: &mut Kademlia, line: String) { +fn handle_input_line(kademlia: &mut kad::Behaviour, line: String) { let mut args = line.split(' '); match args.next() { Some("GET") => { let key = { match args.next() { - Some(key) => Key::new(&key), + Some(key) => kad::RecordKey::new(&key), None => { eprintln!("Expected key"); return; @@ -199,7 +165,7 @@ fn handle_input_line(kademlia: &mut Kademlia, line: String) { Some("GET_PROVIDERS") => { let key = { match args.next() { - Some(key) => Key::new(&key), + Some(key) => kad::RecordKey::new(&key), None => { eprintln!("Expected key"); return; @@ -211,7 +177,7 @@ fn handle_input_line(kademlia: &mut Kademlia, line: String) { Some("PUT") => { let key = { match args.next() { - Some(key) => Key::new(&key), + Some(key) => kad::RecordKey::new(&key), None => { eprintln!("Expected key"); return; @@ -227,20 +193,20 
@@ fn handle_input_line(kademlia: &mut Kademlia, line: String) { } } }; - let record = Record { + let record = kad::Record { key, value, publisher: None, expires: None, }; kademlia - .put_record(record, Quorum::One) + .put_record(record, kad::Quorum::One) .expect("Failed to store record locally."); } Some("PUT_PROVIDER") => { let key = { match args.next() { - Some(key) => Key::new(&key), + Some(key) => kad::RecordKey::new(&key), None => { eprintln!("Expected key"); return; diff --git a/examples/file-sharing/Cargo.toml b/examples/file-sharing/Cargo.toml index 9f2ab176db9..c903b515e07 100644 --- a/examples/file-sharing/Cargo.toml +++ b/examples/file-sharing/Cargo.toml @@ -1,16 +1,23 @@ [package] -name = "file-sharing" +name = "file-sharing-example" version = "0.1.0" edition = "2021" publish = false license = "MIT" +[package.metadata.release] +release = false + [dependencies] +serde = { version = "1.0", features = ["derive"] } async-std = { version = "1.12", features = ["attributes"] } -async-trait = "0.1" -clap = { version = "4.2.7", features = ["derive"] } -either = "1.8" -env_logger = "0.10" -futures = "0.3.28" -libp2p = { path = "../../libp2p", features = ["async-std", "dns", "kad", "noise", "macros", "request-response", "tcp", "websocket", "yamux"] } -multiaddr = { version = "0.17.1" } \ No newline at end of file +clap = { version = "4.4.11", features = ["derive"] } +either = "1.9" +futures = "0.3.30" +libp2p = { path = "../../libp2p", features = [ "async-std", "cbor", "dns", "kad", "noise", "macros", "request-response", "tcp", "websocket", "yamux"] } +tracing = "0.1.37" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } +void = "1.0.2" + +[lints] +workspace = true diff --git a/examples/file-sharing/README.md b/examples/file-sharing/README.md new file mode 100644 index 00000000000..2a2b3ec5317 --- /dev/null +++ b/examples/file-sharing/README.md @@ -0,0 +1,72 @@ +## Description + +The File Sharing example demonstrates a basic file sharing 
application built using **libp2p**. +This example showcases how to integrate **rust-libp2p** into a larger application while providing a simple file sharing functionality. + +In this application, peers in the network can either act as file providers or file retrievers. +Providers advertise the files they have available on a Distributed Hash Table (DHT) using `libp2p-kad`. +Retrievers can locate and retrieve files by their names from any node in the network. + +## How it Works + +Let's understand the flow of the file sharing process: + +- **File Providers**: Nodes A and B serve as file providers. +Each node offers a specific file: file FA for node A and file FB for node B. +To make their files available, they advertise themselves as providers on the DHT using `libp2p-kad`. +This enables other nodes in the network to discover and retrieve their files. + +- **File Retrievers**: Node C acts as a file retriever. +It wants to retrieve either file FA or FB. +Using `libp2p-kad`, it can locate the providers for these files on the DHT without being directly connected to them. +Node C connects to the corresponding provider node and requests the file content using `libp2p-request-response`. + +- **DHT and Network Connectivity**: The DHT (Distributed Hash Table) plays a crucial role in the file sharing process. +It allows nodes to store and discover information about file providers. +Nodes in the network are interconnected via the DHT, enabling efficient file discovery and retrieval. + +## Architectural Properties + +The File Sharing application has the following architectural properties: + +- **Clean and Clonable Interface**: The application provides a clean and clonable async/await interface, allowing users to interact with the network layer seamlessly. +The `Client` module encapsulates the necessary functionality for network communication. + +- **Efficient Network Handling**: The application operates with a single task that drives the network layer. 
+This design choice ensures efficient network communication without the need for locks or complex synchronization mechanisms. + +## Usage + +To set up a simple file sharing scenario with a provider and a retriever, follow these steps: + +1. **Start a File Provider**: In one terminal, run the following command to start a file provider node: + ```sh + cargo run -- --listen-address /ip4/127.0.0.1/tcp/40837 \ + --secret-key-seed 1 \ + provide \ + --path \ + --name + ``` + This command initiates a node that listens on the specified address and provides a file located at the specified path. + The file is identified by the provided name, which allows other nodes to discover and retrieve it. + +2. **Start a File Retriever**: In another terminal, run the following command to start a file retriever node: + ```sh + cargo run -- --peer /ip4/127.0.0.1/tcp/40837/p2p/12D3KooWPjceQrSwdWXPyLLeABRXmuqt69Rg3sBYbU1Nft9HyQ6X \ + get \ + --name + ``` + This command initiates a node that connects to the specified peer (the provider) and requests the file with the given name. + +Note: It is not necessary for the retriever node to be directly connected to the provider. +As long as both nodes are connected to any node in the same DHT network, the file can be successfully retrieved. + +This File Sharing example demonstrates the fundamental concepts of building a file sharing application using **libp2p**. +By understanding the flow and architectural properties of this example, you can leverage the power of **libp2p** to integrate peer-to-peer networking capabilities into your own applications. + +## Conclusion + +The File Sharing example provides a practical implementation of a basic file sharing application using **libp2p**. +By leveraging the capabilities of **libp2p**, such as the DHT and network connectivity protocols, it demonstrates how peers can share files in a decentralized manner. 
+ +By exploring and understanding the file sharing process and architectural properties presented in this example, developers can gain insights into building their own file sharing applications using **libp2p**. diff --git a/examples/file-sharing/src/main.rs b/examples/file-sharing/src/main.rs index fb7e1b75ff0..a834ee0600e 100644 --- a/examples/file-sharing/src/main.rs +++ b/examples/file-sharing/src/main.rs @@ -18,62 +18,8 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -//! # File sharing example -//! -//! Basic file sharing application with peers either providing or locating and -//! getting files by name. -//! -//! While obviously showcasing how to build a basic file sharing application, -//! the actual goal of this example is **to show how to integrate rust-libp2p -//! into a larger application**. -//! -//! ## Sample plot -//! -//! Assuming there are 3 nodes, A, B and C. A and B each provide a file while C -//! retrieves a file. -//! -//! Provider nodes A and B each provide a file, file FA and FB respectively. -//! They do so by advertising themselves as a provider for their file on a DHT -//! via [`libp2p-kad`]. The two, among other nodes of the network, are -//! interconnected via the DHT. -//! -//! Node C can locate the providers for file FA or FB on the DHT via -//! [`libp2p-kad`] without being connected to the specific node providing the -//! file, but any node of the DHT. Node C then connects to the corresponding -//! node and requests the file content of the file via -//! [`libp2p-request-response`]. -//! -//! ## Architectural properties -//! -//! - Clean clonable async/await interface ([`Client`](network::Client)) to interact with the -//! network layer. -//! -//! - Single task driving the network layer, no locks required. -//! -//! ## Usage -//! -//! A two node setup with one node providing the file and one node requesting the file. -//! -//! 1. Run command below in one terminal. -//! -//! 
```sh -//! cargo run -- --listen-address /ip4/127.0.0.1/tcp/40837 \ -//! --secret-key-seed 1 \ -//! provide \ -//! --path \ -//! --name -//! ``` -//! -//! 2. Run command below in another terminal. -//! -//! ```sh -//! cargo run -- --peer /ip4/127.0.0.1/tcp/40837/p2p/12D3KooWPjceQrSwdWXPyLLeABRXmuqt69Rg3sBYbU1Nft9HyQ6X \ -//! get \ -//! --name -//! ``` -//! -//! Note: The client does not need to be directly connected to the providing -//! peer, as long as both are connected to some node on the same DHT. +#![doc = include_str!("../README.md")] + mod network; use async_std::task::spawn; @@ -81,14 +27,17 @@ use clap::Parser; use futures::prelude::*; use futures::StreamExt; -use libp2p::{core::Multiaddr, multiaddr::Protocol, PeerId}; +use libp2p::{core::Multiaddr, multiaddr::Protocol}; use std::error::Error; use std::io::Write; use std::path::PathBuf; +use tracing_subscriber::EnvFilter; #[async_std::main] async fn main() -> Result<(), Box> { - env_logger::init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let opt = Opt::parse(); @@ -113,9 +62,8 @@ async fn main() -> Result<(), Box> { // In case the user provided an address of a peer on the CLI, dial it. 
if let Some(addr) = opt.peer { - let peer_id = match addr.iter().last() { - Some(Protocol::P2p(hash)) => PeerId::from_multihash(hash).expect("Valid hash."), - _ => return Err("Expect peer multiaddr to contain peer ID.".into()), + let Some(Protocol::P2p(peer_id)) = addr.iter().last() else { + return Err("Expect peer multiaddr to contain peer ID.".into()); }; network_client .dial(peer_id, addr) diff --git a/examples/file-sharing/src/network.rs b/examples/file-sharing/src/network.rs index 218754fda09..27f833be487 100644 --- a/examples/file-sharing/src/network.rs +++ b/examples/file-sharing/src/network.rs @@ -1,30 +1,21 @@ -use async_std::io; -use async_trait::async_trait; -use either::Either; use futures::channel::{mpsc, oneshot}; use futures::prelude::*; use libp2p::{ - core::{ - upgrade::{read_length_prefixed, write_length_prefixed}, - Multiaddr, - }, - identity, - kad::{ - record::store::MemoryStore, GetProvidersOk, Kademlia, KademliaEvent, QueryId, QueryResult, - }, + core::Multiaddr, + identity, kad, multiaddr::Protocol, noise, - request_response::{self, ProtocolSupport, RequestId, ResponseChannel}, - swarm::{ConnectionHandlerUpgrErr, NetworkBehaviour, Swarm, SwarmBuilder, SwarmEvent}, - tcp, yamux, PeerId, Transport, + request_response::{self, OutboundRequestId, ProtocolSupport, ResponseChannel}, + swarm::{NetworkBehaviour, Swarm, SwarmEvent}, + tcp, yamux, PeerId, }; -use libp2p::core::upgrade::Version; use libp2p::StreamProtocol; +use serde::{Deserialize, Serialize}; use std::collections::{hash_map, HashMap, HashSet}; use std::error::Error; -use std::iter; +use std::time::Duration; /// Creates the network components, namely: /// @@ -50,30 +41,33 @@ pub(crate) async fn new( }; let peer_id = id_keys.public().to_peer_id(); - let transport = tcp::async_io::Transport::default() - .upgrade(Version::V1Lazy) - .authenticate(noise::Config::new(&id_keys)?) 
- .multiplex(yamux::Config::default()) - .boxed(); - - // Build the Swarm, connecting the lower layer transport logic with the - // higher layer network behaviour logic. - let swarm = SwarmBuilder::with_async_std_executor( - transport, - ComposedBehaviour { - kademlia: Kademlia::new(peer_id, MemoryStore::new(peer_id)), - request_response: request_response::Behaviour::new( - FileExchangeCodec(), - iter::once(( + let mut swarm = libp2p::SwarmBuilder::with_existing_identity(id_keys) + .with_async_std() + .with_tcp( + tcp::Config::default(), + noise::Config::new, + yamux::Config::default, + )? + .with_behaviour(|key| Behaviour { + kademlia: kad::Behaviour::new( + peer_id, + kad::store::MemoryStore::new(key.public().to_peer_id()), + ), + request_response: request_response::cbor::Behaviour::new( + [( StreamProtocol::new("/file-exchange/1"), ProtocolSupport::Full, - )), - Default::default(), + )], + request_response::Config::default(), ), - }, - peer_id, - ) - .build(); + })? + .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(60))) + .build(); + + swarm + .behaviour_mut() + .kademlia + .set_mode(Some(kad::Mode::Server)); let (command_sender, command_receiver) = mpsc::channel(0); let (event_sender, event_receiver) = mpsc::channel(0); @@ -176,19 +170,19 @@ impl Client { } pub(crate) struct EventLoop { - swarm: Swarm, + swarm: Swarm, command_receiver: mpsc::Receiver, event_sender: mpsc::Sender, pending_dial: HashMap>>>, - pending_start_providing: HashMap>, - pending_get_providers: HashMap>>, + pending_start_providing: HashMap>, + pending_get_providers: HashMap>>, pending_request_file: - HashMap, Box>>>, + HashMap, Box>>>, } impl EventLoop { fn new( - swarm: Swarm, + swarm: Swarm, command_receiver: mpsc::Receiver, event_sender: mpsc::Sender, ) -> Self { @@ -216,15 +210,12 @@ impl EventLoop { } } - async fn handle_event( - &mut self, - event: SwarmEvent, io::Error>>, - ) { + async fn handle_event(&mut self, event: SwarmEvent) { match event { - 
SwarmEvent::Behaviour(ComposedEvent::Kademlia( - KademliaEvent::OutboundQueryProgressed { + SwarmEvent::Behaviour(BehaviourEvent::Kademlia( + kad::Event::OutboundQueryProgressed { id, - result: QueryResult::StartProviding(_), + result: kad::QueryResult::StartProviding(_), .. }, )) => { @@ -234,12 +225,13 @@ impl EventLoop { .expect("Completed query to be previously pending."); let _ = sender.send(()); } - SwarmEvent::Behaviour(ComposedEvent::Kademlia( - KademliaEvent::OutboundQueryProgressed { + SwarmEvent::Behaviour(BehaviourEvent::Kademlia( + kad::Event::OutboundQueryProgressed { id, result: - QueryResult::GetProviders(Ok(GetProvidersOk::FoundProviders { - providers, .. + kad::QueryResult::GetProviders(Ok(kad::GetProvidersOk::FoundProviders { + providers, + .. })), .. }, @@ -256,17 +248,17 @@ impl EventLoop { .finish(); } } - SwarmEvent::Behaviour(ComposedEvent::Kademlia( - KademliaEvent::OutboundQueryProgressed { + SwarmEvent::Behaviour(BehaviourEvent::Kademlia( + kad::Event::OutboundQueryProgressed { result: - QueryResult::GetProviders(Ok(GetProvidersOk::FinishedWithNoAdditionalRecord { - .. - })), + kad::QueryResult::GetProviders(Ok( + kad::GetProvidersOk::FinishedWithNoAdditionalRecord { .. }, + )), .. }, )) => {} - SwarmEvent::Behaviour(ComposedEvent::Kademlia(_)) => {} - SwarmEvent::Behaviour(ComposedEvent::RequestResponse( + SwarmEvent::Behaviour(BehaviourEvent::Kademlia(_)) => {} + SwarmEvent::Behaviour(BehaviourEvent::RequestResponse( request_response::Event::Message { message, .. }, )) => match message { request_response::Message::Request { @@ -291,7 +283,7 @@ impl EventLoop { .send(Ok(response.0)); } }, - SwarmEvent::Behaviour(ComposedEvent::RequestResponse( + SwarmEvent::Behaviour(BehaviourEvent::RequestResponse( request_response::Event::OutboundFailure { request_id, error, .. 
}, @@ -302,14 +294,14 @@ impl EventLoop { .expect("Request to still be pending.") .send(Err(Box::new(error))); } - SwarmEvent::Behaviour(ComposedEvent::RequestResponse( + SwarmEvent::Behaviour(BehaviourEvent::RequestResponse( request_response::Event::ResponseSent { .. }, )) => {} SwarmEvent::NewListenAddr { address, .. } => { let local_peer_id = *self.swarm.local_peer_id(); eprintln!( "Local node is listening on {:?}", - address.with(Protocol::P2p(local_peer_id.into())) + address.with(Protocol::P2p(local_peer_id)) ); } SwarmEvent::IncomingConnection { .. } => {} @@ -331,7 +323,10 @@ impl EventLoop { } } SwarmEvent::IncomingConnectionError { .. } => {} - SwarmEvent::Dialing(peer_id) => eprintln!("Dialing {peer_id}"), + SwarmEvent::Dialing { + peer_id: Some(peer_id), + .. + } => eprintln!("Dialing {peer_id}"), e => panic!("{e:?}"), } } @@ -354,10 +349,7 @@ impl EventLoop { .behaviour_mut() .kademlia .add_address(&peer_id, peer_addr.clone()); - match self - .swarm - .dial(peer_addr.with(Protocol::P2p(peer_id.into()))) - { + match self.swarm.dial(peer_addr.with(Protocol::P2p(peer_id))) { Ok(()) => { e.insert(sender); } @@ -410,28 +402,9 @@ impl EventLoop { } #[derive(NetworkBehaviour)] -#[behaviour(out_event = "ComposedEvent")] -struct ComposedBehaviour { - request_response: request_response::Behaviour, - kademlia: Kademlia, -} - -#[derive(Debug)] -enum ComposedEvent { - RequestResponse(request_response::Event), - Kademlia(KademliaEvent), -} - -impl From> for ComposedEvent { - fn from(event: request_response::Event) -> Self { - ComposedEvent::RequestResponse(event) - } -} - -impl From for ComposedEvent { - fn from(event: KademliaEvent) -> Self { - ComposedEvent::Kademlia(event) - } +struct Behaviour { + request_response: request_response::cbor::Behaviour, + kademlia: kad::Behaviour, } #[derive(Debug)] @@ -473,77 +446,7 @@ pub(crate) enum Event { } // Simple file exchange protocol - -#[derive(Clone)] -struct FileExchangeCodec(); -#[derive(Debug, Clone, PartialEq, Eq)] 
+#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] struct FileRequest(String); -#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] pub(crate) struct FileResponse(Vec); - -#[async_trait] -impl request_response::Codec for FileExchangeCodec { - type Protocol = StreamProtocol; - type Request = FileRequest; - type Response = FileResponse; - - async fn read_request(&mut self, _: &StreamProtocol, io: &mut T) -> io::Result - where - T: AsyncRead + Unpin + Send, - { - let vec = read_length_prefixed(io, 1_000_000).await?; - - if vec.is_empty() { - return Err(io::ErrorKind::UnexpectedEof.into()); - } - - Ok(FileRequest(String::from_utf8(vec).unwrap())) - } - - async fn read_response( - &mut self, - _: &StreamProtocol, - io: &mut T, - ) -> io::Result - where - T: AsyncRead + Unpin + Send, - { - let vec = read_length_prefixed(io, 500_000_000).await?; // update transfer maximum - - if vec.is_empty() { - return Err(io::ErrorKind::UnexpectedEof.into()); - } - - Ok(FileResponse(vec)) - } - - async fn write_request( - &mut self, - _: &StreamProtocol, - io: &mut T, - FileRequest(data): FileRequest, - ) -> io::Result<()> - where - T: AsyncWrite + Unpin + Send, - { - write_length_prefixed(io, data).await?; - io.close().await?; - - Ok(()) - } - - async fn write_response( - &mut self, - _: &StreamProtocol, - io: &mut T, - FileResponse(data): FileResponse, - ) -> io::Result<()> - where - T: AsyncWrite + Unpin + Send, - { - write_length_prefixed(io, data).await?; - io.close().await?; - - Ok(()) - } -} diff --git a/examples/identify/Cargo.toml b/examples/identify/Cargo.toml index b11ea227c61..2dcc780ac22 100644 --- a/examples/identify/Cargo.toml +++ b/examples/identify/Cargo.toml @@ -1,12 +1,20 @@ [package] -name = "identify" +name = "identify-example" version = "0.1.0" edition = "2021" publish = false license = "MIT" +[package.metadata.release] +release = false + [dependencies] async-std = { version = "1.12", features = 
["attributes"] } async-trait = "0.1" -futures = "0.3.28" -libp2p = { path = "../../libp2p", features = ["async-std", "dns", "dcutr", "identify", "macros", "noise", "ping", "relay", "rendezvous", "tcp", "tokio", "yamux"] } +futures = "0.3.30" +libp2p = { path = "../../libp2p", features = ["async-std", "dns", "dcutr", "identify", "macros", "noise", "ping", "relay", "rendezvous", "tcp", "tokio","yamux"] } +tracing = "0.1.37" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } + +[lints] +workspace = true diff --git a/examples/identify/README.md b/examples/identify/README.md new file mode 100644 index 00000000000..f358ae8f243 --- /dev/null +++ b/examples/identify/README.md @@ -0,0 +1,23 @@ +## Description + +The example demonstrates how to create a connection between two nodes using TCP transport, authenticate with the noise protocol, and multiplex data streams with yamux. +The library provides a behavior for the identify protocol, allowing nodes to exchange identification information securely. +By running the example, the nodes will establish a connection, negotiate the identify protocol, and exchange identification information, which will be displayed in the console. + +## Usage + +1. In the first terminal window, run the following command: ```sh cargo run ``` This will print the peer ID (`PeerId`) and the listening addresses, e.g., `Listening on "/ip4/127.0.0.1/tcp/24915"` + +2. In the second terminal window, start a new instance of the example with the following command: ```sh cargo run -- /ip4/127.0.0.1/tcp/24915 ``` The two nodes establish a connection, negotiate the identify protocol, and send each other identification information, which is then printed to the console. + +## Conclusion + +The included identify example demonstrates how to establish connections and exchange identification information between nodes using the library's protocols and behaviors. 
diff --git a/examples/identify/src/main.rs b/examples/identify/src/main.rs index a46fac7f368..916317a5a43 100644 --- a/examples/identify/src/main.rs +++ b/examples/identify/src/main.rs @@ -18,53 +18,34 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -//! identify example -//! -//! In the first terminal window, run: -//! -//! ```sh -//! cargo run -//! ``` -//! It will print the [`PeerId`] and the listening addresses, e.g. `Listening on -//! "/ip4/127.0.0.1/tcp/24915"` -//! -//! In the second terminal window, start a new instance of the example with: -//! -//! ```sh -//! cargo run -- /ip4/127.0.0.1/tcp/24915 -//! ``` -//! The two nodes establish a connection, negotiate the identify protocol -//! and will send each other identify info which is then printed to the console. +#![doc = include_str!("../README.md")] -use futures::prelude::*; -use libp2p::{ - core::{multiaddr::Multiaddr, upgrade::Version}, - identify, identity, noise, - swarm::{SwarmBuilder, SwarmEvent}, - tcp, yamux, PeerId, Transport, -}; -use std::error::Error; +use futures::StreamExt; +use libp2p::{core::multiaddr::Multiaddr, identify, noise, swarm::SwarmEvent, tcp, yamux}; +use std::{error::Error, time::Duration}; +use tracing_subscriber::EnvFilter; #[async_std::main] async fn main() -> Result<(), Box> { - let local_key = identity::Keypair::generate_ed25519(); - let local_peer_id = PeerId::from(local_key.public()); - println!("Local peer id: {local_peer_id:?}"); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); - let transport = tcp::async_io::Transport::default() - .upgrade(Version::V1Lazy) - .authenticate(noise::Config::new(&local_key).unwrap()) - .multiplex(yamux::Config::default()) - .boxed(); - - // Create a identify network behaviour. 
- let behaviour = identify::Behaviour::new(identify::Config::new( - "/ipfs/id/1.0.0".to_string(), - local_key.public(), - )); - - let mut swarm = - SwarmBuilder::with_async_std_executor(transport, behaviour, local_peer_id).build(); + let mut swarm = libp2p::SwarmBuilder::with_new_identity() + .with_async_std() + .with_tcp( + tcp::Config::default(), + noise::Config::new, + yamux::Config::default, + )? + .with_behaviour(|key| { + identify::Behaviour::new(identify::Config::new( + "/ipfs/id/1.0.0".to_string(), + key.public(), + )) + })? + .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(60))) + .build(); // Tell the swarm to listen on all interfaces and a random, OS-assigned // port. diff --git a/examples/ipfs-kad/Cargo.toml b/examples/ipfs-kad/Cargo.toml index 7e156572781..52001626de4 100644 --- a/examples/ipfs-kad/Cargo.toml +++ b/examples/ipfs-kad/Cargo.toml @@ -1,13 +1,23 @@ [package] -name = "ipfs-kad" +name = "ipfs-kad-example" version = "0.1.0" edition = "2021" publish = false license = "MIT" +[package.metadata.release] +release = false + [dependencies] -async-std = { version = "1.12", features = ["attributes"] } +tokio = { version = "1.35", features = ["rt-multi-thread", "macros"] } async-trait = "0.1" +clap = { version = "4.4.11", features = ["derive"] } env_logger = "0.10" -futures = "0.3.28" -libp2p = { path = "../../libp2p", features = ["async-std", "dns", "kad", "mplex", "noise", "tcp", "websocket", "yamux"] } +futures = "0.3.30" +anyhow = "1.0.76" +libp2p = { path = "../../libp2p", features = [ "tokio", "dns", "kad", "noise", "tcp", "yamux", "rsa"] } +tracing = "0.1.37" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } + +[lints] +workspace = true diff --git a/examples/ipfs-kad/README.md b/examples/ipfs-kad/README.md new file mode 100644 index 00000000000..ccefb6b1d9f --- /dev/null +++ b/examples/ipfs-kad/README.md @@ -0,0 +1,91 @@ +## Description + +This example showcases the usage of **libp2p** to interact 
with the Kademlia protocol on the IPFS network. +The code demonstrates how to: + - perform Kademlia queries to find the closest peers to a specific peer ID + - insert PK records into the Kademlia DHT + +By running this example, users can gain a better understanding of how the Kademlia protocol operates and performs operations on the IPFS network. + +## Usage + +The example code demonstrates how to perform Kademlia queries on the IPFS network using the Rust P2P Library. + +### Getting closest peers + +By specifying a peer ID as a parameter, the code will search for the closest peers to the given peer ID. + +#### Parameters + +Run the example code: + +```sh +cargo run -- get-peers [PEER_ID] +``` + +Replace `[PEER_ID]` with the base58-encoded peer ID you want to search for. +If no peer ID is provided, a random peer ID will be generated. + +#### Example Output + +Upon running the example code, you will see the output in the console. +The output will display the result of the Kademlia query, including the closest peers to the specified peer ID. + +#### Successful Query Output + +If the Kademlia query successfully finds closest peers, the output will be: + +```sh +Searching for the closest peers to [PEER_ID] +Query finished with closest peers: [peer1, peer2, peer3] +``` + +#### Failed Query Output + +If the Kademlia query times out or there are no reachable peers, the output will indicate the failure: + +```sh +Searching for the closest peers to [PEER_ID] +Query finished with no closest peers. +``` + +### Inserting PK records into the DHT + +By specifying `put-pk-record` as a subcommand, the code will insert the generated public key as a PK record into the DHT. + +#### Parameters + +Run the example code: + +```sh +cargo run -- put-pk-record +``` + +#### Example Output + +Upon running the example code, you will see the output in the console. +The output will display the result of the Kademlia operation. 
+ +#### Successful Operation Output + +If the PK record is successfully inserted into the DHT, the output will be: + +```sh +Putting PK record into the DHT +Successfully inserted the PK record +``` + +#### Failed Operation Output + +If the Kademlia operation times out or there are no reachable peers, the output will indicate the failure: + +```sh +Putting PK record into the DHT +Failed to insert the PK record +``` + +## Conclusion + +In conclusion, this example provides a practical demonstration of using the Rust P2P Library to interact with the Kademlia protocol on the IPFS network. +By examining the code and running the example, users can gain insights into the inner workings of Kademlia and how it performs various basic actions like getting the closest peers or inserting records into the DHT. +This knowledge can be valuable when developing peer-to-peer applications or understanding decentralized networks. diff --git a/examples/ipfs-kad/src/main.rs b/examples/ipfs-kad/src/main.rs index 9799ca8df0a..0d11bdd851a 100644 --- a/examples/ipfs-kad/src/main.rs +++ b/examples/ipfs-kad/src/main.rs @@ -18,20 +18,17 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -//! Demonstrates how to perform Kademlia queries on the IPFS network. -//! -//! You can pass as parameter a base58 peer ID to search for. If you don't pass any parameter, a -//! peer ID will be generated randomly. 
+#![doc = include_str!("../README.md")] +use std::num::NonZeroUsize; +use std::ops::Add; +use std::time::{Duration, Instant}; + +use anyhow::{bail, Result}; +use clap::Parser; use futures::StreamExt; -use libp2p::kad::record::store::MemoryStore; -use libp2p::kad::{GetClosestPeersError, Kademlia, KademliaConfig, KademliaEvent, QueryResult}; -use libp2p::{ - development_transport, identity, - swarm::{SwarmBuilder, SwarmEvent}, - PeerId, -}; -use std::{env, error::Error, time::Duration}; +use libp2p::{bytes::BufMut, identity, kad, noise, swarm::SwarmEvent, tcp, yamux, PeerId}; +use tracing_subscriber::EnvFilter; const BOOTNODES: [&str; 4] = [ "QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN", @@ -40,76 +37,126 @@ const BOOTNODES: [&str; 4] = [ "QmcZf59bWwK5XFi76CZX8cbJ4BhTzzA3gU1ZjYZcYW3dwt", ]; -#[async_std::main] -async fn main() -> Result<(), Box> { - env_logger::init(); +#[tokio::main] +async fn main() -> Result<()> { + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); // Create a random key for ourselves. let local_key = identity::Keypair::generate_ed25519(); - let local_peer_id = PeerId::from(local_key.public()); - - // Set up a an encrypted DNS-enabled TCP Transport over the Mplex protocol - let transport = development_transport(local_key).await?; - - // Create a swarm to manage peers and events. - let mut swarm = { - // Create a Kademlia behaviour. - let mut cfg = KademliaConfig::default(); - cfg.set_query_timeout(Duration::from_secs(5 * 60)); - let store = MemoryStore::new(local_peer_id); - let mut behaviour = Kademlia::with_config(local_peer_id, store, cfg); - - // Add the bootnodes to the local routing table. `libp2p-dns` built - // into the `transport` resolves the `dnsaddr` when Kademlia tries - // to dial these nodes. 
- for peer in &BOOTNODES { - behaviour.add_address(&peer.parse()?, "/dnsaddr/bootstrap.libp2p.io".parse()?); + + let mut swarm = libp2p::SwarmBuilder::with_existing_identity(local_key.clone()) + .with_tokio() + .with_tcp( + tcp::Config::default(), + noise::Config::new, + yamux::Config::default, + )? + .with_dns()? + .with_behaviour(|key| { + // Create a Kademlia behaviour. + let mut cfg = kad::Config::default(); + cfg.set_query_timeout(Duration::from_secs(5 * 60)); + let store = kad::store::MemoryStore::new(key.public().to_peer_id()); + kad::Behaviour::with_config(key.public().to_peer_id(), store, cfg) + })? + .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(5))) + .build(); + + // Add the bootnodes to the local routing table. `libp2p-dns` built + // into the `transport` resolves the `dnsaddr` when Kademlia tries + // to dial these nodes. + for peer in &BOOTNODES { + swarm + .behaviour_mut() + .add_address(&peer.parse()?, "/dnsaddr/bootstrap.libp2p.io".parse()?); + } + + let cli_opt = Opt::parse(); + + match cli_opt.argument { + CliArgument::GetPeers { peer_id } => { + let peer_id = peer_id.unwrap_or(PeerId::random()); + println!("Searching for the closest peers to {peer_id}"); + swarm.behaviour_mut().get_closest_peers(peer_id); } + CliArgument::PutPkRecord {} => { + println!("Putting PK record into the DHT"); - SwarmBuilder::with_async_std_executor(transport, behaviour, local_peer_id).build() - }; + let mut pk_record_key = vec![]; + pk_record_key.put_slice("/pk/".as_bytes()); + pk_record_key.put_slice(swarm.local_peer_id().to_bytes().as_slice()); - // Order Kademlia to search for a peer. - let to_search = env::args() - .nth(1) - .map(|p| p.parse()) - .transpose()? 
- .unwrap_or_else(PeerId::random); + let mut pk_record = + kad::Record::new(pk_record_key, local_key.public().encode_protobuf()); + pk_record.publisher = Some(*swarm.local_peer_id()); + pk_record.expires = Some(Instant::now().add(Duration::from_secs(60))); - println!("Searching for the closest peers to {to_search}"); - swarm.behaviour_mut().get_closest_peers(to_search); + swarm + .behaviour_mut() + .put_record(pk_record, kad::Quorum::N(NonZeroUsize::new(3).unwrap()))?; + } + } loop { let event = swarm.select_next_some().await; - if let SwarmEvent::Behaviour(KademliaEvent::OutboundQueryProgressed { - result: QueryResult::GetClosestPeers(result), - .. - }) = event - { - match result { - Ok(ok) => { - if !ok.peers.is_empty() { - println!("Query finished with closest peers: {:#?}", ok.peers) - } else { - // The example is considered failed as there - // should always be at least 1 reachable peer. - println!("Query finished with no closest peers.") - } - } - Err(GetClosestPeersError::Timeout { peers, .. }) => { - if !peers.is_empty() { - println!("Query timed out with closest peers: {peers:#?}") - } else { - // The example is considered failed as there - // should always be at least 1 reachable peer. - println!("Query timed out with no closest peers."); - } + + match event { + SwarmEvent::Behaviour(kad::Event::OutboundQueryProgressed { + result: kad::QueryResult::GetClosestPeers(Ok(ok)), + .. + }) => { + // The example is considered failed as there + // should always be at least 1 reachable peer. + if ok.peers.is_empty() { + bail!("Query finished with no closest peers.") } - }; - break; + println!("Query finished with closest peers: {:#?}", ok.peers); + + return Ok(()); + } + SwarmEvent::Behaviour(kad::Event::OutboundQueryProgressed { + result: + kad::QueryResult::GetClosestPeers(Err(kad::GetClosestPeersError::Timeout { + .. + })), + .. 
+ }) => { + bail!("Query for closest peers timed out") + } + SwarmEvent::Behaviour(kad::Event::OutboundQueryProgressed { + result: kad::QueryResult::PutRecord(Ok(_)), + .. + }) => { + println!("Successfully inserted the PK record"); + + return Ok(()); + } + SwarmEvent::Behaviour(kad::Event::OutboundQueryProgressed { + result: kad::QueryResult::PutRecord(Err(err)), + .. + }) => { + bail!(anyhow::Error::new(err).context("Failed to insert the PK record")); + } + _ => {} } } +} + +#[derive(Parser, Debug)] +#[clap(name = "libp2p Kademlia DHT example")] +struct Opt { + #[clap(subcommand)] + argument: CliArgument, +} - Ok(()) +#[derive(Debug, Parser)] +enum CliArgument { + GetPeers { + #[clap(long)] + peer_id: Option, + }, + PutPkRecord {}, } diff --git a/examples/ipfs-private/Cargo.toml b/examples/ipfs-private/Cargo.toml index 5271f71c5bf..3b266cf35a6 100644 --- a/examples/ipfs-private/Cargo.toml +++ b/examples/ipfs-private/Cargo.toml @@ -1,15 +1,21 @@ [package] -name = "ipfs-private" +name = "ipfs-private-example" version = "0.1.0" edition = "2021" publish = false license = "MIT" +[package.metadata.release] +release = false + [dependencies] -async-std = { version = "1.12", features = ["attributes"] } +tokio = { version = "1.35", features = ["rt-multi-thread", "macros", "io-std"] } async-trait = "0.1" -either = "1.8" -env_logger = "0.10" -futures = "0.3.28" -libp2p = { path = "../../libp2p", features = ["async-std", "gossipsub", "dns", "identify", "kad", "macros", "noise", "ping", "pnet", "tcp", "websocket", "yamux"] } -multiaddr = { version = "0.17.1" } \ No newline at end of file +either = "1.9" +futures = "0.3.30" +libp2p = { path = "../../libp2p", features = [ "tokio", "gossipsub", "dns", "identify", "kad", "macros", "noise", "ping", "pnet", "tcp", "websocket", "yamux"] } +tracing = "0.1.37" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } + +[lints] +workspace = true diff --git a/examples/ipfs-private/README.md b/examples/ipfs-private/README.md 
new file mode 100644 index 00000000000..bc3b12a7ac8 --- /dev/null +++ b/examples/ipfs-private/README.md @@ -0,0 +1,40 @@ +## Description + +This example showcases a minimal implementation of a **libp2p** node that can interact with IPFS. +It utilizes the gossipsub protocol for pubsub messaging, the ping protocol for network connectivity testing, and the identify protocol for peer identification. +The node can be used to communicate with other IPFS nodes that have gossipsub enabled. + +To establish a connection with other nodes, you can provide their multiaddresses as command-line arguments. +On startup, the example will display a list of addresses that you can dial from a `go-ipfs` or `js-ipfs` node. + +## Usage + +To run the example, follow these steps: + +1. Build and run the example using Cargo: + ```sh + cargo run [ADDRESS_1] [ADDRESS_2] ... + ``` + + Replace `[ADDRESS_1]`, `[ADDRESS_2]`, etc., with the multiaddresses of the nodes you want to connect to. + You can provide multiple addresses as command-line arguments. + + **Note:** The multiaddress should be in the following format: `/ip4/127.0.0.1/tcp/4001/p2p/peer_id`. + +2. Once the example is running, you can interact with the IPFS node using the following commands: + + - **Pubsub (Gossipsub):** You can use the gossipsub protocol to send and receive messages on the "chat" topic. + To send a message, type it in the console and press Enter. + The message will be broadcasted to other connected nodes using gossipsub. + + - **Ping:** You can ping other connected nodes to test network connectivity. + The example will display the round-trip time (RTT) for successful pings or indicate if a timeout occurs. + + +## Conclusion + +This example provides a basic implementation of an IPFS node using **libp2p**. +It demonstrates the usage of the gossipsub, ping, and identify protocols to enable communication with other IPFS nodes. 
+By running this example and exploring its functionality, you can gain insights into how to build more advanced P2P applications using Rust. + +Feel free to experiment with different multiaddresses and explore the capabilities of **libp2p** in the context of IPFS. Happy coding! diff --git a/examples/ipfs-private/src/main.rs b/examples/ipfs-private/src/main.rs index fd18581407b..a57bfd465e0 100644 --- a/examples/ipfs-private/src/main.rs +++ b/examples/ipfs-private/src/main.rs @@ -18,55 +18,22 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -//! A minimal node that can interact with ipfs -//! -//! This node implements the gossipsub, ping and identify protocols. It supports -//! the ipfs private swarms feature by reading the pre shared key file `swarm.key` -//! from the IPFS_PATH environment variable or from the default location. -//! -//! You can pass any number of nodes to be dialed. -//! -//! On startup, this example will show a list of addresses that you can dial -//! from a go-ipfs or js-ipfs node. -//! -//! You can ping this node, or use pubsub (gossipsub) on the topic "chat". For this -//! to work, the ipfs node needs to be configured to use gossipsub. -use async_std::io; +#![doc = include_str!("../README.md")] + use either::Either; -use futures::{prelude::*, select}; +use futures::prelude::*; use libp2p::{ - core::{muxing::StreamMuxerBox, transport, transport::upgrade::Version}, - gossipsub, identify, identity, + core::transport::upgrade::Version, + gossipsub, identify, multiaddr::Protocol, noise, ping, pnet::{PnetConfig, PreSharedKey}, - swarm::{NetworkBehaviour, SwarmBuilder, SwarmEvent}, - tcp, yamux, Multiaddr, PeerId, Transport, + swarm::{NetworkBehaviour, SwarmEvent}, + tcp, yamux, Multiaddr, Transport, }; use std::{env, error::Error, fs, path::Path, str::FromStr, time::Duration}; - -/// Builds the transport that serves as a common ground for all connections. 
-pub fn build_transport( - key_pair: identity::Keypair, - psk: Option, -) -> transport::Boxed<(PeerId, StreamMuxerBox)> { - let noise_config = noise::Config::new(&key_pair).unwrap(); - let yamux_config = yamux::Config::default(); - - let base_transport = tcp::async_io::Transport::new(tcp::Config::default().nodelay(true)); - let maybe_encrypted = match psk { - Some(psk) => Either::Left( - base_transport.and_then(move |socket, _| PnetConfig::new(psk).handshake(socket)), - ), - None => Either::Right(base_transport), - }; - maybe_encrypted - .upgrade(Version::V1Lazy) - .authenticate(noise_config) - .multiplex(yamux_config) - .timeout(Duration::from_secs(20)) - .boxed() -} +use tokio::{io, io::AsyncBufReadExt, select}; +use tracing_subscriber::EnvFilter; /// Get the current ipfs repo path, either from the IPFS_PATH environment variable or /// from the default $HOME/.ipfs @@ -119,9 +86,11 @@ fn parse_legacy_multiaddr(text: &str) -> Result> { Ok(res) } -#[async_std::main] +#[tokio::main] async fn main() -> Result<(), Box> { - env_logger::init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let ipfs_path = get_ipfs_path(); println!("using IPFS_PATH {ipfs_path:?}"); @@ -129,76 +98,68 @@ async fn main() -> Result<(), Box> { .map(|text| PreSharedKey::from_str(&text)) .transpose()?; - // Create a random PeerId - let local_key = identity::Keypair::generate_ed25519(); - let local_peer_id = PeerId::from(local_key.public()); - println!("using random peer id: {local_peer_id:?}"); if let Some(psk) = psk { println!("using swarm key with fingerprint: {}", psk.fingerprint()); } - // Set up a an encrypted DNS-enabled TCP Transport over and Yamux protocol - let transport = build_transport(local_key.clone(), psk); - // Create a Gosspipsub topic let gossipsub_topic = gossipsub::IdentTopic::new("chat"); // We create a custom network behaviour that combines gossipsub, ping and identify. 
#[derive(NetworkBehaviour)] - #[behaviour(out_event = "MyBehaviourEvent")] struct MyBehaviour { gossipsub: gossipsub::Behaviour, identify: identify::Behaviour, ping: ping::Behaviour, } - enum MyBehaviourEvent { - Gossipsub(gossipsub::Event), - Identify(identify::Event), - Ping(ping::Event), - } - - impl From for MyBehaviourEvent { - fn from(event: gossipsub::Event) -> Self { - MyBehaviourEvent::Gossipsub(event) - } - } - - impl From for MyBehaviourEvent { - fn from(event: identify::Event) -> Self { - MyBehaviourEvent::Identify(event) - } - } - - impl From for MyBehaviourEvent { - fn from(event: ping::Event) -> Self { - MyBehaviourEvent::Ping(event) - } - } - - // Create a Swarm to manage peers and events - let mut swarm = { - let gossipsub_config = gossipsub::ConfigBuilder::default() - .max_transmit_size(262144) - .build() - .expect("valid config"); - let mut behaviour = MyBehaviour { - gossipsub: gossipsub::Behaviour::new( - gossipsub::MessageAuthenticity::Signed(local_key.clone()), - gossipsub_config, - ) - .expect("Valid configuration"), - identify: identify::Behaviour::new(identify::Config::new( - "/ipfs/0.1.0".into(), - local_key.public(), - )), - ping: ping::Behaviour::new(ping::Config::new()), - }; - - println!("Subscribing to {gossipsub_topic:?}"); - behaviour.gossipsub.subscribe(&gossipsub_topic).unwrap(); - SwarmBuilder::with_async_std_executor(transport, behaviour, local_peer_id).build() - }; + let mut swarm = libp2p::SwarmBuilder::with_new_identity() + .with_tokio() + .with_other_transport(|key| { + let noise_config = noise::Config::new(key).unwrap(); + let yamux_config = yamux::Config::default(); + + let base_transport = tcp::tokio::Transport::new(tcp::Config::default().nodelay(true)); + let maybe_encrypted = match psk { + Some(psk) => Either::Left( + base_transport + .and_then(move |socket, _| PnetConfig::new(psk).handshake(socket)), + ), + None => Either::Right(base_transport), + }; + maybe_encrypted + .upgrade(Version::V1Lazy) + 
.authenticate(noise_config) + .multiplex(yamux_config) + })? + .with_dns()? + .with_behaviour(|key| { + let gossipsub_config = gossipsub::ConfigBuilder::default() + .max_transmit_size(262144) + .build() + .map_err(|msg| io::Error::new(io::ErrorKind::Other, msg))?; // Temporary hack because `build` does not return a proper `std::error::Error`. + Ok(MyBehaviour { + gossipsub: gossipsub::Behaviour::new( + gossipsub::MessageAuthenticity::Signed(key.clone()), + gossipsub_config, + ) + .expect("Valid configuration"), + identify: identify::Behaviour::new(identify::Config::new( + "/ipfs/0.1.0".into(), + key.public(), + )), + ping: ping::Behaviour::new(ping::Config::new()), + }) + })? + .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(60))) + .build(); + + println!("Subscribing to {gossipsub_topic:?}"); + swarm + .behaviour_mut() + .gossipsub + .subscribe(&gossipsub_topic) + .unwrap(); // Reach out to other nodes if specified for to_dial in std::env::args().skip(1) { @@ -208,7 +169,7 @@ async fn main() -> Result<(), Box> { } // Read full lines from stdin - let mut stdin = io::BufReader::new(io::stdin()).lines().fuse(); + let mut stdin = io::BufReader::new(io::stdin()).lines(); // Listen on all interfaces and whatever port the OS assigns swarm.listen_on("/ip4/0.0.0.0/tcp/0".parse()?)?; @@ -216,11 +177,11 @@ async fn main() -> Result<(), Box> { // Kick it off loop { select! { - line = stdin.select_next_some() => { + Ok(Some(line)) = stdin.next_line() => { if let Err(e) = swarm .behaviour_mut() .gossipsub - .publish(gossipsub_topic.clone(), line.expect("Stdin not to close").as_bytes()) + .publish(gossipsub_topic.clone(), line.as_bytes()) { println!("Publish error: {e:?}"); } @@ -249,7 +210,8 @@ async fn main() -> Result<(), Box> { match event { ping::Event { peer, - result: Result::Ok(ping::Success::Ping { rtt }), + result: Result::Ok(rtt), + .. 
} => { println!( "ping: rtt to {} is {} ms", @@ -257,27 +219,24 @@ async fn main() -> Result<(), Box> { rtt.as_millis() ); } - ping::Event { - peer, - result: Result::Ok(ping::Success::Pong), - } => { - println!("ping: pong from {}", peer.to_base58()); - } ping::Event { peer, result: Result::Err(ping::Failure::Timeout), + .. } => { println!("ping: timeout to {}", peer.to_base58()); } ping::Event { peer, result: Result::Err(ping::Failure::Unsupported), + .. } => { println!("ping: {} does not support ping protocol", peer.to_base58()); } ping::Event { peer, result: Result::Err(ping::Failure::Other { error }), + .. } => { println!("ping: ping::Failure with {}: {error}", peer.to_base58()); } diff --git a/examples/metrics/Cargo.toml b/examples/metrics/Cargo.toml index ba08cdaebe7..39412d29aea 100644 --- a/examples/metrics/Cargo.toml +++ b/examples/metrics/Cargo.toml @@ -5,11 +5,21 @@ edition = "2021" publish = false license = "MIT" +[package.metadata.release] +release = false + [dependencies] -env_logger = "0.10.0" -futures = "0.3.27" +futures = "0.3.30" hyper = { version = "0.14", features = ["server", "tcp", "http1"] } -libp2p = { path = "../../libp2p", features = ["async-std", "metrics", "ping", "noise", "identify", "tcp", "yamux", "macros"] } -log = "0.4.0" -tokio = { version = "1", features = ["rt-multi-thread"] } -prometheus-client = "0.20.0" +libp2p = { path = "../../libp2p", features = ["tokio", "metrics", "ping", "noise", "identify", "tcp", "yamux", "macros"] } +opentelemetry = { version = "0.20.0", features = ["rt-tokio", "metrics"] } +opentelemetry-otlp = { version = "0.13.0", features = ["metrics"]} +opentelemetry_api = "0.20.0" +prometheus-client = { workspace = true } +tokio = { version = "1", features = ["full"] } +tracing = "0.1.37" +tracing-opentelemetry = "0.21.0" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } + +[lints] +workspace = true diff --git a/examples/metrics/README.md b/examples/metrics/README.md new file mode 100644 
index 00000000000..160536985f1 --- /dev/null +++ b/examples/metrics/README.md @@ -0,0 +1,79 @@ +## Description + +The example showcases how to run a p2p network with **libp2p** and collect metrics using `libp2p-metrics` as well as span data via `opentelemetry`. +It sets up multiple nodes in the network and measures various metrics, such as `libp2p_ping`, to evaluate the network's performance. + +## Usage + +To run the example, follow these steps: + +1. Run the following command to start the first node: + + ```sh + cargo run + ``` + +2. Open a second terminal and run the following command to start a second node: + + ```sh + cargo run -- + ``` + + Replace `` with the listen address of the first node reported in the first terminal. + Look for the line that says `NewListenAddr` to find the address. + +3. Open a third terminal and run the following command to retrieve the metrics from either the first or second node: + + ```sh + curl localhost:/metrics + ``` + + Replace `` with the listen port of the metrics server of either the first or second node. + Look for the line that says `tide::server Server listening on` to find the port. + + After executing the command, you should see a long list of metrics printed to the terminal. + Make sure to check the `libp2p_ping` metrics, which should have a value greater than zero (`>0`). + +## Opentelemetry + +To see the span data collected as part of the `Swarm`s activity, start up an opentelemetry collector: + +```sh +docker compose up +``` + +Then, configure tracing to output spans: + +```shell +export RUST_LOG=info,[ConnectionHandler::poll]=trace,[NetworkBehaviour::poll]=trace +``` + +Next, (re)-start the two example for it to connect to the OTEL collector. +Finally, open the Jaeger UI in a browser and explore the spans: http://localhost:16686. + +### Filtering spans + +For a precise documentation, please see the following documentation in tracing: . 
+ +`rust-libp2p` consistently applies spans to the following functions: + +- `ConnectionHandler::poll` implementations +- `NetworkBehaviour::poll` implementations + +The above spans are all called exactly that: `ConnectionHandler::poll` and `NetworkBehaviour::poll`. +You can activate _all_ of them by setting: + +``` +RUST_LOG=[ConnectionHandler::poll]=trace +``` + +If you just wanted to see the spans of the `libp2p_ping` crate, you can filter like this: + +``` +RUST_LOG=libp2p_ping[ConnectionHandler::poll]=trace +``` + +## Conclusion + +This example demonstrates how to utilize the `libp2p-metrics` crate to collect and analyze metrics in a libp2p network. +By running multiple nodes and examining the metrics, users can gain insights into the network's performance and behavior. diff --git a/examples/metrics/docker-compose.yml b/examples/metrics/docker-compose.yml new file mode 100644 index 00000000000..06d8d5becfe --- /dev/null +++ b/examples/metrics/docker-compose.yml @@ -0,0 +1,23 @@ +version: "2" +services: + # Jaeger + jaeger-all-in-one: + image: jaegertracing/all-in-one:latest + restart: always + ports: + - "16686:16686" + - "14268" + - "14250" + + # Collector + otel-collector: + image: otel/opentelemetry-collector:0.88.0 + restart: always + command: ["--config=/etc/otel-collector-config.yaml"] + volumes: + - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml + ports: + - "13133:13133" # health_check extension + - "4317:4317" # OTLP gRPC receiver + depends_on: + - jaeger-all-in-one diff --git a/examples/metrics/otel-collector-config.yaml b/examples/metrics/otel-collector-config.yaml new file mode 100644 index 00000000000..8755848cd6e --- /dev/null +++ b/examples/metrics/otel-collector-config.yaml @@ -0,0 +1,25 @@ +receivers: + otlp: + protocols: + grpc: + endpoint: 0.0.0.0:4317 + +exporters: + debug: + otlp: + endpoint: jaeger-all-in-one:4317 + tls: + insecure: true + +processors: + batch: + +service: + telemetry: + logs: + level: "debug" + pipelines: + 
traces: + receivers: [otlp] + processors: [batch] + exporters: [debug, otlp] diff --git a/examples/metrics/src/http_service.rs b/examples/metrics/src/http_service.rs index 84102c2b558..8c77d724ea3 100644 --- a/examples/metrics/src/http_service.rs +++ b/examples/metrics/src/http_service.rs @@ -21,7 +21,6 @@ use hyper::http::StatusCode; use hyper::service::Service; use hyper::{Body, Method, Request, Response, Server}; -use log::{error, info}; use prometheus_client::encoding::text::encode; use prometheus_client::registry::Registry; use std::future::Future; @@ -35,16 +34,12 @@ pub(crate) async fn metrics_server(registry: Registry) -> Result<(), std::io::Er // Serve on localhost. let addr = ([127, 0, 0, 1], 0).into(); - // Use the tokio runtime to run the hyper server. - let rt = tokio::runtime::Runtime::new()?; - rt.block_on(async { - let server = Server::bind(&addr).serve(MakeMetricService::new(registry)); - info!("Metrics server on http://{}/metrics", server.local_addr()); - if let Err(e) = server.await { - error!("server error: {}", e); - } - Ok(()) - }) + let server = Server::bind(&addr).serve(MakeMetricService::new(registry)); + tracing::info!(metrics_server=%format!("http://{}/metrics", server.local_addr())); + if let Err(e) = server.await { + tracing::error!("server error: {}", e); + } + Ok(()) } pub(crate) struct MetricService { diff --git a/examples/metrics/src/main.rs b/examples/metrics/src/main.rs index 2178de9802b..3ab6815cb32 100644 --- a/examples/metrics/src/main.rs +++ b/examples/metrics/src/main.rs @@ -18,111 +18,99 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -//! Example demonstrating `libp2p-metrics`. -//! -//! In one terminal run: -//! -//! ``` -//! cargo run -//! ``` -//! -//! In a second terminal run: -//! -//! ``` -//! cargo run -- -//! ``` -//! -//! Where `` is replaced by the listen address of the -//! first node reported in the first terminal. Look for `NewListenAddr`. -//! -//! 
In a third terminal run: -//! -//! ``` -//! curl localhost:/metrics -//! ``` -//! -//! Where `` is replaced by the listen -//! port of the metrics server of the first or the second node. Look for -//! `tide::server Server listening on`. -//! -//! You should see a long list of metrics printed to the terminal. Check the -//! `libp2p_ping` metrics, they should be `>0`. +#![doc = include_str!("../README.md")] -use env_logger::Env; -use futures::executor::block_on; -use futures::stream::StreamExt; -use libp2p::core::{upgrade::Version, Multiaddr, Transport}; -use libp2p::identity::PeerId; +use futures::StreamExt; +use libp2p::core::Multiaddr; use libp2p::metrics::{Metrics, Recorder}; -use libp2p::swarm::{keep_alive, NetworkBehaviour, SwarmBuilder, SwarmEvent}; +use libp2p::swarm::{NetworkBehaviour, SwarmEvent}; use libp2p::{identify, identity, noise, ping, tcp, yamux}; -use log::info; +use opentelemetry::sdk; +use opentelemetry_api::KeyValue; use prometheus_client::registry::Registry; use std::error::Error; -use std::thread; +use std::time::Duration; +use tracing_subscriber::layer::SubscriberExt; +use tracing_subscriber::util::SubscriberInitExt; +use tracing_subscriber::{EnvFilter, Layer}; mod http_service; -fn main() -> Result<(), Box> { - env_logger::Builder::from_env(Env::default().default_filter_or("info")).init(); +#[tokio::main] +async fn main() -> Result<(), Box> { + setup_tracing()?; - let local_key = identity::Keypair::generate_ed25519(); - let local_peer_id = PeerId::from(local_key.public()); - let local_pub_key = local_key.public(); - info!("Local peer id: {local_peer_id:?}"); + let mut metric_registry = Registry::default(); - let mut swarm = SwarmBuilder::without_executor( - tcp::async_io::Transport::default() - .upgrade(Version::V1Lazy) - .authenticate(noise::Config::new(&local_key)?) 
- .multiplex(yamux::Config::default()) - .boxed(), - Behaviour::new(local_pub_key), - local_peer_id, - ) - .build(); + let mut swarm = libp2p::SwarmBuilder::with_new_identity() + .with_tokio() + .with_tcp( + tcp::Config::default(), + noise::Config::new, + yamux::Config::default, + )? + .with_bandwidth_metrics(&mut metric_registry) + .with_behaviour(|key| Behaviour::new(key.public()))? + .with_swarm_config(|cfg| cfg.with_idle_connection_timeout(Duration::from_secs(u64::MAX))) + .build(); swarm.listen_on("/ip4/0.0.0.0/tcp/0".parse()?)?; if let Some(addr) = std::env::args().nth(1) { let remote: Multiaddr = addr.parse()?; swarm.dial(remote)?; - info!("Dialed {}", addr) + tracing::info!(address=%addr, "Dialed address") } - let mut metric_registry = Registry::default(); let metrics = Metrics::new(&mut metric_registry); - thread::spawn(move || block_on(http_service::metrics_server(metric_registry))); + tokio::spawn(http_service::metrics_server(metric_registry)); - block_on(async { - loop { - match swarm.select_next_some().await { - SwarmEvent::Behaviour(BehaviourEvent::Ping(ping_event)) => { - info!("{:?}", ping_event); - metrics.record(&ping_event); - } - SwarmEvent::Behaviour(BehaviourEvent::Identify(identify_event)) => { - info!("{:?}", identify_event); - metrics.record(&identify_event); - } - swarm_event => { - info!("{:?}", swarm_event); - metrics.record(&swarm_event); - } + loop { + match swarm.select_next_some().await { + SwarmEvent::Behaviour(BehaviourEvent::Ping(ping_event)) => { + tracing::info!(?ping_event); + metrics.record(&ping_event); + } + SwarmEvent::Behaviour(BehaviourEvent::Identify(identify_event)) => { + tracing::info!(?identify_event); + metrics.record(&identify_event); + } + swarm_event => { + tracing::info!(?swarm_event); + metrics.record(&swarm_event); } } - }); + } +} + +fn setup_tracing() -> Result<(), Box> { + let tracer = opentelemetry_otlp::new_pipeline() + .tracing() + .with_exporter(opentelemetry_otlp::new_exporter().tonic()) + 
.with_trace_config( + sdk::trace::Config::default().with_resource(sdk::Resource::new(vec![KeyValue::new( + "service.name", + "libp2p", + )])), + ) + .install_batch(opentelemetry::runtime::Tokio)?; + + tracing_subscriber::registry() + .with(tracing_subscriber::fmt::layer().with_filter(EnvFilter::from_default_env())) + .with( + tracing_opentelemetry::layer() + .with_tracer(tracer) + .with_filter(EnvFilter::from_default_env()), + ) + .try_init()?; + Ok(()) } /// Our network behaviour. -/// -/// For illustrative purposes, this includes the [`keep_alive::Behaviour`]) behaviour so the ping actually happen -/// and can be observed via the metrics. #[derive(NetworkBehaviour)] struct Behaviour { identify: identify::Behaviour, - keep_alive: keep_alive::Behaviour, ping: ping::Behaviour, } @@ -134,7 +122,6 @@ impl Behaviour { "/ipfs/0.1.0".into(), local_pub_key, )), - keep_alive: keep_alive::Behaviour::default(), } } } diff --git a/examples/ping-example/Cargo.toml b/examples/ping-example/Cargo.toml deleted file mode 100644 index db612b556f7..00000000000 --- a/examples/ping-example/Cargo.toml +++ /dev/null @@ -1,13 +0,0 @@ -[package] -name = "ping-example" -version = "0.1.0" -edition = "2021" -publish = false -license = "MIT" - -[dependencies] -async-std = { version = "1.12", features = ["attributes"] } -async-trait = "0.1" -futures = "0.3.28" -libp2p = { path = "../../libp2p", features = ["async-std", "dns", "macros", "noise", "ping", "tcp", "websocket", "yamux"] } -multiaddr = { version = "0.17.1" } \ No newline at end of file diff --git a/examples/ping/Cargo.toml b/examples/ping/Cargo.toml new file mode 100644 index 00000000000..93128c3ed13 --- /dev/null +++ b/examples/ping/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "ping-example" +version = "0.1.0" +edition = "2021" +publish = false +license = "MIT" + +[package.metadata.release] +release = false + +[dependencies] +futures = "0.3.30" +libp2p = { path = "../../libp2p", features = ["noise", "ping", "tcp", "tokio", "yamux"] 
} +tokio = { version = "1.35.1", features = ["full"] } +tracing = "0.1.37" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } + +[lints] +workspace = true diff --git a/examples/ping/README.md b/examples/ping/README.md new file mode 100644 index 00000000000..9abc8b713b9 --- /dev/null +++ b/examples/ping/README.md @@ -0,0 +1,30 @@ +## Description + +The ping example showcases how to create a network of nodes that establish connections, negotiate the ping protocol, and ping each other. + +## Usage + +To run the example, follow these steps: + +1. In a first terminal window, run the following command: + + ```sh + cargo run + ``` + + This command starts a node and prints the `PeerId` and the listening addresses, such as `Listening on "/ip4/0.0.0.0/tcp/24915"`. + +2. In a second terminal window, start a new instance of the example with the following command: + + ```sh + cargo run -- /ip4/127.0.0.1/tcp/24915 + ``` + + Replace `/ip4/127.0.0.1/tcp/24915` with the listen address of the first node obtained from the first terminal window. + +3. The two nodes will establish a connection, negotiate the ping protocol, and begin pinging each other. + +## Conclusion + +The ping example demonstrates the basic usage of **libp2p** to create a simple p2p network and implement a ping protocol. +By running multiple nodes and observing the ping behavior, users can gain insights into how **libp2p** facilitates communication and interaction between peers. diff --git a/examples/ping-example/src/main.rs b/examples/ping/src/main.rs similarity index 54% rename from examples/ping-example/src/main.rs rename to examples/ping/src/main.rs index ab67a8d2ba6..911b0384f89 100644 --- a/examples/ping-example/src/main.rs +++ b/examples/ping/src/main.rs @@ -18,52 +18,29 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -//! Ping example -//! -//! See ../src/tutorial.rs for a step-by-step guide building the example below. -//! -//! 
In the first terminal window, run: -//! -//! ```sh -//! cargo run -//! ``` -//! -//! It will print the PeerId and the listening addresses, e.g. `Listening on -//! "/ip4/0.0.0.0/tcp/24915"` -//! -//! In the second terminal window, start a new instance of the example with: -//! -//! ```sh -//! cargo run -- /ip4/127.0.0.1/tcp/24915 -//! ``` -//! -//! The two nodes establish a connection, negotiate the ping protocol -//! and begin pinging each other. +#![doc = include_str!("../README.md")] use futures::prelude::*; -use libp2p::core::upgrade::Version; -use libp2p::{ - identity, noise, ping, - swarm::{keep_alive, NetworkBehaviour, SwarmBuilder, SwarmEvent}, - tcp, yamux, Multiaddr, PeerId, Transport, -}; -use std::error::Error; +use libp2p::{noise, ping, swarm::SwarmEvent, tcp, yamux, Multiaddr}; +use std::{error::Error, time::Duration}; +use tracing_subscriber::EnvFilter; -#[async_std::main] +#[tokio::main] async fn main() -> Result<(), Box> { - let local_key = identity::Keypair::generate_ed25519(); - let local_peer_id = PeerId::from(local_key.public()); - println!("Local peer id: {local_peer_id:?}"); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); - let transport = tcp::async_io::Transport::default() - .upgrade(Version::V1Lazy) - .authenticate(noise::Config::new(&local_key)?) - .multiplex(yamux::Config::default()) - .boxed(); - - let mut swarm = - SwarmBuilder::with_async_std_executor(transport, Behaviour::default(), local_peer_id) - .build(); + let mut swarm = libp2p::SwarmBuilder::with_new_identity() + .with_tokio() + .with_tcp( + tcp::Config::default(), + noise::Config::new, + yamux::Config::default, + )? + .with_behaviour(|_| ping::Behaviour::default())? + .with_swarm_config(|cfg| cfg.with_idle_connection_timeout(Duration::from_secs(u64::MAX))) + .build(); // Tell the swarm to listen on all interfaces and a random, OS-assigned // port. 
@@ -85,13 +62,3 @@ async fn main() -> Result<(), Box> { } } } - -/// Our network behaviour. -/// -/// For illustrative purposes, this includes the [`KeepAlive`](keep_alive::Behaviour) behaviour so a continuous sequence of -/// pings can be observed. -#[derive(NetworkBehaviour, Default)] -struct Behaviour { - keep_alive: keep_alive::Behaviour, - ping: ping::Behaviour, -} diff --git a/examples/relay-server/Cargo.toml b/examples/relay-server/Cargo.toml index 97bf64f21a8..127c002a73c 100644 --- a/examples/relay-server/Cargo.toml +++ b/examples/relay-server/Cargo.toml @@ -5,10 +5,17 @@ edition = "2021" publish = false license = "MIT" +[package.metadata.release] +release = false + [dependencies] -clap = { version = "4.2.7", features = ["derive"] } +clap = { version = "4.4.11", features = ["derive"] } async-std = { version = "1.12", features = ["attributes"] } async-trait = "0.1" -env_logger = "0.10.0" -futures = "0.3.28" -libp2p = { path = "../../libp2p", features = ["async-std", "noise", "macros", "ping", "tcp", "identify", "yamux", "relay"] } +futures = "0.3.30" +libp2p = { path = "../../libp2p", features = [ "async-std", "noise", "macros", "ping", "tcp", "identify", "yamux", "relay", "quic"] } +tracing = "0.1.37" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } + +[lints] +workspace = true diff --git a/examples/relay-server/README.md b/examples/relay-server/README.md new file mode 100644 index 00000000000..ae2ab3fdfd8 --- /dev/null +++ b/examples/relay-server/README.md @@ -0,0 +1,28 @@ +## Description + +The **libp2p** relay example showcases how to create a relay node that can route messages between different peers in a p2p network. + +## Usage + +To run the example, follow these steps: + +1. Run the relay node by executing the following command: + + ```sh + cargo run -- --port --secret-key-seed + ``` + + Replace `` with the port number on which the relay node will listen for incoming connections. 
+ Replace `` with a seed value used to generate a deterministic peer ID for the relay node. + +2. The relay node will start listening for incoming connections. +It will print the listening address once it is ready. + +3. Connect other **libp2p** nodes to the relay node by specifying the relay's listening address as one of the bootstrap nodes in their configuration. + +4. Once the connections are established, the relay node will facilitate communication between the connected peers, allowing them to exchange messages and data. + +## Conclusion + +The **libp2p** relay example demonstrates how to implement a relay node. +By running a relay node and connecting other **libp2p** nodes to it, users can create a decentralized network where peers can communicate and interact with each other. diff --git a/examples/relay-server/src/main.rs b/examples/relay-server/src/main.rs index e78d82ccef0..881e0830dbb 100644 --- a/examples/relay-server/src/main.rs +++ b/examples/relay-server/src/main.rs @@ -19,67 +19,80 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
+#![doc = include_str!("../README.md")] + use clap::Parser; use futures::executor::block_on; use futures::stream::StreamExt; use libp2p::{ core::multiaddr::Protocol, - core::upgrade, - core::{Multiaddr, Transport}, - identify, identity, - identity::PeerId, - noise, ping, relay, - swarm::{NetworkBehaviour, SwarmBuilder, SwarmEvent}, - tcp, + core::Multiaddr, + identify, identity, noise, ping, relay, + swarm::{NetworkBehaviour, SwarmEvent}, + tcp, yamux, }; use std::error::Error; use std::net::{Ipv4Addr, Ipv6Addr}; +use tracing_subscriber::EnvFilter; fn main() -> Result<(), Box> { - env_logger::init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let opt = Opt::parse(); - println!("opt: {opt:?}"); // Create a static known PeerId based on given secret let local_key: identity::Keypair = generate_ed25519(opt.secret_key_seed); - let local_peer_id = PeerId::from(local_key.public()); - println!("Local peer id: {local_peer_id:?}"); - - let tcp_transport = tcp::async_io::Transport::default(); - - let transport = tcp_transport - .upgrade(upgrade::Version::V1Lazy) - .authenticate( - noise::Config::new(&local_key).expect("Signing libp2p-noise static DH keypair failed."), - ) - .multiplex(libp2p::yamux::Config::default()) - .boxed(); - let behaviour = Behaviour { - relay: relay::Behaviour::new(local_peer_id, Default::default()), - ping: ping::Behaviour::new(ping::Config::new()), - identify: identify::Behaviour::new(identify::Config::new( - "/TODO/0.0.1".to_string(), - local_key.public(), - )), - }; - - let mut swarm = SwarmBuilder::without_executor(transport, behaviour, local_peer_id).build(); + let mut swarm = libp2p::SwarmBuilder::with_existing_identity(local_key) + .with_async_std() + .with_tcp( + tcp::Config::default(), + noise::Config::new, + yamux::Config::default, + )? 
+ .with_quic() + .with_behaviour(|key| Behaviour { + relay: relay::Behaviour::new(key.public().to_peer_id(), Default::default()), + ping: ping::Behaviour::new(ping::Config::new()), + identify: identify::Behaviour::new(identify::Config::new( + "/TODO/0.0.1".to_string(), + key.public(), + )), + })? + .build(); // Listen on all interfaces - let listen_addr = Multiaddr::empty() + let listen_addr_tcp = Multiaddr::empty() .with(match opt.use_ipv6 { Some(true) => Protocol::from(Ipv6Addr::UNSPECIFIED), _ => Protocol::from(Ipv4Addr::UNSPECIFIED), }) .with(Protocol::Tcp(opt.port)); - swarm.listen_on(listen_addr)?; + swarm.listen_on(listen_addr_tcp)?; + + let listen_addr_quic = Multiaddr::empty() + .with(match opt.use_ipv6 { + Some(true) => Protocol::from(Ipv6Addr::UNSPECIFIED), + _ => Protocol::from(Ipv4Addr::UNSPECIFIED), + }) + .with(Protocol::Udp(opt.port)) + .with(Protocol::QuicV1); + swarm.listen_on(listen_addr_quic)?; block_on(async { loop { match swarm.next().await.expect("Infinite Stream.") { SwarmEvent::Behaviour(event) => { + if let BehaviourEvent::Identify(identify::Event::Received { + info: identify::Info { observed_addr, .. }, + .. + }) = &event + { + swarm.add_external_address(observed_addr.clone()); + } + println!("{event:?}") } SwarmEvent::NewListenAddr { address, .. 
} => { diff --git a/examples/rendezvous/Cargo.toml b/examples/rendezvous/Cargo.toml index 591b738c98d..4ee410f503f 100644 --- a/examples/rendezvous/Cargo.toml +++ b/examples/rendezvous/Cargo.toml @@ -5,11 +5,17 @@ edition = "2021" publish = false license = "MIT" +[package.metadata.release] +release = false + [dependencies] async-std = { version = "1.12", features = ["attributes"] } async-trait = "0.1" -env_logger = "0.10.0" -futures = "0.3.28" -libp2p = { path = "../../libp2p", features = ["async-std", "identify", "macros", "noise", "ping", "rendezvous", "tcp", "tokio", "yamux"] } -log = "0.4" -tokio = { version = "1.28", features = [ "rt-multi-thread", "macros", "time" ] } +futures = "0.3.30" +libp2p = { path = "../../libp2p", features = [ "async-std", "identify", "macros", "noise", "ping", "rendezvous", "tcp", "tokio", "yamux"] } +tokio = { version = "1.35", features = ["rt-multi-thread", "macros", "time"] } +tracing = "0.1.37" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } + +[lints] +workspace = true diff --git a/examples/rendezvous/README.md b/examples/rendezvous/README.md new file mode 100644 index 00000000000..4f2a893592e --- /dev/null +++ b/examples/rendezvous/README.md @@ -0,0 +1,51 @@ +## Description + +The rendezvous protocol example showcases how to implement a rendezvous server and interact with it using different binaries. +The rendezvous server facilitates peer registration and discovery, enabling peers to find and communicate with each other in a decentralized manner. + +## Usage + +To run the example, follow these steps: + +1. Start the rendezvous server by running the following command: + + ```sh + RUST_LOG=info cargo run --bin rendezvous-example + ``` + + This command starts the rendezvous server, which will listen for incoming connections and handle peer registrations and discovery. + +2. 
Register a peer by executing the following command: + + ```sh + RUST_LOG=info cargo run --bin rzv-register + ``` + + This command registers a peer with the rendezvous server, allowing the peer to be discovered by other peers. + +3. Try to discover the registered peer from the previous step by running the following command: + + ```sh + RUST_LOG=info cargo run --bin rzv-discover + ``` + + This command attempts to discover the registered peer using the rendezvous server. + If successful, it will print the details of the discovered peer. + +4. Additionally, you can try discovering a peer using the identify protocol by executing the following command: + + ```sh + RUST_LOG=info cargo run --bin rzv-identify + ``` + + This command demonstrates peer discovery using the identify protocol. + It will print the peer's identity information if successful. + +5. Experiment with different registrations, discoveries, and combinations of protocols to explore the capabilities of the rendezvous protocol and libp2p library. + +## Conclusion + +The rendezvous protocol example provides a practical demonstration of how to implement peer registration and discovery using **libp2p**. +By running the rendezvous server and utilizing the provided binaries, users can register peers and discover them in a decentralized network. + +Feel free to explore the code and customize the behavior of the rendezvous server and the binaries to suit your specific use cases. 
diff --git a/examples/rendezvous/src/bin/rzv-discover.rs b/examples/rendezvous/src/bin/rzv-discover.rs index bd551c68f5a..edd3d10a0ce 100644 --- a/examples/rendezvous/src/bin/rzv-discover.rs +++ b/examples/rendezvous/src/bin/rzv-discover.rs @@ -20,43 +20,41 @@ use futures::StreamExt; use libp2p::{ - core::transport::upgrade::Version, - identity, multiaddr::Protocol, noise, ping, rendezvous, - swarm::{keep_alive, NetworkBehaviour, SwarmBuilder, SwarmEvent}, - tcp, yamux, Multiaddr, PeerId, Transport, + swarm::{NetworkBehaviour, SwarmEvent}, + tcp, yamux, Multiaddr, }; +use std::error::Error; use std::time::Duration; +use tracing_subscriber::EnvFilter; const NAMESPACE: &str = "rendezvous"; #[tokio::main] -async fn main() { - env_logger::init(); +async fn main() -> Result<(), Box> { + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); - let key_pair = identity::Keypair::generate_ed25519(); let rendezvous_point_address = "/ip4/127.0.0.1/tcp/62649".parse::().unwrap(); let rendezvous_point = "12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN" .parse() .unwrap(); - let mut swarm = SwarmBuilder::with_tokio_executor( - tcp::tokio::Transport::default() - .upgrade(Version::V1Lazy) - .authenticate(noise::Config::new(&key_pair).unwrap()) - .multiplex(yamux::Config::default()) - .boxed(), - MyBehaviour { - rendezvous: rendezvous::client::Behaviour::new(key_pair.clone()), + let mut swarm = libp2p::SwarmBuilder::with_new_identity() + .with_tokio() + .with_tcp( + tcp::Config::default(), + noise::Config::new, + yamux::Config::default, + )? + .with_behaviour(|key| MyBehaviour { + rendezvous: rendezvous::client::Behaviour::new(key.clone()), ping: ping::Behaviour::new(ping::Config::new().with_interval(Duration::from_secs(1))), - keep_alive: keep_alive::Behaviour, - }, - PeerId::from(key_pair.public()), - ) - .build(); - - log::info!("Local peer id: {}", swarm.local_peer_id()); + })? 
+ .with_swarm_config(|cfg| cfg.with_idle_connection_timeout(Duration::from_secs(5))) + .build(); swarm.dial(rendezvous_point_address.clone()).unwrap(); @@ -67,7 +65,7 @@ async fn main() { tokio::select! { event = swarm.select_next_some() => match event { SwarmEvent::ConnectionEstablished { peer_id, .. } if peer_id == rendezvous_point => { - log::info!( + tracing::info!( "Connected to rendezvous point, discovering nodes in '{}' namespace ...", NAMESPACE ); @@ -89,9 +87,9 @@ async fn main() { for registration in registrations { for address in registration.record.addresses() { let peer = registration.record.peer_id(); - log::info!("Discovered peer {} at {}", peer, address); + tracing::info!(%peer, %address, "Discovered peer"); - let p2p_suffix = Protocol::P2p(*peer.as_ref()); + let p2p_suffix = Protocol::P2p(peer); let address_with_p2p = if !address.ends_with(&Multiaddr::empty().with(p2p_suffix.clone())) { address.clone().with(p2p_suffix) @@ -105,12 +103,13 @@ async fn main() { } SwarmEvent::Behaviour(MyBehaviourEvent::Ping(ping::Event { peer, - result: Ok(ping::Success::Ping { rtt }), + result: Ok(rtt), + .. 
})) if peer != rendezvous_point => { - log::info!("Ping to {} is {}ms", peer, rtt.as_millis()) + tracing::info!(%peer, "Ping is {}ms", rtt.as_millis()) } other => { - log::debug!("Unhandled {:?}", other); + tracing::debug!("Unhandled {:?}", other); } }, _ = discover_tick.tick(), if cookie.is_some() => @@ -128,5 +127,4 @@ async fn main() { struct MyBehaviour { rendezvous: rendezvous::client::Behaviour, ping: ping::Behaviour, - keep_alive: keep_alive::Behaviour, } diff --git a/examples/rendezvous/src/bin/rzv-identify.rs b/examples/rendezvous/src/bin/rzv-identify.rs index b970fc01a6e..1d545592829 100644 --- a/examples/rendezvous/src/bin/rzv-identify.rs +++ b/examples/rendezvous/src/bin/rzv-identify.rs @@ -20,43 +20,43 @@ use futures::StreamExt; use libp2p::{ - core::transport::upgrade::Version, - identify, identity, noise, ping, rendezvous, - swarm::{keep_alive, NetworkBehaviour, SwarmBuilder, SwarmEvent}, - tcp, yamux, Multiaddr, PeerId, Transport, + identify, noise, ping, rendezvous, + swarm::{NetworkBehaviour, SwarmEvent}, + tcp, yamux, Multiaddr, }; use std::time::Duration; +use tracing_subscriber::EnvFilter; #[tokio::main] async fn main() { - env_logger::init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); - let key_pair = identity::Keypair::generate_ed25519(); let rendezvous_point_address = "/ip4/127.0.0.1/tcp/62649".parse::().unwrap(); let rendezvous_point = "12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN" .parse() .unwrap(); - let mut swarm = SwarmBuilder::with_tokio_executor( - tcp::tokio::Transport::default() - .upgrade(Version::V1Lazy) - .authenticate(noise::Config::new(&key_pair).unwrap()) - .multiplex(yamux::Config::default()) - .boxed(), - MyBehaviour { + let mut swarm = libp2p::SwarmBuilder::with_new_identity() + .with_tokio() + .with_tcp( + tcp::Config::default(), + noise::Config::new, + yamux::Config::default, + ) + .unwrap() + .with_behaviour(|key| MyBehaviour { identify: 
identify::Behaviour::new(identify::Config::new( "rendezvous-example/1.0.0".to_string(), - key_pair.public(), + key.public(), )), - rendezvous: rendezvous::client::Behaviour::new(key_pair.clone()), + rendezvous: rendezvous::client::Behaviour::new(key.clone()), ping: ping::Behaviour::new(ping::Config::new().with_interval(Duration::from_secs(1))), - keep_alive: keep_alive::Behaviour, - }, - PeerId::from(key_pair.public()), - ) - .build(); - - log::info!("Local peer id: {}", swarm.local_peer_id()); + }) + .unwrap() + .with_swarm_config(|cfg| cfg.with_idle_connection_timeout(Duration::from_secs(5))) + .build(); let _ = swarm.listen_on("/ip4/0.0.0.0/tcp/0".parse().unwrap()); @@ -65,24 +65,27 @@ async fn main() { while let Some(event) = swarm.next().await { match event { SwarmEvent::NewListenAddr { address, .. } => { - log::info!("Listening on {}", address); + tracing::info!("Listening on {}", address); } SwarmEvent::ConnectionClosed { peer_id, cause: Some(error), .. } if peer_id == rendezvous_point => { - log::error!("Lost connection to rendezvous point {}", error); + tracing::error!("Lost connection to rendezvous point {}", error); } // once `/identify` did its job, we know our external address and can register SwarmEvent::Behaviour(MyBehaviourEvent::Identify(identify::Event::Received { .. 
})) => { - swarm.behaviour_mut().rendezvous.register( + if let Err(error) = swarm.behaviour_mut().rendezvous.register( rendezvous::Namespace::from_static("rendezvous"), rendezvous_point, None, - ); + ) { + tracing::error!("Failed to register: {error}"); + return; + } } SwarmEvent::Behaviour(MyBehaviourEvent::Rendezvous( rendezvous::client::Event::Registered { @@ -91,7 +94,7 @@ async fn main() { rendezvous_node, }, )) => { - log::info!( + tracing::info!( "Registered for namespace '{}' at rendezvous point {} for the next {} seconds", namespace, rendezvous_node, @@ -99,19 +102,29 @@ async fn main() { ); } SwarmEvent::Behaviour(MyBehaviourEvent::Rendezvous( - rendezvous::client::Event::RegisterFailed(error), + rendezvous::client::Event::RegisterFailed { + rendezvous_node, + namespace, + error, + }, )) => { - log::error!("Failed to register {}", error); + tracing::error!( + "Failed to register: rendezvous_node={}, namespace={}, error_code={:?}", + rendezvous_node, + namespace, + error + ); return; } SwarmEvent::Behaviour(MyBehaviourEvent::Ping(ping::Event { peer, - result: Ok(ping::Success::Ping { rtt }), + result: Ok(rtt), + .. 
})) if peer != rendezvous_point => { - log::info!("Ping to {} is {}ms", peer, rtt.as_millis()) + tracing::info!("Ping to {} is {}ms", peer, rtt.as_millis()) } other => { - log::debug!("Unhandled {:?}", other); + tracing::debug!("Unhandled {:?}", other); } } } @@ -122,5 +135,4 @@ struct MyBehaviour { identify: identify::Behaviour, rendezvous: rendezvous::client::Behaviour, ping: ping::Behaviour, - keep_alive: keep_alive::Behaviour, } diff --git a/examples/rendezvous/src/bin/rzv-register.rs b/examples/rendezvous/src/bin/rzv-register.rs index 40053aa96b9..bd848238d4a 100644 --- a/examples/rendezvous/src/bin/rzv-register.rs +++ b/examples/rendezvous/src/bin/rzv-register.rs @@ -20,66 +20,69 @@ use futures::StreamExt; use libp2p::{ - core::transport::upgrade::Version, - identity, noise, ping, rendezvous, - swarm::{keep_alive, AddressScore, NetworkBehaviour, SwarmBuilder, SwarmEvent}, - tcp, yamux, Multiaddr, PeerId, Transport, + noise, ping, rendezvous, + swarm::{NetworkBehaviour, SwarmEvent}, + tcp, yamux, Multiaddr, }; use std::time::Duration; +use tracing_subscriber::EnvFilter; #[tokio::main] async fn main() { - env_logger::init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); - let key_pair = identity::Keypair::generate_ed25519(); let rendezvous_point_address = "/ip4/127.0.0.1/tcp/62649".parse::().unwrap(); let rendezvous_point = "12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN" .parse() .unwrap(); - let mut swarm = SwarmBuilder::with_tokio_executor( - tcp::tokio::Transport::default() - .upgrade(Version::V1Lazy) - .authenticate(noise::Config::new(&key_pair).unwrap()) - .multiplex(yamux::Config::default()) - .boxed(), - MyBehaviour { - rendezvous: rendezvous::client::Behaviour::new(key_pair.clone()), + let mut swarm = libp2p::SwarmBuilder::with_new_identity() + .with_tokio() + .with_tcp( + tcp::Config::default(), + noise::Config::new, + yamux::Config::default, + ) + .unwrap() + .with_behaviour(|key| 
MyBehaviour { + rendezvous: rendezvous::client::Behaviour::new(key.clone()), ping: ping::Behaviour::new(ping::Config::new().with_interval(Duration::from_secs(1))), - keep_alive: keep_alive::Behaviour, - }, - PeerId::from(key_pair.public()), - ) - .build(); + }) + .unwrap() + .with_swarm_config(|cfg| cfg.with_idle_connection_timeout(Duration::from_secs(5))) + .build(); // In production the external address should be the publicly facing IP address of the rendezvous point. // This address is recorded in the registration entry by the rendezvous point. let external_address = "/ip4/127.0.0.1/tcp/0".parse::().unwrap(); - swarm.add_external_address(external_address, AddressScore::Infinite); - - log::info!("Local peer id: {}", swarm.local_peer_id()); + swarm.add_external_address(external_address); swarm.dial(rendezvous_point_address.clone()).unwrap(); while let Some(event) = swarm.next().await { match event { SwarmEvent::NewListenAddr { address, .. } => { - log::info!("Listening on {}", address); + tracing::info!("Listening on {}", address); } SwarmEvent::ConnectionClosed { peer_id, cause: Some(error), .. } if peer_id == rendezvous_point => { - log::error!("Lost connection to rendezvous point {}", error); + tracing::error!("Lost connection to rendezvous point {}", error); } SwarmEvent::ConnectionEstablished { peer_id, .. 
} if peer_id == rendezvous_point => { - swarm.behaviour_mut().rendezvous.register( + if let Err(error) = swarm.behaviour_mut().rendezvous.register( rendezvous::Namespace::from_static("rendezvous"), rendezvous_point, None, - ); - log::info!("Connection established with rendezvous point {}", peer_id); + ) { + tracing::error!("Failed to register: {error}"); + return; + } + tracing::info!("Connection established with rendezvous point {}", peer_id); } // once `/identify` did its job, we know our external address and can register SwarmEvent::Behaviour(MyBehaviourEvent::Rendezvous( @@ -89,7 +92,7 @@ async fn main() { rendezvous_node, }, )) => { - log::info!( + tracing::info!( "Registered for namespace '{}' at rendezvous point {} for the next {} seconds", namespace, rendezvous_node, @@ -97,19 +100,29 @@ async fn main() { ); } SwarmEvent::Behaviour(MyBehaviourEvent::Rendezvous( - rendezvous::client::Event::RegisterFailed(error), + rendezvous::client::Event::RegisterFailed { + rendezvous_node, + namespace, + error, + }, )) => { - log::error!("Failed to register {}", error); + tracing::error!( + "Failed to register: rendezvous_node={}, namespace={}, error_code={:?}", + rendezvous_node, + namespace, + error + ); return; } SwarmEvent::Behaviour(MyBehaviourEvent::Ping(ping::Event { peer, - result: Ok(ping::Success::Ping { rtt }), + result: Ok(rtt), + .. 
})) if peer != rendezvous_point => { - log::info!("Ping to {} is {}ms", peer, rtt.as_millis()) + tracing::info!("Ping to {} is {}ms", peer, rtt.as_millis()) } other => { - log::debug!("Unhandled {:?}", other); + tracing::debug!("Unhandled {:?}", other); } } } @@ -119,5 +132,4 @@ async fn main() { struct MyBehaviour { rendezvous: rendezvous::client::Behaviour, ping: ping::Behaviour, - keep_alive: keep_alive::Behaviour, } diff --git a/examples/rendezvous/src/main.rs b/examples/rendezvous/src/main.rs index 7d6e5ad44a4..989abdb59df 100644 --- a/examples/rendezvous/src/main.rs +++ b/examples/rendezvous/src/main.rs @@ -18,74 +18,60 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -/// Examples for the rendezvous protocol: -/// -/// 1. Run the rendezvous server: -/// ``` -/// RUST_LOG=info cargo run --bin rendezvous-example -/// ``` -/// 2. Register a peer: -/// ``` -/// RUST_LOG=info cargo run --bin rzv-register -/// ``` -/// 3. Try to discover the peer from (2): -/// ``` -/// RUST_LOG=info cargo run --bin rzv-discover -/// ``` -/// 4. 
Try to discover with identify: -/// ``` -/// RUST_LOG=info cargo run --bin rzv-identify -/// ``` +#![doc = include_str!("../README.md")] + use futures::StreamExt; use libp2p::{ - core::transport::upgrade::Version, - identify, identity, noise, ping, rendezvous, - swarm::{keep_alive, NetworkBehaviour, SwarmBuilder, SwarmEvent}, - tcp, yamux, PeerId, Transport, + identify, noise, ping, rendezvous, + swarm::{NetworkBehaviour, SwarmEvent}, + tcp, yamux, }; +use std::error::Error; use std::time::Duration; +use tracing_subscriber::EnvFilter; #[tokio::main] -async fn main() { - env_logger::init(); +async fn main() -> Result<(), Box> { + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); - let key_pair = identity::Keypair::generate_ed25519(); + // Results in PeerID 12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN which is + // used as the rendezvous point by the other peer examples. + let keypair = libp2p::identity::ed25519::Keypair::try_from_bytes(&mut [0; 32]).unwrap().into(); - let mut swarm = SwarmBuilder::with_tokio_executor( - tcp::tokio::Transport::default() - .upgrade(Version::V1Lazy) - .authenticate(noise::Config::new(&key_pair).unwrap()) - .multiplex(yamux::Config::default()) - .boxed(), - MyBehaviour { + let mut swarm = libp2p::SwarmBuilder::with_existing_identity(keypair) + .with_tokio() + .with_tcp( + tcp::Config::default(), + noise::Config::new, + yamux::Config::default, + )? + .with_behaviour(|key| MyBehaviour { identify: identify::Behaviour::new(identify::Config::new( "rendezvous-example/1.0.0".to_string(), - key_pair.public(), + key.public(), )), rendezvous: rendezvous::server::Behaviour::new(rendezvous::server::Config::default()), ping: ping::Behaviour::new(ping::Config::new().with_interval(Duration::from_secs(1))), - keep_alive: keep_alive::Behaviour, - }, - PeerId::from(key_pair.public()), - ) - .build(); - - log::info!("Local peer id: {}", swarm.local_peer_id()); + })? 
+ .with_swarm_config(|cfg| cfg.with_idle_connection_timeout(Duration::from_secs(5))) + .build(); let _ = swarm.listen_on("/ip4/0.0.0.0/tcp/62649".parse().unwrap()); while let Some(event) = swarm.next().await { match event { SwarmEvent::ConnectionEstablished { peer_id, .. } => { - log::info!("Connected to {}", peer_id); + tracing::info!("Connected to {}", peer_id); } SwarmEvent::ConnectionClosed { peer_id, .. } => { - log::info!("Disconnected from {}", peer_id); + tracing::info!("Disconnected from {}", peer_id); } SwarmEvent::Behaviour(MyBehaviourEvent::Rendezvous( rendezvous::server::Event::PeerRegistered { peer, registration }, )) => { - log::info!( + tracing::info!( "Peer {} registered for namespace '{}'", peer, registration.namespace @@ -97,17 +83,19 @@ async fn main() { registrations, }, )) => { - log::info!( + tracing::info!( "Served peer {} with {} registrations", enquirer, registrations.len() ); } other => { - log::debug!("Unhandled {:?}", other); + tracing::debug!("Unhandled {:?}", other); } } } + + Ok(()) } #[derive(NetworkBehaviour)] @@ -115,5 +103,4 @@ struct MyBehaviour { identify: identify::Behaviour, rendezvous: rendezvous::server::Behaviour, ping: ping::Behaviour, - keep_alive: keep_alive::Behaviour, } diff --git a/examples/upnp/Cargo.toml b/examples/upnp/Cargo.toml new file mode 100644 index 00000000000..db9825c8742 --- /dev/null +++ b/examples/upnp/Cargo.toml @@ -0,0 +1,18 @@ +[package] +name = "upnp-example" +version = "0.1.0" +edition = "2021" +publish = false +license = "MIT" + +[package.metadata.release] +release = false + +[dependencies] +tokio = { version = "1", features = ["rt-multi-thread", "macros"] } +futures = "0.3.30" +libp2p = { path = "../../libp2p", features = ["tokio", "dns", "macros", "noise", "ping", "tcp", "yamux", "upnp"] } +tracing-subscriber = { version = "0.3", features = ["env-filter"] } + +[lints] +workspace = true diff --git a/examples/upnp/README.md b/examples/upnp/README.md new file mode 100644 index 
00000000000..48335bfa9fc --- /dev/null +++ b/examples/upnp/README.md @@ -0,0 +1,23 @@ +## Description + +The upnp example showcases how to use the upnp network behaviour to externally open ports on the network gateway. + + +## Usage + +To run the example, follow these steps: + +1. In a terminal window, run the following command: + + ```sh + cargo run + ``` + +2. This command will start the swarm and print the `NewExternalAddr` if the gateway supports `UPnP` or + `GatewayNotFound` if it doesn't. + + +## Conclusion + +The upnp example demonstrates the usage of **libp2p** to externally open a port on the gateway if it +supports [`UPnP`](https://en.wikipedia.org/wiki/Universal_Plug_and_Play). diff --git a/examples/upnp/src/main.rs b/examples/upnp/src/main.rs new file mode 100644 index 00000000000..fd0764990d1 --- /dev/null +++ b/examples/upnp/src/main.rs @@ -0,0 +1,74 @@ +// Copyright 2023 Protocol Labs. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +#![doc = include_str!("../README.md")] + +use futures::prelude::*; +use libp2p::{noise, swarm::SwarmEvent, upnp, yamux, Multiaddr}; +use std::error::Error; +use tracing_subscriber::EnvFilter; + +#[tokio::main] +async fn main() -> Result<(), Box> { + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); + + let mut swarm = libp2p::SwarmBuilder::with_new_identity() + .with_tokio() + .with_tcp( + Default::default(), + noise::Config::new, + yamux::Config::default, + )? + .with_behaviour(|_| upnp::tokio::Behaviour::default())? + .build(); + + // Tell the swarm to listen on all interfaces and a random, OS-assigned + // port. + swarm.listen_on("/ip4/0.0.0.0/tcp/0".parse()?)?; + + // Dial the peer identified by the multi-address given as the second + // command-line argument, if any. + if let Some(addr) = std::env::args().nth(1) { + let remote: Multiaddr = addr.parse()?; + swarm.dial(remote)?; + println!("Dialed {addr}") + } + + loop { + match swarm.select_next_some().await { + SwarmEvent::NewListenAddr { address, .. } => println!("Listening on {address:?}"), + SwarmEvent::Behaviour(upnp::Event::NewExternalAddr(addr)) => { + println!("New external address: {addr}"); + } + SwarmEvent::Behaviour(upnp::Event::GatewayNotFound) => { + println!("Gateway does not support UPnP"); + break; + } + SwarmEvent::Behaviour(upnp::Event::NonRoutableGateway) => { + println!("Gateway is not exposed directly to the public Internet, i.e. 
it itself has a private IP address."); + break; + } + _ => {} + } + } + Ok(()) +} diff --git a/hole-punching-tests/Cargo.toml b/hole-punching-tests/Cargo.toml new file mode 100644 index 00000000000..679e2e8e8a0 --- /dev/null +++ b/hole-punching-tests/Cargo.toml @@ -0,0 +1,18 @@ +[package] +name = "hole-punching-tests" +version = "0.1.0" +edition = "2021" +publish = false +license = "MIT" + +[dependencies] +anyhow = "1" +env_logger = "0.10.1" +futures = "0.3.30" +libp2p = { path = "../libp2p", features = ["tokio", "dcutr", "identify", "macros", "noise", "ping", "relay", "tcp", "yamux", "quic"] } +tracing = "0.1.37" +redis = { version = "0.23.0", default-features = false, features = ["tokio-comp"] } +tokio = { version = "1.35.1", features = ["full"] } +serde = { version = "1.0.193", features = ["derive"] } +serde_json = "1.0.108" +either = "1.9.0" diff --git a/hole-punching-tests/Dockerfile b/hole-punching-tests/Dockerfile new file mode 100644 index 00000000000..864f058799e --- /dev/null +++ b/hole-punching-tests/Dockerfile @@ -0,0 +1,19 @@ +# syntax=docker/dockerfile:1.5-labs +FROM rust:1.73.0 as builder + +# Run with access to the target cache to speed up builds +WORKDIR /workspace +ADD . . + +# Build the relay as a statically-linked binary. Unfortunately, we must specify the `--target` explicitly. See https://msfjarvis.dev/posts/building-static-rust-binaries-for-linux/. 
+RUN --mount=type=cache,target=./target \ + --mount=type=cache,target=/usr/local/cargo/registry \ + RUSTFLAGS='-C target-feature=+crt-static' cargo build --release --package hole-punching-tests --target $(rustc -vV | grep host | awk '{print $2}') + +RUN --mount=type=cache,target=./target \ + mv ./target/$(rustc -vV | grep host | awk '{print $2}')/release/hole-punching-tests /usr/local/bin/hole-punching-tests + +FROM alpine:3 +COPY --from=builder /usr/local/bin/hole-punching-tests /usr/bin/hole-punch-client +RUN --mount=type=cache,target=/var/cache/apk apk add bind-tools jq curl tcpdump iproute2-tc +ENV RUST_BACKTRACE=1 diff --git a/hole-punching-tests/src/main.rs b/hole-punching-tests/src/main.rs new file mode 100644 index 00000000000..4f81cd65480 --- /dev/null +++ b/hole-punching-tests/src/main.rs @@ -0,0 +1,369 @@ +// Copyright 2023 Protocol Labs. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. 
+ +use anyhow::{Context, Result}; +use either::Either; +use futures::stream::StreamExt; +use libp2p::core::transport::ListenerId; +use libp2p::swarm::dial_opts::DialOpts; +use libp2p::swarm::ConnectionId; +use libp2p::{ + core::multiaddr::{Multiaddr, Protocol}, + dcutr, identify, noise, ping, relay, + swarm::{NetworkBehaviour, SwarmEvent}, + tcp, yamux, Swarm, +}; +use redis::AsyncCommands; +use std::collections::HashMap; +use std::net::{IpAddr, Ipv4Addr}; +use std::str::FromStr; +use std::time::Duration; +use std::{fmt, io}; + +/// The redis key we push the relay's TCP listen address to. +const RELAY_TCP_ADDRESS: &str = "RELAY_TCP_ADDRESS"; +/// The redis key we push the relay's QUIC listen address to. +const RELAY_QUIC_ADDRESS: &str = "RELAY_QUIC_ADDRESS"; +/// The redis key we push the listen client's PeerId to. +const LISTEN_CLIENT_PEER_ID: &str = "LISTEN_CLIENT_PEER_ID"; + +#[tokio::main] +async fn main() -> Result<()> { + env_logger::builder() + .parse_filters("debug,netlink_proto=warn,rustls=warn,multistream_select=warn,libp2p_core::transport::choice=off,libp2p_swarm::connection=warn,libp2p_quic=trace") + .parse_default_env() + .init(); + + let mode = get_env("MODE")?; + let transport = get_env("TRANSPORT")?; + + let mut redis = RedisClient::new("redis", 6379).await?; + + let relay_addr = match transport { + TransportProtocol::Tcp => redis.pop::(RELAY_TCP_ADDRESS).await?, + TransportProtocol::Quic => redis.pop::(RELAY_QUIC_ADDRESS).await?, + }; + + let mut swarm = libp2p::SwarmBuilder::with_new_identity() + .with_tokio() + .with_tcp( + tcp::Config::new().port_reuse(true).nodelay(true), + noise::Config::new, + yamux::Config::default, + )? + .with_quic() + .with_relay_client(noise::Config::new, yamux::Config::default)? 
+ .with_behaviour(|key, relay_client| { + Ok(Behaviour { + relay_client, + identify: identify::Behaviour::new(identify::Config::new( + "/hole-punch-tests/1".to_owned(), + key.public(), + )), + dcutr: dcutr::Behaviour::new(key.public().to_peer_id()), + ping: ping::Behaviour::new( + ping::Config::default().with_interval(Duration::from_secs(1)), + ), + }) + })? + .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(60))) + .build(); + + client_listen_on_transport(&mut swarm, transport).await?; + let id = client_setup(&mut swarm, &mut redis, relay_addr.clone(), mode).await?; + + let mut hole_punched_peer_connection = None; + + loop { + match ( + swarm.next().await.unwrap(), + hole_punched_peer_connection, + id, + ) { + ( + SwarmEvent::Behaviour(BehaviourEvent::RelayClient( + relay::client::Event::ReservationReqAccepted { .. }, + )), + _, + _, + ) => { + tracing::info!("Relay accepted our reservation request."); + + redis + .push(LISTEN_CLIENT_PEER_ID, swarm.local_peer_id()) + .await?; + } + ( + SwarmEvent::Behaviour(BehaviourEvent::Dcutr(dcutr::Event { + remote_peer_id, + result: Ok(connection_id), + })), + _, + _, + ) => { + tracing::info!("Successfully hole-punched to {remote_peer_id}"); + + hole_punched_peer_connection = Some(connection_id); + } + ( + SwarmEvent::Behaviour(BehaviourEvent::Ping(ping::Event { + connection, + result: Ok(rtt), + .. + })), + Some(hole_punched_connection), + _, + ) if mode == Mode::Dial && connection == hole_punched_connection => { + println!("{}", serde_json::to_string(&Report::new(rtt))?); + + return Ok(()); + } + ( + SwarmEvent::Behaviour(BehaviourEvent::Dcutr(dcutr::Event { + remote_peer_id, + result: Err(error), + .. + })), + _, + _, + ) => { + tracing::info!("Failed to hole-punched to {remote_peer_id}"); + return Err(anyhow::Error::new(error)); + } + ( + SwarmEvent::ListenerClosed { + listener_id, + reason: Err(e), + .. 
+ }, + _, + Either::Left(reservation), + ) if listener_id == reservation => { + anyhow::bail!("Reservation on relay failed: {e}"); + } + ( + SwarmEvent::OutgoingConnectionError { + connection_id, + error, + .. + }, + _, + Either::Right(circuit), + ) if connection_id == circuit => { + anyhow::bail!("Circuit request relay failed: {error}"); + } + _ => {} + } + } +} + +#[derive(serde::Serialize)] +struct Report { + rtt_to_holepunched_peer_millis: u128, +} + +impl Report { + fn new(rtt: Duration) -> Self { + Self { + rtt_to_holepunched_peer_millis: rtt.as_millis(), + } + } +} + +fn get_env(key: &'static str) -> Result +where + T: FromStr, + T::Err: std::error::Error + Send + Sync + 'static, +{ + let val = std::env::var(key) + .with_context(|| format!("Missing env var `{key}`"))? + .parse() + .with_context(|| format!("Failed to parse `{key}`)"))?; + + Ok(val) +} + +async fn client_listen_on_transport( + swarm: &mut Swarm, + transport: TransportProtocol, +) -> Result<()> { + let listen_addr = match transport { + TransportProtocol::Tcp => tcp_addr(Ipv4Addr::UNSPECIFIED.into()), + TransportProtocol::Quic => quic_addr(Ipv4Addr::UNSPECIFIED.into()), + }; + let expected_listener_id = swarm + .listen_on(listen_addr) + .context("Failed to listen on address")?; + + let mut listen_addresses = 0; + + // We should have at least two listen addresses, one for localhost and the actual interface. 
+ while listen_addresses < 2 { + if let SwarmEvent::NewListenAddr { + listener_id, + address, + } = swarm.next().await.unwrap() + { + if listener_id == expected_listener_id { + listen_addresses += 1; + } + + tracing::info!("Listening on {address}"); + } + } + Ok(()) +} + +async fn client_setup( + swarm: &mut Swarm, + redis: &mut RedisClient, + relay_addr: Multiaddr, + mode: Mode, +) -> Result> { + let either = match mode { + Mode::Listen => { + let id = swarm.listen_on(relay_addr.with(Protocol::P2pCircuit))?; + + Either::Left(id) + } + Mode::Dial => { + let remote_peer_id = redis.pop(LISTEN_CLIENT_PEER_ID).await?; + + let opts = DialOpts::from( + relay_addr + .with(Protocol::P2pCircuit) + .with(Protocol::P2p(remote_peer_id)), + ); + let id = opts.connection_id(); + + swarm.dial(opts)?; + + Either::Right(id) + } + }; + + Ok(either) +} + +fn tcp_addr(addr: IpAddr) -> Multiaddr { + Multiaddr::empty().with(addr.into()).with(Protocol::Tcp(0)) +} + +fn quic_addr(addr: IpAddr) -> Multiaddr { + Multiaddr::empty() + .with(addr.into()) + .with(Protocol::Udp(0)) + .with(Protocol::QuicV1) +} + +struct RedisClient { + inner: redis::aio::Connection, +} + +impl RedisClient { + async fn new(host: &str, port: u16) -> Result { + let client = redis::Client::open(format!("redis://{host}:{port}/")) + .context("Bad redis server URL")?; + let connection = client + .get_async_connection() + .await + .context("Failed to connect to redis server")?; + + Ok(Self { inner: connection }) + } + + async fn push(&mut self, key: &str, value: impl ToString) -> Result<()> { + let value = value.to_string(); + + tracing::debug!("Pushing {key}={value} to redis"); + + self.inner.rpush(key, value).await?; + + Ok(()) + } + + async fn pop(&mut self, key: &str) -> Result + where + V: FromStr + fmt::Display, + V::Err: std::error::Error + Send + Sync + 'static, + { + tracing::debug!("Fetching {key} from redis"); + + let value = self + .inner + .blpop::<_, HashMap>(key, 0) + .await? 
+ .remove(key) + .with_context(|| format!("Failed to get value for {key} from redis"))? + .parse()?; + + tracing::debug!("{key}={value}"); + + Ok(value) + } +} + +#[derive(Clone, Copy, Debug, PartialEq)] +enum TransportProtocol { + Tcp, + Quic, +} + +impl FromStr for TransportProtocol { + type Err = io::Error; + fn from_str(mode: &str) -> Result { + match mode { + "tcp" => Ok(TransportProtocol::Tcp), + "quic" => Ok(TransportProtocol::Quic), + _ => Err(io::Error::new( + io::ErrorKind::Other, + "Expected either 'tcp' or 'quic'", + )), + } + } +} + +#[derive(Clone, Copy, Debug, PartialEq)] +enum Mode { + Dial, + Listen, +} + +impl FromStr for Mode { + type Err = io::Error; + fn from_str(mode: &str) -> Result { + match mode { + "dial" => Ok(Mode::Dial), + "listen" => Ok(Mode::Listen), + _ => Err(io::Error::new( + io::ErrorKind::Other, + "Expected either 'dial' or 'listen'", + )), + } + } +} + +#[derive(NetworkBehaviour)] +struct Behaviour { + relay_client: relay::client::Behaviour, + identify: identify::Behaviour, + dcutr: dcutr::Behaviour, + ping: ping::Behaviour, +} diff --git a/hole-punching-tests/version.json b/hole-punching-tests/version.json new file mode 100644 index 00000000000..f5db52d1c2d --- /dev/null +++ b/hole-punching-tests/version.json @@ -0,0 +1,8 @@ +{ + "id": "rust-libp2p-head", + "containerImageID": "rust-libp2p-head", + "transports": [ + "tcp", + "quic" + ] +} diff --git a/identity/CHANGELOG.md b/identity/CHANGELOG.md index 9aacfac822d..004943ce195 100644 --- a/identity/CHANGELOG.md +++ b/identity/CHANGELOG.md @@ -1,12 +1,81 @@ -## 0.2.0 - unreleased +## 0.2.8 + +- Bump `ring` to `0.17.5. + See [PR 4779](https://github.com/libp2p/rust-libp2p/pull/4779). + +## 0.2.7 + +- Add `rand` feature to gate methods requiring a random number generator, enabling use in restricted environments (e.g. smartcontracts). + This feature is not enabled by default. + See [PR 4349](https://github.com/libp2p/rust-libp2p/pull/4349). 
+ +## 0.2.6 + +- Make `PeerId::to_bytes` and `PeerId::to_base58` take `self` by value to follow Rust convention of `Copy` types. + See [PR 4653](https://github.com/libp2p/rust-libp2p/pull/4653). + +## 0.2.5 + +- Fix usage of HKDF within `Keypair::derive_secret`. + See [PR 4554](https://github.com/libp2p/rust-libp2p/pull/4554). + +## 0.2.4 + +- Implement `Keypair::derive_secret`, to deterministically derive a new secret from the embedded secret key. + See [PR 4554]. + +[PR 4554]: https://github.com/libp2p/rust-libp2p/pull/4554 + +## 0.2.3 + +- Fix [RUSTSEC-2022-0093] by updating `ed25519-dalek` to `2.0`. + See [PR 4337] + +[RUSTSEC-2022-0093]: https://rustsec.org/advisories/RUSTSEC-2022-0093 +[PR 4337]: https://github.com/libp2p/rust-libp2p/pull/4337 + +## 0.2.2 + +- Implement `from_protobuf_encoding` for RSA `Keypair`. + See [PR 4193]. + +[PR 4193]: https://github.com/libp2p/rust-libp2p/pull/4193 + +## 0.2.1 + +- Expose `KeyType` for `PublicKey` and `Keypair`. + See [PR 4107]. + +[PR 4107]: https://github.com/libp2p/rust-libp2p/pull/4107 + +## 0.2.0 - Raise MSRV to 1.65. See [PR 3715]. - Add support for exporting and importing ECDSA keys via the libp2p [protobuf format]. See [PR 3863]. +- Make `Keypair` and `PublicKey` opaque. + See [PR 3866]. + +- Remove `identity::secp256k1::SecretKey::sign_hash` and make `identity::secp256k1::SecretKey::sign` infallible. + See [PR 3850]. + +- Remove deprecated items. See [PR 3928]. + +- Remove `PeerId::try_from_multiaddr`. + `multiaddr::Protocol::P2p` is now type-safe and contains a `PeerId` directly, rendering this function obsolete. + See [PR 3656]. + +- Remove `PeerId::is_public_key` because it is unused and can be implemented externally. + See [PR 3656]. 
+ +[PR 3656]: https://github.com/libp2p/rust-libp2p/pull/3656 +[PR 3850]: https://github.com/libp2p/rust-libp2p/pull/3850 [PR 3715]: https://github.com/libp2p/rust-libp2p/pull/3715 [PR 3863]: https://github.com/libp2p/rust-libp2p/pull/3863 +[PR 3866]: https://github.com/libp2p/rust-libp2p/pull/3866 +[PR 3928]: https://github.com/libp2p/rust-libp2p/pull/3928 [protobuf format]: https://github.com/libp2p/specs/blob/master/peer-ids/peer-ids.md#keys ## 0.1.2 diff --git a/identity/Cargo.toml b/identity/Cargo.toml index 5508e7c77f0..5234507ec2d 100644 --- a/identity/Cargo.toml +++ b/identity/Cargo.toml @@ -1,9 +1,9 @@ [package] name = "libp2p-identity" -version = "0.2.0" +version = "0.2.8" edition = "2021" description = "Data structures and algorithms for identifying peers in libp2p." -rust-version = { workspace = true } +rust-version = "1.73.0" # MUST NOT inherit from workspace because we don't want to publish breaking changes to `libp2p-identity`. license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" keywords = ["peer-to-peer", "libp2p", "networking", "cryptography"] @@ -13,39 +13,40 @@ categories = ["cryptography"] [dependencies] asn1_der = { version = "0.7.6", optional = true } -bs58 = { version = "0.4.0", optional = true } -ed25519-dalek = { version = "1.0.1", optional = true } +bs58 = { version = "0.5.0", optional = true } +ed25519-dalek = { version = "2.1", optional = true } +hkdf = { version = "0.12.4", optional = true } libsecp256k1 = { version = "0.7.0", optional = true } -log = "0.4" -multiaddr = { version = "0.17.1", optional = true } -multihash = { version = "0.17.0", default-features = false, features = ["std"], optional = true } -p256 = { version = "0.13", default-features = false, features = ["ecdsa", "std", "pem"], optional = true } -quick-protobuf = { version = "0.8.1", optional = true } +tracing = "0.1.37" +multihash = { version = "0.19.1", optional = true } +p256 = { version = "0.13", default-features = false, features = [ "ecdsa", 
"std", "pem"], optional = true } +quick-protobuf = "0.8.1" rand = { version = "0.8", optional = true } sec1 = { version = "0.7", default-features = false, optional = true } serde = { version = "1", optional = true, features = ["derive"] } -sha2 = { version = "0.10.0", optional = true } +sha2 = { version = "0.10.8", optional = true } thiserror = { version = "1.0", optional = true } void = { version = "1.0", optional = true } -zeroize = { version = "1.6", optional = true } +zeroize = { version = "1.7", optional = true } [target.'cfg(not(target_arch = "wasm32"))'.dependencies] -ring = { version = "0.16.9", features = ["alloc", "std"], default-features = false, optional = true} +ring = { version = "0.17.5", features = [ "alloc", "std"], default-features = false, optional = true } [features] -secp256k1 = [ "dep:libsecp256k1", "dep:asn1_der", "dep:rand", "dep:sha2", "dep:zeroize", "dep:quick-protobuf" ] -ecdsa = [ "dep:p256", "dep:rand", "dep:void", "dep:zeroize", "dep:sec1", "dep:quick-protobuf" ] -rsa = [ "dep:ring", "dep:asn1_der", "dep:rand", "dep:zeroize", "dep:quick-protobuf" ] -ed25519 = [ "dep:ed25519-dalek", "dep:rand", "dep:zeroize", "dep:quick-protobuf" ] -peerid = [ "dep:multihash", "dep:multiaddr", "dep:bs58", "dep:rand", "dep:thiserror", "dep:sha2" ] +secp256k1 = ["dep:libsecp256k1", "dep:asn1_der", "dep:sha2", "dep:hkdf", "dep:zeroize"] +ecdsa = ["dep:p256", "dep:void", "dep:zeroize", "dep:sec1", "dep:sha2", "dep:hkdf"] +rsa = ["dep:ring", "dep:asn1_der", "dep:rand", "dep:zeroize"] +ed25519 = ["dep:ed25519-dalek", "dep:zeroize", "dep:sha2", "dep:hkdf"] +peerid = ["dep:multihash", "dep:bs58", "dep:thiserror", "dep:sha2", "dep:hkdf"] +rand = ["dep:rand", "ed25519-dalek?/rand_core"] [dev-dependencies] quickcheck = { workspace = true } -base64 = "0.21.0" -criterion = "0.4" -hex-literal = "0.4.1" -rmp-serde = "1.0" +base64 = "0.21.5" serde_json = "1.0" +rmp-serde = "1.1" +criterion = "0.5" +hex-literal = "0.4.1" [[bench]] name = "peer_id" @@ -57,3 +58,6 @@ 
harness = false all-features = true rustdoc-args = ["--cfg", "docsrs"] rustc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true diff --git a/identity/src/ecdsa.rs b/identity/src/ecdsa.rs index 04f767fbc65..2f1a286d46d 100644 --- a/identity/src/ecdsa.rs +++ b/identity/src/ecdsa.rs @@ -44,6 +44,7 @@ pub struct Keypair { impl Keypair { /// Generate a new random ECDSA keypair. + #[cfg(feature = "rand")] pub fn generate() -> Keypair { Keypair::from(SecretKey::generate()) } @@ -109,14 +110,6 @@ impl SecretKey { self.0.to_bytes().to_vec() } - /// Decode a secret key from a byte buffer containing raw scalar of the key. - #[deprecated( - note = "This method name does not follow Rust naming conventions, use `SecretKey::try_from_bytes` instead" - )] - pub fn from_bytes(buf: &[u8]) -> Result { - Self::try_from_bytes(buf) - } - /// Try to parse a secret key from a byte buffer containing raw scalar of the key. pub fn try_from_bytes(buf: impl AsRef<[u8]>) -> Result { SigningKey::from_bytes(buf.as_ref().into()) @@ -158,21 +151,12 @@ pub struct PublicKey(VerifyingKey); impl PublicKey { /// Verify an ECDSA signature on a message using the public key. pub fn verify(&self, msg: &[u8], sig: &[u8]) -> bool { - let sig = match Signature::from_der(sig) { - Ok(sig) => sig, - Err(_) => return false, + let Ok(sig) = Signature::from_der(sig) else { + return false; }; self.0.verify(msg, &sig).is_ok() } - /// Decode a public key from a byte buffer containing raw components of a key with or without compression. - #[deprecated( - note = "This method name does not follow Rust naming conventions, use `PublicKey::try_from_bytes` instead." - )] - pub fn from_bytes(k: &[u8]) -> Result { - Self::try_from_bytes(k) - } - /// Try to parse a public key from a byte buffer containing raw components of a key with or without compression. 
pub fn try_from_bytes(k: &[u8]) -> Result { let enc_pt = EncodedPoint::from_bytes(k) @@ -194,14 +178,6 @@ impl PublicKey { Self::add_asn1_header(&buf) } - /// Decode a public key into a DER encoded byte buffer as defined by SEC1 standard. - #[deprecated( - note = "This method name does not follow Rust naming conventions, use `PublicKey::try_decode_der` instead." - )] - pub fn decode_der(k: &[u8]) -> Result { - Self::try_decode_der(k) - } - /// Try to decode a public key from a DER encoded byte buffer as defined by SEC1 standard. pub fn try_decode_der(k: &[u8]) -> Result { let buf = Self::del_asn1_header(k).ok_or_else(|| { @@ -289,6 +265,7 @@ mod tests { use super::*; #[test] + #[cfg(feature = "rand")] fn sign_verify() { let pair = Keypair::generate(); let pk = pair.public(); diff --git a/identity/src/ed25519.rs b/identity/src/ed25519.rs index 4887a425e41..529a4dddea1 100644 --- a/identity/src/ed25519.rs +++ b/identity/src/ed25519.rs @@ -25,43 +25,25 @@ use core::cmp; use core::fmt; use core::hash; use ed25519_dalek::{self as ed25519, Signer as _, Verifier as _}; -use rand::RngCore; use std::convert::TryFrom; use zeroize::Zeroize; /// An Ed25519 keypair. -pub struct Keypair(ed25519::Keypair); +#[derive(Clone)] +pub struct Keypair(ed25519::SigningKey); impl Keypair { /// Generate a new random Ed25519 keypair. + #[cfg(feature = "rand")] pub fn generate() -> Keypair { Keypair::from(SecretKey::generate()) } - /// Encode the keypair into a byte array by concatenating the bytes - /// of the secret scalar and the compressed public point, - /// an informal standard for encoding Ed25519 keypairs. - #[deprecated(note = "Renamed to `Keypair::to_bytes`")] - pub fn encode(&self) -> [u8; 64] { - self.to_bytes() - } - /// Convert the keypair into a byte array by concatenating the bytes /// of the secret scalar and the compressed public point, /// an informal standard for encoding Ed25519 keypairs. 
pub fn to_bytes(&self) -> [u8; 64] { - self.0.to_bytes() - } - - /// Decode a keypair from the [binary format](https://datatracker.ietf.org/doc/html/rfc8032#section-5.1.5) - /// produced by [`Keypair::to_bytes`], zeroing the input on success. - /// - /// Note that this binary format is the same as `ed25519_dalek`'s and `ed25519_zebra`'s. - #[deprecated( - note = "This method name does not follow Rust naming conventions, use `Keypair::try_from_bytes` instead." - )] - pub fn decode(kp: &mut [u8]) -> Result { - Self::try_from_bytes(kp) + self.0.to_keypair_bytes() } /// Try to parse a keypair from the [binary format](https://datatracker.ietf.org/doc/html/rfc8032#section-5.1.5) @@ -69,7 +51,10 @@ impl Keypair { /// /// Note that this binary format is the same as `ed25519_dalek`'s and `ed25519_zebra`'s. pub fn try_from_bytes(kp: &mut [u8]) -> Result { - ed25519::Keypair::from_bytes(kp) + let bytes = <[u8; 64]>::try_from(&*kp) + .map_err(|e| DecodingError::failed_to_parse("Ed25519 keypair", e))?; + + ed25519::SigningKey::from_keypair_bytes(&bytes) .map(|k| { kp.zeroize(); Keypair(k) @@ -84,60 +69,41 @@ impl Keypair { /// Get the public key of this keypair. pub fn public(&self) -> PublicKey { - PublicKey(self.0.public) + PublicKey(self.0.verifying_key()) } /// Get the secret key of this keypair. 
pub fn secret(&self) -> SecretKey { - SecretKey::try_from_bytes(&mut self.0.secret.to_bytes()) - .expect("ed25519::SecretKey::from_bytes(to_bytes(k)) != k") + SecretKey(self.0.to_bytes()) } } impl fmt::Debug for Keypair { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Keypair") - .field("public", &self.0.public) + .field("public", &self.0.verifying_key()) .finish() } } -impl Clone for Keypair { - fn clone(&self) -> Keypair { - let mut sk_bytes = self.0.secret.to_bytes(); - let secret = SecretKey::try_from_bytes(&mut sk_bytes) - .expect("ed25519::SecretKey::from_bytes(to_bytes(k)) != k") - .0; - - Keypair(ed25519::Keypair { - secret, - public: self.0.public, - }) - } -} - /// Demote an Ed25519 keypair to a secret key. impl From for SecretKey { fn from(kp: Keypair) -> SecretKey { - SecretKey(kp.0.secret) + SecretKey(kp.0.to_bytes()) } } /// Promote an Ed25519 secret key into a keypair. impl From for Keypair { fn from(sk: SecretKey) -> Keypair { - let secret: ed25519::ExpandedSecretKey = (&sk.0).into(); - let public = ed25519::PublicKey::from(&secret); - Keypair(ed25519::Keypair { - secret: sk.0, - public, - }) + let signing = ed25519::SigningKey::from_bytes(&sk.0); + Keypair(signing) } } /// An Ed25519 public key. #[derive(Eq, Clone)] -pub struct PublicKey(ed25519::PublicKey); +pub struct PublicKey(ed25519::VerifyingKey); impl fmt::Debug for PublicKey { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { @@ -163,7 +129,7 @@ impl hash::Hash for PublicKey { impl cmp::PartialOrd for PublicKey { fn partial_cmp(&self, other: &Self) -> Option { - self.0.as_bytes().partial_cmp(other.0.as_bytes()) + Some(self.cmp(other)) } } @@ -181,50 +147,30 @@ impl PublicKey { .is_ok() } - /// Encode the public key into a byte array in compressed form, i.e. - /// where one coordinate is represented by a single bit. 
- #[deprecated(note = "Renamed to `PublicKey::to_bytes` to reflect actual behaviour.")] - pub fn encode(&self) -> [u8; 32] { - self.to_bytes() - } - /// Convert the public key to a byte array in compressed form, i.e. /// where one coordinate is represented by a single bit. pub fn to_bytes(&self) -> [u8; 32] { self.0.to_bytes() } - /// Decode a public key from a byte array as produced by `to_bytes`. - #[deprecated( - note = "This method name does not follow Rust naming conventions, use `PublicKey::try_from_bytes` instead." - )] - pub fn decode(k: &[u8]) -> Result { - Self::try_from_bytes(k) - } - /// Try to parse a public key from a byte array containing the actual key as produced by `to_bytes`. pub fn try_from_bytes(k: &[u8]) -> Result { - ed25519::PublicKey::from_bytes(k) + let k = <[u8; 32]>::try_from(k) + .map_err(|e| DecodingError::failed_to_parse("Ed25519 public key", e))?; + ed25519::VerifyingKey::from_bytes(&k) .map_err(|e| DecodingError::failed_to_parse("Ed25519 public key", e)) .map(PublicKey) } } /// An Ed25519 secret key. +#[derive(Clone)] pub struct SecretKey(ed25519::SecretKey); /// View the bytes of the secret key. impl AsRef<[u8]> for SecretKey { fn as_ref(&self) -> &[u8] { - self.0.as_bytes() - } -} - -impl Clone for SecretKey { - fn clone(&self) -> SecretKey { - let mut sk_bytes = self.0.to_bytes(); - Self::try_from_bytes(&mut sk_bytes) - .expect("ed25519::SecretKey::from_bytes(to_bytes(k)) != k") + &self.0[..] } } @@ -236,25 +182,10 @@ impl fmt::Debug for SecretKey { impl SecretKey { /// Generate a new Ed25519 secret key. + #[cfg(feature = "rand")] pub fn generate() -> SecretKey { - let mut bytes = [0u8; 32]; - rand::thread_rng().fill_bytes(&mut bytes); - SecretKey( - ed25519::SecretKey::from_bytes(&bytes).expect( - "this returns `Err` only if the length is wrong; the length is correct; qed", - ), - ) - } - - /// Create an Ed25519 secret key from a byte slice, zeroing the input on success. 
- /// If the bytes do not constitute a valid Ed25519 secret key, an error is - /// returned. - #[deprecated( - note = "This method name does not follow Rust naming conventions, use `SecretKey::try_from_bytes` instead." - )] - #[allow(unused_mut)] - pub fn from_bytes(mut sk_bytes: impl AsMut<[u8]>) -> Result { - Self::try_from_bytes(sk_bytes) + let signing = ed25519::SigningKey::generate(&mut rand::rngs::OsRng); + SecretKey(signing.to_bytes()) } /// Try to parse an Ed25519 secret key from a byte slice @@ -263,11 +194,15 @@ impl SecretKey { /// returned. pub fn try_from_bytes(mut sk_bytes: impl AsMut<[u8]>) -> Result { let sk_bytes = sk_bytes.as_mut(); - let secret = ed25519::SecretKey::from_bytes(&*sk_bytes) + let secret = <[u8; 32]>::try_from(&*sk_bytes) .map_err(|e| DecodingError::failed_to_parse("Ed25519 secret key", e))?; sk_bytes.zeroize(); Ok(SecretKey(secret)) } + + pub(crate) fn to_bytes(&self) -> [u8; 32] { + self.0 + } } #[cfg(test)] @@ -276,10 +211,11 @@ mod tests { use quickcheck::*; fn eq_keypairs(kp1: &Keypair, kp2: &Keypair) -> bool { - kp1.public() == kp2.public() && kp1.0.secret.as_bytes() == kp2.0.secret.as_bytes() + kp1.public() == kp2.public() && kp1.0.to_bytes() == kp2.0.to_bytes() } #[test] + #[cfg(feature = "rand")] fn ed25519_keypair_encode_decode() { fn prop() -> bool { let kp1 = Keypair::generate(); @@ -291,10 +227,11 @@ mod tests { } #[test] + #[cfg(feature = "rand")] fn ed25519_keypair_from_secret() { fn prop() -> bool { let kp1 = Keypair::generate(); - let mut sk = kp1.0.secret.to_bytes(); + let mut sk = kp1.0.to_bytes(); let kp2 = Keypair::from(SecretKey::try_from_bytes(&mut sk).unwrap()); eq_keypairs(&kp1, &kp2) && sk == [0u8; 32] } @@ -302,6 +239,7 @@ mod tests { } #[test] + #[cfg(feature = "rand")] fn ed25519_signature() { let kp = Keypair::generate(); let pk = kp.public(); diff --git a/identity/src/error.rs b/identity/src/error.rs index b27582c7f2c..2b8291dbb8b 100644 --- a/identity/src/error.rs +++ b/identity/src/error.rs @@ -33,6 
+33,7 @@ pub struct DecodingError { } impl DecodingError { + #[allow(dead_code)] pub(crate) fn missing_feature(feature_name: &'static str) -> Self { Self { msg: format!("cargo feature `{feature_name}` is not enabled"), @@ -77,13 +78,6 @@ impl DecodingError { } } - pub(crate) fn decoding_unsupported(key_type: &'static str) -> Self { - Self { - msg: format!("decoding {key_type} key from Protobuf is unsupported"), - source: None, - } - } - #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] pub(crate) fn encoding_unsupported(key_type: &'static str) -> Self { Self { @@ -114,11 +108,19 @@ pub struct SigningError { /// An error during encoding of key material. impl SigningError { - #[cfg(any(feature = "secp256k1", feature = "rsa"))] - pub(crate) fn new(msg: S, source: Option>) -> Self { + #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] + pub(crate) fn new(msg: S) -> Self { Self { msg: msg.to_string(), - source, + source: None, + } + } + + #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] + pub(crate) fn source(self, source: impl Error + Send + Sync + 'static) -> Self { + Self { + source: Some(Box::new(source)), + ..self } } } diff --git a/identity/src/keypair.rs b/identity/src/keypair.rs index 58a019e34e6..d1a70fb45a2 100644 --- a/identity/src/keypair.rs +++ b/identity/src/keypair.rs @@ -18,9 +18,34 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::error::{DecodingError, OtherVariantError, SigningError}; +#[cfg(any( + feature = "ecdsa", + feature = "secp256k1", + feature = "ed25519", + feature = "rsa" +))] +use crate::error::OtherVariantError; +use crate::error::{DecodingError, SigningError}; +#[cfg(any( + feature = "ecdsa", + feature = "secp256k1", + feature = "ed25519", + feature = "rsa" +))] use crate::proto; -use quick_protobuf::{BytesReader, MessageWrite, Writer}; +#[cfg(any( + feature = "ecdsa", + feature = "secp256k1", + feature = "ed25519", + feature = "rsa" +))] +use quick_protobuf::{BytesReader, Writer}; +#[cfg(any( + feature = "ecdsa", + feature = "secp256k1", + feature = "ed25519", + feature = "rsa" +))] use std::convert::TryFrom; #[cfg(feature = "ed25519")] @@ -34,6 +59,7 @@ use crate::secp256k1; #[cfg(feature = "ecdsa")] use crate::ecdsa; +use crate::KeyType; /// Identity keypair of a node. /// @@ -52,67 +78,51 @@ use crate::ecdsa; /// let keypair = Keypair::rsa_from_pkcs8(&mut bytes); /// ``` /// +#[derive(Debug, Clone)] +pub struct Keypair { + keypair: KeyPairInner, +} + #[derive(Debug, Clone)] #[allow(clippy::large_enum_variant)] -pub enum Keypair { +enum KeyPairInner { /// An Ed25519 keypair. #[cfg(feature = "ed25519")] - #[deprecated( - since = "0.1.0", - note = "This enum will be made opaque in the future, use `Keypair::try_into_ed25519` instead." - )] Ed25519(ed25519::Keypair), /// An RSA keypair. #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] - #[deprecated( - since = "0.1.0", - note = "This enum will be made opaque in the future, use `Keypair::try_into_rsa` instead." - )] Rsa(rsa::Keypair), /// A Secp256k1 keypair. #[cfg(feature = "secp256k1")] - #[deprecated( - since = "0.1.0", - note = "This enum will be made opaque in the future, use `Keypair::try_into_secp256k1` instead." - )] Secp256k1(secp256k1::Keypair), /// An ECDSA keypair. 
#[cfg(feature = "ecdsa")] - #[deprecated( - since = "0.1.0", - note = "This enum will be made opaque in the future, use `Keypair::try_into_ecdsa` instead." - )] Ecdsa(ecdsa::Keypair), } impl Keypair { /// Generate a new Ed25519 keypair. - #[cfg(feature = "ed25519")] + #[cfg(all(feature = "ed25519", feature = "rand"))] pub fn generate_ed25519() -> Keypair { - #[allow(deprecated)] - Keypair::Ed25519(ed25519::Keypair::generate()) + Keypair { + keypair: KeyPairInner::Ed25519(ed25519::Keypair::generate()), + } } /// Generate a new Secp256k1 keypair. - #[cfg(feature = "secp256k1")] + #[cfg(all(feature = "secp256k1", feature = "rand"))] pub fn generate_secp256k1() -> Keypair { - #[allow(deprecated)] - Keypair::Secp256k1(secp256k1::Keypair::generate()) + Keypair { + keypair: KeyPairInner::Secp256k1(secp256k1::Keypair::generate()), + } } /// Generate a new ECDSA keypair. - #[cfg(feature = "ecdsa")] + #[cfg(all(feature = "ecdsa", feature = "rand"))] pub fn generate_ecdsa() -> Keypair { - #[allow(deprecated)] - Keypair::Ecdsa(ecdsa::Keypair::generate()) - } - - #[cfg(feature = "ed25519")] - #[deprecated( - note = "This method name does not follow Rust naming conventions, use `Keypair::try_into_ed25519` instead." - )] - pub fn into_ed25519(self) -> Option { - self.try_into().ok() + Keypair { + keypair: KeyPairInner::Ecdsa(ecdsa::Keypair::generate()), + } } #[cfg(feature = "ed25519")] @@ -120,40 +130,16 @@ impl Keypair { self.try_into() } - #[cfg(feature = "secp256k1")] - #[deprecated( - note = "This method name does not follow Rust naming conventions, use `Keypair::try_into_secp256k1` instead." - )] - pub fn into_secp256k1(self) -> Option { - self.try_into().ok() - } - #[cfg(feature = "secp256k1")] pub fn try_into_secp256k1(self) -> Result { self.try_into() } - #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] - #[deprecated( - note = "This method name does not follow Rust naming conventions, use `Keypair::try_into_rsa` instead." 
- )] - pub fn into_rsa(self) -> Option { - self.try_into().ok() - } - #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] pub fn try_into_rsa(self) -> Result { self.try_into() } - #[cfg(feature = "ecdsa")] - #[deprecated( - note = "This method name does not follow Rust naming conventions, use `Keypair::try_into_ecdsa` instead." - )] - pub fn into_ecdsa(self) -> Option { - self.try_into().ok() - } - #[cfg(feature = "ecdsa")] pub fn try_into_ecdsa(self) -> Result { self.try_into() @@ -168,8 +154,9 @@ impl Keypair { note = "Use `rsa::Keypair::try_decode_pkcs8` and promote it into `Keypair` instead." )] pub fn rsa_from_pkcs8(pkcs8_der: &mut [u8]) -> Result { - #[allow(deprecated)] - rsa::Keypair::try_decode_pkcs8(pkcs8_der).map(Keypair::Rsa) + rsa::Keypair::try_decode_pkcs8(pkcs8_der).map(|kp| Keypair { + keypair: KeyPairInner::Rsa(kp), + }) } /// Decode a keypair from a DER-encoded Secp256k1 secret key in an ECPrivateKey @@ -181,9 +168,9 @@ impl Keypair { note = "Use `secp256k1::Keypair::try_from_bytes` and promote it into `Keypair` instead." )] pub fn secp256k1_from_der(der: &mut [u8]) -> Result { - #[allow(deprecated)] - secp256k1::SecretKey::try_decode_der(der) - .map(|sk| Keypair::Secp256k1(secp256k1::Keypair::from(sk))) + secp256k1::SecretKey::try_decode_der(der).map(|sk| Keypair { + keypair: KeyPairInner::Secp256k1(secp256k1::Keypair::from(sk)), + }) } /// Decode a keypair from the [binary format](https://datatracker.ietf.org/doc/html/rfc8032#section-5.1.5) @@ -195,80 +182,106 @@ impl Keypair { note = "Use `ed25519::Keypair::try_decode` and promote it into `Keypair` instead." 
)] pub fn ed25519_from_bytes(bytes: impl AsMut<[u8]>) -> Result { - #[allow(deprecated)] - Ok(Keypair::Ed25519(ed25519::Keypair::from( - ed25519::SecretKey::try_from_bytes(bytes)?, - ))) + Ok(Keypair { + keypair: KeyPairInner::Ed25519(ed25519::Keypair::from( + ed25519::SecretKey::try_from_bytes(bytes)?, + )), + }) } /// Sign a message using the private key of this keypair, producing /// a signature that can be verified using the corresponding public key. + #[allow(unused_variables)] pub fn sign(&self, msg: &[u8]) -> Result, SigningError> { - use Keypair::*; - #[allow(deprecated)] - match self { + match self.keypair { #[cfg(feature = "ed25519")] - Ed25519(ref pair) => Ok(pair.sign(msg)), + KeyPairInner::Ed25519(ref pair) => Ok(pair.sign(msg)), #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] - Rsa(ref pair) => pair.sign(msg), + KeyPairInner::Rsa(ref pair) => pair.sign(msg), #[cfg(feature = "secp256k1")] - Secp256k1(ref pair) => pair.secret().sign(msg), + KeyPairInner::Secp256k1(ref pair) => Ok(pair.secret().sign(msg)), #[cfg(feature = "ecdsa")] - Ecdsa(ref pair) => Ok(pair.secret().sign(msg)), + KeyPairInner::Ecdsa(ref pair) => Ok(pair.secret().sign(msg)), } } /// Get the public key of this keypair. 
pub fn public(&self) -> PublicKey { - use Keypair::*; - #[allow(deprecated)] - match self { + match self.keypair { #[cfg(feature = "ed25519")] - Ed25519(pair) => PublicKey::Ed25519(pair.public()), + KeyPairInner::Ed25519(ref pair) => PublicKey { + publickey: PublicKeyInner::Ed25519(pair.public()), + }, #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] - Rsa(pair) => PublicKey::Rsa(pair.public()), + KeyPairInner::Rsa(ref pair) => PublicKey { + publickey: PublicKeyInner::Rsa(pair.public()), + }, #[cfg(feature = "secp256k1")] - Secp256k1(pair) => PublicKey::Secp256k1(pair.public().clone()), + KeyPairInner::Secp256k1(ref pair) => PublicKey { + publickey: PublicKeyInner::Secp256k1(pair.public().clone()), + }, #[cfg(feature = "ecdsa")] - Ecdsa(pair) => PublicKey::Ecdsa(pair.public().clone()), + KeyPairInner::Ecdsa(ref pair) => PublicKey { + publickey: PublicKeyInner::Ecdsa(pair.public().clone()), + }, } } /// Encode a private key as protobuf structure. + #[deprecated( + note = "This method name does not follow Rust naming conventions, use `Keypair::try_into_ed25519` instead." + )] pub fn to_protobuf_encoding(&self) -> Result, DecodingError> { self.encode_protobuf() } - /// Encode a private key as protobuf structure. /// /// See for details on the encoding. 
pub fn encode_protobuf(&self) -> Result, DecodingError> { - #[allow(deprecated)] - let pk: proto::PrivateKey = match self { - #[cfg(feature = "ed25519")] - Self::Ed25519(data) => proto::PrivateKey { - Type: proto::KeyType::Ed25519, - Data: data.to_bytes().to_vec(), - }, - #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] - Self::Rsa(_) => return Err(DecodingError::encoding_unsupported("RSA")), - #[cfg(feature = "secp256k1")] - Self::Secp256k1(data) => proto::PrivateKey { - Type: proto::KeyType::Secp256k1, - Data: data.secret().to_bytes().to_vec(), - }, - #[cfg(feature = "ecdsa")] - Self::Ecdsa(data) => proto::PrivateKey { - Type: proto::KeyType::ECDSA, - Data: data.secret().encode_der(), - }, - }; + #[cfg(any( + feature = "ecdsa", + feature = "secp256k1", + feature = "ed25519", + feature = "rsa" + ))] + { + use quick_protobuf::MessageWrite; + + #[allow(deprecated)] + let pk: proto::PrivateKey = match self.keypair { + #[cfg(feature = "ed25519")] + KeyPairInner::Ed25519(ref data) => proto::PrivateKey { + Type: proto::KeyType::Ed25519, + Data: data.to_bytes().to_vec(), + }, + #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] + KeyPairInner::Rsa(_) => return Err(DecodingError::encoding_unsupported("RSA")), + #[cfg(feature = "secp256k1")] + KeyPairInner::Secp256k1(ref data) => proto::PrivateKey { + Type: proto::KeyType::Secp256k1, + Data: data.secret().to_bytes().to_vec(), + }, + #[cfg(feature = "ecdsa")] + KeyPairInner::Ecdsa(ref data) => proto::PrivateKey { + Type: proto::KeyType::ECDSA, + Data: data.secret().encode_der(), + }, + }; - let mut buf = Vec::with_capacity(pk.get_size()); - let mut writer = Writer::new(&mut buf); - pk.write_message(&mut writer).expect("Encoding to succeed"); + let mut buf = Vec::with_capacity(pk.get_size()); + let mut writer = Writer::new(&mut buf); + pk.write_message(&mut writer).expect("Encoding to succeed"); - Ok(buf) + Ok(buf) + } + + #[cfg(not(any( + feature = "ecdsa", + feature = "secp256k1", + feature = "ed25519", + 
feature = "rsa" + )))] + unreachable!() } /// Decode a private key from a protobuf structure and parse it as a [`Keypair`]. @@ -282,67 +295,181 @@ impl Keypair { /// Try to decode a private key from a protobuf structure and parse it as a [`Keypair`]. #[cfg_attr(not(feature = "ed25519"), allow(unused_mut))] pub fn try_decode_protobuf(bytes: &[u8]) -> Result { - use quick_protobuf::MessageRead; + #[cfg(any( + feature = "ecdsa", + feature = "secp256k1", + feature = "ed25519", + feature = "rsa" + ))] + { + use quick_protobuf::MessageRead; + + let mut reader = BytesReader::from_bytes(bytes); + let mut private_key = proto::PrivateKey::from_reader(&mut reader, bytes) + .map_err(|e| DecodingError::bad_protobuf("private key bytes", e)) + .map(zeroize::Zeroizing::new)?; + + #[allow(unreachable_code)] + match private_key.Type { + proto::KeyType::Ed25519 => { + #[cfg(feature = "ed25519")] + return ed25519::Keypair::try_from_bytes(&mut private_key.Data).map(|sk| { + Keypair { + keypair: KeyPairInner::Ed25519(sk), + } + }); + Err(DecodingError::missing_feature("ed25519")) + } + proto::KeyType::RSA => { + #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] + return rsa::Keypair::try_decode_pkcs1(&mut private_key.Data).map(|sk| { + Keypair { + keypair: KeyPairInner::Rsa(sk), + } + }); + Err(DecodingError::missing_feature("rsa")) + } + proto::KeyType::Secp256k1 => { + #[cfg(feature = "secp256k1")] + return secp256k1::SecretKey::try_from_bytes(&mut private_key.Data).map( + |key| Keypair { + keypair: KeyPairInner::Secp256k1(key.into()), + }, + ); + + Err(DecodingError::missing_feature("secp256k1")) + } + proto::KeyType::ECDSA => { + #[cfg(feature = "ecdsa")] + return ecdsa::SecretKey::try_decode_der(&mut private_key.Data).map(|key| { + Keypair { + keypair: KeyPairInner::Ecdsa(key.into()), + } + }); + + Err(DecodingError::missing_feature("ecdsa")) + } + } + } - let mut reader = BytesReader::from_bytes(bytes); - let mut private_key = proto::PrivateKey::from_reader(&mut reader, 
bytes) - .map_err(|e| DecodingError::bad_protobuf("private key bytes", e)) - .map(zeroize::Zeroizing::new)?; + #[cfg(not(any( + feature = "ecdsa", + feature = "secp256k1", + feature = "ed25519", + feature = "rsa" + )))] + unreachable!() + } - #[allow(deprecated, unreachable_code)] - match private_key.Type { - proto::KeyType::Ed25519 => { - #[cfg(feature = "ed25519")] - return ed25519::Keypair::try_from_bytes(&mut private_key.Data) - .map(Keypair::Ed25519); - Err(DecodingError::missing_feature("ed25519")) - } - proto::KeyType::RSA => Err(DecodingError::decoding_unsupported("RSA")), - proto::KeyType::Secp256k1 => { - #[cfg(feature = "secp256k1")] - return secp256k1::Keypair::try_from_bytes(&mut private_key.Data) - .map(Keypair::Secp256k1); - Err(DecodingError::missing_feature("secp256k1")) - } - proto::KeyType::ECDSA => { - #[cfg(feature = "ecdsa")] - return ecdsa::SecretKey::try_decode_der(&mut private_key.Data) - .map(|key| Keypair::Ecdsa(key.into())); - Err(DecodingError::missing_feature("ecdsa")) - } + /// Return a [`KeyType`] of the [`Keypair`]. + pub fn key_type(&self) -> KeyType { + match self.keypair { + #[cfg(feature = "ed25519")] + KeyPairInner::Ed25519(_) => KeyType::Ed25519, + #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] + KeyPairInner::Rsa(_) => KeyType::RSA, + #[cfg(feature = "secp256k1")] + KeyPairInner::Secp256k1(_) => KeyType::Secp256k1, + #[cfg(feature = "ecdsa")] + KeyPairInner::Ecdsa(_) => KeyType::Ecdsa, } } -} + /// Deterministically derive a new secret from this [`Keypair`], taking into account the provided domain. + /// + /// This works for all key types except RSA where it returns `None`. 
+ /// + /// # Example + /// + /// ``` + /// # fn main() { + /// # use libp2p_identity as identity; + /// let key = identity::Keypair::generate_ed25519(); + /// + /// let new_key = key.derive_secret(b"my encryption key").expect("can derive secret for ed25519"); + /// # } + /// ``` + /// + #[cfg(any( + feature = "ecdsa", + feature = "secp256k1", + feature = "ed25519", + feature = "rsa" + ))] + pub fn derive_secret(&self, domain: &[u8]) -> Option<[u8; 32]> { + let mut okm = [0u8; 32]; + hkdf::Hkdf::::new(None, &self.secret()?) + .expand(domain, &mut okm) + .expect("okm.len() == 32"); + + Some(okm) + } + + // We build docs with all features so this doesn't need to have any docs. + #[cfg(not(any( + feature = "ecdsa", + feature = "secp256k1", + feature = "ed25519", + feature = "rsa" + )))] + pub fn derive_secret(&self, _: &[u8]) -> Option<[u8; 32]> { + None + } + + /// Return the secret key of the [`Keypair`]. + #[allow(dead_code)] + pub(crate) fn secret(&self) -> Option<[u8; 32]> { + match self.keypair { + #[cfg(feature = "ed25519")] + KeyPairInner::Ed25519(ref inner) => Some(inner.secret().to_bytes()), + #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] + KeyPairInner::Rsa(_) => None, + #[cfg(feature = "secp256k1")] + KeyPairInner::Secp256k1(ref inner) => Some(inner.secret().to_bytes()), + #[cfg(feature = "ecdsa")] + KeyPairInner::Ecdsa(ref inner) => Some( + inner + .secret() + .to_bytes() + .try_into() + .expect("Ecdsa's private key should be 32 bytes"), + ), + } + } +} #[cfg(feature = "ecdsa")] impl From for Keypair { fn from(kp: ecdsa::Keypair) -> Self { - #[allow(deprecated)] - Keypair::Ecdsa(kp) + Keypair { + keypair: KeyPairInner::Ecdsa(kp), + } } } #[cfg(feature = "ed25519")] impl From for Keypair { fn from(kp: ed25519::Keypair) -> Self { - #[allow(deprecated)] - Keypair::Ed25519(kp) + Keypair { + keypair: KeyPairInner::Ed25519(kp), + } } } #[cfg(feature = "secp256k1")] impl From for Keypair { fn from(kp: secp256k1::Keypair) -> Self { - 
#[allow(deprecated)] - Keypair::Secp256k1(kp) + Keypair { + keypair: KeyPairInner::Secp256k1(kp), + } } } #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] impl From for Keypair { fn from(kp: rsa::Keypair) -> Self { - #[allow(deprecated)] - Keypair::Rsa(kp) + Keypair { + keypair: KeyPairInner::Rsa(kp), + } } } @@ -351,15 +478,14 @@ impl TryInto for Keypair { type Error = OtherVariantError; fn try_into(self) -> Result { - #[allow(deprecated)] - match self { - Keypair::Ed25519(inner) => Ok(inner), + match self.keypair { + KeyPairInner::Ed25519(inner) => Ok(inner), #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] - Keypair::Rsa(_) => Err(OtherVariantError::new(crate::KeyType::RSA)), + KeyPairInner::Rsa(_) => Err(OtherVariantError::new(crate::KeyType::RSA)), #[cfg(feature = "secp256k1")] - Keypair::Secp256k1(_) => Err(OtherVariantError::new(crate::KeyType::Secp256k1)), + KeyPairInner::Secp256k1(_) => Err(OtherVariantError::new(crate::KeyType::Secp256k1)), #[cfg(feature = "ecdsa")] - Keypair::Ecdsa(_) => Err(OtherVariantError::new(crate::KeyType::Ecdsa)), + KeyPairInner::Ecdsa(_) => Err(OtherVariantError::new(crate::KeyType::Ecdsa)), } } } @@ -369,15 +495,14 @@ impl TryInto for Keypair { type Error = OtherVariantError; fn try_into(self) -> Result { - #[allow(deprecated)] - match self { - Keypair::Ecdsa(inner) => Ok(inner), + match self.keypair { + KeyPairInner::Ecdsa(inner) => Ok(inner), #[cfg(feature = "ed25519")] - Keypair::Ed25519(_) => Err(OtherVariantError::new(crate::KeyType::Ed25519)), + KeyPairInner::Ed25519(_) => Err(OtherVariantError::new(crate::KeyType::Ed25519)), #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] - Keypair::Rsa(_) => Err(OtherVariantError::new(crate::KeyType::RSA)), + KeyPairInner::Rsa(_) => Err(OtherVariantError::new(crate::KeyType::RSA)), #[cfg(feature = "secp256k1")] - Keypair::Secp256k1(_) => Err(OtherVariantError::new(crate::KeyType::Secp256k1)), + KeyPairInner::Secp256k1(_) => 
Err(OtherVariantError::new(crate::KeyType::Secp256k1)), } } } @@ -387,15 +512,14 @@ impl TryInto for Keypair { type Error = OtherVariantError; fn try_into(self) -> Result { - #[allow(deprecated)] - match self { - Keypair::Secp256k1(inner) => Ok(inner), + match self.keypair { + KeyPairInner::Secp256k1(inner) => Ok(inner), #[cfg(feature = "ed25519")] - Keypair::Ed25519(_) => Err(OtherVariantError::new(crate::KeyType::Ed25519)), + KeyPairInner::Ed25519(_) => Err(OtherVariantError::new(crate::KeyType::Ed25519)), #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] - Keypair::Rsa(_) => Err(OtherVariantError::new(crate::KeyType::RSA)), + KeyPairInner::Rsa(_) => Err(OtherVariantError::new(crate::KeyType::RSA)), #[cfg(feature = "ecdsa")] - Keypair::Ecdsa(_) => Err(OtherVariantError::new(crate::KeyType::Ecdsa)), + KeyPairInner::Ecdsa(_) => Err(OtherVariantError::new(crate::KeyType::Ecdsa)), } } } @@ -405,167 +529,138 @@ impl TryInto for Keypair { type Error = OtherVariantError; fn try_into(self) -> Result { - #[allow(deprecated)] - match self { - Keypair::Rsa(inner) => Ok(inner), + match self.keypair { + KeyPairInner::Rsa(inner) => Ok(inner), #[cfg(feature = "ed25519")] - Keypair::Ed25519(_) => Err(OtherVariantError::new(crate::KeyType::Ed25519)), + KeyPairInner::Ed25519(_) => Err(OtherVariantError::new(crate::KeyType::Ed25519)), #[cfg(feature = "secp256k1")] - Keypair::Secp256k1(_) => Err(OtherVariantError::new(crate::KeyType::Secp256k1)), + KeyPairInner::Secp256k1(_) => Err(OtherVariantError::new(crate::KeyType::Secp256k1)), #[cfg(feature = "ecdsa")] - Keypair::Ecdsa(_) => Err(OtherVariantError::new(crate::KeyType::Ecdsa)), + KeyPairInner::Ecdsa(_) => Err(OtherVariantError::new(crate::KeyType::Ecdsa)), } } } -/// The public key of a node's identity keypair. #[derive(Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)] -pub enum PublicKey { +pub(crate) enum PublicKeyInner { /// A public Ed25519 key. 
#[cfg(feature = "ed25519")] - #[deprecated( - since = "0.1.0", - note = "This enum will be made opaque in the future, use `PublicKey::from` and `PublicKey::into_ed25519` instead." - )] Ed25519(ed25519::PublicKey), #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] /// A public RSA key. - - #[deprecated( - since = "0.1.0", - note = "This enum will be made opaque in the future, use `PublicKey::from` and `PublicKey::into_rsa` instead." - )] Rsa(rsa::PublicKey), #[cfg(feature = "secp256k1")] /// A public Secp256k1 key. - #[deprecated( - since = "0.1.0", - note = "This enum will be made opaque in the future, use `PublicKey::from` and `PublicKey::into_secp256k1` instead." - )] Secp256k1(secp256k1::PublicKey), /// A public ECDSA key. #[cfg(feature = "ecdsa")] - #[deprecated( - since = "0.1.0", - note = "This enum will be made opaque in the future, use `PublicKey::from` and `PublicKey::into_ecdsa` instead." - )] Ecdsa(ecdsa::PublicKey), } +/// The public key of a node's identity keypair. +#[derive(Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)] +pub struct PublicKey { + pub(crate) publickey: PublicKeyInner, +} + impl PublicKey { /// Verify a signature for a message using this public key, i.e. check /// that the signature has been produced by the corresponding /// private key (authenticity), and that the message has not been /// tampered with (integrity). 
#[must_use] + #[allow(unused_variables)] pub fn verify(&self, msg: &[u8], sig: &[u8]) -> bool { - use PublicKey::*; - #[allow(deprecated)] - match self { + match self.publickey { #[cfg(feature = "ed25519")] - Ed25519(pk) => pk.verify(msg, sig), + PublicKeyInner::Ed25519(ref pk) => pk.verify(msg, sig), #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] - Rsa(pk) => pk.verify(msg, sig), + PublicKeyInner::Rsa(ref pk) => pk.verify(msg, sig), #[cfg(feature = "secp256k1")] - Secp256k1(pk) => pk.verify(msg, sig), + PublicKeyInner::Secp256k1(ref pk) => pk.verify(msg, sig), #[cfg(feature = "ecdsa")] - Ecdsa(pk) => pk.verify(msg, sig), + PublicKeyInner::Ecdsa(ref pk) => pk.verify(msg, sig), } } - #[cfg(feature = "ed25519")] - #[deprecated( - note = "This method name does not follow Rust naming conventions, use `PublicKey::try_into_ed25519` instead." - )] - pub fn into_ed25519(self) -> Option { - self.try_into().ok() - } - #[cfg(feature = "ed25519")] pub fn try_into_ed25519(self) -> Result { self.try_into() } - #[cfg(feature = "secp256k1")] - #[deprecated( - note = "This method name does not follow Rust naming conventions, use `PublicKey::try_into_secp256k1` instead." - )] - pub fn into_secp256k1(self) -> Option { - self.try_into().ok() - } - #[cfg(feature = "secp256k1")] pub fn try_into_secp256k1(self) -> Result { self.try_into() } - #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] - #[deprecated( - note = "This method name does not follow Rust naming conventions, use `PublicKey::try_into_rsa` instead." - )] - pub fn into_rsa(self) -> Option { - self.try_into().ok() - } - #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] pub fn try_into_rsa(self) -> Result { self.try_into() } - #[cfg(feature = "ecdsa")] - #[deprecated( - note = "This method name does not follow Rust naming conventions, use `PublicKey::try_into_ecdsa` instead." 
- )] - pub fn into_ecdsa(self) -> Option { - self.try_into().ok() - } - #[cfg(feature = "ecdsa")] pub fn try_into_ecdsa(self) -> Result { self.try_into() } - /// Encode the public key into a protobuf structure for storage or - /// exchange with other nodes. - #[deprecated(note = "Renamed to `PublicKey::encode_protobuf`.")] - pub fn to_protobuf_encoding(&self) -> Vec { - Self::encode_protobuf(self) - } - /// Encode the public key into a protobuf structure for storage or /// exchange with other nodes. pub fn encode_protobuf(&self) -> Vec { - let public_key = proto::PublicKey::from(self); - - let mut buf = Vec::with_capacity(public_key.get_size()); - let mut writer = Writer::new(&mut buf); - public_key - .write_message(&mut writer) - .expect("Encoding to succeed"); - - buf - } + #[cfg(any( + feature = "ecdsa", + feature = "secp256k1", + feature = "ed25519", + feature = "rsa" + ))] + { + use quick_protobuf::MessageWrite; + let public_key = proto::PublicKey::from(self); + + let mut buf = Vec::with_capacity(public_key.get_size()); + let mut writer = Writer::new(&mut buf); + public_key + .write_message(&mut writer) + .expect("Encoding to succeed"); + + buf + } - /// Decode a public key from a protobuf structure, e.g. read from storage - /// or received from another node. - #[deprecated( - note = "This method name does not follow Rust naming conventions, use `PublicKey::try_decode_protobuf` instead." - )] - pub fn from_protobuf_encoding(bytes: &[u8]) -> Result { - Self::try_decode_protobuf(bytes) + #[cfg(not(any( + feature = "ecdsa", + feature = "secp256k1", + feature = "ed25519", + feature = "rsa" + )))] + unreachable!() } /// Decode a public key from a protobuf structure, e.g. read from storage /// or received from another node. 
+ #[allow(unused_variables)] pub fn try_decode_protobuf(bytes: &[u8]) -> Result { - use quick_protobuf::MessageRead; - - let mut reader = BytesReader::from_bytes(bytes); - - let pubkey = proto::PublicKey::from_reader(&mut reader, bytes) - .map_err(|e| DecodingError::bad_protobuf("public key bytes", e))?; + #[cfg(any( + feature = "ecdsa", + feature = "secp256k1", + feature = "ed25519", + feature = "rsa" + ))] + { + use quick_protobuf::MessageRead; + let mut reader = BytesReader::from_bytes(bytes); + + let pubkey = proto::PublicKey::from_reader(&mut reader, bytes) + .map_err(|e| DecodingError::bad_protobuf("public key bytes", e))?; + + pubkey.try_into() + } - pubkey.try_into() + #[cfg(not(any( + feature = "ecdsa", + feature = "secp256k1", + feature = "ed25519", + feature = "rsa" + )))] + unreachable!() } /// Convert the `PublicKey` into the corresponding `PeerId`. @@ -573,38 +668,76 @@ impl PublicKey { pub fn to_peer_id(&self) -> crate::PeerId { self.into() } + + /// Return a [`KeyType`] of the [`PublicKey`]. 
+ pub fn key_type(&self) -> KeyType { + match self.publickey { + #[cfg(feature = "ed25519")] + PublicKeyInner::Ed25519(_) => KeyType::Ed25519, + #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] + PublicKeyInner::Rsa(_) => KeyType::RSA, + #[cfg(feature = "secp256k1")] + PublicKeyInner::Secp256k1(_) => KeyType::Secp256k1, + #[cfg(feature = "ecdsa")] + PublicKeyInner::Ecdsa(_) => KeyType::Ecdsa, + } + } } +#[cfg(any( + feature = "ecdsa", + feature = "secp256k1", + feature = "ed25519", + feature = "rsa" +))] impl TryFrom for PublicKey { type Error = DecodingError; fn try_from(pubkey: proto::PublicKey) -> Result { - #[allow(deprecated)] - #[allow(unreachable_code)] match pubkey.Type { + #[cfg(feature = "ed25519")] + proto::KeyType::Ed25519 => Ok(ed25519::PublicKey::try_from_bytes(&pubkey.Data).map( + |kp| PublicKey { + publickey: PublicKeyInner::Ed25519(kp), + }, + )?), + #[cfg(not(feature = "ed25519"))] proto::KeyType::Ed25519 => { - #[cfg(feature = "ed25519")] - return ed25519::PublicKey::try_from_bytes(&pubkey.Data).map(PublicKey::Ed25519); - log::debug!("support for ed25519 was disabled at compile-time"); + tracing::debug!("support for ed25519 was disabled at compile-time"); Err(DecodingError::missing_feature("ed25519")) } + #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] proto::KeyType::RSA => { - #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] - return rsa::PublicKey::try_decode_x509(&pubkey.Data).map(PublicKey::Rsa); - log::debug!("support for RSA was disabled at compile-time"); + Ok( + rsa::PublicKey::try_decode_x509(&pubkey.Data).map(|kp| PublicKey { + publickey: PublicKeyInner::Rsa(kp), + })?, + ) + } + #[cfg(any(not(feature = "rsa"), target_arch = "wasm32"))] + proto::KeyType::RSA => { + tracing::debug!("support for RSA was disabled at compile-time"); Err(DecodingError::missing_feature("rsa")) } + #[cfg(feature = "secp256k1")] + proto::KeyType::Secp256k1 => Ok(secp256k1::PublicKey::try_from_bytes(&pubkey.Data) + .map(|kp| 
PublicKey { + publickey: PublicKeyInner::Secp256k1(kp), + })?), + #[cfg(not(feature = "secp256k1"))] proto::KeyType::Secp256k1 => { - #[cfg(feature = "secp256k1")] - return secp256k1::PublicKey::try_from_bytes(&pubkey.Data) - .map(PublicKey::Secp256k1); - log::debug!("support for secp256k1 was disabled at compile-time"); + tracing::debug!("support for secp256k1 was disabled at compile-time"); Err(DecodingError::missing_feature("secp256k1")) } + #[cfg(feature = "ecdsa")] + proto::KeyType::ECDSA => Ok(ecdsa::PublicKey::try_decode_der(&pubkey.Data).map( + |kp| PublicKey { + publickey: PublicKeyInner::Ecdsa(kp), + }, + )?), + #[cfg(not(feature = "ecdsa"))] proto::KeyType::ECDSA => { - #[cfg(feature = "ecdsa")] - return ecdsa::PublicKey::try_decode_der(&pubkey.Data).map(PublicKey::Ecdsa); - log::debug!("support for ECDSA was disabled at compile-time"); + tracing::debug!("support for ECDSA was disabled at compile-time"); Err(DecodingError::missing_feature("ecdsa")) } } @@ -616,15 +749,14 @@ impl TryInto for PublicKey { type Error = OtherVariantError; fn try_into(self) -> Result { - #[allow(deprecated)] - match self { - PublicKey::Ed25519(inner) => Ok(inner), + match self.publickey { + PublicKeyInner::Ed25519(inner) => Ok(inner), #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] - PublicKey::Rsa(_) => Err(OtherVariantError::new(crate::KeyType::RSA)), + PublicKeyInner::Rsa(_) => Err(OtherVariantError::new(crate::KeyType::RSA)), #[cfg(feature = "secp256k1")] - PublicKey::Secp256k1(_) => Err(OtherVariantError::new(crate::KeyType::Secp256k1)), + PublicKeyInner::Secp256k1(_) => Err(OtherVariantError::new(crate::KeyType::Secp256k1)), #[cfg(feature = "ecdsa")] - PublicKey::Ecdsa(_) => Err(OtherVariantError::new(crate::KeyType::Ecdsa)), + PublicKeyInner::Ecdsa(_) => Err(OtherVariantError::new(crate::KeyType::Ecdsa)), } } } @@ -634,15 +766,14 @@ impl TryInto for PublicKey { type Error = OtherVariantError; fn try_into(self) -> Result { - #[allow(deprecated)] - match self { 
- PublicKey::Ecdsa(inner) => Ok(inner), + match self.publickey { + PublicKeyInner::Ecdsa(inner) => Ok(inner), #[cfg(feature = "ed25519")] - PublicKey::Ed25519(_) => Err(OtherVariantError::new(crate::KeyType::Ed25519)), + PublicKeyInner::Ed25519(_) => Err(OtherVariantError::new(crate::KeyType::Ed25519)), #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] - PublicKey::Rsa(_) => Err(OtherVariantError::new(crate::KeyType::RSA)), + PublicKeyInner::Rsa(_) => Err(OtherVariantError::new(crate::KeyType::RSA)), #[cfg(feature = "secp256k1")] - PublicKey::Secp256k1(_) => Err(OtherVariantError::new(crate::KeyType::Secp256k1)), + PublicKeyInner::Secp256k1(_) => Err(OtherVariantError::new(crate::KeyType::Secp256k1)), } } } @@ -652,15 +783,14 @@ impl TryInto for PublicKey { type Error = OtherVariantError; fn try_into(self) -> Result { - #[allow(deprecated)] - match self { - PublicKey::Secp256k1(inner) => Ok(inner), + match self.publickey { + PublicKeyInner::Secp256k1(inner) => Ok(inner), #[cfg(feature = "ed25519")] - PublicKey::Ed25519(_) => Err(OtherVariantError::new(crate::KeyType::Ed25519)), + PublicKeyInner::Ed25519(_) => Err(OtherVariantError::new(crate::KeyType::Ed25519)), #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] - PublicKey::Rsa(_) => Err(OtherVariantError::new(crate::KeyType::RSA)), + PublicKeyInner::Rsa(_) => Err(OtherVariantError::new(crate::KeyType::RSA)), #[cfg(feature = "ecdsa")] - PublicKey::Ecdsa(_) => Err(OtherVariantError::new(crate::KeyType::Ecdsa)), + PublicKeyInner::Ecdsa(_) => Err(OtherVariantError::new(crate::KeyType::Ecdsa)), } } } @@ -670,15 +800,14 @@ impl TryInto for PublicKey { type Error = OtherVariantError; fn try_into(self) -> Result { - #[allow(deprecated)] - match self { - PublicKey::Rsa(inner) => Ok(inner), + match self.publickey { + PublicKeyInner::Rsa(inner) => Ok(inner), #[cfg(feature = "ed25519")] - PublicKey::Ed25519(_) => Err(OtherVariantError::new(crate::KeyType::Ed25519)), + PublicKeyInner::Ed25519(_) => 
Err(OtherVariantError::new(crate::KeyType::Ed25519)), #[cfg(feature = "secp256k1")] - PublicKey::Secp256k1(_) => Err(OtherVariantError::new(crate::KeyType::Secp256k1)), + PublicKeyInner::Secp256k1(_) => Err(OtherVariantError::new(crate::KeyType::Secp256k1)), #[cfg(feature = "ecdsa")] - PublicKey::Ecdsa(_) => Err(OtherVariantError::new(crate::KeyType::Ecdsa)), + PublicKeyInner::Ecdsa(_) => Err(OtherVariantError::new(crate::KeyType::Ecdsa)), } } } @@ -686,53 +815,58 @@ impl TryInto for PublicKey { #[cfg(feature = "ed25519")] impl From for PublicKey { fn from(key: ed25519::PublicKey) -> Self { - #[allow(deprecated)] // TODO: Remove when PublicKey::Ed25519 is made opaque - PublicKey::Ed25519(key) + PublicKey { + publickey: PublicKeyInner::Ed25519(key), + } } } #[cfg(feature = "secp256k1")] impl From for PublicKey { fn from(key: secp256k1::PublicKey) -> Self { - #[allow(deprecated)] // TODO: Remove when PublicKey::Secp256k1 is made opaque - PublicKey::Secp256k1(key) + PublicKey { + publickey: PublicKeyInner::Secp256k1(key), + } } } #[cfg(feature = "ecdsa")] impl From for PublicKey { fn from(key: ecdsa::PublicKey) -> Self { - #[allow(deprecated)] // TODO: Remove when PublicKey::Ecdsa is made opaque - PublicKey::Ecdsa(key) + PublicKey { + publickey: PublicKeyInner::Ecdsa(key), + } } } #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] impl From for PublicKey { fn from(key: rsa::PublicKey) -> Self { - #[allow(deprecated)] // TODO: Remove when PublicKey::Rsa is made opaque - PublicKey::Rsa(key) + PublicKey { + publickey: PublicKeyInner::Rsa(key), + } } } #[cfg(test)] mod tests { use super::*; - #[cfg(feature = "peerid")] - use crate::PeerId; - use base64::prelude::*; - use std::str::FromStr; #[test] #[cfg(feature = "ed25519")] + #[cfg(feature = "peerid")] fn keypair_protobuf_roundtrip_ed25519() { - let priv_key = 
Keypair::try_decode_protobuf(&hex_literal::hex!("080112407e0830617c4a7de83925dfb2694556b12936c477a0e1feb2e148ec9da60fee7d1ed1e8fae2c4a144b8be8fd4b47bf3d3b34b871c3cacf6010f0e42d474fce27e")).unwrap(); + let priv_key = Keypair::try_decode_protobuf(&hex_literal::hex!( + "080112407e0830617c4a7de83925dfb2694556b12936c477a0e1feb2e148ec9da60fee7d1ed1e8fae2c4a144b8be8fd4b47bf3d3b34b871c3cacf6010f0e42d474fce27e" + )) + .unwrap(); + let pub_key = PublicKey::try_decode_protobuf(&hex_literal::hex!( "080112201ed1e8fae2c4a144b8be8fd4b47bf3d3b34b871c3cacf6010f0e42d474fce27e" )) .unwrap(); - roundtrip_protobuf_encoding(&priv_key, &pub_key); + roundtrip_protobuf_encoding(&priv_key, &pub_key, KeyType::Ed25519); } #[test] @@ -744,7 +878,7 @@ mod tests { .unwrap(); let pub_key = PublicKey::try_decode_protobuf(&hex_literal::hex!("0803125b3059301306072a8648ce3d020106082a8648ce3d03010703420004de3d300fa36ae0e8f5d530899d83abab44abf3161f162a4bc901d8e6ecda020e8b6d5f8da30525e71d6851510c098e5c47c646a597fb4dcec034e9f77c409e62")).unwrap(); - roundtrip_protobuf_encoding(&priv_key, &pub_key); + roundtrip_protobuf_encoding(&priv_key, &pub_key, KeyType::Ecdsa); } // #[test] @@ -768,11 +902,11 @@ mod tests { )) .unwrap(); - roundtrip_protobuf_encoding(&priv_key, &pub_key); + roundtrip_protobuf_encoding(&priv_key, &pub_key, KeyType::Secp256k1); } #[cfg(feature = "peerid")] - fn roundtrip_protobuf_encoding(private_key: &Keypair, public_key: &PublicKey) { + fn roundtrip_protobuf_encoding(private_key: &Keypair, public_key: &PublicKey, tpe: KeyType) { assert_eq!(&private_key.public(), public_key); let encoded_priv = private_key @@ -794,22 +928,22 @@ mod tests { decoded_public.to_peer_id(), "PeerId from roundtripped public key should be the same" ); + assert_eq!(private_key.key_type(), tpe) } #[test] #[cfg(feature = "peerid")] fn keypair_from_protobuf_encoding() { - // E.g. retrieved from an IPFS config file. 
- let base_64_encoded = "CAESQL6vdKQuznQosTrW7FWI9At+XX7EBf0BnZLhb6w+N+XSQSdfInl6c7U4NuxXJlhKcRBlBw9d0tj2dfBIVf6mcPA="; - let expected_peer_id = - PeerId::from_str("12D3KooWEChVMMMzV8acJ53mJHrw1pQ27UAGkCxWXLJutbeUMvVu").unwrap(); - - let encoded = BASE64_STANDARD.decode(base_64_encoded).unwrap(); - - let keypair = Keypair::try_decode_protobuf(&encoded).unwrap(); - let peer_id = keypair.public().to_peer_id(); + let priv_key = Keypair::try_decode_protobuf(&hex_literal::hex!( + "080012ae123082092a0201000282020100e1beab071d08200bde24eef00d049449b07770ff9910257b2d7d5dda242ce8f0e2f12e1af4b32d9efd2c090f66b0f29986dbb645dae9880089704a94e5066d594162ae6ee8892e6ec70701db0a6c445c04778eb3de1293aa1a23c3825b85c6620a2bc3f82f9b0c309bc0ab3aeb1873282bebd3da03c33e76c21e9beb172fd44c9e43be32e2c99827033cf8d0f0c606f4579326c930eb4e854395ad941256542c793902185153c474bed109d6ff5141ebf9cd256cf58893a37f83729f97e7cb435ec679d2e33901d27bb35aa0d7e20561da08885ef0abbf8e2fb48d6a5487047a9ecb1ad41fa7ed84f6e3e8ecd5d98b3982d2a901b4454991766da295ab78822add5612a2df83bcee814cf50973e80d7ef38111b1bd87da2ae92438a2c8cbcc70b31ee319939a3b9c761dbc13b5c086d6b64bf7ae7dacc14622375d92a8ff9af7eb962162bbddebf90acb32adb5e4e4029f1c96019949ecfbfeffd7ac1e3fbcc6b6168c34be3d5a2e5999fcbb39bba7adbca78eab09b9bc39f7fa4b93411f4cc175e70c0a083e96bfaefb04a9580b4753c1738a6a760ae1afd851a1a4bdad231cf56e9284d832483df215a46c1c21bdf0c6cfe951c18f1ee4078c79c13d63edb6e14feaeffabc90ad317e4875fe648101b0864097e998f0ca3025ef9638cd2b0caecd3770ab54a1d9c6ca959b0f5dcbc90caeefc4135baca6fd475224269bbe1b02030100010282020100a472ffa858efd8588ce59ee264b957452f3673acdf5631d7bfd5ba0ef59779c231b0bc838a8b14cae367b6d9ef572c03c7883b0a3c652f5c24c316b1ccfd979f13d0cd7da20c7d34d9ec32dfdc81ee7292167e706d705efde5b8f3edfcba41409e642f8897357df5d320d21c43b33600a7ae4e505db957c1afbc189d73f0b5d972d9aaaeeb232ca20eebd5de6fe7f29d01470354413cc9a0af1154b7af7c1029adcd67c74b4798afeb69e09f2cb387305e73a1b5f450202d54f0ef096fe1bde340219a1194d1ac9026e90b366cce0c59b239d10e4888f52ca1780824d
39ae01a6b9f4dd6059191a7f12b2a3d8db3c2868cd4e5a5862b8b625a4197d52c6ac77710116ebd3ced81c4d91ad5fdfbed68312ebce7eea45c1833ca3acf7da2052820eacf5c6b07d086dabeb893391c71417fd8a4b1829ae2cf60d1749d0e25da19530d889461c21da3492a8dc6ccac7de83ac1c2185262c7473c8cc42f547cc9864b02a8073b6aa54a037d8c0de3914784e6205e83d97918b944f11b877b12084c0dd1d36592f8a4f8b8da5bb404c3d2c079b22b6ceabfbcb637c0dbe0201f0909d533f8bf308ada47aee641a012a494d31b54c974e58b87f140258258bb82f31692659db7aa07e17a5b2a0832c24e122d3a8babcc9ee74cbb07d3058bb85b15f6f6b2674aba9fd34367be9782d444335fbed31e3c4086c652597c27104938b47fa10282010100e9fdf843c1550070ca711cb8ff28411466198f0e212511c3186623890c0071bf6561219682fe7dbdfd81176eba7c4faba21614a20721e0fcd63768e6d925688ecc90992059ac89256e0524de90bf3d8a052ce6a9f6adafa712f3107a016e20c80255c9e37d8206d1bc327e06e66eb24288da866b55904fd8b59e6b2ab31bc5eab47e597093c63fab7872102d57b4c589c66077f534a61f5f65127459a33c91f6db61fc431b1ae90be92b4149a3255291baf94304e3efb77b1107b5a3bda911359c40a53c347ff9100baf8f36dc5cd991066b5bdc28b39ed644f404afe9213f4d31c9d4e40f3a5f5e3c39bebeb244e84137544e1a1839c1c8aaebf0c78a7fad590282010100f6fa1f1e6b803742d5490b7441152f500970f46feb0b73a6e4baba2aaf3c0e245ed852fc31d86a8e46eb48e90fac409989dfee45238f97e8f1f8e83a136488c1b04b8a7fb695f37b8616307ff8a8d63e8cfa0b4fb9b9167ffaebabf111aa5a4344afbabd002ae8961c38c02da76a9149abdde93eb389eb32595c29ba30d8283a7885218a5a9d33f7f01dbdf85f3aad016c071395491338ec318d39220e1c7bd69d3d6b520a13a30d745c102b827ad9984b0dd6aed73916ffa82a06c1c111e7047dcd2668f988a0570a71474992eecf416e068f029ec323d5d635fd24694fc9bf96973c255d26c772a95bf8b7f876547a5beabf86f06cd21b67994f944e7a5493028201010095b02fd30069e547426a8bea58e8a2816f33688dac6c6f6974415af8402244a22133baedf34ce499d7036f3f19b38eb00897c18949b0c5a25953c71aeeccfc8f6594173157cc854bd98f16dffe8f28ca13b77eb43a2730585c49fc3f608cd811bb54b03b84bddaa8ef910988567f783012266199667a546a18fd88271fbf63a45ae4fd4884706da8befb9117c0a4d73de5172f8640b1091ed8a4aea3ed4641463f5ff6a5e3401ad7d0c92811f87956d1fd5f9a1d15c
7f3839a08698d9f35f9d966e5000f7cb2655d7b6c4adcd8a9d950ea5f61bb7c9a33c17508f9baa313eecfee4ae493249ebe05a5d7770bbd3551b2eeb752e3649e0636de08e3d672e66cb90282010100ad93e4c31072b063fc5ab5fe22afacece775c795d0efdf7c704cfc027bde0d626a7646fc905bb5a80117e3ca49059af14e0160089f9190065be9bfecf12c3b2145b211c8e89e42dd91c38e9aa23ca73697063564f6f6aa6590088a738722df056004d18d7bccac62b3bafef6172fc2a4b071ea37f31eff7a076bcab7dd144e51a9da8754219352aef2c73478971539fa41de4759285ea626fa3c72e7085be47d554d915bbb5149cb6ef835351f231043049cd941506a034bf2f8767f3e1e42ead92f91cb3d75549b57ef7d56ac39c2d80d67f6a2b4ca192974bfc5060e2dd171217971002193dba12e7e4133ab201f07500a90495a38610279b13a48d54f0c99028201003e3a1ac0c2b67d54ed5c4bbe04a7db99103659d33a4f9d35809e1f60c282e5988dddc964527f3b05e6cc890eab3dcb571d66debf3a5527704c87264b3954d7265f4e8d2c637dd89b491b9cf23f264801f804b90454d65af0c4c830d1aef76f597ef61b26ca857ecce9cb78d4f6c2218c00d2975d46c2b013fbf59b750c3b92d8d3ed9e6d1fd0ef1ec091a5c286a3fe2dead292f40f380065731e2079ebb9f2a7ef2c415ecbb488da98f3a12609ca1b6ec8c734032c8bd513292ff842c375d4acd1b02dfb206b24cd815f8e2f9d4af8e7dea0370b19c1b23cc531d78b40e06e1119ee2e08f6f31c6e2e8444c568d13c5d451a291ae0c9f1d4f27d23b3a00d60ad" + )) + .unwrap(); + let pub_key = PublicKey::try_decode_protobuf(&hex_literal::hex!( + 
"080012a60430820222300d06092a864886f70d01010105000382020f003082020a0282020100e1beab071d08200bde24eef00d049449b07770ff9910257b2d7d5dda242ce8f0e2f12e1af4b32d9efd2c090f66b0f29986dbb645dae9880089704a94e5066d594162ae6ee8892e6ec70701db0a6c445c04778eb3de1293aa1a23c3825b85c6620a2bc3f82f9b0c309bc0ab3aeb1873282bebd3da03c33e76c21e9beb172fd44c9e43be32e2c99827033cf8d0f0c606f4579326c930eb4e854395ad941256542c793902185153c474bed109d6ff5141ebf9cd256cf58893a37f83729f97e7cb435ec679d2e33901d27bb35aa0d7e20561da08885ef0abbf8e2fb48d6a5487047a9ecb1ad41fa7ed84f6e3e8ecd5d98b3982d2a901b4454991766da295ab78822add5612a2df83bcee814cf50973e80d7ef38111b1bd87da2ae92438a2c8cbcc70b31ee319939a3b9c761dbc13b5c086d6b64bf7ae7dacc14622375d92a8ff9af7eb962162bbddebf90acb32adb5e4e4029f1c96019949ecfbfeffd7ac1e3fbcc6b6168c34be3d5a2e5999fcbb39bba7adbca78eab09b9bc39f7fa4b93411f4cc175e70c0a083e96bfaefb04a9580b4753c1738a6a760ae1afd851a1a4bdad231cf56e9284d832483df215a46c1c21bdf0c6cfe951c18f1ee4078c79c13d63edb6e14feaeffabc90ad317e4875fe648101b0864097e998f0ca3025ef9638cd2b0caecd3770ab54a1d9c6ca959b0f5dcbc90caeefc4135baca6fd475224269bbe1b0203010001" + )) + .unwrap(); - assert_eq!(expected_peer_id, peer_id); + assert_eq!(priv_key.public(), pub_key); } #[test] @@ -833,7 +967,7 @@ mod tests { } #[test] - #[cfg(feature = "ed25519")] + #[cfg(all(feature = "ed25519", feature = "rand"))] fn test_publickey_from_ed25519_public_key() { let pubkey = Keypair::generate_ed25519().public(); let ed25519_pubkey = pubkey @@ -844,10 +978,11 @@ mod tests { let converted_pubkey = PublicKey::from(ed25519_pubkey); assert_eq!(converted_pubkey, pubkey); + assert_eq!(converted_pubkey.key_type(), KeyType::Ed25519) } #[test] - #[cfg(feature = "secp256k1")] + #[cfg(all(feature = "secp256k1", feature = "rand"))] fn test_publickey_from_secp256k1_public_key() { let pubkey = Keypair::generate_secp256k1().public(); let secp256k1_pubkey = pubkey @@ -857,15 +992,24 @@ mod tests { let converted_pubkey = PublicKey::from(secp256k1_pubkey); 
assert_eq!(converted_pubkey, pubkey); + assert_eq!(converted_pubkey.key_type(), KeyType::Secp256k1) } #[test] - #[cfg(feature = "ecdsa")] + #[cfg(all(feature = "ecdsa", feature = "rand"))] fn test_publickey_from_ecdsa_public_key() { let pubkey = Keypair::generate_ecdsa().public(); let ecdsa_pubkey = pubkey.clone().try_into_ecdsa().expect("A ecdsa keypair"); let converted_pubkey = PublicKey::from(ecdsa_pubkey); assert_eq!(converted_pubkey, pubkey); + assert_eq!(converted_pubkey.key_type(), KeyType::Ecdsa) + } + + #[test] + #[cfg(feature = "ecdsa")] + fn test_secret_from_ecdsa_private_key() { + let keypair = Keypair::generate_ecdsa(); + assert!(keypair.derive_secret(b"domain separator!").is_some()) } } diff --git a/identity/src/keypair_dummy.rs b/identity/src/keypair_dummy.rs deleted file mode 100644 index 7ae7c5030f6..00000000000 --- a/identity/src/keypair_dummy.rs +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2023 Protocol Labs. -// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - -use crate::error::{DecodingError, SigningError}; - -#[derive(Debug, Clone)] -pub enum Keypair {} - -impl Keypair { - pub fn sign(&self, _: &[u8]) -> Result, SigningError> { - unreachable!("Can never construct empty enum") - } - - pub fn public(&self) -> PublicKey { - unreachable!("Can never construct empty enum") - } - - #[deprecated(note = "Renamed to `encode_protobuf`")] - pub fn to_protobuf_encoding(&self) -> Result, DecodingError> { - unreachable!("Can never encode empty enum") - } - - pub fn encode_protobuf(&self) -> Vec { - unreachable!("Can never encode empty enum") - } - - #[deprecated( - note = "This method name does not follow Rust naming conventions, use `Keypair::try_decode_protobuf` instead." - )] - pub fn from_protobuf_encoding(bytes: &[u8]) -> Result { - Self::try_decode_protobuf(bytes) - } - - pub fn try_decode_protobuf(_: &[u8]) -> Result { - Err(DecodingError::missing_feature( - "ecdsa|rsa|ed25519|secp256k1", - )) - } -} - -#[derive(Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)] -pub enum PublicKey {} - -impl PublicKey { - #[must_use] - pub fn verify(&self, _: &[u8], _: &[u8]) -> bool { - unreachable!("Can never construct empty enum") - } - - #[deprecated(note = "Renamed to `encode_protobuf`")] - pub fn to_protobuf_encoding(&self) -> Vec { - unreachable!("Can never encode empty enum") - } - - pub fn encode_protobuf(&self) -> Vec { - unreachable!("Can never encode empty enum") - } - - #[deprecated( - note = "This method name does not follow Rust naming conventions, use `PublicKey::try_decode_protobuf` instead." 
- )] - pub fn from_protobuf_encoding(bytes: &[u8]) -> Result { - Self::try_decode_protobuf(bytes) - } - - pub fn try_decode_protobuf(_: &[u8]) -> Result { - Err(DecodingError::missing_feature( - "ecdsa|rsa|ed25519|secp256k1", - )) - } - - #[cfg(feature = "peerid")] - pub fn to_peer_id(&self) -> crate::PeerId { - unreachable!("Can never construct empty enum") - } -} diff --git a/identity/src/lib.rs b/identity/src/lib.rs index 322ad67d40c..c78e68d1652 100644 --- a/identity/src/lib.rs +++ b/identity/src/lib.rs @@ -60,20 +60,6 @@ pub mod rsa; pub mod secp256k1; mod error; -#[cfg(any( - feature = "ecdsa", - feature = "secp256k1", - feature = "ed25519", - feature = "rsa" -))] -mod keypair; -#[cfg(all( - not(feature = "ecdsa"), - not(feature = "secp256k1"), - not(feature = "ed25519"), - not(feature = "rsa") -))] -#[path = "./keypair_dummy.rs"] mod keypair; #[cfg(feature = "peerid")] mod peer_id; @@ -98,25 +84,24 @@ impl zeroize::Zeroize for proto::PrivateKey { ))] impl From<&PublicKey> for proto::PublicKey { fn from(key: &PublicKey) -> Self { - #[allow(deprecated)] - match key { + match &key.publickey { #[cfg(feature = "ed25519")] - PublicKey::Ed25519(key) => proto::PublicKey { + keypair::PublicKeyInner::Ed25519(key) => proto::PublicKey { Type: proto::KeyType::Ed25519, Data: key.to_bytes().to_vec(), }, #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] - PublicKey::Rsa(key) => proto::PublicKey { + keypair::PublicKeyInner::Rsa(key) => proto::PublicKey { Type: proto::KeyType::RSA, Data: key.encode_x509(), }, #[cfg(feature = "secp256k1")] - PublicKey::Secp256k1(key) => proto::PublicKey { + keypair::PublicKeyInner::Secp256k1(key) => proto::PublicKey { Type: proto::KeyType::Secp256k1, Data: key.to_bytes().to_vec(), }, #[cfg(feature = "ecdsa")] - PublicKey::Ecdsa(key) => proto::PublicKey { + keypair::PublicKeyInner::Ecdsa(key) => proto::PublicKey { Type: proto::KeyType::ECDSA, Data: key.encode_der(), }, @@ -131,6 +116,7 @@ pub use peer_id::{ParseError, PeerId}; /// The 
type of key a `KeyPair` is holding. #[derive(Debug, PartialEq, Eq)] +#[allow(clippy::upper_case_acronyms)] pub enum KeyType { Ed25519, RSA, diff --git a/identity/src/peer_id.rs b/identity/src/peer_id.rs index 51d9e4f1f6d..1d85fe66ffa 100644 --- a/identity/src/peer_id.rs +++ b/identity/src/peer_id.rs @@ -18,8 +18,7 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use multiaddr::{Multiaddr, Protocol}; -use multihash::{Code, Error, MultihashGeneric}; +#[cfg(feature = "rand")] use rand::Rng; use sha2::Digest as _; use std::{convert::TryFrom, fmt, str::FromStr}; @@ -30,7 +29,7 @@ use thiserror::Error; /// Must be big enough to accommodate for `MAX_INLINE_KEY_LENGTH`. /// 64 satisfies that and can hold 512 bit hashes which is what the ecosystem typically uses. /// Given that this appears in our type-signature, using a "common" number here makes us more compatible. -type Multihash = MultihashGeneric<64>; +type Multihash = multihash::Multihash<64>; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; @@ -80,9 +79,9 @@ impl PeerId { } /// Parses a `PeerId` from bytes. - pub fn from_bytes(data: &[u8]) -> Result { + pub fn from_bytes(data: &[u8]) -> Result { PeerId::from_multihash(Multihash::from_bytes(data)?) - .map_err(|mh| Error::UnsupportedCode(mh.code())) + .map_err(|mh| ParseError::UnsupportedCode(mh.code())) } /// Tries to turn a `Multihash` into a `PeerId`. @@ -100,50 +99,26 @@ impl PeerId { } } - /// Tries to extract a [`PeerId`] from the given [`Multiaddr`]. - /// - /// In case the given [`Multiaddr`] ends with `/p2p/`, this function - /// will return the encapsulated [`PeerId`], otherwise it will return `None`. - pub fn try_from_multiaddr(address: &Multiaddr) -> Option { - address.iter().last().and_then(|p| match p { - Protocol::P2p(hash) => PeerId::from_multihash(hash).ok(), - _ => None, - }) - } - /// Generates a random peer ID from a cryptographically secure PRNG. 
/// /// This is useful for randomly walking on a DHT, or for testing purposes. + #[cfg(feature = "rand")] pub fn random() -> PeerId { let peer_id = rand::thread_rng().gen::<[u8; 32]>(); PeerId { - multihash: Multihash::wrap(Code::Identity.into(), &peer_id) - .expect("The digest size is never too large"), + multihash: Multihash::wrap(0x0, &peer_id).expect("The digest size is never too large"), } } /// Returns a raw bytes representation of this `PeerId`. - pub fn to_bytes(&self) -> Vec { + pub fn to_bytes(self) -> Vec { self.multihash.to_bytes() } /// Returns a base-58 encoded string of this `PeerId`. - pub fn to_base58(&self) -> String { + pub fn to_base58(self) -> String { bs58::encode(self.to_bytes()).into_string() } - - /// Checks whether the public key passed as parameter matches the public key of this `PeerId`. - /// - /// Returns `None` if this `PeerId`s hash algorithm is not supported when encoding the - /// given public key, otherwise `Some` boolean as the result of an equality check. - pub fn is_public_key(&self, public_key: &crate::PublicKey) -> Option { - use multihash::MultihashDigest as _; - - let alg = Code::try_from(self.multihash.code()) - .expect("Internal multihash is always a valid `Code`"); - let enc = public_key.encode_protobuf(); - Some(alg.digest(&enc) == self.multihash) - } } impl From for PeerId { @@ -246,12 +221,15 @@ impl<'de> Deserialize<'de> for PeerId { } } +/// Error when parsing a [`PeerId`] from string or bytes. 
#[derive(Debug, Error)] pub enum ParseError { #[error("base-58 decode error: {0}")] B58(#[from] bs58::decode::Error), - #[error("decoding multihash failed")] - MultiHash, + #[error("unsupported multihash code '{0}'")] + UnsupportedCode(u64), + #[error("invalid multihash")] + InvalidMultihash(#[from] multihash::Error), } impl FromStr for PeerId { @@ -260,7 +238,9 @@ impl FromStr for PeerId { #[inline] fn from_str(s: &str) -> Result { let bytes = bs58::decode(s).into_vec()?; - PeerId::from_bytes(&bytes).map_err(|_| ParseError::MultiHash) + let peer_id = PeerId::from_bytes(&bytes)?; + + Ok(peer_id) } } @@ -269,15 +249,7 @@ mod tests { use super::*; #[test] - #[cfg(feature = "ed25519")] - fn peer_id_is_public_key() { - let key = crate::Keypair::generate_ed25519().public(); - let peer_id = key.to_peer_id(); - assert_eq!(peer_id.is_public_key(&key), Some(true)); - } - - #[test] - #[cfg(feature = "ed25519")] + #[cfg(all(feature = "ed25519", feature = "rand"))] fn peer_id_into_bytes_then_from_bytes() { let peer_id = crate::Keypair::generate_ed25519().public().to_peer_id(); let second = PeerId::from_bytes(&peer_id.to_bytes()).unwrap(); @@ -285,7 +257,7 @@ mod tests { } #[test] - #[cfg(feature = "ed25519")] + #[cfg(all(feature = "ed25519", feature = "rand"))] fn peer_id_to_base58_then_back() { let peer_id = crate::Keypair::generate_ed25519().public().to_peer_id(); let second: PeerId = peer_id.to_base58().parse().unwrap(); @@ -293,38 +265,11 @@ mod tests { } #[test] + #[cfg(feature = "rand")] fn random_peer_id_is_valid() { for _ in 0..5000 { let peer_id = PeerId::random(); assert_eq!(peer_id, PeerId::from_bytes(&peer_id.to_bytes()).unwrap()); } } - - #[test] - fn extract_peer_id_from_multi_address() { - let address = "/memory/1234/p2p/12D3KooWGQmdpzHXCqLno4mMxWXKNFQHASBeF99gTm2JR8Vu5Bdc" - .to_string() - .parse() - .unwrap(); - - #[allow(deprecated)] - let peer_id = PeerId::try_from_multiaddr(&address).unwrap(); - - assert_eq!( - peer_id, - 
"12D3KooWGQmdpzHXCqLno4mMxWXKNFQHASBeF99gTm2JR8Vu5Bdc" - .parse() - .unwrap() - ); - } - - #[test] - fn no_panic_on_extract_peer_id_from_multi_address_if_not_present() { - let address = "/memory/1234".to_string().parse().unwrap(); - - #[allow(deprecated)] - let maybe_empty = PeerId::try_from_multiaddr(&address); - - assert!(maybe_empty.is_none()); - } } diff --git a/identity/src/rsa.rs b/identity/src/rsa.rs index afc685b19b0..91a1126272f 100644 --- a/identity/src/rsa.rs +++ b/identity/src/rsa.rs @@ -42,13 +42,15 @@ impl std::fmt::Debug for Keypair { } impl Keypair { - /// Decode an RSA keypair from a DER-encoded private key in PKCS#8 PrivateKeyInfo - /// format (i.e. unencrypted) as defined in [RFC5208]. + /// Decode an RSA keypair from a DER-encoded private key in PKCS#1 RSAPrivateKey + /// format (i.e. unencrypted) as defined in [RFC3447]. /// - /// [RFC5208]: https://tools.ietf.org/html/rfc5208#section-5 - #[deprecated(note = "Renamed to `Keypair::try_decode_pkcs8`.")] - pub fn from_pkcs8(der: &mut [u8]) -> Result { - Self::try_decode_pkcs8(der) + /// [RFC3447]: https://tools.ietf.org/html/rfc3447#appendix-A.1.2 + pub fn try_decode_pkcs1(der: &mut [u8]) -> Result { + let kp = RsaKeyPair::from_der(der) + .map_err(|e| DecodingError::failed_to_parse("RSA DER PKCS#1 RSAPrivateKey", e))?; + der.zeroize(); + Ok(Keypair(Arc::new(kp))) } /// Decode an RSA keypair from a DER-encoded private key in PKCS#8 PrivateKeyInfo @@ -69,11 +71,11 @@ impl Keypair { /// Sign a message with this keypair. 
pub fn sign(&self, data: &[u8]) -> Result, SigningError> { - let mut signature = vec![0; self.0.public_modulus_len()]; + let mut signature = vec![0; self.0.public().modulus_len()]; let rng = SystemRandom::new(); match self.0.sign(&RSA_PKCS1_SHA256, &rng, data, &mut signature) { Ok(()) => Ok(signature), - Err(e) => Err(SigningError::new("RSA", Some(Box::new(e)))), + Err(e) => Err(SigningError::new("RSA").source(e)), } } } @@ -116,15 +118,6 @@ impl PublicKey { .expect("RSA X.509 public key encoding failed.") } - /// Decode an RSA public key from a DER-encoded X.509 SubjectPublicKeyInfo - /// structure. See also `encode_x509`. - #[deprecated( - note = "This method name does not follow Rust naming conventions, use `PublicKey::try_decode_x509` instead." - )] - pub fn decode_x509(pk: &[u8]) -> Result { - Self::try_decode_x509(pk) - } - /// Try to decode an RSA public key from a DER-encoded X.509 SubjectPublicKeyInfo /// structure. See also `encode_x509`. pub fn try_decode_x509(pk: &[u8]) -> Result { diff --git a/identity/src/secp256k1.rs b/identity/src/secp256k1.rs index 0b4b97710b9..cd57cc980de 100644 --- a/identity/src/secp256k1.rs +++ b/identity/src/secp256k1.rs @@ -20,7 +20,7 @@ //! Secp256k1 keys. -use super::error::{DecodingError, SigningError}; +use super::error::DecodingError; use asn1_der::typed::{DerDecodable, Sequence}; use core::cmp; use core::fmt; @@ -38,6 +38,7 @@ pub struct Keypair { impl Keypair { /// Generate a new sec256k1 `Keypair`. + #[cfg(feature = "rand")] pub fn generate() -> Keypair { Keypair::from(SecretKey::generate()) } @@ -95,23 +96,11 @@ impl fmt::Debug for SecretKey { impl SecretKey { /// Generate a new random Secp256k1 secret key. + #[cfg(feature = "rand")] pub fn generate() -> SecretKey { SecretKey(libsecp256k1::SecretKey::random(&mut rand::thread_rng())) } - /// Create a secret key from a byte slice, zeroing the slice on success. - /// If the bytes do not constitute a valid Secp256k1 secret key, an - /// error is returned. 
- /// - /// Note that the expected binary format is the same as `libsecp256k1`'s. - #[deprecated( - note = "This method name does not follow Rust naming conventions, use `SecretKey::try_from_bytes` instead." - )] - #[allow(unused_mut)] - pub fn from_bytes(mut sk: impl AsMut<[u8]>) -> Result { - Self::try_from_bytes(sk) - } - /// Try to parse a secret key from a byte slice, zeroing the slice on success. /// If the bytes do not constitute a valid Secp256k1 secret key, an /// error is returned. @@ -159,25 +148,25 @@ impl SecretKey { /// ECDSA signature, as defined in [RFC3278]. /// /// [RFC3278]: https://tools.ietf.org/html/rfc3278#section-8.2 - pub fn sign(&self, msg: &[u8]) -> Result, SigningError> { - self.sign_hash(Sha256::digest(msg).as_ref()) - } + pub fn sign(&self, msg: &[u8]) -> Vec { + let generic_array = Sha256::digest(msg); - /// Returns the raw bytes of the secret key. - pub fn to_bytes(&self) -> [u8; 32] { - self.0.serialize() - } + // FIXME: Once `generic-array` hits 1.0, we should be able to just use `Into` here. + let mut array = [0u8; 32]; + array.copy_from_slice(generic_array.as_slice()); + + let message = Message::parse(&array); - /// Sign a raw message of length 256 bits with this secret key, produces a DER-encoded - /// ECDSA signature. - pub fn sign_hash(&self, msg: &[u8]) -> Result, SigningError> { - let m = Message::parse_slice(msg) - .map_err(|_| SigningError::new("failed to parse secp256k1 digest", None))?; - Ok(libsecp256k1::sign(&m, &self.0) + libsecp256k1::sign(&message, &self.0) .0 .serialize_der() .as_ref() - .into()) + .into() + } + + /// Returns the raw bytes of the secret key. 
+ pub fn to_bytes(&self) -> [u8; 32] { + self.0.serialize() } } @@ -209,7 +198,7 @@ impl hash::Hash for PublicKey { impl cmp::PartialOrd for PublicKey { fn partial_cmp(&self, other: &Self) -> Option { - self.to_bytes().partial_cmp(&other.to_bytes()) + Some(self.cmp(other)) } } @@ -232,39 +221,17 @@ impl PublicKey { .unwrap_or(false) } - /// Encode the public key in compressed form, i.e. with one coordinate - /// represented by a single bit. - #[deprecated(note = "Renamed to `PublicKey::to_bytes`.")] - pub fn encode(&self) -> [u8; 33] { - self.to_bytes() - } - /// Convert the public key to a byte buffer in compressed form, i.e. with one coordinate /// represented by a single bit. pub fn to_bytes(&self) -> [u8; 33] { self.0.serialize_compressed() } - /// Encode the public key in uncompressed form. - #[deprecated(note = "Renamed to `PublicKey::to_bytes_uncompressed`.")] - pub fn encode_uncompressed(&self) -> [u8; 65] { - self.to_bytes_uncompressed() - } - /// Convert the public key to a byte buffer in uncompressed form. pub fn to_bytes_uncompressed(&self) -> [u8; 65] { self.0.serialize() } - /// Decode a public key from a byte slice in the the format produced - /// by `encode`. - #[deprecated( - note = "This method name does not follow Rust naming conventions, use `PublicKey::try_from_bytes` instead." - )] - pub fn decode(k: &[u8]) -> Result { - Self::try_from_bytes(k) - } - /// Decode a public key from a byte slice in the the format produced /// by `encode`. 
pub fn try_from_bytes(k: &[u8]) -> Result { @@ -279,6 +246,7 @@ mod tests { use super::*; #[test] + #[cfg(feature = "rand")] fn secp256k1_secret_from_bytes() { let sk1 = SecretKey::generate(); let mut sk_bytes = [0; 32]; diff --git a/identity/tests/keypair_api.rs b/identity/tests/keypair_api.rs index f0d16da2899..bafaf512a46 100644 --- a/identity/tests/keypair_api.rs +++ b/identity/tests/keypair_api.rs @@ -10,4 +10,5 @@ fn using_keypair(kp: Keypair) { let _ = kp.encode_protobuf(); let _ = kp.sign(&[]); let _ = kp.public(); + let _: Option<[u8; 32]> = kp.derive_secret(b"foobar"); } diff --git a/interop-tests/Cargo.toml b/interop-tests/Cargo.toml index 22b825efc91..c45b50dee64 100644 --- a/interop-tests/Cargo.toml +++ b/interop-tests/Cargo.toml @@ -5,16 +5,51 @@ version = "0.1.0" publish = false license = "MIT" +[package.metadata.release] +release = false + +[lib] +crate-type = ["cdylib", "rlib"] + [dependencies] anyhow = "1" -either = "1.8.0" -env_logger = "0.10.0" -futures = "0.3.28" -libp2p = { path = "../libp2p", features = ["websocket", "yamux", "tcp", "tokio", "ping", "noise", "tls", "dns", "rsa", "macros"] } -libp2p-quic = { workspace = true, features = ["tokio"] } +either = "1.9.0" +futures = "0.3.30" +rand = "0.8.5" +serde = { version = "1", features = ["derive"] } +tracing = "0.1.37" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } + +[target.'cfg(not(target_arch = "wasm32"))'.dependencies] +axum = "0.7" +libp2p = { path = "../libp2p", features = [ "ping", "noise", "tls", "rsa", "macros", "websocket", "tokio", "yamux", "tcp", "dns", "identify", "quic"] } +libp2p-mplex = { path = "../muxers/mplex" } +libp2p-noise = { workspace = true } +libp2p-tls = { workspace = true } libp2p-webrtc = { workspace = true, features = ["tokio"] } +mime_guess = "2.0" +redis = { version = "0.23.3", default-features = false, features = [ + "tokio-comp", +] } +rust-embed = "8.1" +serde_json = "1" +thirtyfour = "=0.32.0-rc.8" # 
https://github.com/stevepryde/thirtyfour/issues/169 +tokio = { version = "1.35.1", features = ["full"] } +tower-http = { version = "0.5", features = ["cors", "fs", "trace"] } +tracing = "0.1.37" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } + +[target.'cfg(target_arch = "wasm32")'.dependencies] +libp2p = { path = "../libp2p", features = [ "ping", "macros", "webtransport-websys", "wasm-bindgen", "identify", "websocket-websys", "yamux", "noise"] } libp2p-mplex = { path = "../muxers/mplex" } -log = "0.4" -rand = "0.8.5" -redis = { version = "0.23.0", default-features = false, features = ["tokio-comp"] } -tokio = { version = "1.28.0", features = ["full"] } +libp2p-webrtc-websys = { workspace = true } +wasm-bindgen = { version = "0.2" } +wasm-bindgen-futures = { version = "0.4" } +wasm-logger = { version = "0.2.0" } +instant = "0.1.12" +reqwest = { version = "0.11", features = ["json"] } +console_error_panic_hook = { version = "0.1.7" } +futures-timer = "3.0.2" + +[lints] +workspace = true diff --git a/interop-tests/Dockerfile b/interop-tests/Dockerfile deleted file mode 100644 index 129b981307c..00000000000 --- a/interop-tests/Dockerfile +++ /dev/null @@ -1,15 +0,0 @@ -# syntax=docker/dockerfile:1.5-labs -FROM rust:1.67.0 - -# Run with access to the target cache to speed up builds -WORKDIR /workspace -ADD . . 
-RUN --mount=type=cache,target=./target \ - --mount=type=cache,target=/usr/local/cargo/registry \ - cargo build --package interop-tests - -RUN --mount=type=cache,target=./target \ - mv ./target/debug/ping /usr/local/bin/testplan - -ENV RUST_BACKTRACE=1 -ENTRYPOINT ["testplan"] diff --git a/interop-tests/Dockerfile.chromium b/interop-tests/Dockerfile.chromium new file mode 100644 index 00000000000..5ec46e313aa --- /dev/null +++ b/interop-tests/Dockerfile.chromium @@ -0,0 +1,26 @@ +# syntax=docker/dockerfile:1.5-labs +FROM rust:1.73.0 as chef +RUN rustup target add wasm32-unknown-unknown +RUN wget -q -O- https://github.com/rustwasm/wasm-pack/releases/download/v0.12.1/wasm-pack-v0.12.1-x86_64-unknown-linux-musl.tar.gz | tar -zx -C /usr/local/bin --strip-components 1 --wildcards "wasm-pack-*/wasm-pack" +RUN wget -q -O- https://github.com/WebAssembly/binaryen/releases/download/version_115/binaryen-version_115-x86_64-linux.tar.gz | tar -zx -C /usr/local/bin --strip-components 2 --wildcards "binaryen-version_*/bin/wasm-opt" +RUN wget -q -O- https://github.com/LukeMathWalker/cargo-chef/releases/download/v0.1.62/cargo-chef-x86_64-unknown-linux-gnu.tar.gz | tar -zx -C /usr/local/bin +WORKDIR /app + +FROM chef AS planner +COPY . . +RUN cargo chef prepare --recipe-path recipe.json + +FROM chef AS builder +COPY --from=planner /app/recipe.json recipe.json +# Build dependencies - this is the caching Docker layer! +RUN cargo chef cook --release --package interop-tests --target wasm32-unknown-unknown --recipe-path recipe.json +RUN RUSTFLAGS='-C target-feature=+crt-static' cargo chef cook --release --package interop-tests --target x86_64-unknown-linux-gnu --bin wasm_ping --recipe-path recipe.json +# Build application +COPY . . 
+RUN wasm-pack build --target web interop-tests +RUN RUSTFLAGS='-C target-feature=+crt-static' cargo build --release --package interop-tests --target x86_64-unknown-linux-gnu --bin wasm_ping + +FROM selenium/standalone-chrome:115.0 +COPY --from=builder /app/target/x86_64-unknown-linux-gnu/release/wasm_ping /usr/local/bin/testplan +ENV RUST_BACKTRACE=1 +ENTRYPOINT ["testplan"] diff --git a/interop-tests/Dockerfile.native b/interop-tests/Dockerfile.native new file mode 100644 index 00000000000..91e6cf8893e --- /dev/null +++ b/interop-tests/Dockerfile.native @@ -0,0 +1,21 @@ +# syntax=docker/dockerfile:1.5-labs +FROM lukemathwalker/cargo-chef:0.1.62-rust-1.73.0 as chef +WORKDIR /app + +FROM chef AS planner +COPY . . +RUN cargo chef prepare --recipe-path recipe.json + +FROM chef AS builder +COPY --from=planner /app/recipe.json recipe.json +# Build dependencies - this is the caching Docker layer! +RUN RUSTFLAGS='-C target-feature=+crt-static' cargo chef cook --release --package interop-tests --target $(rustc -vV | grep host | awk '{print $2}') --bin native_ping --recipe-path recipe.json +# Build application +COPY . . +RUN RUSTFLAGS='-C target-feature=+crt-static' cargo build --release --package interop-tests --target $(rustc -vV | grep host | awk '{print $2}') --bin native_ping +RUN cp /app/target/$(rustc -vV | grep host | awk '{print $2}')/release/native_ping /usr/local/bin/testplan + +FROM scratch +COPY --from=builder /usr/local/bin/testplan /usr/local/bin/testplan +ENV RUST_BACKTRACE=1 +ENTRYPOINT ["testplan"] diff --git a/interop-tests/README.md b/interop-tests/README.md index 6a933edf6c0..c2805ddf707 100644 --- a/interop-tests/README.md +++ b/interop-tests/README.md @@ -8,17 +8,34 @@ You can run this test locally by having a local Redis instance and by having another peer that this test can dial or listen for. For example to test that we can dial/listen for ourselves we can do the following: -1. 
Start redis (needed by the tests): `docker run --rm -it -p 6379:6379 - redis/redis-stack`. -2. In one terminal run the dialer: `REDIS_ADDR=localhost:6379 ip="0.0.0.0" - transport=quic-v1 security=quic muxer=quic is_dialer="true" cargo run --bin ping` -3. In another terminal, run the listener: `REDIS_ADDR=localhost:6379 - ip="0.0.0.0" transport=quic-v1 security=quic muxer=quic is_dialer="false" cargo run --bin ping` +1. Start redis (needed by the tests): `docker run --rm -p 6379:6379 redis:7-alpine` +2. In one terminal run the dialer: `RUST_LOG=debug redis_addr=localhost:6379 ip="0.0.0.0" transport=tcp security=noise muxer=yamux is_dialer="true" cargo run --bin native_ping` +3. In another terminal, run the listener: `RUST_LOG=debug redis_addr=localhost:6379 ip="0.0.0.0" transport=tcp security=noise muxer=yamux is_dialer="false" cargo run --bin native_ping` +If testing `transport=quic-v1`, then remove `security` and `muxer` variables from command line, because QUIC protocol comes with its own encryption and multiplexing. To test the interop with other versions do something similar, except replace one of these nodes with the other version's interop test. +# Running this test with webtransport dialer in browser + +To run the webtransport test from within the browser, you'll need the +`chromedriver` in your `$PATH`, compatible with your Chrome browser. +Firefox is not yet supported as it doesn't support all required features yet +(in v114 there is no support for certhashes). + +1. Build the wasm package: `wasm-pack build --target web` +2. Run the dialer: `redis_addr=127.0.0.1:6379 ip=0.0.0.0 transport=webtransport is_dialer=true cargo run --bin wasm_ping` + +# Running this test with webrtc-direct + +To run the webrtc-direct test, you'll need the `chromedriver` in your `$PATH`, compatible with your Chrome browser. + +1. Start redis: `docker run --rm -p 6379:6379 redis:7-alpine`. +2. Build the wasm package: `wasm-pack build --target web` +3. 
With the webrtc-direct listener `RUST_LOG=debug,webrtc=off,webrtc_sctp=off redis_addr="127.0.0.1:6379" ip="0.0.0.0" transport=webrtc-direct is_dialer="false" cargo run --bin native_ping` +4. Run the webrtc-direct dialer: `RUST_LOG=debug,hyper=off redis_addr="127.0.0.1:6379" ip="0.0.0.0" transport=webrtc-direct is_dialer=true cargo run --bin wasm_ping` + # Running all interop tests locally with Compose To run this test against all released libp2p versions you'll need to have the @@ -26,8 +43,8 @@ To run this test against all released libp2p versions you'll need to have the the following (from the root directory of this repository): 1. Build the image: `docker build -t rust-libp2p-head . -f interop-tests/Dockerfile`. -1. Build the images for all released versions in `libp2p/test-plans`: `(cd /libp2p/test-plans/multidim-interop/ && make)`. -1. Run the test: +2. Build the images for all released versions in `libp2p/test-plans`: `(cd /libp2p/test-plans/multidim-interop/ && make)`. +3. Run the test: ``` RUST_LIBP2P="$PWD"; (cd /libp2p/test-plans/multidim-interop/ && npm run test -- --extra-version=$RUST_LIBP2P/interop-tests/ping-version.json --name-filter="rust-libp2p-head") ``` diff --git a/interop-tests/chromium-ping-version.json b/interop-tests/chromium-ping-version.json new file mode 100644 index 00000000000..6ee0a0756d4 --- /dev/null +++ b/interop-tests/chromium-ping-version.json @@ -0,0 +1,11 @@ +{ + "id": "chromium-rust-libp2p-head", + "containerImageID": "chromium-rust-libp2p-head", + "transports": [ + { "name": "webtransport", "onlyDial": true }, + { "name": "webrtc-direct", "onlyDial": true }, + { "name": "ws", "onlyDial": true } + ], + "secureChannels": ["noise"], + "muxers": ["mplex", "yamux"] +} diff --git a/interop-tests/ping-version.json b/interop-tests/native-ping-version.json similarity index 67% rename from interop-tests/ping-version.json rename to interop-tests/native-ping-version.json index fbf858e85e8..c509f72bfd0 100644 --- 
a/interop-tests/ping-version.json +++ b/interop-tests/native-ping-version.json @@ -1,6 +1,6 @@ { - "id": "rust-libp2p-head", - "containerImageID": "rust-libp2p-head", + "id": "native-rust-libp2p-head", + "containerImageID": "native-rust-libp2p-head", "transports": [ "ws", "tcp", diff --git a/interop-tests/pkg/readme.md b/interop-tests/pkg/readme.md new file mode 100644 index 00000000000..28a771b5ac5 --- /dev/null +++ b/interop-tests/pkg/readme.md @@ -0,0 +1,6 @@ +# Wasm package directory + +Content of this directory should be generated with +``` +wasm pack build --target web +``` diff --git a/interop-tests/src/arch.rs b/interop-tests/src/arch.rs new file mode 100644 index 00000000000..52000f90a86 --- /dev/null +++ b/interop-tests/src/arch.rs @@ -0,0 +1,308 @@ +// Native re-exports +#[cfg(not(target_arch = "wasm32"))] +pub(crate) use native::{build_swarm, init_logger, sleep, Instant, RedisClient}; + +// Wasm re-exports +#[cfg(target_arch = "wasm32")] +pub(crate) use wasm::{build_swarm, init_logger, sleep, Instant, RedisClient}; + +#[cfg(not(target_arch = "wasm32"))] +pub(crate) mod native { + use std::time::Duration; + + use anyhow::{bail, Context, Result}; + use futures::future::BoxFuture; + use futures::FutureExt; + use libp2p::identity::Keypair; + use libp2p::swarm::{NetworkBehaviour, Swarm}; + use libp2p::{noise, tcp, tls, yamux}; + use libp2p_mplex as mplex; + use libp2p_webrtc as webrtc; + use redis::AsyncCommands; + use tracing_subscriber::EnvFilter; + + use crate::{Muxer, SecProtocol, Transport}; + + pub(crate) type Instant = std::time::Instant; + + pub(crate) fn init_logger() { + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); + } + + pub(crate) fn sleep(duration: Duration) -> BoxFuture<'static, ()> { + tokio::time::sleep(duration).boxed() + } + + pub(crate) async fn build_swarm( + ip: &str, + transport: Transport, + sec_protocol: Option, + muxer: Option, + behaviour_constructor: impl FnOnce(&Keypair) -> B, 
+ ) -> Result<(Swarm, String)> { + let (swarm, addr) = match (transport, sec_protocol, muxer) { + (Transport::QuicV1, None, None) => ( + libp2p::SwarmBuilder::with_new_identity() + .with_tokio() + .with_quic() + .with_behaviour(behaviour_constructor)? + .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(5))) + .build(), + format!("/ip4/{ip}/udp/0/quic-v1"), + ), + (Transport::Tcp, Some(SecProtocol::Tls), Some(Muxer::Mplex)) => ( + libp2p::SwarmBuilder::with_new_identity() + .with_tokio() + .with_tcp( + tcp::Config::default(), + tls::Config::new, + mplex::MplexConfig::default, + )? + .with_behaviour(behaviour_constructor)? + .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(5))) + .build(), + format!("/ip4/{ip}/tcp/0"), + ), + (Transport::Tcp, Some(SecProtocol::Tls), Some(Muxer::Yamux)) => ( + libp2p::SwarmBuilder::with_new_identity() + .with_tokio() + .with_tcp( + tcp::Config::default(), + tls::Config::new, + yamux::Config::default, + )? + .with_behaviour(behaviour_constructor)? + .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(5))) + .build(), + format!("/ip4/{ip}/tcp/0"), + ), + (Transport::Tcp, Some(SecProtocol::Noise), Some(Muxer::Mplex)) => ( + libp2p::SwarmBuilder::with_new_identity() + .with_tokio() + .with_tcp( + tcp::Config::default(), + noise::Config::new, + mplex::MplexConfig::default, + )? + .with_behaviour(behaviour_constructor)? + .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(5))) + .build(), + format!("/ip4/{ip}/tcp/0"), + ), + (Transport::Tcp, Some(SecProtocol::Noise), Some(Muxer::Yamux)) => ( + libp2p::SwarmBuilder::with_new_identity() + .with_tokio() + .with_tcp( + tcp::Config::default(), + noise::Config::new, + yamux::Config::default, + )? + .with_behaviour(behaviour_constructor)? 
+ .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(5))) + .build(), + format!("/ip4/{ip}/tcp/0"), + ), + (Transport::Ws, Some(SecProtocol::Tls), Some(Muxer::Mplex)) => ( + libp2p::SwarmBuilder::with_new_identity() + .with_tokio() + .with_websocket(tls::Config::new, mplex::MplexConfig::default) + .await? + .with_behaviour(behaviour_constructor)? + .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(5))) + .build(), + format!("/ip4/{ip}/tcp/0/ws"), + ), + (Transport::Ws, Some(SecProtocol::Tls), Some(Muxer::Yamux)) => ( + libp2p::SwarmBuilder::with_new_identity() + .with_tokio() + .with_websocket(tls::Config::new, yamux::Config::default) + .await? + .with_behaviour(behaviour_constructor)? + .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(5))) + .build(), + format!("/ip4/{ip}/tcp/0/ws"), + ), + (Transport::Ws, Some(SecProtocol::Noise), Some(Muxer::Mplex)) => ( + libp2p::SwarmBuilder::with_new_identity() + .with_tokio() + .with_websocket(noise::Config::new, mplex::MplexConfig::default) + .await? + .with_behaviour(behaviour_constructor)? + .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(5))) + .build(), + format!("/ip4/{ip}/tcp/0/ws"), + ), + (Transport::Ws, Some(SecProtocol::Noise), Some(Muxer::Yamux)) => ( + libp2p::SwarmBuilder::with_new_identity() + .with_tokio() + .with_websocket(noise::Config::new, yamux::Config::default) + .await? + .with_behaviour(behaviour_constructor)? + .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(5))) + .build(), + format!("/ip4/{ip}/tcp/0/ws"), + ), + (Transport::WebRtcDirect, None, None) => ( + libp2p::SwarmBuilder::with_new_identity() + .with_tokio() + .with_other_transport(|key| { + Ok(webrtc::tokio::Transport::new( + key.clone(), + webrtc::tokio::Certificate::generate(&mut rand::thread_rng())?, + )) + })? + .with_behaviour(behaviour_constructor)? 
+ .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(5))) + .build(), + format!("/ip4/{ip}/udp/0/webrtc-direct"), + ), + (t, s, m) => bail!("Unsupported combination: {t:?} {s:?} {m:?}"), + }; + Ok((swarm, addr)) + } + + pub(crate) struct RedisClient(redis::Client); + + impl RedisClient { + pub(crate) fn new(redis_addr: &str) -> Result { + Ok(Self( + redis::Client::open(redis_addr).context("Could not connect to redis")?, + )) + } + + pub(crate) async fn blpop(&self, key: &str, timeout: u64) -> Result> { + let mut conn = self.0.get_async_connection().await?; + Ok(conn.blpop(key, timeout as usize).await?) + } + + pub(crate) async fn rpush(&self, key: &str, value: String) -> Result<()> { + let mut conn = self.0.get_async_connection().await?; + conn.rpush(key, value).await?; + Ok(()) + } + } +} + +#[cfg(target_arch = "wasm32")] +pub(crate) mod wasm { + use anyhow::{bail, Context, Result}; + use futures::future::{BoxFuture, FutureExt}; + use libp2p::core::upgrade::Version; + use libp2p::identity::Keypair; + use libp2p::swarm::{NetworkBehaviour, Swarm}; + use libp2p::{noise, websocket_websys, webtransport_websys, yamux, Transport as _}; + use libp2p_mplex as mplex; + use libp2p_webrtc_websys as webrtc_websys; + use std::time::Duration; + + use crate::{BlpopRequest, Muxer, SecProtocol, Transport}; + + pub(crate) type Instant = instant::Instant; + + pub(crate) fn init_logger() { + console_error_panic_hook::set_once(); + wasm_logger::init(wasm_logger::Config::default()); + } + + pub(crate) fn sleep(duration: Duration) -> BoxFuture<'static, ()> { + futures_timer::Delay::new(duration).boxed() + } + + pub(crate) async fn build_swarm( + ip: &str, + transport: Transport, + sec_protocol: Option, + muxer: Option, + behaviour_constructor: impl FnOnce(&Keypair) -> B, + ) -> Result<(Swarm, String)> { + Ok(match (transport, sec_protocol, muxer) { + (Transport::Webtransport, None, None) => ( + libp2p::SwarmBuilder::with_new_identity() + .with_wasm_bindgen() + 
.with_other_transport(|local_key| { + webtransport_websys::Transport::new(webtransport_websys::Config::new( + &local_key, + )) + })? + .with_behaviour(behaviour_constructor)? + .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(5))) + .build(), + format!("/ip4/{ip}/udp/0/quic/webtransport"), + ), + (Transport::Ws, Some(SecProtocol::Noise), Some(Muxer::Mplex)) => ( + libp2p::SwarmBuilder::with_new_identity() + .with_wasm_bindgen() + .with_other_transport(|local_key| { + Ok(websocket_websys::Transport::default() + .upgrade(Version::V1Lazy) + .authenticate( + noise::Config::new(&local_key) + .context("failed to initialise noise")?, + ) + .multiplex(mplex::MplexConfig::new())) + })? + .with_behaviour(behaviour_constructor)? + .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(5))) + .build(), + format!("/ip4/{ip}/tcp/0/wss"), + ), + (Transport::Ws, Some(SecProtocol::Noise), Some(Muxer::Yamux)) => ( + libp2p::SwarmBuilder::with_new_identity() + .with_wasm_bindgen() + .with_other_transport(|local_key| { + Ok(websocket_websys::Transport::default() + .upgrade(Version::V1Lazy) + .authenticate( + noise::Config::new(&local_key) + .context("failed to initialise noise")?, + ) + .multiplex(yamux::Config::default())) + })? + .with_behaviour(behaviour_constructor)? + .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(5))) + .build(), + format!("/ip4/{ip}/tcp/0/wss"), + ), + (Transport::WebRtcDirect, None, None) => ( + libp2p::SwarmBuilder::with_new_identity() + .with_wasm_bindgen() + .with_other_transport(|local_key| { + webrtc_websys::Transport::new(webrtc_websys::Config::new(&local_key)) + })? + .with_behaviour(behaviour_constructor)? 
+ .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(5))) + .build(), + format!("/ip4/{ip}/udp/0/webrtc-direct"), + ), + (t, s, m) => bail!("Unsupported combination: {t:?} {s:?} {m:?}"), + }) + } + + pub(crate) struct RedisClient(String); + + impl RedisClient { + pub(crate) fn new(base_url: &str) -> Result { + Ok(Self(base_url.to_owned())) + } + + pub(crate) async fn blpop(&self, key: &str, timeout: u64) -> Result> { + let res = reqwest::Client::new() + .post(&format!("http://{}/blpop", self.0)) + .json(&BlpopRequest { + key: key.to_owned(), + timeout, + }) + .send() + .await? + .json() + .await?; + Ok(res) + } + + pub(crate) async fn rpush(&self, _: &str, _: String) -> Result<()> { + bail!("unimplemented") + } + } +} diff --git a/interop-tests/src/bin/config/mod.rs b/interop-tests/src/bin/config/mod.rs new file mode 100644 index 00000000000..dff297ef412 --- /dev/null +++ b/interop-tests/src/bin/config/mod.rs @@ -0,0 +1,44 @@ +use std::env; + +use anyhow::{Context, Result}; + +#[derive(Debug, Clone)] +pub(crate) struct Config { + pub(crate) transport: String, + pub(crate) sec_protocol: Option, + pub(crate) muxer: Option, + pub(crate) ip: String, + pub(crate) is_dialer: bool, + pub(crate) test_timeout: u64, + pub(crate) redis_addr: String, +} + +impl Config { + pub(crate) fn from_env() -> Result { + let transport = + env::var("transport").context("transport environment variable is not set")?; + let ip = env::var("ip").context("ip environment variable is not set")?; + let is_dialer = env::var("is_dialer") + .unwrap_or_else(|_| "true".into()) + .parse::()?; + let test_timeout = env::var("test_timeout_seconds") + .unwrap_or_else(|_| "180".into()) + .parse::()?; + let redis_addr = env::var("redis_addr") + .map(|addr| format!("redis://{addr}")) + .unwrap_or_else(|_| "redis://redis:6379".into()); + + let sec_protocol = env::var("security").ok(); + let muxer = env::var("muxer").ok(); + + Ok(Self { + transport, + sec_protocol, + muxer, + ip, + 
is_dialer, + test_timeout, + redis_addr, + }) + } +} diff --git a/interop-tests/src/bin/native_ping.rs b/interop-tests/src/bin/native_ping.rs new file mode 100644 index 00000000000..2fb6ce12e29 --- /dev/null +++ b/interop-tests/src/bin/native_ping.rs @@ -0,0 +1,23 @@ +use anyhow::Result; + +mod config; + +#[tokio::main] +async fn main() -> Result<()> { + let config = config::Config::from_env()?; + + let report = interop_tests::run_test( + &config.transport, + &config.ip, + config.is_dialer, + config.test_timeout, + &config.redis_addr, + config.sec_protocol, + config.muxer, + ) + .await?; + + println!("{}", serde_json::to_string(&report)?); + + Ok(()) +} diff --git a/interop-tests/src/bin/ping.rs b/interop-tests/src/bin/ping.rs deleted file mode 100644 index 042be8b3eb8..00000000000 --- a/interop-tests/src/bin/ping.rs +++ /dev/null @@ -1,267 +0,0 @@ -use std::env; -use std::str::FromStr; -use std::time::{Duration, Instant}; - -use anyhow::{bail, Context, Result}; -use either::Either; -use env_logger::{Env, Target}; -use futures::StreamExt; -use libp2p::core::muxing::StreamMuxerBox; -use libp2p::core::upgrade::Version; -use libp2p::swarm::{keep_alive, NetworkBehaviour, SwarmEvent}; -use libp2p::websocket::WsConfig; -use libp2p::{ - identity, noise, ping, swarm::SwarmBuilder, tcp, tls, yamux, Multiaddr, PeerId, Transport as _, -}; -use libp2p_mplex as mplex; -use libp2p_quic as quic; -use libp2p_webrtc as webrtc; -use redis::AsyncCommands; - -#[tokio::main] -async fn main() -> Result<()> { - let local_key = identity::Keypair::generate_ed25519(); - let local_peer_id = PeerId::from(local_key.public()); - - let transport_param: Transport = from_env("transport")?; - - let ip = env::var("ip").context("ip environment variable is not set")?; - - let is_dialer = env::var("is_dialer") - .unwrap_or_else(|_| "true".into()) - .parse::()?; - - let test_timeout = env::var("test_timeout_seconds") - .unwrap_or_else(|_| "180".into()) - .parse::()?; - - let redis_addr = 
env::var("redis_addr") - .map(|addr| format!("redis://{addr}")) - .unwrap_or_else(|_| "redis://redis:6379".into()); - - let client = redis::Client::open(redis_addr).context("Could not connect to redis")?; - - // Build the transport from the passed ENV var. - let (boxed_transport, local_addr) = match (transport_param, from_env("security")) { - (Transport::QuicV1, _) => ( - quic::tokio::Transport::new(quic::Config::new(&local_key)) - .map(|(p, c), _| (p, StreamMuxerBox::new(c))) - .boxed(), - format!("/ip4/{ip}/udp/0/quic-v1"), - ), - (Transport::Tcp, Ok(SecProtocol::Tls)) => ( - tcp::tokio::Transport::new(tcp::Config::new()) - .upgrade(Version::V1Lazy) - .authenticate(tls::Config::new(&local_key).context("failed to initialise tls")?) - .multiplex(muxer_protocol_from_env()?) - .timeout(Duration::from_secs(5)) - .boxed(), - format!("/ip4/{ip}/tcp/0"), - ), - (Transport::Tcp, Ok(SecProtocol::Noise)) => ( - tcp::tokio::Transport::new(tcp::Config::new()) - .upgrade(Version::V1Lazy) - .authenticate(noise::Config::new(&local_key).context("failed to intialise noise")?) - .multiplex(muxer_protocol_from_env()?) - .timeout(Duration::from_secs(5)) - .boxed(), - format!("/ip4/{ip}/tcp/0"), - ), - (Transport::Ws, Ok(SecProtocol::Tls)) => ( - WsConfig::new(tcp::tokio::Transport::new(tcp::Config::new())) - .upgrade(Version::V1Lazy) - .authenticate(tls::Config::new(&local_key).context("failed to initialise tls")?) - .multiplex(muxer_protocol_from_env()?) - .timeout(Duration::from_secs(5)) - .boxed(), - format!("/ip4/{ip}/tcp/0/ws"), - ), - (Transport::Ws, Ok(SecProtocol::Noise)) => ( - WsConfig::new(tcp::tokio::Transport::new(tcp::Config::new())) - .upgrade(Version::V1Lazy) - .authenticate(noise::Config::new(&local_key).context("failed to intialise noise")?) - .multiplex(muxer_protocol_from_env()?) 
- .timeout(Duration::from_secs(5)) - .boxed(), - format!("/ip4/{ip}/tcp/0/ws"), - ), - (Transport::WebRtcDirect, _) => ( - webrtc::tokio::Transport::new( - local_key, - webrtc::tokio::Certificate::generate(&mut rand::thread_rng())?, - ) - .map(|(peer_id, conn), _| (peer_id, StreamMuxerBox::new(conn))) - .boxed(), - format!("/ip4/{ip}/udp/0/webrtc-direct"), - ), - (Transport::Tcp, Err(_)) => bail!("Missing security protocol for TCP transport"), - (Transport::Ws, Err(_)) => bail!("Missing security protocol for Websocket transport"), - }; - - let mut swarm = SwarmBuilder::with_tokio_executor( - boxed_transport, - Behaviour { - ping: ping::Behaviour::new(ping::Config::new().with_interval(Duration::from_secs(1))), - keep_alive: keep_alive::Behaviour, - }, - local_peer_id, - ) - .build(); - - let mut conn = client.get_async_connection().await?; - - log::info!("Running ping test: {}", swarm.local_peer_id()); - env_logger::Builder::from_env(Env::default().default_filter_or("info")) - .target(Target::Stdout) - .init(); - - log::info!( - "Test instance, listening for incoming connections on: {:?}.", - local_addr - ); - let id = swarm.listen_on(local_addr.parse()?)?; - - // Run a ping interop test. Based on `is_dialer`, either dial the address - // retrieved via `listenAddr` key over the redis connection. Or wait to be pinged and have - // `dialerDone` key ready on the redis connection. 
- if is_dialer { - let result: Vec = conn.blpop("listenerAddr", test_timeout as usize).await?; - let other = result - .get(1) - .context("Failed to wait for listener to be ready")?; - - let handshake_start = Instant::now(); - - swarm.dial(other.parse::()?)?; - log::info!("Test instance, dialing multiaddress on: {}.", other); - - let rtt = loop { - if let Some(SwarmEvent::Behaviour(BehaviourEvent::Ping(ping::Event { - peer: _, - result: Ok(ping::Success::Ping { rtt }), - }))) = swarm.next().await - { - log::info!("Ping successful: {rtt:?}"); - break rtt.as_millis() as f32; - } - }; - - let handshake_plus_ping = handshake_start.elapsed().as_millis() as f32; - println!( - r#"{{"handshakePlusOneRTTMillis": {handshake_plus_ping:.1}, "pingRTTMilllis": {rtt:.1}}}"# - ); - } else { - loop { - if let Some(SwarmEvent::NewListenAddr { - listener_id, - address, - }) = swarm.next().await - { - if address.to_string().contains("127.0.0.1") { - continue; - } - if listener_id == id { - let ma = format!("{address}/p2p/{local_peer_id}"); - conn.rpush("listenerAddr", ma).await?; - break; - } - } - } - - // Drive Swarm in the background while we await for `dialerDone` to be ready. - tokio::spawn(async move { - loop { - swarm.next().await; - } - }); - tokio::time::sleep(Duration::from_secs(test_timeout)).await; - bail!("Test should have been killed by the test runner!"); - } - - Ok(()) -} - -fn muxer_protocol_from_env() -> Result> { - Ok(match from_env("muxer")? { - Muxer::Yamux => Either::Left(yamux::Config::default()), - Muxer::Mplex => Either::Right(mplex::MplexConfig::new()), - }) -} - -/// Supported transports by rust-libp2p. 
-#[derive(Clone, Debug)] -pub enum Transport { - Tcp, - QuicV1, - WebRtcDirect, - Ws, -} - -impl FromStr for Transport { - type Err = anyhow::Error; - - fn from_str(s: &str) -> std::result::Result { - Ok(match s { - "tcp" => Self::Tcp, - "quic-v1" => Self::QuicV1, - "webrtc-direct" => Self::WebRtcDirect, - "ws" => Self::Ws, - other => bail!("unknown transport {other}"), - }) - } -} - -/// Supported stream multiplexers by rust-libp2p. -#[derive(Clone, Debug)] -pub enum Muxer { - Mplex, - Yamux, -} - -impl FromStr for Muxer { - type Err = anyhow::Error; - - fn from_str(s: &str) -> std::result::Result { - Ok(match s { - "mplex" => Self::Mplex, - "yamux" => Self::Yamux, - other => bail!("unknown muxer {other}"), - }) - } -} - -/// Supported security protocols by rust-libp2p. -#[derive(Clone, Debug)] -pub enum SecProtocol { - Noise, - Tls, -} - -impl FromStr for SecProtocol { - type Err = anyhow::Error; - - fn from_str(s: &str) -> std::result::Result { - Ok(match s { - "noise" => Self::Noise, - "tls" => Self::Tls, - other => bail!("unknown security protocol {other}"), - }) - } -} - -#[derive(NetworkBehaviour)] -struct Behaviour { - ping: ping::Behaviour, - keep_alive: keep_alive::Behaviour, -} - -/// Helper function to get a ENV variable into an test parameter like `Transport`. -pub fn from_env(env_var: &str) -> Result -where - T: FromStr, -{ - env::var(env_var) - .with_context(|| format!("{env_var} environment variable is not set"))? 
- .parse() - .map_err(Into::into) -} diff --git a/interop-tests/src/bin/wasm_ping.rs b/interop-tests/src/bin/wasm_ping.rs new file mode 100644 index 00000000000..e1bb2ea49fb --- /dev/null +++ b/interop-tests/src/bin/wasm_ping.rs @@ -0,0 +1,239 @@ +#![allow(non_upper_case_globals)] + +use std::future::IntoFuture; +use std::process::Stdio; +use std::time::Duration; + +use anyhow::{bail, Context, Result}; +use axum::http::{header, Uri}; +use axum::response::{Html, IntoResponse, Response}; +use axum::routing::get; +use axum::{extract::State, http::StatusCode, routing::post, Json, Router}; +use redis::{AsyncCommands, Client}; +use thirtyfour::prelude::*; +use tokio::io::{AsyncBufReadExt, BufReader}; +use tokio::net::TcpListener; +use tokio::process::Child; +use tokio::sync::mpsc; +use tower_http::cors::CorsLayer; +use tower_http::trace::TraceLayer; +use tracing_subscriber::{fmt, prelude::*, EnvFilter}; + +use interop_tests::{BlpopRequest, Report}; + +mod config; + +const BIND_ADDR: &str = "127.0.0.1:8080"; + +/// Embedded Wasm package +/// +/// Make sure to build the wasm with `wasm-pack build --target web` +#[derive(rust_embed::RustEmbed)] +#[folder = "pkg"] +struct WasmPackage; + +#[derive(Clone)] +struct TestState { + redis_client: Client, + config: config::Config, + results_tx: mpsc::Sender>, +} + +#[tokio::main] +async fn main() -> Result<()> { + // start logging + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + // read env variables + let config = config::Config::from_env()?; + let test_timeout = Duration::from_secs(config.test_timeout); + + // create a redis client + let redis_client = + Client::open(config.redis_addr.as_str()).context("Could not connect to redis")?; + let (results_tx, mut results_rx) = mpsc::channel(1); + + let state = TestState { + redis_client, + config, + results_tx, + }; + + // create a wasm-app service + let app = Router::new() + // Redis proxy + .route("/blpop", post(redis_blpop)) 
+ // Report tests status + .route("/results", post(post_results)) + // Wasm ping test trigger + .route("/", get(serve_index_html)) + // Wasm app static files + .fallback(serve_wasm_pkg) + // Middleware + .layer(CorsLayer::very_permissive()) + .layer(TraceLayer::new_for_http()) + .with_state(state); + + // Run the service in background + tokio::spawn(axum::serve(TcpListener::bind(BIND_ADDR).await?, app).into_future()); + + // Start executing the test in a browser + let (mut chrome, driver) = open_in_browser().await?; + + // Wait for the outcome to be reported + let test_result = match tokio::time::timeout(test_timeout, results_rx.recv()).await { + Ok(received) => received.unwrap_or(Err("Results channel closed".to_owned())), + Err(_) => Err("Test timed out".to_owned()), + }; + + // Close the browser after we got the results + driver.quit().await?; + chrome.kill().await?; + + match test_result { + Ok(report) => println!("{}", serde_json::to_string(&report)?), + Err(error) => bail!("Tests failed: {error}"), + } + + Ok(()) +} + +async fn open_in_browser() -> Result<(Child, WebDriver)> { + // start a webdriver process + // currently only the chromedriver is supported as firefox doesn't + // have support yet for the certhashes + let chromedriver = if cfg!(windows) { + "chromedriver.cmd" + } else { + "chromedriver" + }; + let mut chrome = tokio::process::Command::new(chromedriver) + .arg("--port=45782") + .stdout(Stdio::piped()) + .spawn()?; + // read driver's stdout + let driver_out = chrome + .stdout + .take() + .context("No stdout found for webdriver")?; + // wait for the 'ready' message + let mut reader = BufReader::new(driver_out).lines(); + while let Some(line) = reader.next_line().await? 
{ + if line.contains("ChromeDriver was started successfully.") { + break; + } + } + + // run a webdriver client + let mut caps = DesiredCapabilities::chrome(); + caps.set_headless()?; + let driver = WebDriver::new("http://localhost:45782", caps).await?; + // go to the wasm test service + driver.goto(format!("http://{BIND_ADDR}")).await?; + + Ok((chrome, driver)) +} + +/// Redis proxy handler. +/// `blpop` is currently the only redis client method used in a ping dialer. +async fn redis_blpop( + state: State, + request: Json, +) -> Result>, StatusCode> { + let client = state.0.redis_client; + let mut conn = client.get_async_connection().await.map_err(|e| { + tracing::warn!("Failed to connect to redis: {e}"); + StatusCode::INTERNAL_SERVER_ERROR + })?; + let res = conn + .blpop(&request.key, request.timeout as usize) + .await + .map_err(|e| { + tracing::warn!( + key=%request.key, + timeout=%request.timeout, + "Failed to get list elem key within timeout: {e}" + ); + StatusCode::INTERNAL_SERVER_ERROR + })?; + + Ok(Json(res)) +} + +/// Receive test results +async fn post_results( + state: State, + request: Json>, +) -> Result<(), StatusCode> { + state.0.results_tx.send(request.0).await.map_err(|_| { + tracing::error!("Failed to send results"); + StatusCode::INTERNAL_SERVER_ERROR + }) +} + +/// Serve the main page which loads our javascript +async fn serve_index_html(state: State) -> Result { + let config::Config { + transport, + ip, + is_dialer, + test_timeout, + sec_protocol, + muxer, + .. 
+ } = state.0.config; + + let sec_protocol = sec_protocol + .map(|p| format!(r#""{p}""#)) + .unwrap_or("null".to_owned()); + let muxer = muxer + .map(|p| format!(r#""{p}""#)) + .unwrap_or("null".to_owned()); + + Ok(Html(format!( + r#" + + + + + libp2p ping test + + + + + + "# + ))) +} + +async fn serve_wasm_pkg(uri: Uri) -> Result { + let path = uri.path().trim_start_matches('/').to_string(); + if let Some(content) = WasmPackage::get(&path) { + let mime = mime_guess::from_path(&path).first_or_octet_stream(); + Ok(Response::builder() + .header(header::CONTENT_TYPE, mime.as_ref()) + .body(content.data.into()) + .unwrap()) + } else { + Err(StatusCode::NOT_FOUND) + } +} diff --git a/interop-tests/src/lib.rs b/interop-tests/src/lib.rs new file mode 100644 index 00000000000..0154bec51a4 --- /dev/null +++ b/interop-tests/src/lib.rs @@ -0,0 +1,274 @@ +use std::str::FromStr; +use std::time::Duration; + +use anyhow::{bail, Context, Result}; +use futures::{FutureExt, StreamExt}; +use libp2p::identity::Keypair; +use libp2p::swarm::SwarmEvent; +use libp2p::{identify, ping, swarm::NetworkBehaviour, Multiaddr}; +#[cfg(target_arch = "wasm32")] +use wasm_bindgen::prelude::*; + +mod arch; + +use arch::{build_swarm, init_logger, Instant, RedisClient}; + +pub async fn run_test( + transport: &str, + ip: &str, + is_dialer: bool, + test_timeout_seconds: u64, + redis_addr: &str, + sec_protocol: Option, + muxer: Option, +) -> Result { + init_logger(); + + let test_timeout = Duration::from_secs(test_timeout_seconds); + let transport = transport.parse().context("Couldn't parse transport")?; + let sec_protocol = sec_protocol + .map(|sec_protocol| { + sec_protocol + .parse() + .context("Couldn't parse security protocol") + }) + .transpose()?; + let muxer = muxer + .map(|sec_protocol| { + sec_protocol + .parse() + .context("Couldn't parse muxer protocol") + }) + .transpose()?; + + let redis_client = RedisClient::new(redis_addr).context("Could not connect to redis")?; + + // Build the transport 
from the passed ENV var. + let (mut swarm, local_addr) = + build_swarm(ip, transport, sec_protocol, muxer, build_behaviour).await?; + + tracing::info!(local_peer=%swarm.local_peer_id(), "Running ping test"); + + // See https://github.com/libp2p/rust-libp2p/issues/4071. + #[cfg(not(target_arch = "wasm32"))] + let maybe_id = if transport == Transport::WebRtcDirect { + Some(swarm.listen_on(local_addr.parse()?)?) + } else { + None + }; + #[cfg(target_arch = "wasm32")] + let maybe_id = None; + + // Run a ping interop test. Based on `is_dialer`, either dial the address + // retrieved via `listenAddr` key over the redis connection. Or wait to be pinged and have + // `dialerDone` key ready on the redis connection. + match is_dialer { + true => { + let result: Vec = redis_client + .blpop("listenerAddr", test_timeout.as_secs()) + .await?; + let other = result + .get(1) + .context("Failed to wait for listener to be ready")?; + + let handshake_start = Instant::now(); + + swarm.dial(other.parse::()?)?; + tracing::info!(listener=%other, "Test instance, dialing multiaddress"); + + let rtt = loop { + if let Some(SwarmEvent::Behaviour(BehaviourEvent::Ping(ping::Event { + result: Ok(rtt), + .. + }))) = swarm.next().await + { + tracing::info!(?rtt, "Ping successful"); + break rtt.as_micros() as f32 / 1000.; + } + }; + + let handshake_plus_ping = handshake_start.elapsed().as_micros() as f32 / 1000.; + Ok(Report { + handshake_plus_one_rtt_millis: handshake_plus_ping, + ping_rtt_millis: rtt, + }) + } + false => { + // Listen if we haven't done so already. + // This is a hack until https://github.com/libp2p/rust-libp2p/issues/4071 is fixed at which point we can do this unconditionally here. 
+ let id = match maybe_id { + None => swarm.listen_on(local_addr.parse()?)?, + Some(id) => id, + }; + + tracing::info!( + address=%local_addr, + "Test instance, listening for incoming connections on address" + ); + + loop { + if let Some(SwarmEvent::NewListenAddr { + listener_id, + address, + }) = swarm.next().await + { + if address.to_string().contains("127.0.0.1") { + continue; + } + if listener_id == id { + let ma = format!("{address}/p2p/{}", swarm.local_peer_id()); + redis_client.rpush("listenerAddr", ma.clone()).await?; + break; + } + } + } + + // Drive Swarm while we await for `dialerDone` to be ready. + futures::future::select( + async move { + loop { + let event = swarm.next().await.unwrap(); + + tracing::debug!("{event:?}"); + } + } + .boxed(), + arch::sleep(test_timeout), + ) + .await; + + // The loop never ends so if we get here, we hit the timeout. + bail!("Test should have been killed by the test runner!"); + } + } +} + +#[cfg(target_arch = "wasm32")] +#[wasm_bindgen] +pub async fn run_test_wasm( + transport: &str, + ip: &str, + is_dialer: bool, + test_timeout_secs: u64, + base_url: &str, + sec_protocol: Option, + muxer: Option, +) -> Result<(), JsValue> { + let result = run_test( + transport, + ip, + is_dialer, + test_timeout_secs, + base_url, + sec_protocol, + muxer, + ) + .await; + tracing::info!(?result, "Sending test result"); + reqwest::Client::new() + .post(&format!("http://{}/results", base_url)) + .json(&result.map_err(|e| e.to_string())) + .send() + .await? + .error_for_status() + .map_err(|e| format!("Sending test result failed: {e}"))?; + + Ok(()) +} + +/// A request to redis proxy that will pop the value from the list +/// and will wait for it being inserted until a timeout is reached. 
+#[derive(serde::Deserialize, serde::Serialize)] +pub struct BlpopRequest { + pub key: String, + pub timeout: u64, +} + +/// A report generated by the test +#[derive(Copy, Clone, Debug, serde::Serialize, serde::Deserialize)] +pub struct Report { + #[serde(rename = "handshakePlusOneRTTMillis")] + handshake_plus_one_rtt_millis: f32, + #[serde(rename = "pingRTTMilllis")] + ping_rtt_millis: f32, +} + +/// Supported transports by rust-libp2p. +#[derive(Clone, Copy, Debug, PartialEq)] +pub enum Transport { + Tcp, + QuicV1, + WebRtcDirect, + Ws, + Webtransport, +} + +impl FromStr for Transport { + type Err = anyhow::Error; + + fn from_str(s: &str) -> std::result::Result { + Ok(match s { + "tcp" => Self::Tcp, + "quic-v1" => Self::QuicV1, + "webrtc-direct" => Self::WebRtcDirect, + "ws" => Self::Ws, + "webtransport" => Self::Webtransport, + other => bail!("unknown transport {other}"), + }) + } +} + +/// Supported stream multiplexers by rust-libp2p. +#[derive(Clone, Debug)] +pub enum Muxer { + Mplex, + Yamux, +} + +impl FromStr for Muxer { + type Err = anyhow::Error; + + fn from_str(s: &str) -> std::result::Result { + Ok(match s { + "mplex" => Self::Mplex, + "yamux" => Self::Yamux, + other => bail!("unknown muxer {other}"), + }) + } +} + +/// Supported security protocols by rust-libp2p. +#[derive(Clone, Debug)] +pub enum SecProtocol { + Noise, + Tls, +} + +impl FromStr for SecProtocol { + type Err = anyhow::Error; + + fn from_str(s: &str) -> std::result::Result { + Ok(match s { + "noise" => Self::Noise, + "tls" => Self::Tls, + other => bail!("unknown security protocol {other}"), + }) + } +} + +#[derive(NetworkBehaviour)] +pub(crate) struct Behaviour { + ping: ping::Behaviour, + identify: identify::Behaviour, +} + +pub(crate) fn build_behaviour(key: &Keypair) -> Behaviour { + Behaviour { + ping: ping::Behaviour::new(ping::Config::new().with_interval(Duration::from_secs(1))), + // Need to include identify until https://github.com/status-im/nim-libp2p/issues/924 is resolved. 
+ identify: identify::Behaviour::new(identify::Config::new( + "/interop-tests".to_owned(), + key.public(), + )), + } +} diff --git a/libp2p/CHANGELOG.md b/libp2p/CHANGELOG.md index bc47bcb0827..80b32c35643 100644 --- a/libp2p/CHANGELOG.md +++ b/libp2p/CHANGELOG.md @@ -1,4 +1,81 @@ -## 0.52.0 - unreleased +## 0.53.2 + +- Allow `SwarmBuilder::with_bandwidth_metrics` after `SwarmBuilder::with_websocket`. + See [PR 4937](https://github.com/libp2p/rust-libp2p/pull/4937). + +## 0.53.1 + +- Allow `SwarmBuilder::with_quic_config` to be called without `with_tcp` first. + See [PR 4821](https://github.com/libp2p/rust-libp2p/pull/4821). +- Introduce `SwarmBuilder::with_dns_config`. + See [PR 4808](https://github.com/libp2p/rust-libp2p/pull/4808). + +## 0.53.0 + +- Raise MSRV to 1.73. + See [PR 4692](https://github.com/libp2p/rust-libp2p/pull/4692). +- Remove deprecated `libp2p-wasm-ext`. + Users should use `libp2p-websocket-websys` instead. + See [PR 4694](https://github.com/libp2p/rust-libp2p/pull/4694). +- Remove deprecated `libp2p-deflate`. + See [issue 4522](https://github.com/libp2p/rust-libp2p/issues/4522) for details. + See [PR 4729](https://github.com/libp2p/rust-libp2p/pull/4729). +- Remove deprecated `development_transport`. + Use `libp2p::SwarmBuilder` instead. + See [PR 4732](https://github.com/libp2p/rust-libp2p/pull/4732). +- Introduce `SwarmBuilder::with_bandwidth_metrics` exposing Prometheus bandwidth metrics per transport protocol stack and direction (in-/ outbound). + Deprecate `Transport::with_bandwidth_logging` and `SwarmBuilder::with_bandwidth_logging` in favor of the new `SwarmBuilder::with_bandwidth_metrics`. + See [PR 4727](https://github.com/libp2p/rust-libp2p/pull/4727). + +## 0.52.4 + +- Introduce `libp2p::websocket_websys` module behind `websocket-websys` feature flag. + This supersedes the existing `libp2p::wasm_ext` module which is now deprecated. + See [PR 3679]. 
+ +- Introduce a new `libp2p::SwarmBuilder` in favor of the now deprecated `libp2p::swarm::SwarmBuilder`. + See `libp2p::SwarmBuilder` docs on how to use the new builder. + Also see [PR 4120]. + +- Update `libp2p-identity` version to 0.2.6. + Under the hood, we feature-flagged `libp2p-identity`'s `rand` dependency but it is enabled by default when using `libp2p`. + See [PR 4349]. + +[PR 3679]: https://github.com/libp2p/rust-libp2p/pull/3679 +[PR 4120]: https://github.com/libp2p/rust-libp2p/pull/4120 +[PR 4349]: https://github.com/libp2p/rust-libp2p/pull/4349 + +## 0.52.3 + +- Add `libp2p-quic` stable release. + +## 0.52.2 + +- Include gossipsub when compiling for wasm. + See [PR 4217]. + +- Add `json` feature which exposes `request_response::json`. + See [PR 4188]. + +- Add support for UPnP via the IGD protocol. + See [PR 4156]. + +- Add `libp2p-memory-connection-limits` providing memory usage based connection limit configurations. + See [PR 4281]. + +[PR 4188]: https://github.com/libp2p/rust-libp2p/pull/4188 +[PR 4156]: https://github.com/libp2p/rust-libp2p/pull/4156 +[PR 4217]: https://github.com/libp2p/rust-libp2p/pull/4217 +[PR 4281]: https://github.com/libp2p/rust-libp2p/pull/4281 + +## 0.52.1 + +- Add `libp2p-webtransport-websys` providing WebTransport for WASM environments. + See [PR 4015]. + +[PR 4015]: https://github.com/libp2p/rust-libp2p/pull/4015 + +## 0.52.0 - Raise MSRV to 1.65. See [PR 3715]. @@ -10,14 +87,32 @@ We encourage users to use `StreamProtocol` when implementing `UpgradeInfo`. See [PR 3746]. -[PR 3746]: https://github.com/libp2p/rust-libp2p/pull/3746 +- Rename `NetworkBehaviour::OutEvent` to `NetworkBehaviour::ToSwarm`, `ConnectionHandler::InEvent` to `ConnectionHandler::FromBehaviour`, `ConnectionHandler::OutEvent` to `ConnectionHandler::ToBehaviour`. See [PR 3848]. + +- Remove deprecated `mplex` module. + You can still depend on `libp2p-mplex` directly but we strongly encourage to migrate to `yamux`. 
+ This also removes `mplex` from the `development_transport` and `tokio_development_transport` functions. + See [PR 3920]. + +- Remove `libp2p-perf` protocol. To use `libp2p-perf` one needs to import it directly. + See [PR 3990]. + +- Remove `libp2p-quic` and `libp2p-webrtc` protocols. + These are in alpha status and should be depended on directly. + See [PR 4041]. + [PR 3715]: https://github.com/libp2p/rust-libp2p/pull/3715 +[PR 3746]: https://github.com/libp2p/rust-libp2p/pull/3746 +[PR 3848]: https://github.com/libp2p/rust-libp2p/pull/3848 +[PR 3920]: https://github.com/libp2p/rust-libp2p/pull/3920 +[PR 3990]: https://github.com/libp2p/rust-libp2p/pull/3990 +[PR 4041]: https://github.com/libp2p/rust-libp2p/pull/4041 ## 0.51.3 - Deprecate the `mplex` feature. -The recommended baseline stream multiplexer is `yamux`. -See [PR 3689]. + The recommended baseline stream multiplexer is `yamux`. + See [PR 3689]. [PR 3689]: https://github.com/libp2p/rust-libp2p/pull/3689 diff --git a/libp2p/Cargo.toml b/libp2p/Cargo.toml index b4fa79d6343..9dc9667be10 100644 --- a/libp2p/Cargo.toml +++ b/libp2p/Cargo.toml @@ -1,9 +1,9 @@ [package] name = "libp2p" edition = "2021" -rust-version = "1.65.0" +rust-version = { workspace = true } description = "Peer-to-peer networking library" -version = "0.52.0" +version = "0.53.2" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -14,21 +14,21 @@ categories = ["network-programming", "asynchronous"] full = [ "async-std", "autonat", + "cbor", "dcutr", - "deflate", "dns", "ecdsa", "ed25519", "floodsub", "gossipsub", "identify", + "json", "kad", "macros", "mdns", + "memory-connection-limits", "metrics", - "mplex", "noise", - "perf", "ping", "plaintext", "pnet", @@ -44,30 +44,30 @@ full = [ "tokio", "uds", "wasm-bindgen", - "wasm-ext", - "wasm-ext-websocket", - "webrtc", + "websocket-websys", "websocket", + "webtransport-websys", "yamux", + "upnp", ] -async-std = ["libp2p-swarm/async-std", 
"libp2p-mdns?/async-io", "libp2p-tcp?/async-io", "libp2p-dns?/async-std", "libp2p-quic?/async-std"] +async-std = [ "libp2p-swarm/async-std", "libp2p-mdns?/async-io", "libp2p-tcp?/async-io", "libp2p-dns?/async-std", "libp2p-quic?/async-std",] autonat = ["dep:libp2p-autonat"] +cbor = ["libp2p-request-response?/cbor"] dcutr = ["dep:libp2p-dcutr", "libp2p-metrics?/dcutr"] -deflate = ["dep:libp2p-deflate"] dns = ["dep:libp2p-dns"] ecdsa = ["libp2p-identity/ecdsa"] ed25519 = ["libp2p-identity/ed25519"] floodsub = ["dep:libp2p-floodsub"] gossipsub = ["dep:libp2p-gossipsub", "libp2p-metrics?/gossipsub"] identify = ["dep:libp2p-identify", "libp2p-metrics?/identify"] +json = ["libp2p-request-response?/json"] kad = ["dep:libp2p-kad", "libp2p-metrics?/kad"] macros = ["libp2p-swarm/macros"] mdns = ["dep:libp2p-mdns"] +memory-connection-limits = ["dep:libp2p-memory-connection-limits"] metrics = ["dep:libp2p-metrics"] -mplex = ["dep:libp2p-mplex"] noise = ["dep:libp2p-noise"] -perf = ["dep:libp2p-perf"] ping = ["dep:libp2p-ping", "libp2p-metrics?/ping"] plaintext = ["dep:libp2p-plaintext"] pnet = ["dep:libp2p-pnet"] @@ -80,21 +80,24 @@ secp256k1 = ["libp2p-identity/secp256k1"] serde = ["libp2p-core/serde", "libp2p-kad?/serde", "libp2p-gossipsub?/serde"] tcp = ["dep:libp2p-tcp"] tls = ["dep:libp2p-tls"] -tokio = ["libp2p-swarm/tokio", "libp2p-mdns?/tokio", "libp2p-tcp?/tokio", "libp2p-dns?/tokio", "libp2p-quic?/tokio", "libp2p-webrtc?/tokio"] +tokio = [ "libp2p-swarm/tokio", "libp2p-mdns?/tokio", "libp2p-tcp?/tokio", "libp2p-dns?/tokio", "libp2p-quic?/tokio", "libp2p-upnp?/tokio"] uds = ["dep:libp2p-uds"] -wasm-bindgen = ["futures-timer/wasm-bindgen", "instant/wasm-bindgen", "getrandom/js", "libp2p-swarm/wasm-bindgen"] -wasm-ext = ["dep:libp2p-wasm-ext"] -wasm-ext-websocket = ["wasm-ext", "libp2p-wasm-ext?/websocket"] -webrtc = ["dep:libp2p-webrtc", "libp2p-webrtc?/pem"] +wasm-bindgen = [ "futures-timer/wasm-bindgen", "instant/wasm-bindgen", "getrandom/js", 
"libp2p-swarm/wasm-bindgen", "libp2p-gossipsub?/wasm-bindgen",] +websocket-websys = ["dep:libp2p-websocket-websys"] websocket = ["dep:libp2p-websocket"] +webtransport-websys = ["dep:libp2p-webtransport-websys"] yamux = ["dep:libp2p-yamux"] +upnp = ["dep:libp2p-upnp"] [dependencies] bytes = "1" +either = "1.9.0" futures = "0.3.26" futures-timer = "3.0.2" # Explicit dependency to be used in `wasm-bindgen` feature -getrandom = "0.2.3" # Explicit dependency to be used in `wasm-bindgen` feature -instant = "0.1.11" # Explicit dependency to be used in `wasm-bindgen` feature +getrandom = "0.2.3" # Explicit dependency to be used in `wasm-bindgen` feature +instant = "0.1.12" # Explicit dependency to be used in `wasm-bindgen` feature +# TODO feature flag? +rw-stream-sink = { workspace = true } libp2p-allow-block-list = { workspace = true } libp2p-autonat = { workspace = true, optional = true } @@ -102,11 +105,11 @@ libp2p-connection-limits = { workspace = true } libp2p-core = { workspace = true } libp2p-dcutr = { workspace = true, optional = true } libp2p-floodsub = { workspace = true, optional = true } +libp2p-gossipsub = { workspace = true, optional = true } libp2p-identify = { workspace = true, optional = true } -libp2p-identity = { workspace = true } +libp2p-identity = { workspace = true, features = ["rand"] } libp2p-kad = { workspace = true, optional = true } libp2p-metrics = { workspace = true, optional = true } -libp2p-mplex = { workspace = true, optional = true } libp2p-noise = { workspace = true, optional = true } libp2p-ping = { workspace = true, optional = true } libp2p-plaintext = { workspace = true, optional = true } @@ -115,38 +118,34 @@ libp2p-relay = { workspace = true, optional = true } libp2p-rendezvous = { workspace = true, optional = true } libp2p-request-response = { workspace = true, optional = true } libp2p-swarm = { workspace = true } -libp2p-wasm-ext = { workspace = true, optional = true } +libp2p-websocket-websys = { workspace = true, optional = true 
} +libp2p-webtransport-websys = { workspace = true, optional = true } libp2p-yamux = { workspace = true, optional = true } - -multiaddr = { version = "0.17.0" } +multiaddr = { workspace = true } pin-project = "1.0.0" +thiserror = "1.0" [target.'cfg(not(target_arch = "wasm32"))'.dependencies] -libp2p-deflate = { workspace = true, optional = true } libp2p-dns = { workspace = true, optional = true } libp2p-mdns = { workspace = true, optional = true } -libp2p-perf = { workspace = true, optional = true } +libp2p-memory-connection-limits = { workspace = true, optional = true } libp2p-quic = { workspace = true, optional = true } libp2p-tcp = { workspace = true, optional = true } libp2p-tls = { workspace = true, optional = true } libp2p-uds = { workspace = true, optional = true } -libp2p-webrtc = { workspace = true, optional = true } +libp2p-upnp = { workspace = true, optional = true } libp2p-websocket = { workspace = true, optional = true } -[target.'cfg(not(target_os = "unknown"))'.dependencies] -libp2p-gossipsub = { workspace = true, optional = true } - [dev-dependencies] async-std = { version = "1.6.2", features = ["attributes"] } async-trait = "0.1" -either = "1.8.0" -env_logger = "0.10.0" clap = { version = "4.1.6", features = ["derive"] } -tokio = { version = "1.15", features = ["io-util", "io-std", "macros", "rt", "rt-multi-thread"] } +tokio = { version = "1.15", features = [ "io-util", "io-std", "macros", "rt", "rt-multi-thread"] } libp2p-mplex = { workspace = true } libp2p-noise = { workspace = true } libp2p-tcp = { workspace = true, features = ["tokio"] } +tracing-subscriber = { version = "0.3", features = ["env-filter"] } # Passing arguments to the docsrs builder in order to properly document cfg's. 
# More information: https://docs.rs/about/builds#cross-compiling @@ -154,3 +153,6 @@ libp2p-tcp = { workspace = true, features = ["tokio"] } all-features = true rustdoc-args = ["--cfg", "docsrs"] rustc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true diff --git a/libp2p/src/bandwidth.rs b/libp2p/src/bandwidth.rs index dc696ce07e2..b84cbb7e27b 100644 --- a/libp2p/src/bandwidth.rs +++ b/libp2p/src/bandwidth.rs @@ -18,6 +18,8 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. +#![allow(deprecated)] + use crate::core::muxing::{StreamMuxer, StreamMuxerEvent}; use futures::{ @@ -101,6 +103,9 @@ where } /// Allows obtaining the average bandwidth of the streams. +#[deprecated( + note = "Use `libp2p::SwarmBuilder::with_bandwidth_metrics` or `libp2p_metrics::BandwidthTransport` instead." +)] pub struct BandwidthSinks { inbound: AtomicU64, outbound: AtomicU64, diff --git a/libp2p/src/builder.rs b/libp2p/src/builder.rs new file mode 100644 index 00000000000..c96c20d470a --- /dev/null +++ b/libp2p/src/builder.rs @@ -0,0 +1,603 @@ +use std::marker::PhantomData; + +mod phase; +mod select_muxer; +mod select_security; + +/// Build a [`Swarm`](libp2p_swarm::Swarm) by combining an identity, a set of +/// [`Transport`](libp2p_core::Transport)s and a +/// [`NetworkBehaviour`](libp2p_swarm::NetworkBehaviour). 
+/// +/// ``` +/// # use libp2p::{swarm::NetworkBehaviour, SwarmBuilder}; +/// # use libp2p::core::transport::dummy::DummyTransport; +/// # use libp2p::core::muxing::StreamMuxerBox; +/// # use libp2p::identity::PeerId; +/// # use std::error::Error; +/// # +/// # #[cfg(all( +/// # not(target_arch = "wasm32"), +/// # feature = "tokio", +/// # feature = "tcp", +/// # feature = "tls", +/// # feature = "noise", +/// # feature = "quic", +/// # feature = "dns", +/// # feature = "relay", +/// # feature = "websocket", +/// # ))] +/// # async fn build_swarm() -> Result<(), Box> { +/// # #[derive(NetworkBehaviour)] +/// # #[behaviour(prelude = "libp2p_swarm::derive_prelude")] +/// # struct MyBehaviour { +/// # relay: libp2p_relay::client::Behaviour, +/// # } +/// +/// let swarm = SwarmBuilder::with_new_identity() +/// .with_tokio() +/// .with_tcp( +/// Default::default(), +/// (libp2p_tls::Config::new, libp2p_noise::Config::new), +/// libp2p_yamux::Config::default, +/// )? +/// .with_quic() +/// .with_other_transport(|_key| DummyTransport::<(PeerId, StreamMuxerBox)>::new())? +/// .with_dns()? +/// .with_websocket( +/// (libp2p_tls::Config::new, libp2p_noise::Config::new), +/// libp2p_yamux::Config::default, +/// ) +/// .await? +/// .with_relay_client( +/// (libp2p_tls::Config::new, libp2p_noise::Config::new), +/// libp2p_yamux::Config::default, +/// )? +/// .with_behaviour(|_key, relay| MyBehaviour { relay })? +/// .with_swarm_config(|cfg| { +/// // Edit cfg here. 
+/// cfg +/// }) +/// .build(); +/// # +/// # Ok(()) +/// # } +/// ``` +pub struct SwarmBuilder { + keypair: libp2p_identity::Keypair, + phantom: PhantomData, + phase: Phase, +} + +#[cfg(test)] +mod tests { + use crate::SwarmBuilder; + use libp2p_core::{muxing::StreamMuxerBox, transport::dummy::DummyTransport}; + use libp2p_identity::PeerId; + use libp2p_swarm::NetworkBehaviour; + + #[test] + #[cfg(all( + feature = "tokio", + feature = "tcp", + feature = "tls", + feature = "noise", + feature = "yamux", + ))] + fn tcp() { + let _ = SwarmBuilder::with_new_identity() + .with_tokio() + .with_tcp( + Default::default(), + libp2p_tls::Config::new, + libp2p_yamux::Config::default, + ) + .unwrap() + .with_behaviour(|_| libp2p_swarm::dummy::Behaviour) + .unwrap() + .build(); + } + + #[test] + #[cfg(all( + feature = "async-std", + feature = "tcp", + feature = "tls", + feature = "noise", + feature = "yamux", + ))] + fn async_std_tcp() { + let _ = SwarmBuilder::with_new_identity() + .with_async_std() + .with_tcp( + Default::default(), + libp2p_tls::Config::new, + libp2p_yamux::Config::default, + ) + .unwrap() + .with_behaviour(|_| libp2p_swarm::dummy::Behaviour) + .unwrap() + .build(); + } + + #[test] + #[cfg(all(feature = "tokio", feature = "quic"))] + fn quic() { + let _ = SwarmBuilder::with_new_identity() + .with_tokio() + .with_quic() + .with_behaviour(|_| libp2p_swarm::dummy::Behaviour) + .unwrap() + .build(); + } + + #[test] + #[cfg(all(feature = "async-std", feature = "quic"))] + fn async_std_quic() { + let _ = SwarmBuilder::with_new_identity() + .with_async_std() + .with_quic() + .with_behaviour(|_| libp2p_swarm::dummy::Behaviour) + .unwrap() + .build(); + } + + #[test] + #[cfg(all(feature = "tokio", feature = "quic"))] + fn quic_config() { + let _ = SwarmBuilder::with_new_identity() + .with_tokio() + .with_quic_config(|config| config) + .with_behaviour(|_| libp2p_swarm::dummy::Behaviour) + .unwrap() + .build(); + } + + #[test] + #[cfg(all(feature = "async-std", feature 
= "quic"))] + fn async_std_quic_config() { + let _ = SwarmBuilder::with_new_identity() + .with_async_std() + .with_quic_config(|config| config) + .with_behaviour(|_| libp2p_swarm::dummy::Behaviour) + .unwrap() + .build(); + } + + #[test] + #[cfg(all(feature = "tokio", feature = "tcp", feature = "tls", feature = "yamux"))] + fn tcp_yamux_mplex() { + let _ = SwarmBuilder::with_new_identity() + .with_tokio() + .with_tcp( + Default::default(), + libp2p_tls::Config::new, + ( + libp2p_yamux::Config::default, + libp2p_mplex::MplexConfig::default, + ), + ) + .unwrap() + .with_behaviour(|_| libp2p_swarm::dummy::Behaviour) + .unwrap() + .build(); + } + + #[test] + #[cfg(all( + feature = "tokio", + feature = "tcp", + feature = "tls", + feature = "noise", + feature = "yamux" + ))] + fn tcp_tls_noise() { + let _ = SwarmBuilder::with_new_identity() + .with_tokio() + .with_tcp( + Default::default(), + (libp2p_tls::Config::new, libp2p_noise::Config::new), + ( + libp2p_yamux::Config::default, + libp2p_mplex::MplexConfig::default, + ), + ) + .unwrap() + .with_behaviour(|_| libp2p_swarm::dummy::Behaviour) + .unwrap() + .build(); + } + + #[test] + #[cfg(all( + feature = "tokio", + feature = "tcp", + feature = "tls", + feature = "noise", + feature = "yamux", + feature = "quic" + ))] + fn tcp_quic() { + let _ = SwarmBuilder::with_new_identity() + .with_tokio() + .with_tcp( + Default::default(), + (libp2p_tls::Config::new, libp2p_noise::Config::new), + libp2p_yamux::Config::default, + ) + .unwrap() + .with_quic() + .with_behaviour(|_| libp2p_swarm::dummy::Behaviour) + .unwrap() + .build(); + } + + #[test] + #[cfg(all( + feature = "async-std", + feature = "tcp", + feature = "tls", + feature = "noise", + feature = "yamux", + feature = "quic" + ))] + fn async_std_tcp_quic() { + let _ = SwarmBuilder::with_new_identity() + .with_async_std() + .with_tcp( + Default::default(), + (libp2p_tls::Config::new, libp2p_noise::Config::new), + libp2p_yamux::Config::default, + ) + .unwrap() + .with_quic() 
+ .with_behaviour(|_| libp2p_swarm::dummy::Behaviour) + .unwrap() + .build(); + } + + #[test] + #[cfg(all( + feature = "tokio", + feature = "tcp", + feature = "tls", + feature = "noise", + feature = "yamux", + feature = "quic" + ))] + fn tcp_quic_config() { + let _ = SwarmBuilder::with_new_identity() + .with_tokio() + .with_tcp( + Default::default(), + (libp2p_tls::Config::new, libp2p_noise::Config::new), + libp2p_yamux::Config::default, + ) + .unwrap() + .with_quic_config(|config| config) + .with_behaviour(|_| libp2p_swarm::dummy::Behaviour) + .unwrap() + .build(); + } + + #[test] + #[cfg(all( + feature = "async-std", + feature = "tcp", + feature = "tls", + feature = "noise", + feature = "yamux", + feature = "quic" + ))] + fn async_std_tcp_quic_config() { + let _ = SwarmBuilder::with_new_identity() + .with_async_std() + .with_tcp( + Default::default(), + (libp2p_tls::Config::new, libp2p_noise::Config::new), + libp2p_yamux::Config::default, + ) + .unwrap() + .with_quic_config(|config| config) + .with_behaviour(|_| libp2p_swarm::dummy::Behaviour) + .unwrap() + .build(); + } + + #[test] + #[cfg(all( + feature = "tokio", + feature = "tcp", + feature = "tls", + feature = "noise", + feature = "yamux", + feature = "relay" + ))] + fn tcp_relay() { + #[derive(libp2p_swarm::NetworkBehaviour)] + #[behaviour(prelude = "libp2p_swarm::derive_prelude")] + struct Behaviour { + dummy: libp2p_swarm::dummy::Behaviour, + relay: libp2p_relay::client::Behaviour, + } + + let _ = SwarmBuilder::with_new_identity() + .with_tokio() + .with_tcp( + Default::default(), + libp2p_tls::Config::new, + libp2p_yamux::Config::default, + ) + .unwrap() + .with_relay_client(libp2p_tls::Config::new, libp2p_yamux::Config::default) + .unwrap() + .with_behaviour(|_, relay| Behaviour { + dummy: libp2p_swarm::dummy::Behaviour, + relay, + }) + .unwrap() + .build(); + } + + #[tokio::test] + #[cfg(all( + feature = "tokio", + feature = "tcp", + feature = "tls", + feature = "noise", + feature = "yamux", + feature 
= "dns" + ))] + async fn tcp_dns() { + SwarmBuilder::with_new_identity() + .with_tokio() + .with_tcp( + Default::default(), + (libp2p_tls::Config::new, libp2p_noise::Config::new), + libp2p_yamux::Config::default, + ) + .unwrap() + .with_dns() + .unwrap() + .with_behaviour(|_| libp2p_swarm::dummy::Behaviour) + .unwrap() + .build(); + } + + #[tokio::test] + #[cfg(all( + feature = "tokio", + feature = "tcp", + feature = "noise", + feature = "yamux", + feature = "dns" + ))] + async fn tcp_dns_config() { + SwarmBuilder::with_new_identity() + .with_tokio() + .with_tcp( + Default::default(), + (libp2p_tls::Config::new, libp2p_noise::Config::new), + libp2p_yamux::Config::default, + ) + .unwrap() + .with_dns_config( + libp2p_dns::ResolverConfig::default(), + libp2p_dns::ResolverOpts::default(), + ) + .with_behaviour(|_| libp2p_swarm::dummy::Behaviour) + .unwrap() + .build(); + } + + #[tokio::test] + #[cfg(all(feature = "tokio", feature = "quic", feature = "dns"))] + async fn quic_dns_config() { + SwarmBuilder::with_new_identity() + .with_tokio() + .with_quic() + .with_dns_config( + libp2p_dns::ResolverConfig::default(), + libp2p_dns::ResolverOpts::default(), + ) + .with_behaviour(|_| libp2p_swarm::dummy::Behaviour) + .unwrap() + .build(); + } + + #[tokio::test] + #[cfg(all( + feature = "tokio", + feature = "tcp", + feature = "noise", + feature = "yamux", + feature = "quic", + feature = "dns" + ))] + async fn tcp_quic_dns_config() { + SwarmBuilder::with_new_identity() + .with_tokio() + .with_tcp( + Default::default(), + (libp2p_tls::Config::new, libp2p_noise::Config::new), + libp2p_yamux::Config::default, + ) + .unwrap() + .with_quic() + .with_dns_config( + libp2p_dns::ResolverConfig::default(), + libp2p_dns::ResolverOpts::default(), + ) + .with_behaviour(|_| libp2p_swarm::dummy::Behaviour) + .unwrap() + .build(); + } + + #[tokio::test] + #[cfg(all( + feature = "async-std", + feature = "tcp", + feature = "noise", + feature = "yamux", + feature = "quic", + feature = "dns" + 
))] + async fn async_std_tcp_quic_dns_config() { + SwarmBuilder::with_new_identity() + .with_async_std() + .with_tcp( + Default::default(), + (libp2p_tls::Config::new, libp2p_noise::Config::new), + libp2p_yamux::Config::default, + ) + .unwrap() + .with_quic() + .with_dns_config( + libp2p_dns::ResolverConfig::default(), + libp2p_dns::ResolverOpts::default(), + ) + .with_behaviour(|_| libp2p_swarm::dummy::Behaviour) + .unwrap() + .build(); + } + + /// Showcases how to provide custom transports unknown to the libp2p crate, e.g. WebRTC. + #[test] + #[cfg(feature = "tokio")] + fn other_transport() -> Result<(), Box> { + let _ = SwarmBuilder::with_new_identity() + .with_tokio() + // Closure can either return a Transport directly. + .with_other_transport(|_| DummyTransport::<(PeerId, StreamMuxerBox)>::new())? + // Or a Result containing a Transport. + .with_other_transport(|_| { + if true { + Ok(DummyTransport::<(PeerId, StreamMuxerBox)>::new()) + } else { + Err(Box::from("test")) + } + })? + .with_behaviour(|_| libp2p_swarm::dummy::Behaviour) + .unwrap() + .build(); + + Ok(()) + } + + #[tokio::test] + #[cfg(all( + feature = "tokio", + feature = "tcp", + feature = "tls", + feature = "noise", + feature = "yamux", + feature = "dns", + feature = "websocket", + ))] + async fn tcp_websocket() { + let _ = SwarmBuilder::with_new_identity() + .with_tokio() + .with_tcp( + Default::default(), + (libp2p_tls::Config::new, libp2p_noise::Config::new), + libp2p_yamux::Config::default, + ) + .unwrap() + .with_websocket( + (libp2p_tls::Config::new, libp2p_noise::Config::new), + libp2p_yamux::Config::default, + ) + .await + .unwrap() + .with_behaviour(|_| libp2p_swarm::dummy::Behaviour) + .unwrap() + .build(); + } + + #[tokio::test] + #[cfg(all( + feature = "tokio", + feature = "tcp", + feature = "tls", + feature = "noise", + feature = "yamux", + feature = "quic", + feature = "dns", + feature = "relay", + feature = "websocket", + feature = "metrics", + ))] + async fn all() { + 
#[derive(NetworkBehaviour)] + #[behaviour(prelude = "libp2p_swarm::derive_prelude")] + struct MyBehaviour { + relay: libp2p_relay::client::Behaviour, + } + + let _ = SwarmBuilder::with_new_identity() + .with_tokio() + .with_tcp( + Default::default(), + libp2p_tls::Config::new, + libp2p_yamux::Config::default, + ) + .unwrap() + .with_quic() + .with_dns() + .unwrap() + .with_websocket(libp2p_tls::Config::new, libp2p_yamux::Config::default) + .await + .unwrap() + .with_relay_client(libp2p_tls::Config::new, libp2p_yamux::Config::default) + .unwrap() + .with_bandwidth_metrics(&mut libp2p_metrics::Registry::default()) + .with_behaviour(|_key, relay| MyBehaviour { relay }) + .unwrap() + .build(); + } + + #[test] + #[cfg(all(feature = "tokio", feature = "tcp", feature = "tls", feature = "yamux"))] + fn tcp_bandwidth_metrics() -> Result<(), Box> { + let _ = SwarmBuilder::with_new_identity() + .with_tokio() + .with_tcp( + Default::default(), + libp2p_tls::Config::new, + libp2p_yamux::Config::default, + )? + .with_bandwidth_metrics(&mut libp2p_metrics::Registry::default()) + .with_behaviour(|_| libp2p_swarm::dummy::Behaviour) + .unwrap() + .build(); + + Ok(()) + } + + #[test] + #[cfg(all(feature = "tokio", feature = "quic"))] + fn quic_bandwidth_metrics() -> Result<(), Box> { + let _ = SwarmBuilder::with_new_identity() + .with_tokio() + .with_quic() + .with_bandwidth_metrics(&mut libp2p_metrics::Registry::default()) + .with_behaviour(|_| libp2p_swarm::dummy::Behaviour) + .unwrap() + .build(); + + Ok(()) + } + + #[test] + #[cfg(feature = "tokio")] + fn other_transport_bandwidth_metrics() -> Result<(), Box> { + let _ = SwarmBuilder::with_new_identity() + .with_tokio() + .with_other_transport(|_| DummyTransport::<(PeerId, StreamMuxerBox)>::new())? 
+ .with_bandwidth_metrics(&mut libp2p_metrics::Registry::default()) + .with_behaviour(|_| libp2p_swarm::dummy::Behaviour) + .unwrap() + .build(); + + Ok(()) + } +} diff --git a/libp2p/src/builder/phase.rs b/libp2p/src/builder/phase.rs new file mode 100644 index 00000000000..c9679a46767 --- /dev/null +++ b/libp2p/src/builder/phase.rs @@ -0,0 +1,139 @@ +#![allow(unused_imports)] + +mod bandwidth_logging; +mod bandwidth_metrics; +mod behaviour; +mod build; +mod dns; +mod identity; +mod other_transport; +mod provider; +mod quic; +mod relay; +mod swarm; +mod tcp; +mod websocket; + +use bandwidth_logging::*; +use bandwidth_metrics::*; +use behaviour::*; +use build::*; +use dns::*; +use other_transport::*; +use provider::*; +use quic::*; +use relay::*; +use swarm::*; +use tcp::*; +use websocket::*; + +use super::select_muxer::SelectMuxerUpgrade; +use super::select_security::SelectSecurityUpgrade; +use super::SwarmBuilder; + +use libp2p_core::{muxing::StreamMuxerBox, Transport}; +use libp2p_identity::Keypair; + +#[allow(unreachable_pub)] +pub trait IntoSecurityUpgrade { + type Upgrade; + type Error; + + fn into_security_upgrade(self, keypair: &Keypair) -> Result; +} + +impl IntoSecurityUpgrade for F +where + F: for<'a> FnOnce(&'a Keypair) -> Result, +{ + type Upgrade = T; + type Error = E; + + fn into_security_upgrade(self, keypair: &Keypair) -> Result { + (self)(keypair) + } +} + +impl IntoSecurityUpgrade for (F1, F2) +where + F1: IntoSecurityUpgrade, + F2: IntoSecurityUpgrade, +{ + type Upgrade = SelectSecurityUpgrade; + type Error = either::Either; + + fn into_security_upgrade(self, keypair: &Keypair) -> Result { + let (f1, f2) = self; + + let u1 = f1 + .into_security_upgrade(keypair) + .map_err(either::Either::Left)?; + let u2 = f2 + .into_security_upgrade(keypair) + .map_err(either::Either::Right)?; + + Ok(SelectSecurityUpgrade::new(u1, u2)) + } +} + +#[allow(unreachable_pub)] +pub trait IntoMultiplexerUpgrade { + type Upgrade; + + fn into_multiplexer_upgrade(self) -> 
Self::Upgrade; +} + +impl IntoMultiplexerUpgrade for F +where + F: FnOnce() -> U, +{ + type Upgrade = U; + + fn into_multiplexer_upgrade(self) -> Self::Upgrade { + (self)() + } +} + +impl IntoMultiplexerUpgrade for (U1, U2) +where + U1: IntoMultiplexerUpgrade, + U2: IntoMultiplexerUpgrade, +{ + type Upgrade = SelectMuxerUpgrade; + + fn into_multiplexer_upgrade(self) -> Self::Upgrade { + let (f1, f2) = self; + + let u1 = f1.into_multiplexer_upgrade(); + let u2 = f2.into_multiplexer_upgrade(); + + SelectMuxerUpgrade::new(u1, u2) + } +} + +pub trait AuthenticatedMultiplexedTransport: + Transport< + Error = Self::E, + Dial = Self::D, + ListenerUpgrade = Self::U, + Output = (libp2p_identity::PeerId, StreamMuxerBox), + > + Send + + Unpin + + 'static +{ + type E: Send + Sync + 'static; + type D: Send; + type U: Send; +} + +impl AuthenticatedMultiplexedTransport for T +where + T: Transport + Send + Unpin + 'static, + ::Error: Send + Sync + 'static, + ::Dial: Send, + ::ListenerUpgrade: Send, +{ + type E = T::Error; + type D = T::Dial; + type U = T::ListenerUpgrade; +} diff --git a/libp2p/src/builder/phase/bandwidth_logging.rs b/libp2p/src/builder/phase/bandwidth_logging.rs new file mode 100644 index 00000000000..cee9498fcaa --- /dev/null +++ b/libp2p/src/builder/phase/bandwidth_logging.rs @@ -0,0 +1,88 @@ +use super::*; +#[allow(deprecated)] +use crate::bandwidth::BandwidthSinks; +use crate::transport_ext::TransportExt; +use crate::SwarmBuilder; +use std::marker::PhantomData; +use std::sync::Arc; + +pub struct BandwidthLoggingPhase { + pub(crate) relay_behaviour: R, + pub(crate) transport: T, +} + +impl + SwarmBuilder> +{ + #[allow(deprecated)] + #[deprecated(note = "Use `with_bandwidth_metrics` instead.")] + pub fn with_bandwidth_logging( + self, + ) -> ( + SwarmBuilder>, + Arc, + ) { + let (transport, sinks) = self.phase.transport.with_bandwidth_logging(); + ( + SwarmBuilder { + phase: BandwidthMetricsPhase { + relay_behaviour: self.phase.relay_behaviour, + transport, + 
}, + keypair: self.keypair, + phantom: PhantomData, + }, + sinks, + ) + } + + pub fn without_bandwidth_logging(self) -> SwarmBuilder> { + SwarmBuilder { + phase: BandwidthMetricsPhase { + relay_behaviour: self.phase.relay_behaviour, + transport: self.phase.transport, + }, + keypair: self.keypair, + phantom: PhantomData, + } + } +} + +// Shortcuts +#[cfg(feature = "metrics")] +impl + SwarmBuilder> +{ + pub fn with_bandwidth_metrics( + self, + registry: &mut libp2p_metrics::Registry, + ) -> SwarmBuilder> { + self.without_bandwidth_logging() + .with_bandwidth_metrics(registry) + } +} +#[cfg(feature = "relay")] +impl + SwarmBuilder> +{ + pub fn with_behaviour>( + self, + constructor: impl FnOnce(&libp2p_identity::Keypair, libp2p_relay::client::Behaviour) -> R, + ) -> Result>, R::Error> { + self.without_bandwidth_logging() + .without_bandwidth_metrics() + .with_behaviour(constructor) + } +} +impl + SwarmBuilder> +{ + pub fn with_behaviour>( + self, + constructor: impl FnOnce(&libp2p_identity::Keypair) -> R, + ) -> Result>, R::Error> { + self.without_bandwidth_logging() + .without_bandwidth_metrics() + .with_behaviour(constructor) + } +} diff --git a/libp2p/src/builder/phase/bandwidth_metrics.rs b/libp2p/src/builder/phase/bandwidth_metrics.rs new file mode 100644 index 00000000000..52daa731ddd --- /dev/null +++ b/libp2p/src/builder/phase/bandwidth_metrics.rs @@ -0,0 +1,69 @@ +use super::*; +#[allow(deprecated)] +use crate::bandwidth::BandwidthSinks; +use crate::transport_ext::TransportExt; +use crate::SwarmBuilder; +use std::marker::PhantomData; +use std::sync::Arc; + +pub struct BandwidthMetricsPhase { + pub(crate) relay_behaviour: R, + pub(crate) transport: T, +} + +#[cfg(feature = "metrics")] +impl + SwarmBuilder> +{ + pub fn with_bandwidth_metrics( + self, + registry: &mut libp2p_metrics::Registry, + ) -> SwarmBuilder> { + SwarmBuilder { + phase: BehaviourPhase { + relay_behaviour: self.phase.relay_behaviour, + transport: 
libp2p_metrics::BandwidthTransport::new(self.phase.transport, registry) + .map(|(peer_id, conn), _| (peer_id, StreamMuxerBox::new(conn))), + }, + keypair: self.keypair, + phantom: PhantomData, + } + } +} + +impl SwarmBuilder> { + pub fn without_bandwidth_metrics(self) -> SwarmBuilder> { + SwarmBuilder { + phase: BehaviourPhase { + relay_behaviour: self.phase.relay_behaviour, + transport: self.phase.transport, + }, + keypair: self.keypair, + phantom: PhantomData, + } + } +} + +// Shortcuts +#[cfg(feature = "relay")] +impl + SwarmBuilder> +{ + pub fn with_behaviour>( + self, + constructor: impl FnOnce(&libp2p_identity::Keypair, libp2p_relay::client::Behaviour) -> R, + ) -> Result>, R::Error> { + self.without_bandwidth_metrics().with_behaviour(constructor) + } +} + +impl + SwarmBuilder> +{ + pub fn with_behaviour>( + self, + constructor: impl FnOnce(&libp2p_identity::Keypair) -> R, + ) -> Result>, R::Error> { + self.without_bandwidth_metrics().with_behaviour(constructor) + } +} diff --git a/libp2p/src/builder/phase/behaviour.rs b/libp2p/src/builder/phase/behaviour.rs new file mode 100644 index 00000000000..939db935c80 --- /dev/null +++ b/libp2p/src/builder/phase/behaviour.rs @@ -0,0 +1,90 @@ +use super::*; +use crate::SwarmBuilder; +use libp2p_swarm::NetworkBehaviour; +use std::convert::Infallible; +use std::marker::PhantomData; + +pub struct BehaviourPhase { + pub(crate) relay_behaviour: R, + pub(crate) transport: T, +} + +#[cfg(feature = "relay")] +impl SwarmBuilder> { + pub fn with_behaviour>( + self, + constructor: impl FnOnce(&libp2p_identity::Keypair, libp2p_relay::client::Behaviour) -> R, + ) -> Result>, R::Error> { + Ok(SwarmBuilder { + phase: SwarmPhase { + behaviour: constructor(&self.keypair, self.phase.relay_behaviour) + .try_into_behaviour()?, + transport: self.phase.transport, + }, + keypair: self.keypair, + phantom: PhantomData, + }) + } +} + +impl SwarmBuilder> { + pub fn with_behaviour>( + self, + constructor: impl FnOnce(&libp2p_identity::Keypair) -> 
R, + ) -> Result>, R::Error> { + // Discard `NoRelayBehaviour`. + let _ = self.phase.relay_behaviour; + + Ok(SwarmBuilder { + phase: SwarmPhase { + behaviour: constructor(&self.keypair).try_into_behaviour()?, + transport: self.phase.transport, + }, + keypair: self.keypair, + phantom: PhantomData, + }) + } +} + +pub trait TryIntoBehaviour: private::Sealed { + type Error; + + fn try_into_behaviour(self) -> Result; +} + +impl TryIntoBehaviour for B +where + B: NetworkBehaviour, +{ + type Error = Infallible; + + fn try_into_behaviour(self) -> Result { + Ok(self) + } +} + +impl TryIntoBehaviour for Result> +where + B: NetworkBehaviour, +{ + type Error = BehaviourError; + + fn try_into_behaviour(self) -> Result { + self.map_err(BehaviourError) + } +} + +mod private { + pub trait Sealed {} +} + +impl private::Sealed for B {} + +impl private::Sealed + for Result> +{ +} + +#[derive(Debug, thiserror::Error)] +#[error("failed to build behaviour: {0}")] +pub struct BehaviourError(Box); diff --git a/libp2p/src/builder/phase/build.rs b/libp2p/src/builder/phase/build.rs new file mode 100644 index 00000000000..80a83994eeb --- /dev/null +++ b/libp2p/src/builder/phase/build.rs @@ -0,0 +1,31 @@ +#[allow(unused_imports)] +use super::*; + +use crate::SwarmBuilder; +use libp2p_core::Transport; +use libp2p_swarm::Swarm; + +pub struct BuildPhase { + pub(crate) behaviour: B, + pub(crate) transport: T, + pub(crate) swarm_config: libp2p_swarm::Config, +} + +const CONNECTION_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10); + +impl + SwarmBuilder> +{ + pub fn build(self) -> Swarm { + Swarm::new( + libp2p_core::transport::timeout::TransportTimeout::new( + self.phase.transport, + CONNECTION_TIMEOUT, + ) + .boxed(), + self.phase.behaviour, + self.keypair.public().to_peer_id(), + self.phase.swarm_config, + ) + } +} diff --git a/libp2p/src/builder/phase/dns.rs b/libp2p/src/builder/phase/dns.rs new file mode 100644 index 00000000000..135f6c57b19 --- /dev/null +++ 
b/libp2p/src/builder/phase/dns.rs @@ -0,0 +1,117 @@ +use super::*; +use crate::SwarmBuilder; +use std::marker::PhantomData; + +pub struct DnsPhase { + pub(crate) transport: T, +} + +#[cfg(all(not(target_arch = "wasm32"), feature = "async-std", feature = "dns"))] +impl SwarmBuilder> { + // TODO: Remove `async` + pub async fn with_dns( + self, + ) -> Result< + SwarmBuilder< + super::provider::AsyncStd, + WebsocketPhase, + >, + std::io::Error, + > { + Ok(SwarmBuilder { + keypair: self.keypair, + phantom: PhantomData, + phase: WebsocketPhase { + transport: libp2p_dns::async_std::Transport::system2(self.phase.transport)?, + }, + }) + } +} + +#[cfg(all(not(target_arch = "wasm32"), feature = "tokio", feature = "dns"))] +impl SwarmBuilder> { + pub fn with_dns( + self, + ) -> Result< + SwarmBuilder< + super::provider::Tokio, + WebsocketPhase, + >, + std::io::Error, + > { + Ok(SwarmBuilder { + keypair: self.keypair, + phantom: PhantomData, + phase: WebsocketPhase { + transport: libp2p_dns::tokio::Transport::system(self.phase.transport)?, + }, + }) + } +} + +#[cfg(all(not(target_arch = "wasm32"), feature = "async-std", feature = "dns"))] +impl SwarmBuilder> { + pub fn with_dns_config( + self, + cfg: libp2p_dns::ResolverConfig, + opts: libp2p_dns::ResolverOpts, + ) -> SwarmBuilder< + super::provider::AsyncStd, + WebsocketPhase, + > { + SwarmBuilder { + keypair: self.keypair, + phantom: PhantomData, + phase: WebsocketPhase { + transport: libp2p_dns::async_std::Transport::custom2( + self.phase.transport, + cfg, + opts, + ), + }, + } + } +} + +#[cfg(all(not(target_arch = "wasm32"), feature = "tokio", feature = "dns"))] +impl SwarmBuilder> { + pub fn with_dns_config( + self, + cfg: libp2p_dns::ResolverConfig, + opts: libp2p_dns::ResolverOpts, + ) -> SwarmBuilder> + { + SwarmBuilder { + keypair: self.keypair, + phantom: PhantomData, + phase: WebsocketPhase { + transport: libp2p_dns::tokio::Transport::custom(self.phase.transport, cfg, opts), + }, + } + } +} + +impl SwarmBuilder> { + 
pub(crate) fn without_dns(self) -> SwarmBuilder> { + SwarmBuilder { + keypair: self.keypair, + phantom: PhantomData, + phase: WebsocketPhase { + transport: self.phase.transport, + }, + } + } +} + +// Shortcuts +impl SwarmBuilder> { + pub fn with_behaviour>( + self, + constructor: impl FnOnce(&libp2p_identity::Keypair) -> R, + ) -> Result>, R::Error> { + self.without_dns() + .without_websocket() + .without_relay() + .with_behaviour(constructor) + } +} diff --git a/libp2p/src/builder/phase/identity.rs b/libp2p/src/builder/phase/identity.rs new file mode 100644 index 00000000000..ceb86819dc7 --- /dev/null +++ b/libp2p/src/builder/phase/identity.rs @@ -0,0 +1,21 @@ +use super::*; +use crate::SwarmBuilder; +use std::marker::PhantomData; + +pub struct IdentityPhase {} + +impl SwarmBuilder { + pub fn with_new_identity() -> SwarmBuilder { + SwarmBuilder::with_existing_identity(libp2p_identity::Keypair::generate_ed25519()) + } + + pub fn with_existing_identity( + keypair: libp2p_identity::Keypair, + ) -> SwarmBuilder { + SwarmBuilder { + keypair, + phantom: PhantomData, + phase: ProviderPhase {}, + } + } +} diff --git a/libp2p/src/builder/phase/other_transport.rs b/libp2p/src/builder/phase/other_transport.rs new file mode 100644 index 00000000000..b0d56cd92d2 --- /dev/null +++ b/libp2p/src/builder/phase/other_transport.rs @@ -0,0 +1,269 @@ +use std::convert::Infallible; +use std::marker::PhantomData; +use std::sync::Arc; + +use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}; +use libp2p_core::Transport; +#[cfg(feature = "relay")] +use libp2p_core::{Negotiated, UpgradeInfo}; +#[cfg(feature = "relay")] +use libp2p_identity::PeerId; + +#[allow(deprecated)] +use crate::bandwidth::BandwidthSinks; +use crate::SwarmBuilder; + +use super::*; + +pub struct OtherTransportPhase { + pub(crate) transport: T, +} + +impl + SwarmBuilder> +{ + pub fn with_other_transport< + Muxer: libp2p_core::muxing::StreamMuxer + Send + 'static, + OtherTransport: Transport + 
Send + Unpin + 'static, + R: TryIntoTransport, + >( + self, + constructor: impl FnOnce(&libp2p_identity::Keypair) -> R, + ) -> Result< + SwarmBuilder>, + R::Error, + > + where + ::Error: Send + Sync + 'static, + ::Dial: Send, + ::ListenerUpgrade: Send, + ::Substream: Send, + ::Error: Send + Sync, + { + Ok(SwarmBuilder { + phase: OtherTransportPhase { + transport: self + .phase + .transport + .or_transport( + constructor(&self.keypair) + .try_into_transport()? + .map(|(peer_id, conn), _| (peer_id, StreamMuxerBox::new(conn))), + ) + .map(|either, _| either.into_inner()), + }, + keypair: self.keypair, + phantom: PhantomData, + }) + } + + pub(crate) fn without_any_other_transports(self) -> SwarmBuilder> { + SwarmBuilder { + keypair: self.keypair, + phantom: PhantomData, + phase: DnsPhase { + transport: self.phase.transport, + }, + } + } +} + +// Shortcuts +#[cfg(all(not(target_arch = "wasm32"), feature = "async-std", feature = "dns"))] +impl + SwarmBuilder> +{ + pub async fn with_dns( + self, + ) -> Result< + SwarmBuilder< + super::provider::AsyncStd, + WebsocketPhase, + >, + std::io::Error, + > { + self.without_any_other_transports().with_dns().await + } +} +#[cfg(all(not(target_arch = "wasm32"), feature = "tokio", feature = "dns"))] +impl + SwarmBuilder> +{ + pub fn with_dns( + self, + ) -> Result< + SwarmBuilder< + super::provider::Tokio, + WebsocketPhase, + >, + std::io::Error, + > { + self.without_any_other_transports().with_dns() + } +} +#[cfg(all(not(target_arch = "wasm32"), feature = "async-std", feature = "dns"))] +impl + SwarmBuilder> +{ + pub fn with_dns_config( + self, + cfg: libp2p_dns::ResolverConfig, + opts: libp2p_dns::ResolverOpts, + ) -> SwarmBuilder< + super::provider::AsyncStd, + WebsocketPhase, + > { + self.without_any_other_transports() + .with_dns_config(cfg, opts) + } +} +#[cfg(all(not(target_arch = "wasm32"), feature = "tokio", feature = "dns"))] +impl + SwarmBuilder> +{ + pub fn with_dns_config( + self, + cfg: libp2p_dns::ResolverConfig, + 
opts: libp2p_dns::ResolverOpts, + ) -> SwarmBuilder> + { + self.without_any_other_transports() + .with_dns_config(cfg, opts) + } +} +#[cfg(feature = "relay")] +impl + SwarmBuilder> +{ + /// See [`SwarmBuilder::with_relay_client`]. + pub fn with_relay_client( + self, + security_upgrade: SecUpgrade, + multiplexer_upgrade: MuxUpgrade, + ) -> Result< + SwarmBuilder< + Provider, + BandwidthLoggingPhase, + >, + SecUpgrade::Error, + > where + + SecStream: futures::AsyncRead + futures::AsyncWrite + Unpin + Send + 'static, + SecError: std::error::Error + Send + Sync + 'static, + SecUpgrade: IntoSecurityUpgrade, + SecUpgrade::Upgrade: InboundConnectionUpgrade, Output = (PeerId, SecStream), Error = SecError> + OutboundConnectionUpgrade, Output = (PeerId, SecStream), Error = SecError> + Clone + Send + 'static, + >>::Future: Send, + >>::Future: Send, + <<>::Upgrade as UpgradeInfo>::InfoIter as IntoIterator>::IntoIter: Send, + <>::Upgrade as UpgradeInfo>::Info: Send, + + MuxStream: libp2p_core::muxing::StreamMuxer + Send + 'static, + MuxStream::Substream: Send + 'static, + MuxStream::Error: Send + Sync + 'static, + MuxUpgrade: IntoMultiplexerUpgrade, + MuxUpgrade::Upgrade: InboundConnectionUpgrade, Output = MuxStream, Error = MuxError> + OutboundConnectionUpgrade, Output = MuxStream, Error = MuxError> + Clone + Send + 'static, + >>::Future: Send, + >>::Future: Send, + MuxError: std::error::Error + Send + Sync + 'static, + <<>::Upgrade as UpgradeInfo>::InfoIter as IntoIterator>::IntoIter: Send, + <>::Upgrade as UpgradeInfo>::Info: Send, + { + self.without_any_other_transports() + .without_dns() + .without_websocket() + .with_relay_client(security_upgrade, multiplexer_upgrade) + } +} +impl + SwarmBuilder> +{ + #[allow(deprecated)] + #[deprecated(note = "Use `with_bandwidth_metrics` instead.")] + pub fn with_bandwidth_logging( + self, + ) -> ( + SwarmBuilder< + Provider, + BandwidthMetricsPhase, + >, + Arc, + ) { + #[allow(deprecated)] + self.without_any_other_transports() + 
.without_dns() + .without_websocket() + .without_relay() + .with_bandwidth_logging() + } +} +#[cfg(feature = "metrics")] +impl + SwarmBuilder> +{ + pub fn with_bandwidth_metrics( + self, + registry: &mut libp2p_metrics::Registry, + ) -> SwarmBuilder< + Provider, + BehaviourPhase, + > { + self.without_any_other_transports() + .without_dns() + .without_websocket() + .without_relay() + .without_bandwidth_logging() + .with_bandwidth_metrics(registry) + } +} +impl + SwarmBuilder> +{ + pub fn with_behaviour>( + self, + constructor: impl FnOnce(&libp2p_identity::Keypair) -> R, + ) -> Result>, R::Error> { + self.without_any_other_transports() + .without_dns() + .without_websocket() + .without_relay() + .without_bandwidth_logging() + .with_behaviour(constructor) + } +} + +pub trait TryIntoTransport: private::Sealed { + type Error; + + fn try_into_transport(self) -> Result; +} + +impl TryIntoTransport for T { + type Error = Infallible; + + fn try_into_transport(self) -> Result { + Ok(self) + } +} + +impl TryIntoTransport for Result> { + type Error = TransportError; + + fn try_into_transport(self) -> Result { + self.map_err(TransportError) + } +} + +mod private { + pub trait Sealed {} +} + +impl private::Sealed for T {} + +impl private::Sealed + for Result> +{ +} + +#[derive(Debug, thiserror::Error)] +#[error("failed to build transport: {0}")] +pub struct TransportError(Box); diff --git a/libp2p/src/builder/phase/provider.rs b/libp2p/src/builder/phase/provider.rs new file mode 100644 index 00000000000..32321442689 --- /dev/null +++ b/libp2p/src/builder/phase/provider.rs @@ -0,0 +1,46 @@ +#[allow(unused_imports)] +use super::*; + +use crate::SwarmBuilder; + +pub struct ProviderPhase {} + +impl SwarmBuilder { + #[cfg(all(not(target_arch = "wasm32"), feature = "async-std"))] + pub fn with_async_std(self) -> SwarmBuilder { + SwarmBuilder { + keypair: self.keypair, + phantom: std::marker::PhantomData, + phase: TcpPhase {}, + } + } + + #[cfg(all(not(target_arch = "wasm32"), feature 
= "tokio"))] + pub fn with_tokio(self) -> SwarmBuilder { + SwarmBuilder { + keypair: self.keypair, + phantom: std::marker::PhantomData, + phase: TcpPhase {}, + } + } + + #[cfg(feature = "wasm-bindgen")] + pub fn with_wasm_bindgen(self) -> SwarmBuilder { + SwarmBuilder { + keypair: self.keypair, + phantom: std::marker::PhantomData, + phase: TcpPhase {}, + } + } +} + +pub enum NoProviderSpecified {} + +#[cfg(all(not(target_arch = "wasm32"), feature = "async-std"))] +pub enum AsyncStd {} + +#[cfg(all(not(target_arch = "wasm32"), feature = "tokio"))] +pub enum Tokio {} + +#[cfg(feature = "wasm-bindgen")] +pub enum WasmBindgen {} diff --git a/libp2p/src/builder/phase/quic.rs b/libp2p/src/builder/phase/quic.rs new file mode 100644 index 00000000000..885b16e2e03 --- /dev/null +++ b/libp2p/src/builder/phase/quic.rs @@ -0,0 +1,316 @@ +use super::*; +use crate::SwarmBuilder; +#[cfg(all(not(target_arch = "wasm32"), feature = "websocket"))] +use libp2p_core::muxing::StreamMuxer; +use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}; +#[cfg(any( + feature = "relay", + all(not(target_arch = "wasm32"), feature = "websocket") +))] +use libp2p_core::{InboundUpgrade, Negotiated, OutboundUpgrade, UpgradeInfo}; +use std::{marker::PhantomData, sync::Arc}; + +pub struct QuicPhase { + pub(crate) transport: T, +} + +macro_rules! 
impl_quic_builder { + ($providerKebabCase:literal, $providerPascalCase:ty, $quic:ident) => { + #[cfg(all(not(target_arch = "wasm32"), feature = "quic", feature = $providerKebabCase))] + impl SwarmBuilder<$providerPascalCase, QuicPhase> { + pub fn with_quic( + self, + ) -> SwarmBuilder< + $providerPascalCase, + OtherTransportPhase, + > { + self.with_quic_config(std::convert::identity) + } + + pub fn with_quic_config( + self, + constructor: impl FnOnce(libp2p_quic::Config) -> libp2p_quic::Config, + ) -> SwarmBuilder< + $providerPascalCase, + OtherTransportPhase, + > { + SwarmBuilder { + phase: OtherTransportPhase { + transport: self + .phase + .transport + .or_transport( + libp2p_quic::$quic::Transport::new(constructor( + libp2p_quic::Config::new(&self.keypair), + )) + .map(|(peer_id, muxer), _| { + (peer_id, libp2p_core::muxing::StreamMuxerBox::new(muxer)) + }), + ) + .map(|either, _| either.into_inner()), + }, + keypair: self.keypair, + phantom: PhantomData, + } + } + } + }; +} + +impl_quic_builder!("async-std", AsyncStd, async_std); +impl_quic_builder!("tokio", super::provider::Tokio, tokio); + +impl SwarmBuilder> { + pub(crate) fn without_quic(self) -> SwarmBuilder> { + SwarmBuilder { + keypair: self.keypair, + phantom: PhantomData, + phase: OtherTransportPhase { + transport: self.phase.transport, + }, + } + } +} + +// Shortcuts +impl SwarmBuilder> { + /// See [`SwarmBuilder::with_relay_client`]. 
+ #[cfg(feature = "relay")] + pub fn with_relay_client( + self, + security_upgrade: SecUpgrade, + multiplexer_upgrade: MuxUpgrade, + ) -> Result< + SwarmBuilder< + Provider, + BandwidthLoggingPhase, + >, + SecUpgrade::Error, + > where + + SecStream: futures::AsyncRead + futures::AsyncWrite + Unpin + Send + 'static, + SecError: std::error::Error + Send + Sync + 'static, + SecUpgrade: IntoSecurityUpgrade, + SecUpgrade::Upgrade: InboundConnectionUpgrade, Output = (libp2p_identity::PeerId, SecStream), Error = SecError> + OutboundConnectionUpgrade, Output = (libp2p_identity::PeerId, SecStream), Error = SecError> + Clone + Send + 'static, + >>::Future: Send, + >>::Future: Send, + <<>::Upgrade as UpgradeInfo>::InfoIter as IntoIterator>::IntoIter: Send, + <>::Upgrade as UpgradeInfo>::Info: Send, + + MuxStream: libp2p_core::muxing::StreamMuxer + Send + 'static, + MuxStream::Substream: Send + 'static, + MuxStream::Error: Send + Sync + 'static, + MuxUpgrade: IntoMultiplexerUpgrade, + MuxUpgrade::Upgrade: InboundConnectionUpgrade, Output = MuxStream, Error = MuxError> + OutboundConnectionUpgrade, Output = MuxStream, Error = MuxError> + Clone + Send + 'static, + >>::Future: Send, + >>::Future: Send, + MuxError: std::error::Error + Send + Sync + 'static, + <<>::Upgrade as UpgradeInfo>::InfoIter as IntoIterator>::IntoIter: Send, + <>::Upgrade as UpgradeInfo>::Info: Send, + { + self.without_quic() + .without_any_other_transports() + .without_dns() + .without_websocket() + .with_relay_client(security_upgrade, multiplexer_upgrade) + } + + pub fn with_other_transport< + Muxer: libp2p_core::muxing::StreamMuxer + Send + 'static, + OtherTransport: Transport + Send + Unpin + 'static, + R: TryIntoTransport, + >( + self, + constructor: impl FnOnce(&libp2p_identity::Keypair) -> R, + ) -> Result< + SwarmBuilder>, + R::Error, + > + where + ::Error: Send + Sync + 'static, + ::Dial: Send, + ::ListenerUpgrade: Send, + ::Substream: Send, + ::Error: Send + Sync, + { + 
self.without_quic().with_other_transport(constructor) + } + + pub fn with_behaviour>( + self, + constructor: impl FnOnce(&libp2p_identity::Keypair) -> R, + ) -> Result>, R::Error> { + self.without_quic() + .without_any_other_transports() + .without_dns() + .without_websocket() + .without_relay() + .with_behaviour(constructor) + } +} +#[cfg(all(not(target_arch = "wasm32"), feature = "async-std", feature = "dns"))] +impl SwarmBuilder> { + pub async fn with_dns( + self, + ) -> Result< + SwarmBuilder< + super::provider::AsyncStd, + WebsocketPhase, + >, + std::io::Error, + > { + self.without_quic() + .without_any_other_transports() + .with_dns() + .await + } +} +#[cfg(all(not(target_arch = "wasm32"), feature = "tokio", feature = "dns"))] +impl SwarmBuilder> { + pub fn with_dns( + self, + ) -> Result< + SwarmBuilder< + super::provider::Tokio, + WebsocketPhase, + >, + std::io::Error, + > { + self.without_quic() + .without_any_other_transports() + .with_dns() + } +} +#[cfg(all(not(target_arch = "wasm32"), feature = "async-std", feature = "dns"))] +impl SwarmBuilder> { + pub fn with_dns_config( + self, + cfg: libp2p_dns::ResolverConfig, + opts: libp2p_dns::ResolverOpts, + ) -> SwarmBuilder< + super::provider::AsyncStd, + WebsocketPhase, + > { + self.without_quic() + .without_any_other_transports() + .with_dns_config(cfg, opts) + } +} +#[cfg(all(not(target_arch = "wasm32"), feature = "tokio", feature = "dns"))] +impl SwarmBuilder> { + pub fn with_dns_config( + self, + cfg: libp2p_dns::ResolverConfig, + opts: libp2p_dns::ResolverOpts, + ) -> SwarmBuilder> + { + self.without_quic() + .without_any_other_transports() + .with_dns_config(cfg, opts) + } +} + +macro_rules! impl_quic_phase_with_websocket { + ($providerKebabCase:literal, $providerPascalCase:ty, $websocketStream:ty) => { + #[cfg(all(feature = $providerKebabCase, not(target_arch = "wasm32"), feature = "websocket"))] + impl SwarmBuilder<$providerPascalCase, QuicPhase> { + /// See [`SwarmBuilder::with_websocket`]. 
+ pub async fn with_websocket < + SecUpgrade, + SecStream, + SecError, + MuxUpgrade, + MuxStream, + MuxError, + > ( + self, + security_upgrade: SecUpgrade, + multiplexer_upgrade: MuxUpgrade, + ) -> Result< + SwarmBuilder< + $providerPascalCase, + RelayPhase, + >, + super::websocket::WebsocketError, + > + where + SecStream: futures::AsyncRead + futures::AsyncWrite + Unpin + Send + 'static, + SecError: std::error::Error + Send + Sync + 'static, + SecUpgrade: IntoSecurityUpgrade<$websocketStream>, + SecUpgrade::Upgrade: InboundConnectionUpgrade, Output = (libp2p_identity::PeerId, SecStream), Error = SecError> + OutboundConnectionUpgrade, Output = (libp2p_identity::PeerId, SecStream), Error = SecError> + Clone + Send + 'static, + >>::Future: Send, + >>::Future: Send, + <<>::Upgrade as UpgradeInfo>::InfoIter as IntoIterator>::IntoIter: Send, + <>::Upgrade as UpgradeInfo>::Info: Send, + + MuxStream: StreamMuxer + Send + 'static, + MuxStream::Substream: Send + 'static, + MuxStream::Error: Send + Sync + 'static, + MuxUpgrade: IntoMultiplexerUpgrade, + MuxUpgrade::Upgrade: InboundConnectionUpgrade, Output = MuxStream, Error = MuxError> + OutboundConnectionUpgrade, Output = MuxStream, Error = MuxError> + Clone + Send + 'static, + >>::Future: Send, + >>::Future: Send, + MuxError: std::error::Error + Send + Sync + 'static, + <<>::Upgrade as UpgradeInfo>::InfoIter as IntoIterator>::IntoIter: Send, + <>::Upgrade as UpgradeInfo>::Info: Send, + { + self.without_quic() + .without_any_other_transports() + .without_dns() + .with_websocket(security_upgrade, multiplexer_upgrade) + .await + } + } + } +} +impl_quic_phase_with_websocket!( + "async-std", + super::provider::AsyncStd, + rw_stream_sink::RwStreamSink< + libp2p_websocket::BytesConnection, + > +); +impl_quic_phase_with_websocket!( + "tokio", + super::provider::Tokio, + rw_stream_sink::RwStreamSink> +); +impl SwarmBuilder> { + #[allow(deprecated)] + #[deprecated(note = "Use `with_bandwidth_metrics` instead.")] + pub fn 
with_bandwidth_logging( + self, + ) -> ( + SwarmBuilder< + Provider, + BandwidthMetricsPhase, + >, + Arc, + ) { + #[allow(deprecated)] + self.without_quic() + .without_any_other_transports() + .without_dns() + .without_websocket() + .without_relay() + .with_bandwidth_logging() + } +} +#[cfg(feature = "metrics")] +impl SwarmBuilder> { + pub fn with_bandwidth_metrics( + self, + registry: &mut libp2p_metrics::Registry, + ) -> SwarmBuilder< + Provider, + BehaviourPhase, + > { + self.without_quic() + .without_any_other_transports() + .without_dns() + .without_websocket() + .without_relay() + .without_bandwidth_logging() + .with_bandwidth_metrics(registry) + } +} diff --git a/libp2p/src/builder/phase/relay.rs b/libp2p/src/builder/phase/relay.rs new file mode 100644 index 00000000000..f8305f9d246 --- /dev/null +++ b/libp2p/src/builder/phase/relay.rs @@ -0,0 +1,143 @@ +use std::marker::PhantomData; + +#[cfg(feature = "relay")] +use libp2p_core::muxing::StreamMuxerBox; +use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}; +#[cfg(feature = "relay")] +use libp2p_core::Transport; +#[cfg(any(feature = "relay", feature = "websocket"))] +use libp2p_core::{InboundUpgrade, Negotiated, OutboundUpgrade, StreamMuxer, UpgradeInfo}; +#[cfg(feature = "relay")] +use libp2p_identity::PeerId; + +use crate::SwarmBuilder; + +use super::*; + +pub struct RelayPhase { + pub(crate) transport: T, +} + +#[cfg(feature = "relay")] +impl SwarmBuilder> { + /// Adds a relay client transport. + /// + /// Note that both `security_upgrade` and `multiplexer_upgrade` take function pointers, + /// i.e. they take the function themselves (without the invocation via `()`), not the + /// result of the function invocation. See example below. 
+ /// + /// ``` rust + /// # use libp2p::SwarmBuilder; + /// # use std::error::Error; + /// # async fn build_swarm() -> Result<(), Box> { + /// let swarm = SwarmBuilder::with_new_identity() + /// .with_tokio() + /// .with_tcp( + /// Default::default(), + /// (libp2p_tls::Config::new, libp2p_noise::Config::new), + /// libp2p_yamux::Config::default, + /// )? + /// .with_relay_client( + /// (libp2p_tls::Config::new, libp2p_noise::Config::new), + /// libp2p_yamux::Config::default, + /// )? + /// # ; + /// # Ok(()) + /// # } + /// ``` + pub fn with_relay_client( + self, + security_upgrade: SecUpgrade, + multiplexer_upgrade: MuxUpgrade, + ) -> Result< + SwarmBuilder< + Provider, + BandwidthLoggingPhase, + >, + SecUpgrade::Error, + > where + + SecStream: futures::AsyncRead + futures::AsyncWrite + Unpin + Send + 'static, + SecError: std::error::Error + Send + Sync + 'static, + SecUpgrade: IntoSecurityUpgrade, + SecUpgrade::Upgrade: InboundConnectionUpgrade, Output = (PeerId, SecStream), Error = SecError> + OutboundConnectionUpgrade, Output = (PeerId, SecStream), Error = SecError> + Clone + Send + 'static, + >>::Future: Send, + >>::Future: Send, + <<>::Upgrade as UpgradeInfo>::InfoIter as IntoIterator>::IntoIter: Send, + <>::Upgrade as UpgradeInfo>::Info: Send, + + MuxStream: StreamMuxer + Send + 'static, + MuxStream::Substream: Send + 'static, + MuxStream::Error: Send + Sync + 'static, + MuxUpgrade: IntoMultiplexerUpgrade, + MuxUpgrade::Upgrade: InboundConnectionUpgrade, Output = MuxStream, Error = MuxError> + OutboundConnectionUpgrade, Output = MuxStream, Error = MuxError> + Clone + Send + 'static, + >>::Future: Send, + >>::Future: Send, + MuxError: std::error::Error + Send + Sync + 'static, + <<>::Upgrade as UpgradeInfo>::InfoIter as IntoIterator>::IntoIter: Send, + <>::Upgrade as UpgradeInfo>::Info: Send, + { + let (relay_transport, relay_behaviour) = + libp2p_relay::client::new(self.keypair.public().to_peer_id()); + let relay_transport = relay_transport + 
.upgrade(libp2p_core::upgrade::Version::V1Lazy) + .authenticate(security_upgrade.into_security_upgrade(&self.keypair)?) + .multiplex(multiplexer_upgrade.into_multiplexer_upgrade()) + .map(|(p, c), _| (p, StreamMuxerBox::new(c))); + + Ok(SwarmBuilder { + phase: BandwidthLoggingPhase { + relay_behaviour, + transport: relay_transport + .or_transport(self.phase.transport) + .map(|either, _| either.into_inner()), + }, + keypair: self.keypair, + phantom: PhantomData, + }) + } +} + +pub struct NoRelayBehaviour; + +impl SwarmBuilder> { + pub(crate) fn without_relay( + self, + ) -> SwarmBuilder> { + SwarmBuilder { + keypair: self.keypair, + phantom: PhantomData, + phase: BandwidthLoggingPhase { + transport: self.phase.transport, + relay_behaviour: NoRelayBehaviour, + }, + } + } +} + +// Shortcuts +#[cfg(feature = "metrics")] +impl SwarmBuilder> { + pub fn with_bandwidth_metrics( + self, + registry: &mut libp2p_metrics::Registry, + ) -> SwarmBuilder< + Provider, + BehaviourPhase, + > { + self.without_relay() + .without_bandwidth_logging() + .with_bandwidth_metrics(registry) + } +} +impl SwarmBuilder> { + pub fn with_behaviour>( + self, + constructor: impl FnOnce(&libp2p_identity::Keypair) -> R, + ) -> Result>, R::Error> { + self.without_relay() + .without_bandwidth_logging() + .without_bandwidth_metrics() + .with_behaviour(constructor) + } +} diff --git a/libp2p/src/builder/phase/swarm.rs b/libp2p/src/builder/phase/swarm.rs new file mode 100644 index 00000000000..ee456ced927 --- /dev/null +++ b/libp2p/src/builder/phase/swarm.rs @@ -0,0 +1,60 @@ +#[allow(unused_imports)] +use super::*; + +#[allow(dead_code)] +pub struct SwarmPhase { + pub(crate) behaviour: B, + pub(crate) transport: T, +} + +macro_rules! 
impl_with_swarm_config { + ($providerKebabCase:literal, $providerPascalCase:ty, $config:expr) => { + #[cfg(feature = $providerKebabCase)] + impl SwarmBuilder<$providerPascalCase, SwarmPhase> { + pub fn with_swarm_config( + self, + constructor: impl FnOnce(libp2p_swarm::Config) -> libp2p_swarm::Config, + ) -> SwarmBuilder<$providerPascalCase, BuildPhase> { + SwarmBuilder { + phase: BuildPhase { + behaviour: self.phase.behaviour, + transport: self.phase.transport, + swarm_config: constructor($config), + }, + keypair: self.keypair, + phantom: std::marker::PhantomData, + } + } + + // Shortcuts + pub fn build(self) -> libp2p_swarm::Swarm + where + B: libp2p_swarm::NetworkBehaviour, + T: AuthenticatedMultiplexedTransport, + { + self.with_swarm_config(std::convert::identity).build() + } + } + }; +} + +#[cfg(not(target_arch = "wasm32"))] +impl_with_swarm_config!( + "async-std", + super::provider::AsyncStd, + libp2p_swarm::Config::with_async_std_executor() +); + +#[cfg(not(target_arch = "wasm32"))] +impl_with_swarm_config!( + "tokio", + super::provider::Tokio, + libp2p_swarm::Config::with_tokio_executor() +); + +#[cfg(target_arch = "wasm32")] +impl_with_swarm_config!( + "wasm-bindgen", + super::provider::WasmBindgen, + libp2p_swarm::Config::with_wasm_executor() +); diff --git a/libp2p/src/builder/phase/tcp.rs b/libp2p/src/builder/phase/tcp.rs new file mode 100644 index 00000000000..4b7cf29b3d2 --- /dev/null +++ b/libp2p/src/builder/phase/tcp.rs @@ -0,0 +1,251 @@ +use super::*; +use crate::SwarmBuilder; +#[cfg(all( + not(target_arch = "wasm32"), + any(feature = "tcp", feature = "websocket") +))] +use libp2p_core::muxing::{StreamMuxer, StreamMuxerBox}; +#[cfg(all(feature = "websocket", not(target_arch = "wasm32")))] +use libp2p_core::Transport; +#[cfg(all( + not(target_arch = "wasm32"), + any(feature = "tcp", feature = "websocket") +))] +use libp2p_core::{ + upgrade::InboundConnectionUpgrade, upgrade::OutboundConnectionUpgrade, Negotiated, UpgradeInfo, +}; +use 
std::marker::PhantomData; + +pub struct TcpPhase {} + +macro_rules! impl_tcp_builder { + ($providerKebabCase:literal, $providerPascalCase:ty, $path:ident) => { + #[cfg(all( + not(target_arch = "wasm32"), + feature = "tcp", + feature = $providerKebabCase, + ))] + impl SwarmBuilder<$providerPascalCase, TcpPhase> { + /// Adds a TCP based transport. + /// + /// Note that both `security_upgrade` and `multiplexer_upgrade` take function pointers, + /// i.e. they take the function themselves (without the invocation via `()`), not the + /// result of the function invocation. See example below. + /// + /// ``` rust + /// # use libp2p::SwarmBuilder; + /// # use std::error::Error; + /// # async fn build_swarm() -> Result<(), Box> { + /// let swarm = SwarmBuilder::with_new_identity() + /// .with_tokio() + /// .with_tcp( + /// Default::default(), + /// (libp2p_tls::Config::new, libp2p_noise::Config::new), + /// libp2p_yamux::Config::default, + /// )? + /// # ; + /// # Ok(()) + /// # } + /// ``` + pub fn with_tcp( + self, + tcp_config: libp2p_tcp::Config, + security_upgrade: SecUpgrade, + multiplexer_upgrade: MuxUpgrade, + ) -> Result< + SwarmBuilder<$providerPascalCase, QuicPhase>, + SecUpgrade::Error, + > + where + SecStream: futures::AsyncRead + futures::AsyncWrite + Unpin + Send + 'static, + SecError: std::error::Error + Send + Sync + 'static, + SecUpgrade: IntoSecurityUpgrade, + SecUpgrade::Upgrade: InboundConnectionUpgrade, Output = (libp2p_identity::PeerId, SecStream), Error = SecError> + OutboundConnectionUpgrade, Output = (libp2p_identity::PeerId, SecStream), Error = SecError> + Clone + Send + 'static, + >>::Future: Send, + >>::Future: Send, + <<>::Upgrade as UpgradeInfo>::InfoIter as IntoIterator>::IntoIter: Send, + <>::Upgrade as UpgradeInfo>::Info: Send, + + MuxStream: StreamMuxer + Send + 'static, + MuxStream::Substream: Send + 'static, + MuxStream::Error: Send + Sync + 'static, + MuxUpgrade: IntoMultiplexerUpgrade, + MuxUpgrade::Upgrade: InboundConnectionUpgrade, 
Output = MuxStream, Error = MuxError> + OutboundConnectionUpgrade, Output = MuxStream, Error = MuxError> + Clone + Send + 'static, + >>::Future: Send, + >>::Future: Send, + MuxError: std::error::Error + Send + Sync + 'static, + <<>::Upgrade as UpgradeInfo>::InfoIter as IntoIterator>::IntoIter: Send, + <>::Upgrade as UpgradeInfo>::Info: Send, + { + Ok(SwarmBuilder { + phase: QuicPhase { + transport: libp2p_tcp::$path::Transport::new(tcp_config) + .upgrade(libp2p_core::upgrade::Version::V1Lazy) + .authenticate( + security_upgrade.into_security_upgrade(&self.keypair)?, + ) + .multiplex(multiplexer_upgrade.into_multiplexer_upgrade()) + .map(|(p, c), _| (p, StreamMuxerBox::new(c))), + }, + keypair: self.keypair, + phantom: PhantomData, + }) + } + } + }; +} + +impl_tcp_builder!("async-std", super::provider::AsyncStd, async_io); +impl_tcp_builder!("tokio", super::provider::Tokio, tokio); + +impl SwarmBuilder { + pub(crate) fn without_tcp( + self, + ) -> SwarmBuilder> { + SwarmBuilder { + keypair: self.keypair, + phantom: PhantomData, + phase: QuicPhase { + transport: libp2p_core::transport::dummy::DummyTransport::new(), + }, + } + } +} + +// Shortcuts +#[cfg(all(not(target_arch = "wasm32"), feature = "quic", feature = "async-std"))] +impl SwarmBuilder { + pub fn with_quic( + self, + ) -> SwarmBuilder< + super::provider::AsyncStd, + OtherTransportPhase, + > { + self.without_tcp().with_quic() + } +} +#[cfg(all(not(target_arch = "wasm32"), feature = "quic", feature = "tokio"))] +impl SwarmBuilder { + pub fn with_quic( + self, + ) -> SwarmBuilder< + super::provider::Tokio, + OtherTransportPhase, + > { + self.without_tcp().with_quic() + } +} +#[cfg(all(not(target_arch = "wasm32"), feature = "quic", feature = "async-std"))] +impl SwarmBuilder { + pub fn with_quic_config( + self, + constructor: impl FnOnce(libp2p_quic::Config) -> libp2p_quic::Config, + ) -> SwarmBuilder< + super::provider::AsyncStd, + OtherTransportPhase, + > { + self.without_tcp().with_quic_config(constructor) 
+ } +} +#[cfg(all(not(target_arch = "wasm32"), feature = "quic", feature = "tokio"))] +impl SwarmBuilder { + pub fn with_quic_config( + self, + constructor: impl FnOnce(libp2p_quic::Config) -> libp2p_quic::Config, + ) -> SwarmBuilder< + super::provider::Tokio, + OtherTransportPhase, + > { + self.without_tcp().with_quic_config(constructor) + } +} +impl SwarmBuilder { + pub fn with_other_transport< + Muxer: libp2p_core::muxing::StreamMuxer + Send + 'static, + OtherTransport: Transport + Send + Unpin + 'static, + R: TryIntoTransport, + >( + self, + constructor: impl FnOnce(&libp2p_identity::Keypair) -> R, + ) -> Result< + SwarmBuilder>, + R::Error, + > + where + ::Error: Send + Sync + 'static, + ::Dial: Send, + ::ListenerUpgrade: Send, + ::Substream: Send, + ::Error: Send + Sync, + { + self.without_tcp() + .without_quic() + .with_other_transport(constructor) + } +} +macro_rules! impl_tcp_phase_with_websocket { + ($providerKebabCase:literal, $providerPascalCase:ty, $websocketStream:ty) => { + #[cfg(all(feature = $providerKebabCase, not(target_arch = "wasm32"), feature = "websocket"))] + impl SwarmBuilder<$providerPascalCase, TcpPhase> { + /// See [`SwarmBuilder::with_websocket`]. 
+ pub async fn with_websocket < + SecUpgrade, + SecStream, + SecError, + MuxUpgrade, + MuxStream, + MuxError, + > ( + self, + security_upgrade: SecUpgrade, + multiplexer_upgrade: MuxUpgrade, + ) -> Result< + SwarmBuilder< + $providerPascalCase, + RelayPhase, + >, + WebsocketError, + > + where + SecStream: futures::AsyncRead + futures::AsyncWrite + Unpin + Send + 'static, + SecError: std::error::Error + Send + Sync + 'static, + SecUpgrade: IntoSecurityUpgrade<$websocketStream>, + SecUpgrade::Upgrade: InboundConnectionUpgrade, Output = (libp2p_identity::PeerId, SecStream), Error = SecError> + OutboundConnectionUpgrade, Output = (libp2p_identity::PeerId, SecStream), Error = SecError> + Clone + Send + 'static, + >>::Future: Send, + >>::Future: Send, + <<>::Upgrade as UpgradeInfo>::InfoIter as IntoIterator>::IntoIter: Send, + <>::Upgrade as UpgradeInfo>::Info: Send, + + MuxStream: StreamMuxer + Send + 'static, + MuxStream::Substream: Send + 'static, + MuxStream::Error: Send + Sync + 'static, + MuxUpgrade: IntoMultiplexerUpgrade, + MuxUpgrade::Upgrade: InboundConnectionUpgrade, Output = MuxStream, Error = MuxError> + OutboundConnectionUpgrade, Output = MuxStream, Error = MuxError> + Clone + Send + 'static, + >>::Future: Send, + >>::Future: Send, + MuxError: std::error::Error + Send + Sync + 'static, + <<>::Upgrade as UpgradeInfo>::InfoIter as IntoIterator>::IntoIter: Send, + <>::Upgrade as UpgradeInfo>::Info: Send, + { + self.without_tcp() + .without_quic() + .without_any_other_transports() + .without_dns() + .with_websocket(security_upgrade, multiplexer_upgrade) + .await + } + } + } +} +impl_tcp_phase_with_websocket!( + "async-std", + super::provider::AsyncStd, + rw_stream_sink::RwStreamSink< + libp2p_websocket::BytesConnection, + > +); +impl_tcp_phase_with_websocket!( + "tokio", + super::provider::Tokio, + rw_stream_sink::RwStreamSink> +); diff --git a/libp2p/src/builder/phase/websocket.rs b/libp2p/src/builder/phase/websocket.rs new file mode 100644 index 
00000000000..68a85bb77b7 --- /dev/null +++ b/libp2p/src/builder/phase/websocket.rs @@ -0,0 +1,229 @@ +use super::*; +use crate::SwarmBuilder; +#[cfg(all(not(target_arch = "wasm32"), feature = "websocket"))] +use libp2p_core::muxing::{StreamMuxer, StreamMuxerBox}; +use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}; +#[cfg(all(not(target_arch = "wasm32"), feature = "websocket"))] +use libp2p_core::Transport; +#[cfg(any( + all(not(target_arch = "wasm32"), feature = "websocket"), + feature = "relay" +))] +use libp2p_core::{InboundUpgrade, Negotiated, OutboundUpgrade, UpgradeInfo}; +#[cfg(any( + all(not(target_arch = "wasm32"), feature = "websocket"), + feature = "relay" +))] +use libp2p_identity::PeerId; +use std::marker::PhantomData; + +pub struct WebsocketPhase { + pub(crate) transport: T, +} + +macro_rules! impl_websocket_builder { + ($providerKebabCase:literal, $providerPascalCase:ty, $dnsTcp:expr, $websocketStream:ty) => { + /// Adds a websocket client transport. + /// + /// Note that both `security_upgrade` and `multiplexer_upgrade` take function pointers, + /// i.e. they take the function themselves (without the invocation via `()`), not the + /// result of the function invocation. See example below. + /// + /// ``` rust + /// # use libp2p::SwarmBuilder; + /// # use std::error::Error; + /// # async fn build_swarm() -> Result<(), Box> { + /// let swarm = SwarmBuilder::with_new_identity() + /// .with_tokio() + /// .with_websocket( + /// (libp2p_tls::Config::new, libp2p_noise::Config::new), + /// libp2p_yamux::Config::default, + /// ) + /// .await? 
+ /// # ; + /// # Ok(()) + /// # } + /// ``` + #[cfg(all(not(target_arch = "wasm32"), feature = $providerKebabCase, feature = "websocket"))] + impl SwarmBuilder<$providerPascalCase, WebsocketPhase> { + pub async fn with_websocket< + SecUpgrade, + SecStream, + SecError, + MuxUpgrade, + MuxStream, + MuxError, + >( + self, + security_upgrade: SecUpgrade, + multiplexer_upgrade: MuxUpgrade, + ) -> Result< + SwarmBuilder< + $providerPascalCase, + RelayPhase, + >, + WebsocketError, + > + + where + T: AuthenticatedMultiplexedTransport, + + SecStream: futures::AsyncRead + futures::AsyncWrite + Unpin + Send + 'static, + SecError: std::error::Error + Send + Sync + 'static, + SecUpgrade: IntoSecurityUpgrade<$websocketStream>, + SecUpgrade::Upgrade: InboundConnectionUpgrade, Output = (PeerId, SecStream), Error = SecError> + OutboundConnectionUpgrade, Output = (PeerId, SecStream), Error = SecError> + Clone + Send + 'static, + >>::Future: Send, + >>::Future: Send, + <<>::Upgrade as UpgradeInfo>::InfoIter as IntoIterator>::IntoIter: Send, + <>::Upgrade as UpgradeInfo>::Info: Send, + + MuxStream: StreamMuxer + Send + 'static, + MuxStream::Substream: Send + 'static, + MuxStream::Error: Send + Sync + 'static, + MuxUpgrade: IntoMultiplexerUpgrade, + MuxUpgrade::Upgrade: InboundConnectionUpgrade, Output = MuxStream, Error = MuxError> + OutboundConnectionUpgrade, Output = MuxStream, Error = MuxError> + Clone + Send + 'static, + >>::Future: Send, + >>::Future: Send, + MuxError: std::error::Error + Send + Sync + 'static, + <<>::Upgrade as UpgradeInfo>::InfoIter as IntoIterator>::IntoIter: Send, + <>::Upgrade as UpgradeInfo>::Info: Send, + + { + let security_upgrade = security_upgrade.into_security_upgrade(&self.keypair) + .map_err(WebsocketErrorInner::SecurityUpgrade)?; + let websocket_transport = libp2p_websocket::WsConfig::new( + $dnsTcp.await.map_err(WebsocketErrorInner::Dns)?, + ) + .upgrade(libp2p_core::upgrade::Version::V1Lazy) + .authenticate(security_upgrade) + 
.multiplex(multiplexer_upgrade.into_multiplexer_upgrade()) + .map(|(p, c), _| (p, StreamMuxerBox::new(c))); + + Ok(SwarmBuilder { + keypair: self.keypair, + phantom: PhantomData, + phase: RelayPhase { + transport: websocket_transport + .or_transport(self.phase.transport) + .map(|either, _| either.into_inner()), + }, + }) + } + } + }; +} + +impl_websocket_builder!( + "async-std", + super::provider::AsyncStd, + libp2p_dns::async_std::Transport::system(libp2p_tcp::async_io::Transport::new( + libp2p_tcp::Config::default(), + )), + rw_stream_sink::RwStreamSink< + libp2p_websocket::BytesConnection, + > +); +impl_websocket_builder!( + "tokio", + super::provider::Tokio, + // Note this is an unnecessary await for Tokio Websocket (i.e. tokio dns) in order to be consistent + // with above AsyncStd construction. + futures::future::ready(libp2p_dns::tokio::Transport::system( + libp2p_tcp::tokio::Transport::new(libp2p_tcp::Config::default()) + )), + rw_stream_sink::RwStreamSink> +); + +impl SwarmBuilder> { + pub(crate) fn without_websocket(self) -> SwarmBuilder> { + SwarmBuilder { + keypair: self.keypair, + phantom: PhantomData, + phase: RelayPhase { + transport: self.phase.transport, + }, + } + } +} + +// Shortcuts +#[cfg(feature = "relay")] +impl SwarmBuilder> { + /// See [`SwarmBuilder::with_relay_client`]. 
+ pub fn with_relay_client( + self, + security_upgrade: SecUpgrade, + multiplexer_upgrade: MuxUpgrade, + ) -> Result< + SwarmBuilder< + Provider, + BandwidthLoggingPhase, + >, + SecUpgrade::Error, + > where + + SecStream: futures::AsyncRead + futures::AsyncWrite + Unpin + Send + 'static, + SecError: std::error::Error + Send + Sync + 'static, + SecUpgrade: IntoSecurityUpgrade, + SecUpgrade::Upgrade: InboundConnectionUpgrade, Output = (PeerId, SecStream), Error = SecError> + OutboundConnectionUpgrade, Output = (PeerId, SecStream), Error = SecError> + Clone + Send + 'static, + >>::Future: Send, + >>::Future: Send, + <<>::Upgrade as UpgradeInfo>::InfoIter as IntoIterator>::IntoIter: Send, + <>::Upgrade as UpgradeInfo>::Info: Send, + + MuxStream: libp2p_core::muxing::StreamMuxer + Send + 'static, + MuxStream::Substream: Send + 'static, + MuxStream::Error: Send + Sync + 'static, + MuxUpgrade: IntoMultiplexerUpgrade, + MuxUpgrade::Upgrade: InboundConnectionUpgrade, Output = MuxStream, Error = MuxError> + OutboundConnectionUpgrade, Output = MuxStream, Error = MuxError> + Clone + Send + 'static, + >>::Future: Send, + >>::Future: Send, + MuxError: std::error::Error + Send + Sync + 'static, + <<>::Upgrade as UpgradeInfo>::InfoIter as IntoIterator>::IntoIter: Send, + <>::Upgrade as UpgradeInfo>::Info: Send, + { + self.without_websocket() + .with_relay_client(security_upgrade, multiplexer_upgrade) + } +} +#[cfg(feature = "metrics")] +impl SwarmBuilder> { + pub fn with_bandwidth_metrics( + self, + registry: &mut libp2p_metrics::Registry, + ) -> SwarmBuilder< + Provider, + BehaviourPhase, + > { + self.without_websocket() + .without_relay() + .without_bandwidth_logging() + .with_bandwidth_metrics(registry) + } +} +impl SwarmBuilder> { + pub fn with_behaviour>( + self, + constructor: impl FnOnce(&libp2p_identity::Keypair) -> R, + ) -> Result>, R::Error> { + self.without_websocket() + .without_relay() + .without_bandwidth_logging() + .with_behaviour(constructor) + } +} + 
+#[derive(Debug, thiserror::Error)] +#[error(transparent)] +#[cfg(all(not(target_arch = "wasm32"), feature = "websocket"))] +pub struct WebsocketError(#[from] WebsocketErrorInner); + +#[derive(Debug, thiserror::Error)] +#[cfg(all(not(target_arch = "wasm32"), feature = "websocket"))] +enum WebsocketErrorInner { + #[error("SecurityUpgrade")] + SecurityUpgrade(Sec), + #[cfg(feature = "dns")] + #[error("Dns")] + Dns(#[from] std::io::Error), +} diff --git a/libp2p/src/builder/select_muxer.rs b/libp2p/src/builder/select_muxer.rs new file mode 100644 index 00000000000..c93ba9d9991 --- /dev/null +++ b/libp2p/src/builder/select_muxer.rs @@ -0,0 +1,98 @@ +// Copyright 2018 Parity Technologies (UK) Ltd. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. 
+ +#![allow(unreachable_pub)] + +use either::Either; +use futures::future; +use libp2p_core::either::EitherFuture; +use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}; +use libp2p_core::UpgradeInfo; +use std::iter::{Chain, Map}; + +#[derive(Debug, Clone)] +pub struct SelectMuxerUpgrade(A, B); + +impl SelectMuxerUpgrade { + pub fn new(a: A, b: B) -> Self { + SelectMuxerUpgrade(a, b) + } +} + +impl UpgradeInfo for SelectMuxerUpgrade +where + A: UpgradeInfo, + B: UpgradeInfo, +{ + type Info = Either; + type InfoIter = Chain< + Map<::IntoIter, fn(A::Info) -> Self::Info>, + Map<::IntoIter, fn(B::Info) -> Self::Info>, + >; + + fn protocol_info(&self) -> Self::InfoIter { + let a = self + .0 + .protocol_info() + .into_iter() + .map(Either::Left as fn(A::Info) -> _); + let b = self + .1 + .protocol_info() + .into_iter() + .map(Either::Right as fn(B::Info) -> _); + + a.chain(b) + } +} + +impl InboundConnectionUpgrade for SelectMuxerUpgrade +where + A: InboundConnectionUpgrade, + B: InboundConnectionUpgrade, +{ + type Output = future::Either; + type Error = Either; + type Future = EitherFuture; + + fn upgrade_inbound(self, sock: C, info: Self::Info) -> Self::Future { + match info { + Either::Left(info) => EitherFuture::First(self.0.upgrade_inbound(sock, info)), + Either::Right(info) => EitherFuture::Second(self.1.upgrade_inbound(sock, info)), + } + } +} + +impl OutboundConnectionUpgrade for SelectMuxerUpgrade +where + A: OutboundConnectionUpgrade, + B: OutboundConnectionUpgrade, +{ + type Output = future::Either; + type Error = Either; + type Future = EitherFuture; + + fn upgrade_outbound(self, sock: C, info: Self::Info) -> Self::Future { + match info { + Either::Left(info) => EitherFuture::First(self.0.upgrade_outbound(sock, info)), + Either::Right(info) => EitherFuture::Second(self.1.upgrade_outbound(sock, info)), + } + } +} diff --git a/libp2p/src/builder/select_security.rs b/libp2p/src/builder/select_security.rs new file mode 100644 index 
00000000000..d6c7f8c172f --- /dev/null +++ b/libp2p/src/builder/select_security.rs @@ -0,0 +1,115 @@ +// Copyright 2023 Protocol Labs. +// Copyright 2018 Parity Technologies (UK) Ltd. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +#![allow(unreachable_pub)] + +use either::Either; +use futures::future::MapOk; +use futures::{future, TryFutureExt}; +use libp2p_core::either::EitherFuture; +use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade, UpgradeInfo}; +use libp2p_identity::PeerId; +use std::iter::{Chain, Map}; + +/// Upgrade that combines two upgrades into one. Supports all the protocols supported by either +/// sub-upgrade. +/// +/// The protocols supported by the first element have a higher priority. +#[derive(Debug, Clone)] +pub struct SelectSecurityUpgrade(A, B); + +impl SelectSecurityUpgrade { + /// Combines two upgrades into an `SelectUpgrade`. 
+ /// + /// The protocols supported by the first element have a higher priority. + pub fn new(a: A, b: B) -> Self { + SelectSecurityUpgrade(a, b) + } +} + +impl UpgradeInfo for SelectSecurityUpgrade +where + A: UpgradeInfo, + B: UpgradeInfo, +{ + type Info = Either; + type InfoIter = Chain< + Map<::IntoIter, fn(A::Info) -> Self::Info>, + Map<::IntoIter, fn(B::Info) -> Self::Info>, + >; + + fn protocol_info(&self) -> Self::InfoIter { + let a = self + .0 + .protocol_info() + .into_iter() + .map(Either::Left as fn(A::Info) -> _); + let b = self + .1 + .protocol_info() + .into_iter() + .map(Either::Right as fn(B::Info) -> _); + + a.chain(b) + } +} + +impl InboundConnectionUpgrade for SelectSecurityUpgrade +where + A: InboundConnectionUpgrade, + B: InboundConnectionUpgrade, +{ + type Output = (PeerId, future::Either); + type Error = Either; + type Future = MapOk< + EitherFuture, + fn(future::Either<(PeerId, TA), (PeerId, TB)>) -> (PeerId, future::Either), + >; + + fn upgrade_inbound(self, sock: C, info: Self::Info) -> Self::Future { + match info { + Either::Left(info) => EitherFuture::First(self.0.upgrade_inbound(sock, info)), + Either::Right(info) => EitherFuture::Second(self.1.upgrade_inbound(sock, info)), + } + .map_ok(future::Either::factor_first) + } +} + +impl OutboundConnectionUpgrade for SelectSecurityUpgrade +where + A: OutboundConnectionUpgrade, + B: OutboundConnectionUpgrade, +{ + type Output = (PeerId, future::Either); + type Error = Either; + type Future = MapOk< + EitherFuture, + fn(future::Either<(PeerId, TA), (PeerId, TB)>) -> (PeerId, future::Either), + >; + + fn upgrade_outbound(self, sock: C, info: Self::Info) -> Self::Future { + match info { + Either::Left(info) => EitherFuture::First(self.0.upgrade_outbound(sock, info)), + Either::Right(info) => EitherFuture::Second(self.1.upgrade_outbound(sock, info)), + } + .map_ok(future::Either::factor_first) + } +} diff --git a/libp2p/src/lib.rs b/libp2p/src/lib.rs index 3a8c09a068b..58f911e9445 100644 --- 
a/libp2p/src/lib.rs +++ b/libp2p/src/lib.rs @@ -20,11 +20,10 @@ //! libp2p is a modular peer-to-peer networking framework. //! -//! To learn more about the general libp2p multi-language framework visit -//! [libp2p.io](https://libp2p.io/). +//! To learn more about the general libp2p multi-language framework visit . //! //! To get started with this libp2p implementation in Rust, please take a look -//! at the [`tutorials`](crate::tutorials). Further examples can be found in the +//! at the [`tutorials`]. Further examples can be found in the //! [examples] directory. //! //! [examples]: https://github.com/libp2p/rust-libp2p/tree/master/examples @@ -52,10 +51,6 @@ pub use libp2p_core as core; #[cfg(feature = "dcutr")] #[doc(inline)] pub use libp2p_dcutr as dcutr; -#[cfg(feature = "deflate")] -#[cfg(not(target_arch = "wasm32"))] -#[doc(inline)] -pub use libp2p_deflate as deflate; #[cfg(feature = "dns")] #[cfg_attr(docsrs, doc(cfg(feature = "dns")))] #[cfg(not(target_arch = "wasm32"))] @@ -65,7 +60,6 @@ pub use libp2p_dns as dns; #[doc(inline)] pub use libp2p_floodsub as floodsub; #[cfg(feature = "gossipsub")] -#[cfg(not(target_os = "unknown"))] #[doc(inline)] pub use libp2p_gossipsub as gossipsub; #[cfg(feature = "identify")] @@ -79,24 +73,17 @@ pub use libp2p_kad as kad; #[cfg_attr(docsrs, doc(cfg(feature = "mdns")))] #[doc(inline)] pub use libp2p_mdns as mdns; +#[cfg(feature = "memory-connection-limits")] +#[cfg(not(target_arch = "wasm32"))] +#[cfg_attr(docsrs, doc(cfg(feature = "memory-connection-limits")))] +#[doc(inline)] +pub use libp2p_memory_connection_limits as memory_connection_limits; #[cfg(feature = "metrics")] #[doc(inline)] pub use libp2p_metrics as metrics; -#[cfg(feature = "mplex")] -#[deprecated( - note = "`mplex` is not recommended anymore. Please use `yamux` instead or depend on `libp2p-mplex` directly if you need it for legacy use cases." 
-)] -pub mod mplex { - pub use libp2p_mplex::*; -} #[cfg(feature = "noise")] #[doc(inline)] pub use libp2p_noise as noise; -#[cfg(feature = "perf")] -#[cfg(not(target_arch = "wasm32"))] -#[cfg_attr(docsrs, doc(cfg(feature = "perf")))] -#[doc(inline)] -pub use libp2p_perf as perf; #[cfg(feature = "ping")] #[doc(inline)] pub use libp2p_ping as ping; @@ -108,12 +95,7 @@ pub use libp2p_plaintext as plaintext; pub use libp2p_pnet as pnet; #[cfg(feature = "quic")] #[cfg(not(target_arch = "wasm32"))] -#[deprecated( - note = "`quic` is only in alpha status. Please depend on `libp2p-quic` directly and don't ues the `quic` feature of `libp2p`." -)] -pub mod quic { - pub use libp2p_quic::*; -} +pub use libp2p_quic as quic; #[cfg(feature = "relay")] #[doc(inline)] pub use libp2p_relay as relay; @@ -140,26 +122,26 @@ pub use libp2p_tls as tls; #[cfg(not(target_arch = "wasm32"))] #[doc(inline)] pub use libp2p_uds as uds; -#[cfg(feature = "wasm-ext")] -#[doc(inline)] -pub use libp2p_wasm_ext as wasm_ext; -#[cfg(feature = "webrtc")] -#[cfg_attr(docsrs, doc(cfg(feature = "webrtc")))] +#[cfg(feature = "upnp")] #[cfg(not(target_arch = "wasm32"))] -#[deprecated( - note = "`webrtc` is only in alpha status. Please depend on `libp2p-webrtc` directly and don't ues the `webrtc` feature of `libp2p`." 
-)] -pub mod webrtc { - pub use libp2p_webrtc::*; -} +#[doc(inline)] +pub use libp2p_upnp as upnp; #[cfg(feature = "websocket")] #[cfg(not(target_arch = "wasm32"))] #[doc(inline)] pub use libp2p_websocket as websocket; +#[cfg(feature = "websocket-websys")] +#[doc(inline)] +pub use libp2p_websocket_websys as websocket_websys; +#[cfg(feature = "webtransport-websys")] +#[cfg_attr(docsrs, doc(cfg(feature = "webtransport-websys")))] +#[doc(inline)] +pub use libp2p_webtransport_websys as webtransport_websys; #[cfg(feature = "yamux")] #[doc(inline)] pub use libp2p_yamux as yamux; +mod builder; mod transport_ext; pub mod bandwidth; @@ -167,6 +149,7 @@ pub mod bandwidth; #[cfg(doc)] pub mod tutorials; +pub use self::builder::SwarmBuilder; pub use self::core::{ transport::TransportError, upgrade::{InboundUpgrade, OutboundUpgrade}, @@ -177,122 +160,4 @@ pub use self::swarm::Swarm; pub use self::transport_ext::TransportExt; pub use libp2p_identity as identity; pub use libp2p_identity::PeerId; -pub use libp2p_swarm::StreamProtocol; - -/// Builds a `Transport` based on TCP/IP that supports the most commonly-used features of libp2p: -/// -/// * DNS resolution. -/// * Noise protocol encryption. -/// * Websockets. -/// * Both Yamux and Mplex for substream multiplexing. -/// -/// All async I/O of the transport is based on `async-std`. -/// -/// > **Note**: This `Transport` is not suitable for production usage, as its implementation -/// > reserves the right to support additional protocols or remove deprecated protocols. 
-#[cfg(all( - not(target_arch = "wasm32"), - any( - all(feature = "tcp-async-io", feature = "dns-async-std"), - all(feature = "tcp", feature = "dns", feature = "async-std") - ), - feature = "websocket", - feature = "noise", - feature = "mplex", - feature = "yamux" -))] -#[cfg_attr( - all( - any(feature = "tcp-async-io", feature = "dns-async-std"), - not(feature = "async-std") - ), - deprecated( - since = "0.49.0", - note = "The `tcp-async-io` and `dns-async-std` features are deprecated. Use the new `tcp` and `dns` features together with the `async-std` feature." - ) -)] -pub async fn development_transport( - keypair: identity::Keypair, -) -> std::io::Result> { - let transport = { - let dns_tcp = dns::DnsConfig::system(tcp::async_io::Transport::new( - tcp::Config::new().nodelay(true), - )) - .await?; - let ws_dns_tcp = websocket::WsConfig::new( - dns::DnsConfig::system(tcp::async_io::Transport::new( - tcp::Config::new().nodelay(true), - )) - .await?, - ); - dns_tcp.or_transport(ws_dns_tcp) - }; - - Ok(transport - .upgrade(core::upgrade::Version::V1) - .authenticate(noise::Config::new(&keypair).unwrap()) - .multiplex(core::upgrade::SelectUpgrade::new( - yamux::Config::default(), - #[allow(deprecated)] - mplex::MplexConfig::default(), - )) - .timeout(std::time::Duration::from_secs(20)) - .boxed()) -} - -/// Builds a `Transport` based on TCP/IP that supports the most commonly-used features of libp2p: -/// -/// * DNS resolution. -/// * Noise protocol encryption. -/// * Websockets. -/// * Both Yamux and Mplex for substream multiplexing. -/// -/// All async I/O of the transport is based on `tokio`. -/// -/// > **Note**: This `Transport` is not suitable for production usage, as its implementation -/// > reserves the right to support additional protocols or remove deprecated protocols. 
-#[cfg(all( - not(target_arch = "wasm32"), - any( - all(feature = "tcp-tokio", feature = "dns-tokio"), - all(feature = "tcp", feature = "dns", feature = "tokio") - ), - feature = "websocket", - feature = "noise", - feature = "mplex", - feature = "yamux" -))] -#[cfg_attr( - all( - any(feature = "tcp-tokio", feature = "dns-tokio"), - not(feature = "tokio") - ), - deprecated( - since = "0.49.0", - note = "The `tcp-tokio` and `dns-tokio` features are deprecated. Use the new `tcp` and `dns` feature together with the `tokio` feature." - ) -)] -pub fn tokio_development_transport( - keypair: identity::Keypair, -) -> std::io::Result> { - let transport = { - let dns_tcp = dns::TokioDnsConfig::system(tcp::tokio::Transport::new( - tcp::Config::new().nodelay(true), - ))?; - let ws_dns_tcp = websocket::WsConfig::new(dns::TokioDnsConfig::system( - tcp::tokio::Transport::new(tcp::Config::new().nodelay(true)), - )?); - dns_tcp.or_transport(ws_dns_tcp) - }; - - Ok(transport - .upgrade(core::upgrade::Version::V1) - .authenticate(noise::Config::new(&keypair).unwrap()) - .multiplex(core::upgrade::SelectUpgrade::new( - yamux::Config::default(), - #[allow(deprecated)] - mplex::MplexConfig::default(), - )) - .timeout(std::time::Duration::from_secs(20)) - .boxed()) -} +pub use libp2p_swarm::{Stream, StreamProtocol}; diff --git a/libp2p/src/transport_ext.rs b/libp2p/src/transport_ext.rs index e66d7395a5c..4f07484fc1f 100644 --- a/libp2p/src/transport_ext.rs +++ b/libp2p/src/transport_ext.rs @@ -20,21 +20,20 @@ //! Provides the `TransportExt` trait. +#[allow(deprecated)] +use crate::bandwidth::{BandwidthLogging, BandwidthSinks}; use crate::core::{ muxing::{StreamMuxer, StreamMuxerBox}, transport::Boxed, }; -use crate::{ - bandwidth::{BandwidthLogging, BandwidthSinks}, - Transport, -}; +use crate::Transport; use libp2p_identity::PeerId; use std::sync::Arc; /// Trait automatically implemented on all objects that implement `Transport`. Provides some /// additional utilities. 
pub trait TransportExt: Transport { - /// Adds a layer on the `Transport` that logs all trafic that passes through the streams + /// Adds a layer on the `Transport` that logs all traffic that passes through the streams /// created by it. /// /// This method returns an `Arc` that can be used to retrieve the total number @@ -43,7 +42,7 @@ pub trait TransportExt: Transport { /// # Example /// /// ``` - /// use libp2p_mplex as mplex; + /// use libp2p_yamux as yamux; /// use libp2p_noise as noise; /// use libp2p_tcp as tcp; /// use libp2p::{ @@ -58,14 +57,18 @@ pub trait TransportExt: Transport { /// let transport = tcp::tokio::Transport::new(tcp::Config::default().nodelay(true)) /// .upgrade(upgrade::Version::V1) /// .authenticate( - /// noise::NoiseAuthenticated::xx(&id_keys) + /// noise::Config::new(&id_keys) /// .expect("Signing libp2p-noise static DH keypair failed."), /// ) - /// .multiplex(mplex::MplexConfig::new()) + /// .multiplex(yamux::Config::default()) /// .boxed(); /// /// let (transport, sinks) = transport.with_bandwidth_logging(); /// ``` + #[allow(deprecated)] + #[deprecated( + note = "Use `libp2p::SwarmBuilder::with_bandwidth_metrics` or `libp2p_metrics::BandwidthTransport` instead." + )] fn with_bandwidth_logging(self) -> (Boxed<(PeerId, StreamMuxerBox)>, Arc) where Self: Sized + Send + Unpin + 'static, diff --git a/libp2p/src/tutorials/hole_punching.rs b/libp2p/src/tutorials/hole_punching.rs index 8c898f7f4f4..f9f42432ba4 100644 --- a/libp2p/src/tutorials/hole_punching.rs +++ b/libp2p/src/tutorials/hole_punching.rs @@ -25,7 +25,7 @@ //! post](https://blog.ipfs.io/2022-01-20-libp2p-hole-punching/) to familiarize yourself with libp2p's hole //! punching mechanism on a conceptual level. //! -//! We will be using the [Circuit Relay v2](crate::relay::v2) and the [Direct Connection +//! We will be using the [Circuit Relay](crate::relay) and the [Direct Connection //! Upgrade through Relay (DCUtR)](crate::dcutr) protocol. //! //! 
You will need 3 machines for this tutorial: @@ -54,16 +54,16 @@ //! //! ``` bash //! ## Inside the rust-libp2p repository. -//! cargo build --example relay_v2 -p libp2p-relay +//! cargo build --bin relay-server-example //! ``` //! -//! You can find the binary at `target/debug/examples/relay_v2`. In case you built it locally, copy +//! You can find the binary at `target/debug/relay-server-example`. In case you built it locally, copy //! it to your server. //! //! On your server, start the relay server binary: //! //! ``` bash -//! ./relay_v2 --port 4001 --secret-key-seed 0 +//! ./relay-server-example --port 4001 --secret-key-seed 0 //! ``` //! //! Now let's make sure that the server is public, in other words let's make sure one can reach it @@ -122,16 +122,16 @@ //! //! ``` bash //! ## Inside the rust-libp2p repository. -//! cargo build --example client -p libp2p-dcutr +//! cargo build --bin dcutr-example //! ``` //! -//! You can find the binary at `target/debug/examples/client`. In case you built it locally, copy +//! You can find the binary at `target/debug/dcutr-example`. In case you built it locally, copy //! it to your listening client machine. //! //! On the listening client machine: //! //! ``` bash -//! RUST_LOG=info ./client --secret-key-seed 1 --mode listen --relay-address /ip4/$RELAY_SERVER_IP/tcp/4001/p2p/12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN +//! RUST_LOG=info ./dcutr-example --secret-key-seed 1 --mode listen --relay-address /ip4/$RELAY_SERVER_IP/tcp/4001/p2p/12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN //! //! [2022-05-11T10:38:52Z INFO client] Local peer id: PeerId("XXX") //! [2022-05-11T10:38:52Z INFO client] Listening on "/ip4/127.0.0.1/tcp/44703" @@ -153,7 +153,7 @@ //! ## Connecting to the listening client from the dialing client //! //! ``` bash -//! 
RUST_LOG=info ./client --secret-key-seed 2 --mode dial --relay-address /ip4/$RELAY_SERVER_IP/tcp/4001/p2p/12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN --remote-peer-id 12D3KooWPjceQrSwdWXPyLLeABRXmuqt69Rg3sBYbU1Nft9HyQ6X +//! RUST_LOG=info ./dcutr-example --secret-key-seed 2 --mode dial --relay-address /ip4/$RELAY_SERVER_IP/tcp/4001/p2p/12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN --remote-peer-id 12D3KooWPjceQrSwdWXPyLLeABRXmuqt69Rg3sBYbU1Nft9HyQ6X //! ``` //! //! You should see the following logs appear: @@ -166,18 +166,9 @@ //! [2022-01-30T12:54:10Z INFO client] Established connection to PeerId("12D3KooWPjceQrSwdWXPyLLeABRXmuqt69Rg3sBYbU1Nft9HyQ6X") via Dialer { address: "/ip4/$RELAY_PEER_ID/tcp/4001/p2p/12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN/p2p-circuit/p2p/12D3KooWPjceQrSwdWXPyLLeABRXmuqt69Rg3sBYbU1Nft9HyQ6X", role_override: Dialer } //! ``` //! -//! 2. The listening client initiating a direct connection upgrade for the new relayed connection. -//! Reported by [`dcutr`](crate::dcutr) through -//! [`Event::RemoteInitiatedDirectConnectionUpgrade`](crate::dcutr::Event::RemoteInitiatedDirectConnectionUpgrade). +//! 2. The direct connection upgrade, also known as hole punch, succeeding. +//! Reported by [`dcutr`](crate::dcutr) through [`Event`](crate::dcutr::Event) containing [`Result::Ok`] with the [`ConnectionId`](libp2p_swarm::ConnectionId) of the new direct connection. //! //! ``` ignore -//! [2022-01-30T12:54:11Z INFO client] RemoteInitiatedDirectConnectionUpgrade { remote_peer_id: PeerId("12D3KooWPjceQrSwdWXPyLLeABRXmuqt69Rg3sBYbU1Nft9HyQ6X"), remote_relayed_addr: "/ip4/$RELAY_PEER_ID/tcp/4001/p2p/12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN/p2p-circuit/p2p/12D3KooWPjceQrSwdWXPyLLeABRXmuqt69Rg3sBYbU1Nft9HyQ6X" } -//! ``` -//! -//! 3. The direct connection upgrade, also known as hole punch, succeeding. Reported by -//! [`dcutr`](crate::dcutr) through -//! 
[`Event::RemoteInitiatedDirectConnectionUpgrade`](crate::dcutr::Event::DirectConnectionUpgradeSucceeded). -//! -//! ``` ignore -//! [2022-01-30T12:54:11Z INFO client] DirectConnectionUpgradeSucceeded { remote_peer_id: PeerId("12D3KooWPjceQrSwdWXPyLLeABRXmuqt69Rg3sBYbU1Nft9HyQ6X") } +//! [2022-01-30T12:54:11Z INFO client] Event { remote_peer_id: PeerId("12D3KooWPjceQrSwdWXPyLLeABRXmuqt69Rg3sBYbU1Nft9HyQ6X"), result: Ok(2) } //! ``` diff --git a/libp2p/src/tutorials/ping.rs b/libp2p/src/tutorials/ping.rs index 295e37e24b1..1413531cd72 100644 --- a/libp2p/src/tutorials/ping.rs +++ b/libp2p/src/tutorials/ping.rs @@ -55,9 +55,10 @@ //! edition = "2021" //! //! [dependencies] -//! libp2p = { version = "0.50", features = ["tcp", "dns", "async-std", "noise", "mplex", "yamux", "websocket", "ping", "macros"] } +//! libp2p = { version = "0.52", features = ["tcp", "tls", "dns", "async-std", "noise", "yamux", "websocket", "ping", "macros"] } //! futures = "0.3.21" //! async-std = { version = "1.12.0", features = ["attributes"] } +//! tracing-subscriber = { version = "0.3", features = ["env-filter"] } //! ``` //! //! ## Network identity @@ -70,50 +71,44 @@ //! derived from their public key. Now, replace the contents of main.rs by: //! //! ```rust -//! use libp2p::{identity, PeerId}; //! use std::error::Error; +//! use tracing_subscriber::EnvFilter; //! //! #[async_std::main] //! async fn main() -> Result<(), Box> { -//! let local_key = identity::Keypair::generate_ed25519(); -//! let local_peer_id = PeerId::from(local_key.public()); -//! println!("Local peer id: {local_peer_id:?}"); +//! tracing_subscriber::fmt().with_env_filter(EnvFilter::from_default_env()).init(); +//! +//! let mut swarm = libp2p::SwarmBuilder::with_new_identity(); //! //! Ok(()) //! } //! ``` //! -//! Go ahead and build and run the above code with: `cargo run`. A unique -//! [`PeerId`](crate::PeerId) should be displayed. +//! Go ahead and build and run the above code with: `cargo run`. 
Nothing happening thus far. //! //! ## Transport //! -//! Next up we need to construct a transport. A transport in libp2p provides -//! connection-oriented communication channels (e.g. TCP) as well as upgrades -//! on top of those like authentication and encryption protocols. Technically, -//! a libp2p transport is anything that implements the [`Transport`] trait. -//! -//! Instead of constructing a transport ourselves for this tutorial, we use the -//! convenience function [`development_transport`](crate::development_transport) -//! that creates a TCP transport with [`noise`](crate::noise) for authenticated -//! encryption. -//! -//! Furthermore, [`development_transport`] builds a multiplexed transport, -//! whereby multiple logical substreams can coexist on the same underlying (TCP) -//! connection. For further details on substream multiplexing, take a look at -//! [`crate::core::muxing`] and [`yamux`](crate::yamux). +//! Next up we need to construct a transport. Each transport in libp2p provides encrypted streams. +//! E.g. combining TCP to establish connections, TLS to encrypt these connections and Yamux to run +//! one or more streams on a connection. Another libp2p transport is QUIC, providing encrypted +//! streams out-of-the-box. We will stick to TCP for now. Each of these implement the [`Transport`] +//! trait. //! //! ```rust -//! use libp2p::{identity, PeerId}; //! use std::error::Error; +//! use tracing_subscriber::EnvFilter; //! //! #[async_std::main] //! async fn main() -> Result<(), Box> { -//! let local_key = identity::Keypair::generate_ed25519(); -//! let local_peer_id = PeerId::from(local_key.public()); -//! println!("Local peer id: {local_peer_id:?}"); +//! tracing_subscriber::fmt().with_env_filter(EnvFilter::from_default_env()).init(); //! -//! let transport = libp2p::development_transport(local_key).await?; +//! let mut swarm = libp2p::SwarmBuilder::with_new_identity() +//! .with_async_std() +//! .with_tcp( +//! 
libp2p::tcp::Config::default(), +//! libp2p::tls::Config::new, +//! libp2p::yamux::Config::default, +//! )?; //! //! Ok(()) //! } @@ -124,89 +119,107 @@ //! Now it is time to look at another core trait of rust-libp2p: the //! [`NetworkBehaviour`]. While the previously introduced trait [`Transport`] //! defines _how_ to send bytes on the network, a [`NetworkBehaviour`] defines -//! _what_ bytes to send on the network. +//! _what_ bytes and to _whom_ to send on the network. //! //! To make this more concrete, let's take a look at a simple implementation of //! the [`NetworkBehaviour`] trait: the [`ping::Behaviour`](crate::ping::Behaviour). -//! As you might have guessed, similar to the good old `ping` network tool, +//! As you might have guessed, similar to the good old ICMP `ping` network tool, //! libp2p [`ping::Behaviour`](crate::ping::Behaviour) sends a ping to a peer and expects //! to receive a pong in turn. The [`ping::Behaviour`](crate::ping::Behaviour) does not care _how_ //! the ping and pong messages are sent on the network, whether they are sent via //! TCP, whether they are encrypted via [noise](crate::noise) or just in -//! [plaintext](crate::plaintext). It only cares about _what_ messages are sent -//! on the network. +//! [plaintext](crate::plaintext). It only cares about _what_ messages and to _whom_ to send on the +//! network. //! //! The two traits [`Transport`] and [`NetworkBehaviour`] allow us to cleanly -//! separate _how_ to send bytes from _what_ bytes to send. +//! separate _how_ to send bytes from _what_ bytes and to _whom_ to send. //! //! With the above in mind, let's extend our example, creating a [`ping::Behaviour`](crate::ping::Behaviour) at the end: //! //! ```rust -//! use libp2p::swarm::{keep_alive, NetworkBehaviour}; -//! use libp2p::{identity, ping, PeerId}; +//! use libp2p::ping; +//! use tracing_subscriber::EnvFilter; //! use std::error::Error; //! //! #[async_std::main] //! async fn main() -> Result<(), Box> { -//!
let local_key = identity::Keypair::generate_ed25519(); -//! let local_peer_id = PeerId::from(local_key.public()); -//! println!("Local peer id: {local_peer_id:?}"); -//! -//! let transport = libp2p::development_transport(local_key).await?; +//! tracing_subscriber::fmt().with_env_filter(EnvFilter::from_default_env()).init(); //! -//! let behaviour = Behaviour::default(); +//! let mut swarm = libp2p::SwarmBuilder::with_new_identity() +//! .with_async_std() +//! .with_tcp( +//! libp2p::tcp::Config::default(), +//! libp2p::tls::Config::new, +//! libp2p::yamux::Config::default, +//! )? +//! .with_behaviour(|_| ping::Behaviour::default())?; //! //! Ok(()) //! } -//! -//! /// Our network behaviour. -//! /// -//! /// For illustrative purposes, this includes the [`KeepAlive`](behaviour::KeepAlive) behaviour so a continuous sequence of -//! /// pings can be observed. -//! #[derive(NetworkBehaviour, Default)] -//! struct Behaviour { -//! keep_alive: keep_alive::Behaviour, -//! ping: ping::Behaviour, -//! } //! ``` //! //! ## Swarm //! -//! Now that we have a [`Transport`] and a [`NetworkBehaviour`], we need -//! something that connects the two, allowing both to make progress. This job is -//! carried out by a [`Swarm`]. Put simply, a [`Swarm`] drives both a -//! [`Transport`] and a [`NetworkBehaviour`] forward, passing commands from the -//! [`NetworkBehaviour`] to the [`Transport`] as well as events from the -//! [`Transport`] to the [`NetworkBehaviour`]. +//! Now that we have a [`Transport`] and a [`NetworkBehaviour`], we can build the [`Swarm`] +//! which connects the two, allowing both to make progress. Put simply, a [`Swarm`] drives both a +//! [`Transport`] and a [`NetworkBehaviour`] forward, passing commands from the [`NetworkBehaviour`] +//! to the [`Transport`] as well as events from the [`Transport`] to the [`NetworkBehaviour`]. //! //! ```rust -//! use libp2p::swarm::{keep_alive, NetworkBehaviour, Swarm}; -//! use libp2p::{identity, ping, PeerId}; +//! 
use libp2p::ping; //! use std::error::Error; +//! use tracing_subscriber::EnvFilter; //! //! #[async_std::main] //! async fn main() -> Result<(), Box> { -//! let local_key = identity::Keypair::generate_ed25519(); -//! let local_peer_id = PeerId::from(local_key.public()); -//! println!("Local peer id: {local_peer_id:?}"); +//! tracing_subscriber::fmt().with_env_filter(EnvFilter::from_default_env()).init(); +//! +//! let mut swarm = libp2p::SwarmBuilder::with_new_identity() +//! .with_async_std() +//! .with_tcp( +//! libp2p::tcp::Config::default(), +//! libp2p::tls::Config::new, +//! libp2p::yamux::Config::default, +//! )? +//! .with_behaviour(|_| ping::Behaviour::default())? +//! .build(); +//! +//! Ok(()) +//! } +//! ``` //! -//! let transport = libp2p::development_transport(local_key).await?; +//! ## Idle connection timeout //! -//! let behaviour = Behaviour::default(); +//! Now, for this example in particular, we need to set the idle connection timeout. +//! Otherwise, the connection will be closed immediately. //! -//! let mut swarm = Swarm::with_async_std_executor(transport, behaviour, local_peer_id); +//! Whether you need to set this in your application too depends on your use case. +//! Typically, connections are kept alive if they are "in use" by a certain protocol. +//! The ping protocol however is only an "auxiliary" kind of protocol. +//! Thus, without any other behaviour in place, we would not be able to observe the pings. //! -//! Ok(()) -//! } +//! ```rust +//! use libp2p::ping; +//! use std::error::Error; +//! use std::time::Duration; +//! use tracing_subscriber::EnvFilter; //! -//! /// Our network behaviour. -//! /// -//! /// For illustrative purposes, this includes the [`KeepAlive`](behaviour:: -//! /// KeepAlive) behaviour so a continuous sequence of pings can be observed. -//! #[derive(NetworkBehaviour, Default)] -//! struct Behaviour { -//! keep_alive: keep_alive::Behaviour, -//! ping: ping::Behaviour, +//! #[async_std::main] //!
async fn main() -> Result<(), Box> { +//! tracing_subscriber::fmt().with_env_filter(EnvFilter::from_default_env()).init(); +//! +//! let mut swarm = libp2p::SwarmBuilder::with_new_identity() +//! .with_async_std() +//! .with_tcp( +//! libp2p::tcp::Config::default(), +//! libp2p::tls::Config::new, +//! libp2p::yamux::Config::default, +//! )? +//! .with_behaviour(|_| ping::Behaviour::default())? +//! .with_swarm_config(|cfg| cfg.with_idle_connection_timeout(Duration::from_secs(30))) // Allows us to observe pings for 30 seconds. +//! .build(); +//! +//! Ok(()) //! } //! ``` //! @@ -237,21 +250,25 @@ //! remote peer. //! //! ```rust -//! use libp2p::swarm::{keep_alive, NetworkBehaviour, Swarm}; -//! use libp2p::{identity, ping, Multiaddr, PeerId}; +//! use libp2p::{ping, Multiaddr}; //! use std::error::Error; +//! use std::time::Duration; +//! use tracing_subscriber::EnvFilter; //! //! #[async_std::main] //! async fn main() -> Result<(), Box> { -//! let local_key = identity::Keypair::generate_ed25519(); -//! let local_peer_id = PeerId::from(local_key.public()); -//! println!("Local peer id: {local_peer_id:?}"); -//! -//! let transport = libp2p::development_transport(local_key).await?; -//! -//! let behaviour = Behaviour::default(); -//! -//! let mut swarm = Swarm::with_async_std_executor(transport, behaviour, local_peer_id); +//! tracing_subscriber::fmt().with_env_filter(EnvFilter::from_default_env()).init(); +//! +//! let mut swarm = libp2p::SwarmBuilder::with_new_identity() +//! .with_async_std() +//! .with_tcp( +//! libp2p::tcp::Config::default(), +//! libp2p::tls::Config::new, +//! libp2p::yamux::Config::default, +//! )? +//! .with_behaviour(|_| ping::Behaviour::default())? +//! .with_swarm_config(|cfg| cfg.with_idle_connection_timeout(Duration::from_secs(30))) // Allows us to observe pings for 30 seconds. +//! .build(); //! //! // Tell the swarm to listen on all interfaces and a random, OS-assigned //! // port. @@ -267,16 +284,6 @@ //! //! Ok(()) //! } -//! -//! 
/// Our network behaviour. -//! /// -//! /// For illustrative purposes, this includes the [`KeepAlive`](behaviour::KeepAlive) behaviour so a continuous sequence of -//! /// pings can be observed. -//! #[derive(NetworkBehaviour, Default)] -//! struct Behaviour { -//! keep_alive: keep_alive::Behaviour, -//! ping: ping::Behaviour, -//! } //! ``` //! //! ## Continuously polling the Swarm @@ -287,21 +294,26 @@ //! //! ```no_run //! use futures::prelude::*; -//! use libp2p::swarm::{keep_alive, NetworkBehaviour, Swarm, SwarmEvent}; -//! use libp2p::{identity, ping, Multiaddr, PeerId}; +//! use libp2p::swarm::SwarmEvent; +//! use libp2p::{ping, Multiaddr}; //! use std::error::Error; +//! use std::time::Duration; +//! use tracing_subscriber::EnvFilter; //! //! #[async_std::main] //! async fn main() -> Result<(), Box> { -//! let local_key = identity::Keypair::generate_ed25519(); -//! let local_peer_id = PeerId::from(local_key.public()); -//! println!("Local peer id: {local_peer_id:?}"); -//! -//! let transport = libp2p::development_transport(local_key).await?; -//! -//! let behaviour = Behaviour::default(); -//! -//! let mut swarm = Swarm::with_async_std_executor(transport, behaviour, local_peer_id); +//! tracing_subscriber::fmt().with_env_filter(EnvFilter::from_default_env()).init(); +//! +//! let mut swarm = libp2p::SwarmBuilder::with_new_identity() +//! .with_async_std() +//! .with_tcp( +//! libp2p::tcp::Config::default(), +//! libp2p::tls::Config::new, +//! libp2p::yamux::Config::default, +//! )? +//! .with_behaviour(|_| ping::Behaviour::default())? +//! .with_swarm_config(|cfg| cfg.with_idle_connection_timeout(Duration::from_secs(30))) // Allows us to observe pings for 30 seconds. +//! .build(); //! //! // Tell the swarm to listen on all interfaces and a random, OS-assigned //! // port. @@ -323,16 +335,6 @@ //! } //! } //! } -//! -//! /// Our network behaviour. -//! /// -//! 
/// For illustrative purposes, this includes the [`KeepAlive`](behaviour::KeepAlive) behaviour so a continuous sequence of -//! /// pings can be observed. -//! #[derive(NetworkBehaviour, Default)] -//! struct Behaviour { -//! keep_alive: keep_alive::Behaviour, -//! ping: ping::Behaviour, -//! } //! ``` //! //! ## Running two nodes @@ -349,9 +351,8 @@ //! cargo run --example ping //! ``` //! -//! It will print the PeerId and the new listening addresses, e.g. +//! It will print the new listening addresses, e.g. //! ```sh -//! Local peer id: PeerId("12D3KooWT1As4mwh3KYBnNTw9bSrRbYQGJTm9SSte82JSumqgCQG") //! Listening on "/ip4/127.0.0.1/tcp/24915" //! Listening on "/ip4/192.168.178.25/tcp/24915" //! Listening on "/ip4/172.17.0.1/tcp/24915" @@ -378,4 +379,3 @@ //! [`Transport`]: crate::core::Transport //! [`PeerId`]: crate::core::PeerId //! [`Swarm`]: crate::swarm::Swarm -//! [`development_transport`]: crate::development_transport diff --git a/misc/allow-block-list/CHANGELOG.md b/misc/allow-block-list/CHANGELOG.md index 72eea3460df..7778e924886 100644 --- a/misc/allow-block-list/CHANGELOG.md +++ b/misc/allow-block-list/CHANGELOG.md @@ -1,4 +1,7 @@ -## 0.2.0 - unreleased +## 0.3.0 + + +## 0.2.0 - Raise MSRV to 1.65. See [PR 3715]. diff --git a/misc/allow-block-list/Cargo.toml b/misc/allow-block-list/Cargo.toml index 897087e3b0b..c620e7f4a2b 100644 --- a/misc/allow-block-list/Cargo.toml +++ b/misc/allow-block-list/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-allow-block-list" edition = "2021" rust-version = { workspace = true } description = "Allow/block list connection management for libp2p." 
-version = "0.2.0" +version = "0.3.0" license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" keywords = ["peer-to-peer", "libp2p", "networking"] @@ -17,5 +17,8 @@ void = "1" [dev-dependencies] async-std = { version = "1.12.0", features = ["attributes"] } -libp2p-swarm-derive = { workspace = true } -libp2p-swarm-test = { workspace = true } +libp2p-swarm-derive = { path = "../../swarm-derive" } +libp2p-swarm-test = { path = "../../swarm-test" } + +[lints] +workspace = true diff --git a/misc/allow-block-list/src/lib.rs b/misc/allow-block-list/src/lib.rs index d501ab73324..c1d31433db1 100644 --- a/misc/allow-block-list/src/lib.rs +++ b/misc/allow-block-list/src/lib.rs @@ -64,8 +64,8 @@ use libp2p_core::{Endpoint, Multiaddr}; use libp2p_identity::PeerId; use libp2p_swarm::{ - dummy, CloseConnection, ConnectionDenied, ConnectionId, FromSwarm, NetworkBehaviour, - PollParameters, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, + dummy, CloseConnection, ConnectionDenied, ConnectionId, FromSwarm, NetworkBehaviour, THandler, + THandlerInEvent, THandlerOutEvent, ToSwarm, }; use std::collections::{HashSet, VecDeque}; use std::fmt; @@ -191,7 +191,7 @@ where S: Enforce, { type ConnectionHandler = dummy::ConnectionHandler; - type OutEvent = Void; + type ToSwarm = Void; fn handle_established_inbound_connection( &mut self, @@ -231,22 +231,7 @@ where Ok(dummy::ConnectionHandler) } - fn on_swarm_event(&mut self, event: FromSwarm) { - match event { - FromSwarm::ConnectionClosed(_) => {} - FromSwarm::ConnectionEstablished(_) => {} - FromSwarm::AddressChange(_) => {} - FromSwarm::DialFailure(_) => {} - FromSwarm::ListenFailure(_) => {} - FromSwarm::NewListener(_) => {} - FromSwarm::NewListenAddr(_) => {} - FromSwarm::ExpiredListenAddr(_) => {} - FromSwarm::ListenerError(_) => {} - FromSwarm::ListenerClosed(_) => {} - FromSwarm::NewExternalAddr(_) => {} - FromSwarm::ExpiredExternalAddr(_) => {} - } - } + fn on_swarm_event(&mut self, _event: FromSwarm) {} fn 
on_connection_handler_event( &mut self, @@ -260,8 +245,7 @@ where fn poll( &mut self, cx: &mut Context<'_>, - _: &mut impl PollParameters, - ) -> Poll>> { + ) -> Poll>> { if let Some(peer) = self.close_connections.pop_front() { return Poll::Ready(ToSwarm::CloseConnection { peer_id: peer, @@ -282,14 +266,11 @@ mod tests { #[async_std::test] async fn cannot_dial_blocked_peer() { - let mut dialer = Swarm::new_ephemeral(|_| Behaviour::::new()); - let mut listener = Swarm::new_ephemeral(|_| Behaviour::::new()); - listener.listen().await; + let mut dialer = Swarm::new_ephemeral(|_| Behaviour::::default()); + let mut listener = Swarm::new_ephemeral(|_| Behaviour::::default()); + listener.listen().with_memory_addr_external().await; - dialer - .behaviour_mut() - .list - .block_peer(*listener.local_peer_id()); + dialer.behaviour_mut().block_peer(*listener.local_peer_id()); let DialError::Denied { cause } = dial(&mut dialer, &listener).unwrap_err() else { panic!("unexpected dial error") @@ -299,17 +280,13 @@ mod tests { #[async_std::test] async fn can_dial_unblocked_peer() { - let mut dialer = Swarm::new_ephemeral(|_| Behaviour::::new()); - let mut listener = Swarm::new_ephemeral(|_| Behaviour::::new()); - listener.listen().await; + let mut dialer = Swarm::new_ephemeral(|_| Behaviour::::default()); + let mut listener = Swarm::new_ephemeral(|_| Behaviour::::default()); + listener.listen().with_memory_addr_external().await; + dialer.behaviour_mut().block_peer(*listener.local_peer_id()); dialer .behaviour_mut() - .list - .block_peer(*listener.local_peer_id()); - dialer - .behaviour_mut() - .list .unblock_peer(*listener.local_peer_id()); dial(&mut dialer, &listener).unwrap(); @@ -317,14 +294,11 @@ mod tests { #[async_std::test] async fn blocked_peer_cannot_dial_us() { - let mut dialer = Swarm::new_ephemeral(|_| Behaviour::::new()); - let mut listener = Swarm::new_ephemeral(|_| Behaviour::::new()); - listener.listen().await; + let mut dialer = Swarm::new_ephemeral(|_| 
Behaviour::::default()); + let mut listener = Swarm::new_ephemeral(|_| Behaviour::::default()); + listener.listen().with_memory_addr_external().await; - listener - .behaviour_mut() - .list - .block_peer(*dialer.local_peer_id()); + listener.behaviour_mut().block_peer(*dialer.local_peer_id()); dial(&mut dialer, &listener).unwrap(); async_std::task::spawn(dialer.loop_on_next()); @@ -342,20 +316,24 @@ mod tests { #[async_std::test] async fn connections_get_closed_upon_blocked() { - let mut dialer = Swarm::new_ephemeral(|_| Behaviour::::new()); - let mut listener = Swarm::new_ephemeral(|_| Behaviour::::new()); - listener.listen().await; + let mut dialer = Swarm::new_ephemeral(|_| Behaviour::::default()); + let mut listener = Swarm::new_ephemeral(|_| Behaviour::::default()); + listener.listen().with_memory_addr_external().await; dialer.connect(&mut listener).await; - dialer - .behaviour_mut() - .list - .block_peer(*listener.local_peer_id()); + dialer.behaviour_mut().block_peer(*listener.local_peer_id()); let ( - [SwarmEvent::ConnectionClosed { peer_id: closed_dialer_peer, .. }], - [SwarmEvent::ConnectionClosed { peer_id: closed_listener_peer, .. }] - ) = libp2p_swarm_test::drive(&mut dialer, &mut listener).await else { + [SwarmEvent::ConnectionClosed { + peer_id: closed_dialer_peer, + .. + }], + [SwarmEvent::ConnectionClosed { + peer_id: closed_listener_peer, + .. 
+ }], + ) = libp2p_swarm_test::drive(&mut dialer, &mut listener).await + else { panic!("unexpected events") }; assert_eq!(closed_dialer_peer, *listener.local_peer_id()); @@ -364,35 +342,28 @@ mod tests { #[async_std::test] async fn cannot_dial_peer_unless_allowed() { - let mut dialer = Swarm::new_ephemeral(|_| Behaviour::::new()); - let mut listener = Swarm::new_ephemeral(|_| Behaviour::::new()); - listener.listen().await; + let mut dialer = Swarm::new_ephemeral(|_| Behaviour::::default()); + let mut listener = Swarm::new_ephemeral(|_| Behaviour::::default()); + listener.listen().with_memory_addr_external().await; let DialError::Denied { cause } = dial(&mut dialer, &listener).unwrap_err() else { panic!("unexpected dial error") }; assert!(cause.downcast::().is_ok()); - dialer - .behaviour_mut() - .list - .allow_peer(*listener.local_peer_id()); + dialer.behaviour_mut().allow_peer(*listener.local_peer_id()); assert!(dial(&mut dialer, &listener).is_ok()); } #[async_std::test] async fn cannot_dial_disallowed_peer() { - let mut dialer = Swarm::new_ephemeral(|_| Behaviour::::new()); - let mut listener = Swarm::new_ephemeral(|_| Behaviour::::new()); - listener.listen().await; + let mut dialer = Swarm::new_ephemeral(|_| Behaviour::::default()); + let mut listener = Swarm::new_ephemeral(|_| Behaviour::::default()); + listener.listen().with_memory_addr_external().await; + dialer.behaviour_mut().allow_peer(*listener.local_peer_id()); dialer .behaviour_mut() - .list - .allow_peer(*listener.local_peer_id()); - dialer - .behaviour_mut() - .list .disallow_peer(*listener.local_peer_id()); let DialError::Denied { cause } = dial(&mut dialer, &listener).unwrap_err() else { @@ -403,28 +374,35 @@ mod tests { #[async_std::test] async fn not_allowed_peer_cannot_dial_us() { - let mut dialer = Swarm::new_ephemeral(|_| Behaviour::::new()); - let mut listener = Swarm::new_ephemeral(|_| Behaviour::::new()); - listener.listen().await; + let mut dialer = Swarm::new_ephemeral(|_| 
Behaviour::::default()); + let mut listener = Swarm::new_ephemeral(|_| Behaviour::::default()); + listener.listen().with_memory_addr_external().await; dialer .dial( DialOpts::unknown_peer_id() - .address( - listener - .external_addresses() - .map(|a| a.addr.clone()) - .next() - .unwrap(), - ) + .address(listener.external_addresses().next().cloned().unwrap()) .build(), ) .unwrap(); let ( - [SwarmEvent::OutgoingConnectionError { error: DialError::Denied { cause: outgoing_cause }, .. }], - [_, _, _, SwarmEvent::IncomingConnectionError { error: ListenError::Denied { cause: incoming_cause }, .. }], - ) = libp2p_swarm_test::drive(&mut dialer, &mut listener).await else { + [SwarmEvent::OutgoingConnectionError { + error: + DialError::Denied { + cause: outgoing_cause, + }, + .. + }], + [_, SwarmEvent::IncomingConnectionError { + error: + ListenError::Denied { + cause: incoming_cause, + }, + .. + }], + ) = libp2p_swarm_test::drive(&mut dialer, &mut listener).await + else { panic!("unexpected events") }; assert!(outgoing_cause.downcast::().is_ok()); @@ -433,28 +411,28 @@ mod tests { #[async_std::test] async fn connections_get_closed_upon_disallow() { - let mut dialer = Swarm::new_ephemeral(|_| Behaviour::::new()); - let mut listener = Swarm::new_ephemeral(|_| Behaviour::::new()); - listener.listen().await; - dialer - .behaviour_mut() - .list - .allow_peer(*listener.local_peer_id()); - listener - .behaviour_mut() - .list - .allow_peer(*dialer.local_peer_id()); + let mut dialer = Swarm::new_ephemeral(|_| Behaviour::::default()); + let mut listener = Swarm::new_ephemeral(|_| Behaviour::::default()); + listener.listen().with_memory_addr_external().await; + dialer.behaviour_mut().allow_peer(*listener.local_peer_id()); + listener.behaviour_mut().allow_peer(*dialer.local_peer_id()); dialer.connect(&mut listener).await; dialer .behaviour_mut() - .list .disallow_peer(*listener.local_peer_id()); let ( - [SwarmEvent::ConnectionClosed { peer_id: closed_dialer_peer, .. 
}], - [SwarmEvent::ConnectionClosed { peer_id: closed_listener_peer, .. }] - ) = libp2p_swarm_test::drive(&mut dialer, &mut listener).await else { + [SwarmEvent::ConnectionClosed { + peer_id: closed_dialer_peer, + .. + }], + [SwarmEvent::ConnectionClosed { + peer_id: closed_listener_peer, + .. + }], + ) = libp2p_swarm_test::drive(&mut dialer, &mut listener).await + else { panic!("unexpected events") }; assert_eq!(closed_dialer_peer, *listener.local_peer_id()); @@ -470,36 +448,8 @@ mod tests { { dialer.dial( DialOpts::peer_id(*listener.local_peer_id()) - .addresses( - listener - .external_addresses() - .map(|a| a.addr.clone()) - .collect(), - ) + .addresses(listener.external_addresses().cloned().collect()) .build(), ) } - - #[derive(libp2p_swarm_derive::NetworkBehaviour)] - #[behaviour(prelude = "libp2p_swarm::derive_prelude")] - struct Behaviour { - list: super::Behaviour, - keep_alive: libp2p_swarm::keep_alive::Behaviour, - } - - impl Behaviour - where - S: Default, - { - fn new() -> Self { - Self { - list: super::Behaviour { - waker: None, - close_connections: VecDeque::new(), - state: S::default(), - }, - keep_alive: libp2p_swarm::keep_alive::Behaviour, - } - } - } } diff --git a/misc/connection-limits/CHANGELOG.md b/misc/connection-limits/CHANGELOG.md index b3a1028e768..4654281a83e 100644 --- a/misc/connection-limits/CHANGELOG.md +++ b/misc/connection-limits/CHANGELOG.md @@ -1,4 +1,26 @@ -## 0.2.0 - unreleased +## 0.3.1 + +- Add function to mutate `ConnectionLimits`. + See [PR 4964](https://github.com/libp2p/rust-libp2p/pull/4964). + +## 0.3.0 + + +## 0.2.1 + +- Do not count a connection as established when it is denied by another sibling `NetworkBehaviour`. + In other words, do not increase established connection counter in `handle_established_outbound_connection` or `handle_established_inbound_connection`, but in `FromSwarm::ConnectionEstablished` instead. + + See [PR 4250]. 
+ +- Decrease `pending_inbound_connections` on `FromSwarm::ListenFailure` and `pending_outbound_connections` on `FromSwarm::DialFailure`. + + See [PR 4250]. + +[PR 4250]: https://github.com/libp2p/rust-libp2p/pull/4250 + +## 0.2.0 + - Raise MSRV to 1.65. See [PR 3715]. diff --git a/misc/connection-limits/Cargo.toml b/misc/connection-limits/Cargo.toml index dfc5bdad5ab..8ecb0005cb1 100644 --- a/misc/connection-limits/Cargo.toml +++ b/misc/connection-limits/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-connection-limits" edition = "2021" rust-version = { workspace = true } description = "Connection limits for libp2p." -version = "0.2.0" +version = "0.3.1" license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" keywords = ["peer-to-peer", "libp2p", "networking"] @@ -19,7 +19,10 @@ void = "1" async-std = { version = "1.12.0", features = ["attributes"] } libp2p-identify = { workspace = true } libp2p-ping = { workspace = true } -libp2p-swarm-derive = { workspace = true } -libp2p-swarm-test = { workspace = true } +libp2p-swarm-derive = { path = "../../swarm-derive" } +libp2p-swarm-test = { path = "../../swarm-test" } quickcheck = { workspace = true } rand = "0.8.5" + +[lints] +workspace = true diff --git a/misc/connection-limits/src/lib.rs b/misc/connection-limits/src/lib.rs index b781e83d92d..dbe68a8ad11 100644 --- a/misc/connection-limits/src/lib.rs +++ b/misc/connection-limits/src/lib.rs @@ -18,11 +18,12 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use libp2p_core::{Endpoint, Multiaddr}; +use libp2p_core::{ConnectedPoint, Endpoint, Multiaddr}; use libp2p_identity::PeerId; use libp2p_swarm::{ - dummy, ConnectionClosed, ConnectionDenied, ConnectionId, FromSwarm, NetworkBehaviour, - PollParameters, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, + behaviour::{ConnectionEstablished, DialFailure, ListenFailure}, + dummy, ConnectionClosed, ConnectionDenied, ConnectionId, FromSwarm, NetworkBehaviour, THandler, + THandlerInEvent, THandlerOutEvent, ToSwarm, }; use std::collections::{HashMap, HashSet}; use std::fmt; @@ -36,7 +37,7 @@ use void::Void; /// If a connection is denied due to a limit, either a [`SwarmEvent::IncomingConnectionError`](libp2p_swarm::SwarmEvent::IncomingConnectionError) /// or [`SwarmEvent::OutgoingConnectionError`](libp2p_swarm::SwarmEvent::OutgoingConnectionError) will be emitted. /// The [`ListenError::Denied`](libp2p_swarm::ListenError::Denied) and respectively the [`DialError::Denied`](libp2p_swarm::DialError::Denied) variant -/// contain a [`ConnectionDenied`](libp2p_swarm::ConnectionDenied) type that can be downcast to [`Exceeded`] error if (and only if) **this** +/// contain a [`ConnectionDenied`] type that can be downcast to [`Exceeded`] error if (and only if) **this** /// behaviour denied the connection. /// /// If you employ multiple [`NetworkBehaviour`]s that manage connections, it may also be a different error. @@ -79,21 +80,22 @@ impl Behaviour { } } - fn check_limit( - &mut self, - limit: Option, - current: usize, - kind: Kind, - ) -> Result<(), ConnectionDenied> { - let limit = limit.unwrap_or(u32::MAX); - let current = current as u32; + /// Returns a mutable reference to [`ConnectionLimits`]. + /// > **Note**: A new limit will not be enforced against existing connections. 
+ pub fn limits_mut(&mut self) -> &mut ConnectionLimits { + &mut self.limits + } +} - if current >= limit { - return Err(ConnectionDenied::new(Exceeded { limit, kind })); - } +fn check_limit(limit: Option, current: usize, kind: Kind) -> Result<(), ConnectionDenied> { + let limit = limit.unwrap_or(u32::MAX); + let current = current as u32; - Ok(()) + if current >= limit { + return Err(ConnectionDenied::new(Exceeded { limit, kind })); } + + Ok(()) } /// A connection limit has been exceeded. @@ -201,7 +203,7 @@ impl ConnectionLimits { impl NetworkBehaviour for Behaviour { type ConnectionHandler = dummy::ConnectionHandler; - type OutEvent = Void; + type ToSwarm = Void; fn handle_pending_inbound_connection( &mut self, @@ -209,7 +211,7 @@ impl NetworkBehaviour for Behaviour { _: &Multiaddr, _: &Multiaddr, ) -> Result<(), ConnectionDenied> { - self.check_limit( + check_limit( self.limits.max_pending_incoming, self.pending_inbound_connections.len(), Kind::PendingIncoming, @@ -229,12 +231,12 @@ impl NetworkBehaviour for Behaviour { ) -> Result, ConnectionDenied> { self.pending_inbound_connections.remove(&connection_id); - self.check_limit( + check_limit( self.limits.max_established_incoming, self.established_inbound_connections.len(), Kind::EstablishedIncoming, )?; - self.check_limit( + check_limit( self.limits.max_established_per_peer, self.established_per_peer .get(&peer) @@ -242,19 +244,13 @@ impl NetworkBehaviour for Behaviour { .unwrap_or(0), Kind::EstablishedPerPeer, )?; - self.check_limit( + check_limit( self.limits.max_established_total, self.established_inbound_connections.len() + self.established_outbound_connections.len(), Kind::EstablishedTotal, )?; - self.established_inbound_connections.insert(connection_id); - self.established_per_peer - .entry(peer) - .or_default() - .insert(connection_id); - Ok(dummy::ConnectionHandler) } @@ -265,7 +261,7 @@ impl NetworkBehaviour for Behaviour { _: &[Multiaddr], _: Endpoint, ) -> Result, ConnectionDenied> { - 
self.check_limit( + check_limit( self.limits.max_pending_outgoing, self.pending_outbound_connections.len(), Kind::PendingOutgoing, @@ -285,12 +281,12 @@ impl NetworkBehaviour for Behaviour { ) -> Result, ConnectionDenied> { self.pending_outbound_connections.remove(&connection_id); - self.check_limit( + check_limit( self.limits.max_established_outgoing, self.established_outbound_connections.len(), Kind::EstablishedOutgoing, )?; - self.check_limit( + check_limit( self.limits.max_established_per_peer, self.established_per_peer .get(&peer) @@ -298,23 +294,17 @@ impl NetworkBehaviour for Behaviour { .unwrap_or(0), Kind::EstablishedPerPeer, )?; - self.check_limit( + check_limit( self.limits.max_established_total, self.established_inbound_connections.len() + self.established_outbound_connections.len(), Kind::EstablishedTotal, )?; - self.established_outbound_connections.insert(connection_id); - self.established_per_peer - .entry(peer) - .or_default() - .insert(connection_id); - Ok(dummy::ConnectionHandler) } - fn on_swarm_event(&mut self, event: FromSwarm) { + fn on_swarm_event(&mut self, event: FromSwarm) { match event { FromSwarm::ConnectionClosed(ConnectionClosed { peer_id, @@ -328,17 +318,33 @@ impl NetworkBehaviour for Behaviour { .or_default() .remove(&connection_id); } - FromSwarm::ConnectionEstablished(_) => {} - FromSwarm::AddressChange(_) => {} - FromSwarm::DialFailure(_) => {} - FromSwarm::ListenFailure(_) => {} - FromSwarm::NewListener(_) => {} - FromSwarm::NewListenAddr(_) => {} - FromSwarm::ExpiredListenAddr(_) => {} - FromSwarm::ListenerError(_) => {} - FromSwarm::ListenerClosed(_) => {} - FromSwarm::NewExternalAddr(_) => {} - FromSwarm::ExpiredExternalAddr(_) => {} + FromSwarm::ConnectionEstablished(ConnectionEstablished { + peer_id, + endpoint, + connection_id, + .. + }) => { + match endpoint { + ConnectedPoint::Listener { .. } => { + self.established_inbound_connections.insert(connection_id); + } + ConnectedPoint::Dialer { .. 
} => { + self.established_outbound_connections.insert(connection_id); + } + } + + self.established_per_peer + .entry(peer_id) + .or_default() + .insert(connection_id); + } + FromSwarm::DialFailure(DialFailure { connection_id, .. }) => { + self.pending_outbound_connections.remove(&connection_id); + } + FromSwarm::ListenFailure(ListenFailure { connection_id, .. }) => { + self.pending_inbound_connections.remove(&connection_id); + } + _ => {} } } @@ -351,11 +357,7 @@ impl NetworkBehaviour for Behaviour { void::unreachable(event) } - fn poll( - &mut self, - _: &mut Context<'_>, - _: &mut impl PollParameters, - ) -> Poll>> { + fn poll(&mut self, _: &mut Context<'_>) -> Poll>> { Poll::Pending } } @@ -363,7 +365,10 @@ impl NetworkBehaviour for Behaviour { #[cfg(test)] mod tests { use super::*; - use libp2p_swarm::{dial_opts::DialOpts, DialError, ListenError, Swarm, SwarmEvent}; + use libp2p_swarm::{ + behaviour::toggle::Toggle, dial_opts::DialOpts, dial_opts::PeerCondition, DialError, + ListenError, Swarm, SwarmEvent, + }; use libp2p_swarm_test::SwarmExt; use quickcheck::*; @@ -386,6 +391,8 @@ mod tests { network .dial( DialOpts::peer_id(target) + // Dial always, even if already dialing or connected. 
+ .condition(PeerCondition::Always) .addresses(vec![addr.clone()]) .build(), ) @@ -393,7 +400,12 @@ mod tests { } match network - .dial(DialOpts::peer_id(target).addresses(vec![addr]).build()) + .dial( + DialOpts::peer_id(target) + .condition(PeerCondition::Always) + .addresses(vec![addr]) + .build(), + ) .expect_err("Unexpected dialing success.") { DialError::Denied { cause } => { @@ -429,7 +441,7 @@ mod tests { }); async_std::task::block_on(async { - let (listen_addr, _) = swarm1.listen().await; + let (listen_addr, _) = swarm1.listen().with_memory_addr_external().await; for _ in 0..limit { swarm2.connect(&mut swarm1).await; @@ -465,19 +477,121 @@ mod tests { quickcheck(prop as fn(_)); } + /// Another sibling [`NetworkBehaviour`] implementation might deny established connections in + /// [`handle_established_outbound_connection`] or [`handle_established_inbound_connection`]. + /// [`Behaviour`] must not increase the established counters in + /// [`handle_established_outbound_connection`] or [`handle_established_inbound_connection`], but + /// in [`SwarmEvent::ConnectionEstablished`] as the connection might still be denied by a + /// sibling [`NetworkBehaviour`] in the former case. Only in the latter case + /// ([`SwarmEvent::ConnectionEstablished`]) can the connection be seen as established. + #[test] + fn support_other_behaviour_denying_connection() { + let mut swarm1 = Swarm::new_ephemeral(|_| { + Behaviour::new_with_connection_denier(ConnectionLimits::default()) + }); + let mut swarm2 = Swarm::new_ephemeral(|_| Behaviour::new(ConnectionLimits::default())); + + async_std::task::block_on(async { + // Have swarm2 dial swarm1. + let (listen_addr, _) = swarm1.listen().await; + swarm2.dial(listen_addr).unwrap(); + async_std::task::spawn(swarm2.loop_on_next()); + + // Wait for the ConnectionDenier of swarm1 to deny the established connection. 
+ let cause = swarm1 + .wait(|event| match event { + SwarmEvent::IncomingConnectionError { + error: ListenError::Denied { cause }, + .. + } => Some(cause), + _ => None, + }) + .await; + + cause.downcast::().unwrap(); + + assert_eq!( + 0, + swarm1 + .behaviour_mut() + .limits + .established_inbound_connections + .len(), + "swarm1 connection limit behaviour to not count denied established connection as established connection" + ) + }); + } + #[derive(libp2p_swarm_derive::NetworkBehaviour)] #[behaviour(prelude = "libp2p_swarm::derive_prelude")] struct Behaviour { limits: super::Behaviour, - keep_alive: libp2p_swarm::keep_alive::Behaviour, + connection_denier: Toggle, } impl Behaviour { fn new(limits: ConnectionLimits) -> Self { Self { limits: super::Behaviour::new(limits), - keep_alive: libp2p_swarm::keep_alive::Behaviour, + connection_denier: None.into(), + } + } + fn new_with_connection_denier(limits: ConnectionLimits) -> Self { + Self { + limits: super::Behaviour::new(limits), + connection_denier: Some(ConnectionDenier {}).into(), } } } + + struct ConnectionDenier {} + + impl NetworkBehaviour for ConnectionDenier { + type ConnectionHandler = dummy::ConnectionHandler; + type ToSwarm = Void; + + fn handle_established_inbound_connection( + &mut self, + _connection_id: ConnectionId, + _peer: PeerId, + _local_addr: &Multiaddr, + _remote_addr: &Multiaddr, + ) -> Result, ConnectionDenied> { + Err(ConnectionDenied::new(std::io::Error::new( + std::io::ErrorKind::Other, + "ConnectionDenier", + ))) + } + + fn handle_established_outbound_connection( + &mut self, + _connection_id: ConnectionId, + _peer: PeerId, + _addr: &Multiaddr, + _role_override: Endpoint, + ) -> Result, ConnectionDenied> { + Err(ConnectionDenied::new(std::io::Error::new( + std::io::ErrorKind::Other, + "ConnectionDenier", + ))) + } + + fn on_swarm_event(&mut self, _event: FromSwarm) {} + + fn on_connection_handler_event( + &mut self, + _peer_id: PeerId, + _connection_id: ConnectionId, + event: 
THandlerOutEvent, + ) { + void::unreachable(event) + } + + fn poll( + &mut self, + _: &mut Context<'_>, + ) -> Poll>> { + Poll::Pending + } + } } diff --git a/misc/futures-bounded/CHANGELOG.md b/misc/futures-bounded/CHANGELOG.md new file mode 100644 index 00000000000..72b0b4f457d --- /dev/null +++ b/misc/futures-bounded/CHANGELOG.md @@ -0,0 +1,23 @@ +## 0.2.3 + +- Introduce `FuturesTupleSet`, holding tuples of a `Future` together with an arbitrary piece of data. + See [PR 4841](https://github.com/libp2p/rust-libp2p/pull/4841). + +## 0.2.2 + +- Fix an issue where `{Futures,Stream}Map` returns `Poll::Pending` despite being ready after an item has been replaced as part of `try_push`. + See [PR 4865](https://github.com/libp2p/rust-libp2p/pull/4865). + +## 0.2.1 + +- Add `.len()` getter to `FuturesMap`, `FuturesSet`, `StreamMap` and `StreamSet`. + See [PR 4745](https://github.com/libp2p/rust-libp2p/pull/4745). + +## 0.2.0 + +- Add `StreamMap` type and remove `Future`-suffix from `PushError::ReplacedFuture` to reuse it for `StreamMap`. + See [PR 4616](https://github.com/libp2p/rust-libp2p/pull/4616). + +## 0.1.0 + +Initial release. diff --git a/misc/futures-bounded/Cargo.toml b/misc/futures-bounded/Cargo.toml new file mode 100644 index 00000000000..3262f90821b --- /dev/null +++ b/misc/futures-bounded/Cargo.toml @@ -0,0 +1,24 @@ +[package] +name = "futures-bounded" +version = "0.2.3" +edition = "2021" +rust-version.workspace = true +license = "MIT" +repository = "https://github.com/libp2p/rust-libp2p" +keywords = ["futures", "async", "backpressure"] +categories = ["data-structures", "asynchronous"] +description = "Utilities for bounding futures in size and time." 
+publish = true + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +futures-util = { version = "0.3.30" } +futures-timer = "3.0.2" + +[dev-dependencies] +tokio = { version = "1.35.1", features = ["macros", "rt", "sync"] } +futures = "0.3.30" + +[lints] +workspace = true diff --git a/misc/futures-bounded/src/futures_map.rs b/misc/futures-bounded/src/futures_map.rs new file mode 100644 index 00000000000..fba3543f67b --- /dev/null +++ b/misc/futures-bounded/src/futures_map.rs @@ -0,0 +1,319 @@ +use std::future::Future; +use std::hash::Hash; +use std::pin::Pin; +use std::task::{Context, Poll, Waker}; +use std::time::Duration; +use std::{future, mem}; + +use futures_timer::Delay; +use futures_util::future::BoxFuture; +use futures_util::stream::FuturesUnordered; +use futures_util::{FutureExt, StreamExt}; + +use crate::{PushError, Timeout}; + +/// Represents a map of [`Future`]s. +/// +/// Each future must finish within the specified time and the map never outgrows its capacity. +pub struct FuturesMap { + timeout: Duration, + capacity: usize, + inner: FuturesUnordered>>>, + empty_waker: Option, + full_waker: Option, +} + +impl FuturesMap { + pub fn new(timeout: Duration, capacity: usize) -> Self { + Self { + timeout, + capacity, + inner: Default::default(), + empty_waker: None, + full_waker: None, + } + } +} + +impl FuturesMap +where + ID: Clone + Hash + Eq + Send + Unpin + 'static, + O: 'static, +{ + /// Push a future into the map. + /// + /// This method inserts the given future with defined `future_id` to the set. + /// If the length of the map is equal to the capacity, this method returns [PushError::BeyondCapacity], + /// that contains the passed future. In that case, the future is not inserted to the map. + /// If a future with the given `future_id` already exists, then the old future will be replaced by a new one. + /// In that case, the returned error [PushError::Replaced] contains the old future. 
+ pub fn try_push(&mut self, future_id: ID, future: F) -> Result<(), PushError>> + where + F: Future + Send + 'static, + { + if self.inner.len() >= self.capacity { + return Err(PushError::BeyondCapacity(future.boxed())); + } + + if let Some(waker) = self.empty_waker.take() { + waker.wake(); + } + + let old = self.remove(future_id.clone()); + self.inner.push(TaggedFuture { + tag: future_id, + inner: TimeoutFuture { + inner: future.boxed(), + timeout: Delay::new(self.timeout), + cancelled: false, + }, + }); + match old { + None => Ok(()), + Some(old) => Err(PushError::Replaced(old)), + } + } + + pub fn remove(&mut self, id: ID) -> Option> { + let tagged = self.inner.iter_mut().find(|s| s.tag == id)?; + + let inner = mem::replace(&mut tagged.inner.inner, future::pending().boxed()); + tagged.inner.cancelled = true; + + Some(inner) + } + + pub fn len(&self) -> usize { + self.inner.len() + } + + pub fn is_empty(&self) -> bool { + self.inner.is_empty() + } + + #[allow(unknown_lints, clippy::needless_pass_by_ref_mut)] // &mut Context is idiomatic. 
+ pub fn poll_ready_unpin(&mut self, cx: &mut Context<'_>) -> Poll<()> { + if self.inner.len() < self.capacity { + return Poll::Ready(()); + } + + self.full_waker = Some(cx.waker().clone()); + + Poll::Pending + } + + pub fn poll_unpin(&mut self, cx: &mut Context<'_>) -> Poll<(ID, Result)> { + loop { + let maybe_result = futures_util::ready!(self.inner.poll_next_unpin(cx)); + + match maybe_result { + None => { + self.empty_waker = Some(cx.waker().clone()); + return Poll::Pending; + } + Some((id, Ok(output))) => return Poll::Ready((id, Ok(output))), + Some((id, Err(TimeoutError::Timeout))) => { + return Poll::Ready((id, Err(Timeout::new(self.timeout)))) + } + Some((_, Err(TimeoutError::Cancelled))) => continue, + } + } + } +} + +struct TimeoutFuture { + inner: F, + timeout: Delay, + + cancelled: bool, +} + +impl Future for TimeoutFuture +where + F: Future + Unpin, +{ + type Output = Result; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + if self.cancelled { + return Poll::Ready(Err(TimeoutError::Cancelled)); + } + + if self.timeout.poll_unpin(cx).is_ready() { + return Poll::Ready(Err(TimeoutError::Timeout)); + } + + self.inner.poll_unpin(cx).map(Ok) + } +} + +enum TimeoutError { + Timeout, + Cancelled, +} + +struct TaggedFuture { + tag: T, + inner: F, +} + +impl Future for TaggedFuture +where + T: Clone + Unpin, + F: Future + Unpin, +{ + type Output = (T, F::Output); + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let output = futures_util::ready!(self.inner.poll_unpin(cx)); + + Poll::Ready((self.tag.clone(), output)) + } +} + +#[cfg(test)] +mod tests { + use futures::channel::oneshot; + use futures_util::task::noop_waker_ref; + use std::future::{pending, poll_fn, ready}; + use std::pin::Pin; + use std::time::Instant; + + use super::*; + + #[test] + fn cannot_push_more_than_capacity_tasks() { + let mut futures = FuturesMap::new(Duration::from_secs(10), 1); + + assert!(futures.try_push("ID_1", ready(())).is_ok()); + 
matches!( + futures.try_push("ID_2", ready(())), + Err(PushError::BeyondCapacity(_)) + ); + } + + #[test] + fn cannot_push_the_same_id_few_times() { + let mut futures = FuturesMap::new(Duration::from_secs(10), 5); + + assert!(futures.try_push("ID", ready(())).is_ok()); + matches!( + futures.try_push("ID", ready(())), + Err(PushError::Replaced(_)) + ); + } + + #[tokio::test] + async fn futures_timeout() { + let mut futures = FuturesMap::new(Duration::from_millis(100), 1); + + let _ = futures.try_push("ID", pending::<()>()); + Delay::new(Duration::from_millis(150)).await; + let (_, result) = poll_fn(|cx| futures.poll_unpin(cx)).await; + + assert!(result.is_err()) + } + + #[test] + fn resources_of_removed_future_are_cleaned_up() { + let mut futures = FuturesMap::new(Duration::from_millis(100), 1); + + let _ = futures.try_push("ID", pending::<()>()); + futures.remove("ID"); + + let poll = futures.poll_unpin(&mut Context::from_waker(noop_waker_ref())); + assert!(poll.is_pending()); + + assert_eq!(futures.len(), 0); + } + + #[tokio::test] + async fn replaced_pending_future_is_polled() { + let mut streams = FuturesMap::new(Duration::from_millis(100), 3); + + let (_tx1, rx1) = oneshot::channel(); + let (tx2, rx2) = oneshot::channel(); + + let _ = streams.try_push("ID1", rx1); + let _ = streams.try_push("ID2", rx2); + + let _ = tx2.send(2); + let (id, res) = poll_fn(|cx| streams.poll_unpin(cx)).await; + assert_eq!(id, "ID2"); + assert_eq!(res.unwrap().unwrap(), 2); + + let (new_tx1, new_rx1) = oneshot::channel(); + let replaced = streams.try_push("ID1", new_rx1); + assert!(matches!(replaced.unwrap_err(), PushError::Replaced(_))); + + let _ = new_tx1.send(4); + let (id, res) = poll_fn(|cx| streams.poll_unpin(cx)).await; + + assert_eq!(id, "ID1"); + assert_eq!(res.unwrap().unwrap(), 4); + } + + // Each future causes a delay, `Task` only has a capacity of 1, meaning they must be processed in sequence. 
+ // We stop after NUM_FUTURES tasks, meaning the overall execution must at least take DELAY * NUM_FUTURES. + #[tokio::test] + async fn backpressure() { + const DELAY: Duration = Duration::from_millis(100); + const NUM_FUTURES: u32 = 10; + + let start = Instant::now(); + Task::new(DELAY, NUM_FUTURES, 1).await; + let duration = start.elapsed(); + + assert!(duration >= DELAY * NUM_FUTURES); + } + + struct Task { + future: Duration, + num_futures: usize, + num_processed: usize, + inner: FuturesMap, + } + + impl Task { + fn new(future: Duration, num_futures: u32, capacity: usize) -> Self { + Self { + future, + num_futures: num_futures as usize, + num_processed: 0, + inner: FuturesMap::new(Duration::from_secs(60), capacity), + } + } + } + + impl Future for Task { + type Output = (); + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let this = self.get_mut(); + + while this.num_processed < this.num_futures { + if let Poll::Ready((_, result)) = this.inner.poll_unpin(cx) { + if result.is_err() { + panic!("Timeout is great than future delay") + } + + this.num_processed += 1; + continue; + } + + if let Poll::Ready(()) = this.inner.poll_ready_unpin(cx) { + // We push the constant future's ID to prove that user can use the same ID + // if the future was finished + let maybe_future = this.inner.try_push(1u8, Delay::new(this.future)); + assert!(maybe_future.is_ok(), "we polled for readiness"); + + continue; + } + + return Poll::Pending; + } + + Poll::Ready(()) + } + } +} diff --git a/misc/futures-bounded/src/futures_set.rs b/misc/futures-bounded/src/futures_set.rs new file mode 100644 index 00000000000..af7cedfcc85 --- /dev/null +++ b/misc/futures-bounded/src/futures_set.rs @@ -0,0 +1,65 @@ +use std::future::Future; +use std::task::{ready, Context, Poll}; +use std::time::Duration; + +use futures_util::future::BoxFuture; + +use crate::{FuturesMap, PushError, Timeout}; + +/// Represents a list of [Future]s. 
+/// +/// Each future must finish within the specified time and the list never outgrows its capacity. +pub struct FuturesSet { + id: u32, + inner: FuturesMap, +} + +impl FuturesSet { + pub fn new(timeout: Duration, capacity: usize) -> Self { + Self { + id: 0, + inner: FuturesMap::new(timeout, capacity), + } + } +} + +impl FuturesSet +where + O: 'static, +{ + /// Push a future into the list. + /// + /// This method adds the given future to the list. + /// If the length of the list is equal to the capacity, this method returns a error that contains the passed future. + /// In that case, the future is not added to the set. + pub fn try_push(&mut self, future: F) -> Result<(), BoxFuture> + where + F: Future + Send + 'static, + { + self.id = self.id.wrapping_add(1); + + match self.inner.try_push(self.id, future) { + Ok(()) => Ok(()), + Err(PushError::BeyondCapacity(w)) => Err(w), + Err(PushError::Replaced(_)) => unreachable!("we never reuse IDs"), + } + } + + pub fn len(&self) -> usize { + self.inner.len() + } + + pub fn is_empty(&self) -> bool { + self.inner.is_empty() + } + + pub fn poll_ready_unpin(&mut self, cx: &mut Context<'_>) -> Poll<()> { + self.inner.poll_ready_unpin(cx) + } + + pub fn poll_unpin(&mut self, cx: &mut Context<'_>) -> Poll> { + let (_, res) = ready!(self.inner.poll_unpin(cx)); + + Poll::Ready(res) + } +} diff --git a/misc/futures-bounded/src/futures_tuple_set.rs b/misc/futures-bounded/src/futures_tuple_set.rs new file mode 100644 index 00000000000..e19b236aaf8 --- /dev/null +++ b/misc/futures-bounded/src/futures_tuple_set.rs @@ -0,0 +1,94 @@ +use std::collections::HashMap; +use std::future::Future; +use std::task::{ready, Context, Poll}; +use std::time::Duration; + +use futures_util::future::BoxFuture; + +use crate::{FuturesMap, PushError, Timeout}; + +/// Represents a list of tuples of a [Future] and an associated piece of data. +/// +/// Each future must finish within the specified time and the list never outgrows its capacity. 
+pub struct FuturesTupleSet { + id: u32, + inner: FuturesMap, + data: HashMap, +} + +impl FuturesTupleSet { + pub fn new(timeout: Duration, capacity: usize) -> Self { + Self { + id: 0, + inner: FuturesMap::new(timeout, capacity), + data: HashMap::new(), + } + } +} + +impl FuturesTupleSet +where + O: 'static, +{ + /// Push a future into the list. + /// + /// This method adds the given future to the list. + /// If the length of the list is equal to the capacity, this method returns a error that contains the passed future. + /// In that case, the future is not added to the set. + pub fn try_push(&mut self, future: F, data: D) -> Result<(), (BoxFuture, D)> + where + F: Future + Send + 'static, + { + self.id = self.id.wrapping_add(1); + + match self.inner.try_push(self.id, future) { + Ok(()) => {} + Err(PushError::BeyondCapacity(w)) => return Err((w, data)), + Err(PushError::Replaced(_)) => unreachable!("we never reuse IDs"), + } + self.data.insert(self.id, data); + + Ok(()) + } + + pub fn len(&self) -> usize { + self.inner.len() + } + + pub fn is_empty(&self) -> bool { + self.inner.is_empty() + } + + pub fn poll_ready_unpin(&mut self, cx: &mut Context<'_>) -> Poll<()> { + self.inner.poll_ready_unpin(cx) + } + + pub fn poll_unpin(&mut self, cx: &mut Context<'_>) -> Poll<(Result, D)> { + let (id, res) = ready!(self.inner.poll_unpin(cx)); + let data = self.data.remove(&id).expect("must have data for future"); + + Poll::Ready((res, data)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use futures_util::future::poll_fn; + use futures_util::FutureExt; + use std::future::ready; + + #[test] + fn tracks_associated_data_of_future() { + let mut set = FuturesTupleSet::new(Duration::from_secs(10), 10); + + let _ = set.try_push(ready(1), 1); + let _ = set.try_push(ready(2), 2); + + let (res1, data1) = poll_fn(|cx| set.poll_unpin(cx)).now_or_never().unwrap(); + let (res2, data2) = poll_fn(|cx| set.poll_unpin(cx)).now_or_never().unwrap(); + + assert_eq!(res1.unwrap(), data1); + 
assert_eq!(res2.unwrap(), data2); + } +} diff --git a/misc/futures-bounded/src/lib.rs b/misc/futures-bounded/src/lib.rs new file mode 100644 index 00000000000..da8483a595f --- /dev/null +++ b/misc/futures-bounded/src/lib.rs @@ -0,0 +1,46 @@ +mod futures_map; +mod futures_set; +mod futures_tuple_set; +mod stream_map; +mod stream_set; + +pub use futures_map::FuturesMap; +pub use futures_set::FuturesSet; +pub use futures_tuple_set::FuturesTupleSet; +pub use stream_map::StreamMap; +pub use stream_set::StreamSet; + +use std::fmt; +use std::fmt::Formatter; +use std::time::Duration; + +/// A future failed to complete within the given timeout. +#[derive(Debug)] +pub struct Timeout { + limit: Duration, +} + +impl Timeout { + fn new(duration: Duration) -> Self { + Self { limit: duration } + } +} + +impl fmt::Display for Timeout { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!(f, "future failed to complete within {:?}", self.limit) + } +} + +/// Error of a future pushing +#[derive(PartialEq, Debug)] +pub enum PushError { + /// The length of the set is equal to the capacity + BeyondCapacity(T), + /// The map already contained an item with this key. + /// + /// The old item is returned. + Replaced(T), +} + +impl std::error::Error for Timeout {} diff --git a/misc/futures-bounded/src/stream_map.rs b/misc/futures-bounded/src/stream_map.rs new file mode 100644 index 00000000000..8464f432d02 --- /dev/null +++ b/misc/futures-bounded/src/stream_map.rs @@ -0,0 +1,362 @@ +use std::mem; +use std::pin::Pin; +use std::task::{Context, Poll, Waker}; +use std::time::Duration; + +use futures_timer::Delay; +use futures_util::stream::{BoxStream, SelectAll}; +use futures_util::{stream, FutureExt, Stream, StreamExt}; + +use crate::{PushError, Timeout}; + +/// Represents a map of [`Stream`]s. +/// +/// Each stream must finish within the specified time and the map never outgrows its capacity. 
+pub struct StreamMap { + timeout: Duration, + capacity: usize, + inner: SelectAll>>>, + empty_waker: Option, + full_waker: Option, +} + +impl StreamMap +where + ID: Clone + Unpin, +{ + pub fn new(timeout: Duration, capacity: usize) -> Self { + Self { + timeout, + capacity, + inner: Default::default(), + empty_waker: None, + full_waker: None, + } + } +} + +impl StreamMap +where + ID: Clone + PartialEq + Send + Unpin + 'static, + O: Send + 'static, +{ + /// Push a stream into the map. + pub fn try_push(&mut self, id: ID, stream: F) -> Result<(), PushError>> + where + F: Stream + Send + 'static, + { + if self.inner.len() >= self.capacity { + return Err(PushError::BeyondCapacity(stream.boxed())); + } + + if let Some(waker) = self.empty_waker.take() { + waker.wake(); + } + + let old = self.remove(id.clone()); + self.inner.push(TaggedStream::new( + id, + TimeoutStream { + inner: stream.boxed(), + timeout: Delay::new(self.timeout), + }, + )); + + match old { + None => Ok(()), + Some(old) => Err(PushError::Replaced(old)), + } + } + + pub fn remove(&mut self, id: ID) -> Option> { + let tagged = self.inner.iter_mut().find(|s| s.key == id)?; + + let inner = mem::replace(&mut tagged.inner.inner, stream::pending().boxed()); + tagged.exhausted = true; // Setting this will emit `None` on the next poll and ensure `SelectAll` cleans up the resources. + + Some(inner) + } + + pub fn len(&self) -> usize { + self.inner.len() + } + + pub fn is_empty(&self) -> bool { + self.inner.is_empty() + } + + #[allow(unknown_lints, clippy::needless_pass_by_ref_mut)] // &mut Context is idiomatic. 
+ pub fn poll_ready_unpin(&mut self, cx: &mut Context<'_>) -> Poll<()> { + if self.inner.len() < self.capacity { + return Poll::Ready(()); + } + + self.full_waker = Some(cx.waker().clone()); + + Poll::Pending + } + + pub fn poll_next_unpin( + &mut self, + cx: &mut Context<'_>, + ) -> Poll<(ID, Option>)> { + match futures_util::ready!(self.inner.poll_next_unpin(cx)) { + None => { + self.empty_waker = Some(cx.waker().clone()); + Poll::Pending + } + Some((id, Some(Ok(output)))) => Poll::Ready((id, Some(Ok(output)))), + Some((id, Some(Err(())))) => { + self.remove(id.clone()); // Remove stream, otherwise we keep reporting the timeout. + + Poll::Ready((id, Some(Err(Timeout::new(self.timeout))))) + } + Some((id, None)) => Poll::Ready((id, None)), + } + } +} + +struct TimeoutStream { + inner: S, + timeout: Delay, +} + +impl Stream for TimeoutStream +where + F: Stream + Unpin, +{ + type Item = Result; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + if self.timeout.poll_unpin(cx).is_ready() { + return Poll::Ready(Some(Err(()))); + } + + self.inner.poll_next_unpin(cx).map(|a| a.map(Ok)) + } +} + +struct TaggedStream { + key: K, + inner: S, + + exhausted: bool, +} + +impl TaggedStream { + fn new(key: K, inner: S) -> Self { + Self { + key, + inner, + exhausted: false, + } + } +} + +impl Stream for TaggedStream +where + K: Clone + Unpin, + S: Stream + Unpin, +{ + type Item = (K, Option); + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + if self.exhausted { + return Poll::Ready(None); + } + + match futures_util::ready!(self.inner.poll_next_unpin(cx)) { + Some(item) => Poll::Ready(Some((self.key.clone(), Some(item)))), + None => { + self.exhausted = true; + + Poll::Ready(Some((self.key.clone(), None))) + } + } + } +} + +#[cfg(test)] +mod tests { + use futures::channel::mpsc; + use futures_util::stream::{once, pending}; + use futures_util::SinkExt; + use std::future::{poll_fn, ready, Future}; + use std::pin::Pin; + use 
std::time::Instant; + + use super::*; + + #[test] + fn cannot_push_more_than_capacity_tasks() { + let mut streams = StreamMap::new(Duration::from_secs(10), 1); + + assert!(streams.try_push("ID_1", once(ready(()))).is_ok()); + matches!( + streams.try_push("ID_2", once(ready(()))), + Err(PushError::BeyondCapacity(_)) + ); + } + + #[test] + fn cannot_push_the_same_id_few_times() { + let mut streams = StreamMap::new(Duration::from_secs(10), 5); + + assert!(streams.try_push("ID", once(ready(()))).is_ok()); + matches!( + streams.try_push("ID", once(ready(()))), + Err(PushError::Replaced(_)) + ); + } + + #[tokio::test] + async fn streams_timeout() { + let mut streams = StreamMap::new(Duration::from_millis(100), 1); + + let _ = streams.try_push("ID", pending::<()>()); + Delay::new(Duration::from_millis(150)).await; + let (_, result) = poll_fn(|cx| streams.poll_next_unpin(cx)).await; + + assert!(result.unwrap().is_err()) + } + + #[tokio::test] + async fn timed_out_stream_gets_removed() { + let mut streams = StreamMap::new(Duration::from_millis(100), 1); + + let _ = streams.try_push("ID", pending::<()>()); + Delay::new(Duration::from_millis(150)).await; + poll_fn(|cx| streams.poll_next_unpin(cx)).await; + + let poll = streams.poll_next_unpin(&mut Context::from_waker( + futures_util::task::noop_waker_ref(), + )); + assert!(poll.is_pending()) + } + + #[test] + fn removing_stream() { + let mut streams = StreamMap::new(Duration::from_millis(100), 1); + + let _ = streams.try_push("ID", stream::once(ready(()))); + + { + let cancelled_stream = streams.remove("ID"); + assert!(cancelled_stream.is_some()); + } + + let poll = streams.poll_next_unpin(&mut Context::from_waker( + futures_util::task::noop_waker_ref(), + )); + + assert!(poll.is_pending()); + assert_eq!( + streams.len(), + 0, + "resources of cancelled streams are cleaned up properly" + ); + } + + #[tokio::test] + async fn replaced_stream_is_still_registered() { + let mut streams = StreamMap::new(Duration::from_millis(100), 
3); + + let (mut tx1, rx1) = mpsc::channel(5); + let (mut tx2, rx2) = mpsc::channel(5); + + let _ = streams.try_push("ID1", rx1); + let _ = streams.try_push("ID2", rx2); + + let _ = tx2.send(2).await; + let _ = tx1.send(1).await; + let _ = tx2.send(3).await; + let (id, res) = poll_fn(|cx| streams.poll_next_unpin(cx)).await; + assert_eq!(id, "ID1"); + assert_eq!(res.unwrap().unwrap(), 1); + let (id, res) = poll_fn(|cx| streams.poll_next_unpin(cx)).await; + assert_eq!(id, "ID2"); + assert_eq!(res.unwrap().unwrap(), 2); + let (id, res) = poll_fn(|cx| streams.poll_next_unpin(cx)).await; + assert_eq!(id, "ID2"); + assert_eq!(res.unwrap().unwrap(), 3); + + let (mut new_tx1, new_rx1) = mpsc::channel(5); + let replaced = streams.try_push("ID1", new_rx1); + assert!(matches!(replaced.unwrap_err(), PushError::Replaced(_))); + + let _ = new_tx1.send(4).await; + let (id, res) = poll_fn(|cx| streams.poll_next_unpin(cx)).await; + + assert_eq!(id, "ID1"); + assert_eq!(res.unwrap().unwrap(), 4); + } + + // Each stream emits 1 item with delay, `Task` only has a capacity of 1, meaning they must be processed in sequence. + // We stop after NUM_STREAMS tasks, meaning the overall execution must at least take DELAY * NUM_STREAMS. 
+ #[tokio::test] + async fn backpressure() { + const DELAY: Duration = Duration::from_millis(100); + const NUM_STREAMS: u32 = 10; + + let start = Instant::now(); + Task::new(DELAY, NUM_STREAMS, 1).await; + let duration = start.elapsed(); + + assert!(duration >= DELAY * NUM_STREAMS); + } + + struct Task { + item_delay: Duration, + num_streams: usize, + num_processed: usize, + inner: StreamMap, + } + + impl Task { + fn new(item_delay: Duration, num_streams: u32, capacity: usize) -> Self { + Self { + item_delay, + num_streams: num_streams as usize, + num_processed: 0, + inner: StreamMap::new(Duration::from_secs(60), capacity), + } + } + } + + impl Future for Task { + type Output = (); + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let this = self.get_mut(); + + while this.num_processed < this.num_streams { + match this.inner.poll_next_unpin(cx) { + Poll::Ready((_, Some(result))) => { + if result.is_err() { + panic!("Timeout is great than item delay") + } + + this.num_processed += 1; + continue; + } + Poll::Ready((_, None)) => { + continue; + } + _ => {} + } + + if let Poll::Ready(()) = this.inner.poll_ready_unpin(cx) { + // We push the constant ID to prove that user can use the same ID if the stream was finished + let maybe_future = this.inner.try_push(1u8, once(Delay::new(this.item_delay))); + assert!(maybe_future.is_ok(), "we polled for readiness"); + + continue; + } + + return Poll::Pending; + } + + Poll::Ready(()) + } + } +} diff --git a/misc/futures-bounded/src/stream_set.rs b/misc/futures-bounded/src/stream_set.rs new file mode 100644 index 00000000000..bb32835065f --- /dev/null +++ b/misc/futures-bounded/src/stream_set.rs @@ -0,0 +1,64 @@ +use futures_util::stream::BoxStream; +use futures_util::Stream; +use std::task::{ready, Context, Poll}; +use std::time::Duration; + +use crate::{PushError, StreamMap, Timeout}; + +/// Represents a set of [Stream]s. 
+/// +/// Each stream must finish within the specified time and the list never outgrows its capacity. +pub struct StreamSet { + id: u32, + inner: StreamMap, +} + +impl StreamSet { + pub fn new(timeout: Duration, capacity: usize) -> Self { + Self { + id: 0, + inner: StreamMap::new(timeout, capacity), + } + } +} + +impl StreamSet +where + O: Send + 'static, +{ + /// Push a stream into the list. + /// + /// This method adds the given stream to the list. + /// If the length of the list is equal to the capacity, this method returns an error that contains the passed stream. + /// In that case, the stream is not added to the set. + pub fn try_push(&mut self, stream: F) -> Result<(), BoxStream> + where + F: Stream + Send + 'static, + { + self.id = self.id.wrapping_add(1); + + match self.inner.try_push(self.id, stream) { + Ok(()) => Ok(()), + Err(PushError::BeyondCapacity(w)) => Err(w), + Err(PushError::Replaced(_)) => unreachable!("we never reuse IDs"), + } + } + + pub fn len(&self) -> usize { + self.inner.len() + } + + pub fn is_empty(&self) -> bool { + self.inner.is_empty() + } + + pub fn poll_ready_unpin(&mut self, cx: &mut Context<'_>) -> Poll<()> { + self.inner.poll_ready_unpin(cx) + } + + pub fn poll_next_unpin(&mut self, cx: &mut Context<'_>) -> Poll>> { + let (_, res) = ready!(self.inner.poll_next_unpin(cx)); + + Poll::Ready(res) + } +} diff --git a/misc/keygen/Cargo.toml b/misc/keygen/Cargo.toml index d32761f2021..2a4b7e28418 100644 --- a/misc/keygen/Cargo.toml +++ b/misc/keygen/Cargo.toml @@ -9,11 +9,17 @@ keywords = ["peer-to-peer", "libp2p", "networking"] categories = ["network-programming", "asynchronous"] publish = false +[package.metadata.release] +release = false + [dependencies] -clap = { version = "4.2.7", features = ["derive"] } +clap = { version = "4.4.11", features = ["derive"] } zeroize = "1" -serde = { version = "1.0.160", features = ["derive"] } -serde_json = "1.0.96" +serde = { version = "1.0.193", features = ["derive"] } +serde_json = "1.0.108" 
libp2p-core = { workspace = true } -base64 = "0.21.0" +base64 = "0.21.5" libp2p-identity = { workspace = true } + +[lints] +workspace = true diff --git a/misc/memory-connection-limits/CHANGELOG.md b/misc/memory-connection-limits/CHANGELOG.md new file mode 100644 index 00000000000..fc598872d50 --- /dev/null +++ b/misc/memory-connection-limits/CHANGELOG.md @@ -0,0 +1,6 @@ +## 0.2.0 + + +## 0.1.0 + +- Initial release. diff --git a/misc/memory-connection-limits/Cargo.toml b/misc/memory-connection-limits/Cargo.toml new file mode 100644 index 00000000000..ae6bb386373 --- /dev/null +++ b/misc/memory-connection-limits/Cargo.toml @@ -0,0 +1,29 @@ +[package] +name = "libp2p-memory-connection-limits" +edition = "2021" +rust-version = { workspace = true } +description = "Memory usage based connection limits for libp2p." +version = "0.2.0" +license = "MIT" +repository = "https://github.com/libp2p/rust-libp2p" +keywords = ["peer-to-peer", "libp2p", "networking"] +categories = ["network-programming", "asynchronous"] + +[dependencies] +memory-stats = { version = "1", features = ["always_use_statm"] } +libp2p-core = { workspace = true } +libp2p-swarm = { workspace = true } +libp2p-identity = { workspace = true, features = ["peerid"] } +sysinfo = "0.29" +tracing = "0.1.37" +void = "1" + +[dev-dependencies] +async-std = { version = "1.12.0", features = ["attributes"] } +libp2p-identify = { workspace = true } +libp2p-swarm-derive = { path = "../../swarm-derive" } +libp2p-swarm-test = { path = "../../swarm-test" } +rand = "0.8.5" + +[lints] +workspace = true diff --git a/misc/memory-connection-limits/src/lib.rs b/misc/memory-connection-limits/src/lib.rs new file mode 100644 index 00000000000..ac911654979 --- /dev/null +++ b/misc/memory-connection-limits/src/lib.rs @@ -0,0 +1,225 @@ +// Copyright 2023 Protocol Labs. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +use libp2p_core::{Endpoint, Multiaddr}; +use libp2p_identity::PeerId; +use libp2p_swarm::{ + dummy, ConnectionDenied, ConnectionId, FromSwarm, NetworkBehaviour, THandler, THandlerInEvent, + THandlerOutEvent, ToSwarm, +}; +use void::Void; + +use std::{ + fmt, + task::{Context, Poll}, + time::{Duration, Instant}, +}; + +/// A [`NetworkBehaviour`] that enforces a set of memory usage based limits. +/// +/// For these limits to take effect, this needs to be composed into the behaviour tree of your application. +/// +/// If a connection is denied due to a limit, either a [`SwarmEvent::IncomingConnectionError`](libp2p_swarm::SwarmEvent::IncomingConnectionError) +/// or [`SwarmEvent::OutgoingConnectionError`](libp2p_swarm::SwarmEvent::OutgoingConnectionError) will be emitted. 
+/// The [`ListenError::Denied`](libp2p_swarm::ListenError::Denied) and respectively the [`DialError::Denied`](libp2p_swarm::DialError::Denied) variant +/// contain a [`ConnectionDenied`] type that can be downcast to [`MemoryUsageLimitExceeded`] error if (and only if) **this** +/// behaviour denied the connection. +/// +/// If you employ multiple [`NetworkBehaviour`]s that manage connections, it may also be a different error. +/// +/// [Behaviour::with_max_bytes] and [Behaviour::with_max_percentage] are mutually exclusive. +/// If you need to employ both of them, compose two instances of [Behaviour] into your custom behaviour. +/// +/// # Example +/// +/// ```rust +/// # use libp2p_identify as identify; +/// # use libp2p_swarm_derive::NetworkBehaviour; +/// # use libp2p_memory_connection_limits as memory_connection_limits; +/// +/// #[derive(NetworkBehaviour)] +/// # #[behaviour(prelude = "libp2p_swarm::derive_prelude")] +/// struct MyBehaviour { +/// identify: identify::Behaviour, +/// limits: memory_connection_limits::Behaviour +/// } +/// ``` +pub struct Behaviour { + max_allowed_bytes: usize, + process_physical_memory_bytes: usize, + last_refreshed: Instant, +} + +/// The maximum duration for which the retrieved memory-stats of the process are allowed to be stale. +/// +/// Once exceeded, we will retrieve new stats. +const MAX_STALE_DURATION: Duration = Duration::from_millis(100); + +impl Behaviour { + /// Sets the process memory usage threshold in absolute bytes. + /// + /// New inbound and outbound connections will be denied when the threshold is reached. + pub fn with_max_bytes(max_allowed_bytes: usize) -> Self { + Self { + max_allowed_bytes, + process_physical_memory_bytes: memory_stats::memory_stats() + .map(|s| s.physical_mem) + .unwrap_or_default(), + last_refreshed: Instant::now(), + } + } + + /// Sets the process memory usage threshold in the percentage of the total physical memory. 
+ /// + /// New inbound and outbound connections will be denied when the threshold is reached. + pub fn with_max_percentage(percentage: f64) -> Self { + use sysinfo::{RefreshKind, SystemExt}; + + let system_memory_bytes = + sysinfo::System::new_with_specifics(RefreshKind::new().with_memory()).total_memory(); + + Self::with_max_bytes((system_memory_bytes as f64 * percentage).round() as usize) + } + + /// Gets the process memory usage threshold in bytes. + pub fn max_allowed_bytes(&self) -> usize { + self.max_allowed_bytes + } + + fn check_limit(&mut self) -> Result<(), ConnectionDenied> { + self.refresh_memory_stats_if_needed(); + + if self.process_physical_memory_bytes > self.max_allowed_bytes { + return Err(ConnectionDenied::new(MemoryUsageLimitExceeded { + process_physical_memory_bytes: self.process_physical_memory_bytes, + max_allowed_bytes: self.max_allowed_bytes, + })); + } + + Ok(()) + } + + fn refresh_memory_stats_if_needed(&mut self) { + let now = Instant::now(); + + if self.last_refreshed + MAX_STALE_DURATION > now { + // Memory stats are reasonably recent, don't refresh. 
+ return; + } + + let Some(stats) = memory_stats::memory_stats() else { + tracing::warn!("Failed to retrieve process memory stats"); + return; + }; + + self.last_refreshed = now; + self.process_physical_memory_bytes = stats.physical_mem; + } +} + +impl NetworkBehaviour for Behaviour { + type ConnectionHandler = dummy::ConnectionHandler; + type ToSwarm = Void; + + fn handle_pending_inbound_connection( + &mut self, + _: ConnectionId, + _: &Multiaddr, + _: &Multiaddr, + ) -> Result<(), ConnectionDenied> { + self.check_limit() + } + + fn handle_established_inbound_connection( + &mut self, + _: ConnectionId, + _: PeerId, + _: &Multiaddr, + _: &Multiaddr, + ) -> Result, ConnectionDenied> { + Ok(dummy::ConnectionHandler) + } + + fn handle_pending_outbound_connection( + &mut self, + _: ConnectionId, + _: Option, + _: &[Multiaddr], + _: Endpoint, + ) -> Result, ConnectionDenied> { + self.check_limit()?; + Ok(vec![]) + } + + fn handle_established_outbound_connection( + &mut self, + _: ConnectionId, + _: PeerId, + _: &Multiaddr, + _: Endpoint, + ) -> Result, ConnectionDenied> { + Ok(dummy::ConnectionHandler) + } + + fn on_swarm_event(&mut self, _: FromSwarm) {} + + fn on_connection_handler_event( + &mut self, + _id: PeerId, + _: ConnectionId, + event: THandlerOutEvent, + ) { + void::unreachable(event) + } + + fn poll(&mut self, _: &mut Context<'_>) -> Poll>> { + Poll::Pending + } +} + +/// A connection limit has been exceeded. 
+#[derive(Debug, Clone, Copy)] +pub struct MemoryUsageLimitExceeded { + process_physical_memory_bytes: usize, + max_allowed_bytes: usize, +} + +impl MemoryUsageLimitExceeded { + pub fn process_physical_memory_bytes(&self) -> usize { + self.process_physical_memory_bytes + } + + pub fn max_allowed_bytes(&self) -> usize { + self.max_allowed_bytes + } +} + +impl std::error::Error for MemoryUsageLimitExceeded {} + +impl fmt::Display for MemoryUsageLimitExceeded { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + f, + "process physical memory usage limit exceeded: process memory: {} bytes, max allowed: {} bytes", + self.process_physical_memory_bytes, + self.max_allowed_bytes, + ) + } +} diff --git a/misc/memory-connection-limits/tests/max_bytes.rs b/misc/memory-connection-limits/tests/max_bytes.rs new file mode 100644 index 00000000000..7f89e2c7a9a --- /dev/null +++ b/misc/memory-connection-limits/tests/max_bytes.rs @@ -0,0 +1,96 @@ +// Copyright 2023 Protocol Labs. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +mod util; + +use libp2p_core::Multiaddr; +use libp2p_identity::PeerId; +use libp2p_memory_connection_limits::*; +use std::time::Duration; +use util::*; + +use libp2p_swarm::{dial_opts::DialOpts, DialError, Swarm}; +use libp2p_swarm_test::SwarmExt; + +#[test] +fn max_bytes() { + const CONNECTION_LIMIT: usize = 20; + let max_allowed_bytes = CONNECTION_LIMIT * 1024 * 1024; + + let mut network = Swarm::new_ephemeral(|_| TestBehaviour { + connection_limits: Behaviour::with_max_bytes(max_allowed_bytes), + mem_consumer: ConsumeMemoryBehaviour1MBPending0Established::default(), + }); + + let addr: Multiaddr = "/memory/1234".parse().unwrap(); + let target = PeerId::random(); + + // Exercise `dial` function to get more stable memory stats later + network + .dial( + DialOpts::peer_id(target) + .addresses(vec![addr.clone()]) + .build(), + ) + .expect("Unexpected connection limit."); + + // Adds current mem usage to the limit and update + let max_allowed_bytes_plus_base_usage = + max_allowed_bytes + memory_stats::memory_stats().unwrap().physical_mem; + network.behaviour_mut().connection_limits = + Behaviour::with_max_bytes(max_allowed_bytes_plus_base_usage); + + for _ in 0..CONNECTION_LIMIT { + network + .dial( + DialOpts::peer_id(target) + // Always dial, even if connected or already dialing. + .condition(libp2p_swarm::dial_opts::PeerCondition::Always) + .addresses(vec![addr.clone()]) + .build(), + ) + .expect("Unexpected connection limit."); + } + + std::thread::sleep(Duration::from_millis(100)); // Memory stats are only updated every 100ms internally, ensure they are up-to-date when we try to exceed it. 
+ + match network + .dial( + DialOpts::peer_id(target) + .condition(libp2p_swarm::dial_opts::PeerCondition::Always) + .addresses(vec![addr]) + .build(), + ) + .expect_err("Unexpected dialing success.") + { + DialError::Denied { cause } => { + let exceeded = cause + .downcast::() + .expect("connection denied because of limit"); + + assert_eq!( + exceeded.max_allowed_bytes(), + max_allowed_bytes_plus_base_usage + ); + assert!(exceeded.process_physical_memory_bytes() >= exceeded.max_allowed_bytes()); + } + e => panic!("Unexpected error: {e:?}"), + } +} diff --git a/misc/memory-connection-limits/tests/max_percentage.rs b/misc/memory-connection-limits/tests/max_percentage.rs new file mode 100644 index 00000000000..daee20703ee --- /dev/null +++ b/misc/memory-connection-limits/tests/max_percentage.rs @@ -0,0 +1,98 @@ +// Copyright 2023 Protocol Labs. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. 
+ +mod util; + +use libp2p_core::Multiaddr; +use libp2p_identity::PeerId; +use libp2p_memory_connection_limits::*; +use std::time::Duration; +use sysinfo::{RefreshKind, SystemExt}; +use util::*; + +use libp2p_swarm::{ + dial_opts::{DialOpts, PeerCondition}, + DialError, Swarm, +}; +use libp2p_swarm_test::SwarmExt; + +#[test] +fn max_percentage() { + const CONNECTION_LIMIT: usize = 20; + let system_info = sysinfo::System::new_with_specifics(RefreshKind::new().with_memory()); + + let mut network = Swarm::new_ephemeral(|_| TestBehaviour { + connection_limits: Behaviour::with_max_percentage(0.1), + mem_consumer: ConsumeMemoryBehaviour1MBPending0Established::default(), + }); + + let addr: Multiaddr = "/memory/1234".parse().unwrap(); + let target = PeerId::random(); + + // Exercise `dial` function to get more stable memory stats later + network + .dial( + DialOpts::peer_id(target) + .addresses(vec![addr.clone()]) + .build(), + ) + .expect("Unexpected connection limit."); + + // Adds current mem usage to the limit and update + let current_mem = memory_stats::memory_stats().unwrap().physical_mem; + let max_allowed_bytes = current_mem + CONNECTION_LIMIT * 1024 * 1024; + network.behaviour_mut().connection_limits = Behaviour::with_max_percentage( + max_allowed_bytes as f64 / system_info.total_memory() as f64, + ); + + for _ in 0..CONNECTION_LIMIT { + network + .dial( + DialOpts::peer_id(target) + // Always dial, even if already dialing or connected. + .condition(PeerCondition::Always) + .addresses(vec![addr.clone()]) + .build(), + ) + .expect("Unexpected connection limit."); + } + + std::thread::sleep(Duration::from_millis(100)); // Memory stats are only updated every 100ms internally, ensure they are up-to-date when we try to exceed it. 
+ + match network + .dial( + DialOpts::peer_id(target) + .condition(PeerCondition::Always) + .addresses(vec![addr]) + .build(), + ) + .expect_err("Unexpected dialing success.") + { + DialError::Denied { cause } => { + let exceeded = cause + .downcast::() + .expect("connection denied because of limit"); + + assert_eq!(exceeded.max_allowed_bytes(), max_allowed_bytes); + assert!(exceeded.process_physical_memory_bytes() >= exceeded.max_allowed_bytes()); + } + e => panic!("Unexpected error: {e:?}"), + } +} diff --git a/misc/memory-connection-limits/tests/util.rs b/misc/memory-connection-limits/tests/util.rs new file mode 100644 index 00000000000..f40ce319929 --- /dev/null +++ b/misc/memory-connection-limits/tests/util.rs @@ -0,0 +1,124 @@ +// Copyright 2023 Protocol Labs. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. 
+ +use std::task::{Context, Poll}; + +use libp2p_core::{Endpoint, Multiaddr}; +use libp2p_identity::PeerId; +use libp2p_swarm::{ + dummy, ConnectionDenied, ConnectionId, FromSwarm, NetworkBehaviour, THandler, THandlerInEvent, + THandlerOutEvent, ToSwarm, +}; +use void::Void; + +#[derive(libp2p_swarm_derive::NetworkBehaviour)] +#[behaviour(prelude = "libp2p_swarm::derive_prelude")] +pub(crate) struct TestBehaviour { + pub(crate) connection_limits: libp2p_memory_connection_limits::Behaviour, + pub(crate) mem_consumer: ConsumeMemoryBehaviour1MBPending0Established, +} + +pub(crate) type ConsumeMemoryBehaviour1MBPending0Established = + ConsumeMemoryBehaviour<{ 1024 * 1024 }, 0>; + +#[derive(Default)] +pub(crate) struct ConsumeMemoryBehaviour { + mem_pending: Vec>, + mem_established: Vec>, +} + +impl + ConsumeMemoryBehaviour +{ + fn handle_pending(&mut self) { + // 1MB + self.mem_pending.push(vec![1; MEM_PENDING]); + } + + fn handle_established(&mut self) { + // 1MB + self.mem_established.push(vec![1; MEM_ESTABLISHED]); + } +} + +impl NetworkBehaviour + for ConsumeMemoryBehaviour +{ + type ConnectionHandler = dummy::ConnectionHandler; + type ToSwarm = Void; + + fn handle_pending_inbound_connection( + &mut self, + _: ConnectionId, + _: &Multiaddr, + _: &Multiaddr, + ) -> Result<(), ConnectionDenied> { + self.handle_pending(); + Ok(()) + } + + fn handle_pending_outbound_connection( + &mut self, + _: ConnectionId, + _: Option, + _: &[Multiaddr], + _: Endpoint, + ) -> Result, ConnectionDenied> { + self.handle_pending(); + Ok(vec![]) + } + + fn handle_established_inbound_connection( + &mut self, + _: ConnectionId, + _: PeerId, + _: &Multiaddr, + _: &Multiaddr, + ) -> Result, ConnectionDenied> { + self.handle_established(); + Ok(dummy::ConnectionHandler) + } + + fn handle_established_outbound_connection( + &mut self, + _: ConnectionId, + _: PeerId, + _: &Multiaddr, + _: Endpoint, + ) -> Result, ConnectionDenied> { + self.handle_established(); + Ok(dummy::ConnectionHandler) + } 
+ + fn on_swarm_event(&mut self, _: FromSwarm) {} + + fn on_connection_handler_event( + &mut self, + _id: PeerId, + _: ConnectionId, + event: THandlerOutEvent, + ) { + void::unreachable(event) + } + + fn poll(&mut self, _: &mut Context<'_>) -> Poll>> { + Poll::Pending + } +} diff --git a/misc/metrics/CHANGELOG.md b/misc/metrics/CHANGELOG.md index 4c653ca0051..67c304680db 100644 --- a/misc/metrics/CHANGELOG.md +++ b/misc/metrics/CHANGELOG.md @@ -1,9 +1,56 @@ -## 0.13.0 - unreleased +## 0.14.1 + +- Add `BandwidthTransport`, wrapping an existing `Transport`, exposing Prometheus bandwidth metrics. + See also `SwarmBuilder::with_bandwidth_metrics`. + See [PR 4727](https://github.com/libp2p/rust-libp2p/pull/4727). + +## 0.14.0 + +- Add metrics for `SwarmEvent::{NewExternalAddrCandidate,ExternalAddrConfirmed,ExternalAddrExpired}`. + See [PR 4721](https://github.com/libp2p/rust-libp2p/pull/4721). + +## 0.13.1 + +- Enable gossipsub related data-type fields when compiling for wasm. + See [PR 4217]. + +[PR 4217]: https://github.com/libp2p/rust-libp2p/pull/4217 + +## 0.13.0 + +- Previously `libp2p-metrics::identify` would increase a counter / gauge / histogram on each + received identify information. These metrics are misleading, as e.g. they depend on the identify + interval and don't represent the set of currently connected peers. With this change, identify + information is tracked for the currently connected peers only. Instead of an increase on each + received identify information, metrics represent the status quo (Gauge). + + Metrics removed: + - `libp2p_identify_protocols` + - `libp2p_identify_received_info_listen_addrs` + - `libp2p_identify_received_info_protocols` + - `libp2p_identify_listen_addresses` + + Metrics added: + - `libp2p_identify_remote_protocols` + - `libp2p_identify_remote_listen_addresses` + - `libp2p_identify_local_observed_addresses` + + See [PR 3325]. - Raise MSRV to 1.65. See [PR 3715]. 
+- Replace `libp2p_swarm_connections_closed` `Counter` with `libp2p_swarm_connections_duration` `Histogram` which additionally tracks the duration of a connection. + Note that you can use the `_count` metric of the `Histogram` as a replacement for the `Counter`. + See [PR 3927]. + +- Remove the `pong_received` counter because it is no longer exposed by `libp2p-ping`. + See [PR 3947]. + [PR 3715]: https://github.com/libp2p/rust-libp2p/pull/3715 +[PR 3927]: https://github.com/libp2p/rust-libp2p/pull/3927 +[PR 3325]: https://github.com/libp2p/rust-libp2p/pull/3325 +[PR 3947]: https://github.com/libp2p/rust-libp2p/pull/3947 ## 0.12.0 diff --git a/misc/metrics/Cargo.toml b/misc/metrics/Cargo.toml index 9fafe680115..41eed8f2c36 100644 --- a/misc/metrics/Cargo.toml +++ b/misc/metrics/Cargo.toml @@ -1,9 +1,9 @@ [package] name = "libp2p-metrics" edition = "2021" -rust-version = "1.65.0" +rust-version = { workspace = true } description = "Metrics for libp2p" -version = "0.13.0" +version = "0.14.1" authors = ["Max Inden "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -11,30 +11,37 @@ keywords = ["peer-to-peer", "libp2p", "networking"] categories = ["network-programming", "asynchronous"] [features] +dcutr = ["libp2p-dcutr"] gossipsub = ["libp2p-gossipsub"] identify = ["libp2p-identify"] kad = ["libp2p-kad"] ping = ["libp2p-ping"] relay = ["libp2p-relay"] -dcutr = ["libp2p-dcutr"] [dependencies] +futures = "0.3.30" +instant = "0.1.12" libp2p-core = { workspace = true } libp2p-dcutr = { workspace = true, optional = true } +libp2p-gossipsub = { workspace = true, optional = true } libp2p-identify = { workspace = true, optional = true } +libp2p-identity = { workspace = true } libp2p-kad = { workspace = true, optional = true } libp2p-ping = { workspace = true, optional = true } libp2p-relay = { workspace = true, optional = true } libp2p-swarm = { workspace = true } -libp2p-identity = { workspace = true } -prometheus-client = "0.20.0" +pin-project = "1.0.0" 
+prometheus-client = { workspace = true } -[target.'cfg(not(target_os = "unknown"))'.dependencies] -libp2p-gossipsub = { workspace = true, optional = true } +[dev-dependencies] +libp2p-identity = { workspace = true, features = ["rand"] } # Passing arguments to the docsrs builder in order to properly document cfg's. # More information: https://docs.rs/about/builds#cross-compiling [package.metadata.docs.rs] all-features = true -rustdoc-args = ["--cfg", "docsrs"] rustc-args = ["--cfg", "docsrs"] +rustdoc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true diff --git a/misc/metrics/src/bandwidth.rs b/misc/metrics/src/bandwidth.rs new file mode 100644 index 00000000000..2792e00612c --- /dev/null +++ b/misc/metrics/src/bandwidth.rs @@ -0,0 +1,312 @@ +use crate::protocol_stack; +use futures::{ + future::{MapOk, TryFutureExt}, + io::{IoSlice, IoSliceMut}, + prelude::*, + ready, +}; +use libp2p_core::{ + muxing::{StreamMuxer, StreamMuxerEvent}, + transport::{ListenerId, TransportError, TransportEvent}, + Multiaddr, +}; +use libp2p_identity::PeerId; +use prometheus_client::{ + encoding::{EncodeLabelSet, EncodeLabelValue}, + metrics::{counter::Counter, family::Family}, + registry::{Registry, Unit}, +}; +use std::{ + convert::TryFrom as _, + io, + pin::Pin, + task::{Context, Poll}, +}; + +#[derive(Debug, Clone)] +#[pin_project::pin_project] +pub struct Transport { + #[pin] + transport: T, + metrics: Family, +} + +impl Transport { + pub fn new(transport: T, registry: &mut Registry) -> Self { + let metrics = Family::::default(); + registry + .sub_registry_with_prefix("libp2p") + .register_with_unit( + "bandwidth", + "Bandwidth usage by direction and transport protocols", + Unit::Bytes, + metrics.clone(), + ); + + Transport { transport, metrics } + } +} + +#[derive(EncodeLabelSet, Hash, Clone, Eq, PartialEq, Debug)] +struct Labels { + protocols: String, + direction: Direction, +} + +#[derive(Clone, Hash, PartialEq, Eq, EncodeLabelValue, Debug)] +enum Direction { + Inbound, + 
Outbound, +} + +impl libp2p_core::Transport for Transport +where + T: libp2p_core::Transport, + M: StreamMuxer + Send + 'static, + M::Substream: Send + 'static, + M::Error: Send + Sync + 'static, +{ + type Output = (PeerId, Muxer); + type Error = T::Error; + type ListenerUpgrade = + MapOk (PeerId, Muxer) + Send>>; + type Dial = MapOk (PeerId, Muxer) + Send>>; + + fn listen_on( + &mut self, + id: ListenerId, + addr: Multiaddr, + ) -> Result<(), TransportError> { + self.transport.listen_on(id, addr) + } + + fn remove_listener(&mut self, id: ListenerId) -> bool { + self.transport.remove_listener(id) + } + + fn dial(&mut self, addr: Multiaddr) -> Result> { + let metrics = ConnectionMetrics::from_family_and_addr(&self.metrics, &addr); + Ok(self + .transport + .dial(addr.clone())? + .map_ok(Box::new(|(peer_id, stream_muxer)| { + (peer_id, Muxer::new(stream_muxer, metrics)) + }))) + } + + fn dial_as_listener( + &mut self, + addr: Multiaddr, + ) -> Result> { + let metrics = ConnectionMetrics::from_family_and_addr(&self.metrics, &addr); + Ok(self + .transport + .dial_as_listener(addr.clone())? 
+ .map_ok(Box::new(|(peer_id, stream_muxer)| { + (peer_id, Muxer::new(stream_muxer, metrics)) + }))) + } + + fn address_translation(&self, server: &Multiaddr, observed: &Multiaddr) -> Option { + self.transport.address_translation(server, observed) + } + + fn poll( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll> { + let this = self.project(); + match this.transport.poll(cx) { + Poll::Ready(TransportEvent::Incoming { + listener_id, + upgrade, + local_addr, + send_back_addr, + }) => { + let metrics = + ConnectionMetrics::from_family_and_addr(this.metrics, &send_back_addr); + Poll::Ready(TransportEvent::Incoming { + listener_id, + upgrade: upgrade.map_ok(Box::new(|(peer_id, stream_muxer)| { + (peer_id, Muxer::new(stream_muxer, metrics)) + })), + local_addr, + send_back_addr, + }) + } + Poll::Ready(other) => { + let mapped = other.map_upgrade(|_upgrade| unreachable!("case already matched")); + Poll::Ready(mapped) + } + Poll::Pending => Poll::Pending, + } + } +} + +#[derive(Clone, Debug)] +struct ConnectionMetrics { + outbound: Counter, + inbound: Counter, +} + +impl ConnectionMetrics { + fn from_family_and_addr(family: &Family, protocols: &Multiaddr) -> Self { + let protocols = protocol_stack::as_string(protocols); + + // Additional scope to make sure to drop the lock guard from `get_or_create`. + let outbound = { + let m = family.get_or_create(&Labels { + protocols: protocols.clone(), + direction: Direction::Outbound, + }); + m.clone() + }; + // Additional scope to make sure to drop the lock guard from `get_or_create`. + let inbound = { + let m = family.get_or_create(&Labels { + protocols, + direction: Direction::Inbound, + }); + m.clone() + }; + ConnectionMetrics { outbound, inbound } + } +} + +/// Wraps around a [`StreamMuxer`] and counts the number of bytes that go through all the opened +/// streams. 
+#[derive(Clone)] +#[pin_project::pin_project] +pub struct Muxer { + #[pin] + inner: SMInner, + metrics: ConnectionMetrics, +} + +impl Muxer { + /// Creates a new [`Muxer`] wrapping around the provided stream muxer. + fn new(inner: SMInner, metrics: ConnectionMetrics) -> Self { + Self { inner, metrics } + } +} + +impl StreamMuxer for Muxer +where + SMInner: StreamMuxer, +{ + type Substream = InstrumentedStream; + type Error = SMInner::Error; + + fn poll( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll> { + let this = self.project(); + this.inner.poll(cx) + } + + fn poll_inbound( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll> { + let this = self.project(); + let inner = ready!(this.inner.poll_inbound(cx)?); + let logged = InstrumentedStream { + inner, + metrics: this.metrics.clone(), + }; + Poll::Ready(Ok(logged)) + } + + fn poll_outbound( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll> { + let this = self.project(); + let inner = ready!(this.inner.poll_outbound(cx)?); + let logged = InstrumentedStream { + inner, + metrics: this.metrics.clone(), + }; + Poll::Ready(Ok(logged)) + } + + fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let this = self.project(); + this.inner.poll_close(cx) + } +} + +/// Wraps around an [`AsyncRead`] + [`AsyncWrite`] and logs the bandwidth that goes through it. 
+#[pin_project::pin_project] +pub struct InstrumentedStream { + #[pin] + inner: SMInner, + metrics: ConnectionMetrics, +} + +impl AsyncRead for InstrumentedStream { + fn poll_read( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &mut [u8], + ) -> Poll> { + let this = self.project(); + let num_bytes = ready!(this.inner.poll_read(cx, buf))?; + this.metrics + .inbound + .inc_by(u64::try_from(num_bytes).unwrap_or(u64::max_value())); + Poll::Ready(Ok(num_bytes)) + } + + fn poll_read_vectored( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + bufs: &mut [IoSliceMut<'_>], + ) -> Poll> { + let this = self.project(); + let num_bytes = ready!(this.inner.poll_read_vectored(cx, bufs))?; + this.metrics + .inbound + .inc_by(u64::try_from(num_bytes).unwrap_or(u64::max_value())); + Poll::Ready(Ok(num_bytes)) + } +} + +impl AsyncWrite for InstrumentedStream { + fn poll_write( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &[u8], + ) -> Poll> { + let this = self.project(); + let num_bytes = ready!(this.inner.poll_write(cx, buf))?; + this.metrics + .outbound + .inc_by(u64::try_from(num_bytes).unwrap_or(u64::max_value())); + Poll::Ready(Ok(num_bytes)) + } + + fn poll_write_vectored( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + bufs: &[IoSlice<'_>], + ) -> Poll> { + let this = self.project(); + let num_bytes = ready!(this.inner.poll_write_vectored(cx, bufs))?; + this.metrics + .outbound + .inc_by(u64::try_from(num_bytes).unwrap_or(u64::max_value())); + Poll::Ready(Ok(num_bytes)) + } + + fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let this = self.project(); + this.inner.poll_flush(cx) + } + + fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let this = self.project(); + this.inner.poll_close(cx) + } +} diff --git a/misc/metrics/src/dcutr.rs b/misc/metrics/src/dcutr.rs index 18ee8a14d1e..3e60dca2cab 100644 --- a/misc/metrics/src/dcutr.rs +++ b/misc/metrics/src/dcutr.rs @@ -49,8 +49,6 @@ struct EventLabels { 
#[derive(Debug, Clone, Hash, PartialEq, Eq, EncodeLabelValue)] enum EventType { - InitiateDirectConnectionUpgrade, - RemoteInitiatedDirectConnectionUpgrade, DirectConnectionUpgradeSucceeded, DirectConnectionUpgradeFailed, } @@ -58,20 +56,13 @@ enum EventType { impl From<&libp2p_dcutr::Event> for EventType { fn from(event: &libp2p_dcutr::Event) -> Self { match event { - libp2p_dcutr::Event::InitiatedDirectConnectionUpgrade { + libp2p_dcutr::Event { remote_peer_id: _, - local_relayed_addr: _, - } => EventType::InitiateDirectConnectionUpgrade, - libp2p_dcutr::Event::RemoteInitiatedDirectConnectionUpgrade { + result: Ok(_), + } => EventType::DirectConnectionUpgradeSucceeded, + libp2p_dcutr::Event { remote_peer_id: _, - remote_relayed_addr: _, - } => EventType::RemoteInitiatedDirectConnectionUpgrade, - libp2p_dcutr::Event::DirectConnectionUpgradeSucceeded { remote_peer_id: _ } => { - EventType::DirectConnectionUpgradeSucceeded - } - libp2p_dcutr::Event::DirectConnectionUpgradeFailed { - remote_peer_id: _, - error: _, + result: Err(_), } => EventType::DirectConnectionUpgradeFailed, } } diff --git a/misc/metrics/src/identify.rs b/misc/metrics/src/identify.rs index 81e2ace6279..b1d4e9f0c89 100644 --- a/misc/metrics/src/identify.rs +++ b/misc/metrics/src/identify.rs @@ -21,39 +21,46 @@ use crate::protocol_stack; use libp2p_identity::PeerId; use libp2p_swarm::StreamProtocol; -use prometheus_client::encoding::{EncodeLabelSet, EncodeMetric, MetricEncoder}; +use prometheus_client::collector::Collector; +use prometheus_client::encoding::{DescriptorEncoder, EncodeLabelSet, EncodeMetric}; use prometheus_client::metrics::counter::Counter; -use prometheus_client::metrics::family::Family; -use prometheus_client::metrics::histogram::{exponential_buckets, Histogram}; +use prometheus_client::metrics::gauge::ConstGauge; use prometheus_client::metrics::MetricType; use prometheus_client::registry::Registry; use std::collections::HashMap; -use std::iter; use std::sync::{Arc, Mutex}; +const 
ALLOWED_PROTOCOLS: &[StreamProtocol] = &[ + #[cfg(feature = "dcutr")] + libp2p_dcutr::PROTOCOL_NAME, + // #[cfg(feature = "gossipsub")] + // TODO: Add Gossipsub protocol name + libp2p_identify::PROTOCOL_NAME, + libp2p_identify::PUSH_PROTOCOL_NAME, + #[cfg(feature = "kad")] + libp2p_kad::PROTOCOL_NAME, + #[cfg(feature = "ping")] + libp2p_ping::PROTOCOL_NAME, + #[cfg(feature = "relay")] + libp2p_relay::STOP_PROTOCOL_NAME, + #[cfg(feature = "relay")] + libp2p_relay::HOP_PROTOCOL_NAME, +]; + pub(crate) struct Metrics { - protocols: Protocols, + peers: Peers, error: Counter, pushed: Counter, received: Counter, - received_info_listen_addrs: Histogram, - received_info_protocols: Histogram, sent: Counter, - listen_addresses: Family, } impl Metrics { pub(crate) fn new(registry: &mut Registry) -> Self { let sub_registry = registry.sub_registry_with_prefix("identify"); - let protocols = Protocols::default(); - sub_registry.register( - "protocols", - "Number of connected nodes supporting a specific protocol, with \ - \"unrecognized\" for each peer supporting one or more unrecognized \ - protocols", - protocols.clone(), - ); + let peers = Peers::default(); + sub_registry.register_collector(Box::new(peers.clone())); let error = Counter::default(); sub_registry.register( @@ -78,24 +85,6 @@ impl Metrics { received.clone(), ); - let received_info_listen_addrs = - Histogram::new(iter::once(0.0).chain(exponential_buckets(1.0, 2.0, 9))); - sub_registry.register( - "received_info_listen_addrs", - "Number of listen addresses for remote peer received in \ - identification information", - received_info_listen_addrs.clone(), - ); - - let received_info_protocols = - Histogram::new(iter::once(0.0).chain(exponential_buckets(1.0, 2.0, 9))); - sub_registry.register( - "received_info_protocols", - "Number of protocols supported by the remote peer received in \ - identification information", - received_info_protocols.clone(), - ); - let sent = Counter::default(); sub_registry.register( "sent", @@ 
-104,22 +93,12 @@ impl Metrics { sent.clone(), ); - let listen_addresses = Family::default(); - sub_registry.register( - "listen_addresses", - "Number of listen addresses for remote peer per protocol stack", - listen_addresses.clone(), - ); - Self { - protocols, + peers, error, pushed, received, - received_info_listen_addrs, - received_info_protocols, sent, - listen_addresses, } } } @@ -134,58 +113,8 @@ impl super::Recorder for Metrics { self.pushed.inc(); } libp2p_identify::Event::Received { peer_id, info, .. } => { - { - let mut protocols = info - .protocols - .iter() - .filter(|p| { - let allowed_protocols: &[StreamProtocol] = &[ - #[cfg(feature = "dcutr")] - libp2p_dcutr::PROTOCOL_NAME, - // #[cfg(feature = "gossipsub")] - // #[cfg(not(target_os = "unknown"))] - // TODO: Add Gossipsub protocol name - libp2p_identify::PROTOCOL_NAME, - libp2p_identify::PUSH_PROTOCOL_NAME, - #[cfg(feature = "kad")] - libp2p_kad::protocol::DEFAULT_PROTO_NAME, - #[cfg(feature = "ping")] - libp2p_ping::PROTOCOL_NAME, - #[cfg(feature = "relay")] - libp2p_relay::STOP_PROTOCOL_NAME, - #[cfg(feature = "relay")] - libp2p_relay::HOP_PROTOCOL_NAME, - ]; - - allowed_protocols.contains(p) - }) - .map(|p| p.to_string()) - .collect::>(); - - // Signal via an additional label value that one or more - // protocols of the remote peer have not been recognized. - if protocols.len() < info.protocols.len() { - protocols.push("unrecognized".to_string()); - } - - protocols.sort_unstable(); - protocols.dedup(); - - self.protocols.add(*peer_id, protocols); - } - self.received.inc(); - self.received_info_protocols - .observe(info.protocols.len() as f64); - self.received_info_listen_addrs - .observe(info.listen_addrs.len() as f64); - for listen_addr in &info.listen_addrs { - self.listen_addresses - .get_or_create(&AddressLabels { - protocols: protocol_stack::as_string(listen_addr), - }) - .inc(); - } + self.peers.record(*peer_id, info.clone()); } libp2p_identify::Event::Sent { .. 
} => { self.sent.inc(); @@ -194,8 +123,8 @@ impl super::Recorder for Metrics { } } -impl super::Recorder> for Metrics { - fn record(&self, event: &libp2p_swarm::SwarmEvent) { +impl super::Recorder> for Metrics { + fn record(&self, event: &libp2p_swarm::SwarmEvent) { if let libp2p_swarm::SwarmEvent::ConnectionClosed { peer_id, num_established, @@ -203,7 +132,7 @@ impl super::Recorder>>>, -} +#[derive(Default, Debug, Clone)] +struct Peers(Arc>>); -impl Protocols { - fn add(&self, peer: PeerId, protocols: Vec) { - self.peers - .lock() - .expect("Lock not to be poisoned") - .insert(peer, protocols); +impl Peers { + fn record(&self, peer_id: PeerId, info: libp2p_identify::Info) { + self.0.lock().unwrap().insert(peer_id, info); } - fn remove(&self, peer: PeerId) { - self.peers - .lock() - .expect("Lock not to be poisoned") - .remove(&peer); + fn remove(&self, peer_id: PeerId) { + self.0.lock().unwrap().remove(&peer_id); } } -impl EncodeMetric for Protocols { - fn encode(&self, mut encoder: MetricEncoder) -> Result<(), std::fmt::Error> { - let count_by_protocol = self - .peers - .lock() - .expect("Lock not to be poisoned") - .iter() - .fold( - HashMap::::default(), - |mut acc, (_, protocols)| { - for protocol in protocols { - let count = acc.entry(protocol.to_string()).or_default(); - *count += 1; - } - acc - }, - ); +impl Collector for Peers { + fn encode(&self, mut encoder: DescriptorEncoder) -> Result<(), std::fmt::Error> { + let mut count_by_protocols: HashMap = Default::default(); + let mut count_by_listen_addresses: HashMap = Default::default(); + let mut count_by_observed_addresses: HashMap = Default::default(); + + for (_, peer_info) in self.0.lock().unwrap().iter() { + { + let mut protocols: Vec<_> = peer_info + .protocols + .iter() + .map(|p| { + if ALLOWED_PROTOCOLS.contains(p) { + p.to_string() + } else { + "unrecognized".to_string() + } + }) + .collect(); + protocols.sort(); + protocols.dedup(); + + for protocol in protocols.into_iter() { + let count = 
count_by_protocols.entry(protocol).or_default(); + *count += 1; + } + } + + { + let mut addrs: Vec<_> = peer_info + .listen_addrs + .iter() + .map(protocol_stack::as_string) + .collect(); + addrs.sort(); + addrs.dedup(); + + for addr in addrs { + let count = count_by_listen_addresses.entry(addr).or_default(); + *count += 1; + } + } - for (protocol, count) in count_by_protocol { - encoder - .encode_family(&[("protocol", protocol)])? - .encode_gauge(&count)?; + { + let count = count_by_observed_addresses + .entry(protocol_stack::as_string(&peer_info.observed_addr)) + .or_default(); + *count += 1; + } } - Ok(()) - } + { + let mut family_encoder = encoder.encode_descriptor( + "remote_protocols", + "Number of connected nodes supporting a specific protocol, with \"unrecognized\" for each peer supporting one or more unrecognized protocols", + None, + MetricType::Gauge, + )?; + for (protocol, count) in count_by_protocols.into_iter() { + let labels = [("protocol", protocol)]; + let metric_encoder = family_encoder.encode_family(&labels)?; + let metric = ConstGauge::new(count); + metric.encode(metric_encoder)?; + } + } + + { + let mut family_encoder = encoder.encode_descriptor( + "remote_listen_addresses", + "Number of connected nodes advertising a specific listen address", + None, + MetricType::Gauge, + )?; + for (protocol, count) in count_by_listen_addresses.into_iter() { + let labels = [("listen_address", protocol)]; + let metric_encoder = family_encoder.encode_family(&labels)?; + ConstGauge::new(count).encode(metric_encoder)?; + } + } - fn metric_type(&self) -> MetricType { - MetricType::Gauge + { + let mut family_encoder = encoder.encode_descriptor( + "local_observed_addresses", + "Number of connected nodes observing the local node at a specific address", + None, + MetricType::Gauge, + )?; + for (protocol, count) in count_by_observed_addresses.into_iter() { + let labels = [("observed_address", protocol)]; + let metric_encoder = family_encoder.encode_family(&labels)?; + 
ConstGauge::new(count).encode(metric_encoder)?; + } + } + + Ok(()) } } diff --git a/misc/metrics/src/kad.rs b/misc/metrics/src/kad.rs index bc83146f937..bd5a6526737 100644 --- a/misc/metrics/src/kad.rs +++ b/misc/metrics/src/kad.rs @@ -51,42 +51,42 @@ impl Metrics { let query_result_get_record_ok = Counter::default(); sub_registry.register( "query_result_get_record_ok", - "Number of records returned by a successful Kademlia get record query.", + "Number of records returned by a successful Kademlia get record query", query_result_get_record_ok.clone(), ); let query_result_get_record_error = Family::default(); sub_registry.register( "query_result_get_record_error", - "Number of failed Kademlia get record queries.", + "Number of failed Kademlia get record queries", query_result_get_record_error.clone(), ); let query_result_get_closest_peers_ok = Histogram::new(exponential_buckets(1.0, 2.0, 10)); sub_registry.register( "query_result_get_closest_peers_ok", - "Number of closest peers returned by a successful Kademlia get closest peers query.", + "Number of closest peers returned by a successful Kademlia get closest peers query", query_result_get_closest_peers_ok.clone(), ); let query_result_get_closest_peers_error = Family::default(); sub_registry.register( "query_result_get_closest_peers_error", - "Number of failed Kademlia get closest peers queries.", + "Number of failed Kademlia get closest peers queries", query_result_get_closest_peers_error.clone(), ); let query_result_get_providers_ok = Histogram::new(exponential_buckets(1.0, 2.0, 10)); sub_registry.register( "query_result_get_providers_ok", - "Number of providers returned by a successful Kademlia get providers query.", + "Number of providers returned by a successful Kademlia get providers query", query_result_get_providers_ok.clone(), ); let query_result_get_providers_error = Family::default(); sub_registry.register( "query_result_get_providers_error", - "Number of failed Kademlia get providers queries.", + 
"Number of failed Kademlia get providers queries", query_result_get_providers_error.clone(), ); @@ -94,7 +94,7 @@ impl Metrics { Family::new_with_constructor(|| Histogram::new(exponential_buckets(1.0, 2.0, 10))); sub_registry.register( "query_result_num_requests", - "Number of requests started for a Kademlia query.", + "Number of requests started for a Kademlia query", query_result_num_requests.clone(), ); @@ -102,7 +102,7 @@ impl Metrics { Family::new_with_constructor(|| Histogram::new(exponential_buckets(1.0, 2.0, 10))); sub_registry.register( "query_result_num_success", - "Number of successful requests of a Kademlia query.", + "Number of successful requests of a Kademlia query", query_result_num_success.clone(), ); @@ -110,7 +110,7 @@ impl Metrics { Family::new_with_constructor(|| Histogram::new(exponential_buckets(1.0, 2.0, 10))); sub_registry.register( "query_result_num_failure", - "Number of failed requests of a Kademlia query.", + "Number of failed requests of a Kademlia query", query_result_num_failure.clone(), ); @@ -118,7 +118,7 @@ impl Metrics { Family::new_with_constructor(|| Histogram::new(exponential_buckets(0.1, 2.0, 10))); sub_registry.register_with_unit( "query_result_duration", - "Duration of a Kademlia query.", + "Duration of a Kademlia query", Unit::Seconds, query_result_duration.clone(), ); @@ -159,10 +159,10 @@ impl Metrics { } } -impl super::Recorder for Metrics { - fn record(&self, event: &libp2p_kad::KademliaEvent) { +impl super::Recorder for Metrics { + fn record(&self, event: &libp2p_kad::Event) { match event { - libp2p_kad::KademliaEvent::OutboundQueryProgressed { result, stats, .. } => { + libp2p_kad::Event::OutboundQueryProgressed { result, stats, .. 
} => { self.query_result_num_requests .get_or_create(&result.into()) .observe(stats.num_requests().into()); @@ -217,7 +217,7 @@ impl super::Recorder for Metrics { _ => {} } } - libp2p_kad::KademliaEvent::RoutingUpdated { + libp2p_kad::Event::RoutingUpdated { is_new_peer, old_peer, bucket_range: (low, _high), @@ -250,7 +250,7 @@ impl super::Recorder for Metrics { } } - libp2p_kad::KademliaEvent::InboundRequest { request } => { + libp2p_kad::Event::InboundRequest { request } => { self.inbound_requests.get_or_create(&request.into()).inc(); } _ => {} diff --git a/misc/metrics/src/lib.rs b/misc/metrics/src/lib.rs index ec166eebecc..74fd15e2181 100644 --- a/misc/metrics/src/lib.rs +++ b/misc/metrics/src/lib.rs @@ -27,10 +27,10 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +mod bandwidth; #[cfg(feature = "dcutr")] mod dcutr; #[cfg(feature = "gossipsub")] -#[cfg(not(target_os = "unknown"))] mod gossipsub; #[cfg(feature = "identify")] mod identify; @@ -43,14 +43,14 @@ mod protocol_stack; mod relay; mod swarm; -use prometheus_client::registry::Registry; +pub use bandwidth::Transport as BandwidthTransport; +pub use prometheus_client::registry::Registry; /// Set of Swarm and protocol metrics derived from emitted events. 
pub struct Metrics { #[cfg(feature = "dcutr")] dcutr: dcutr::Metrics, #[cfg(feature = "gossipsub")] - #[cfg(not(target_os = "unknown"))] gossipsub: gossipsub::Metrics, #[cfg(feature = "identify")] identify: identify::Metrics, @@ -78,7 +78,6 @@ impl Metrics { #[cfg(feature = "dcutr")] dcutr: dcutr::Metrics::new(sub_registry), #[cfg(feature = "gossipsub")] - #[cfg(not(target_os = "unknown"))] gossipsub: gossipsub::Metrics::new(sub_registry), #[cfg(feature = "identify")] identify: identify::Metrics::new(sub_registry), @@ -107,7 +106,6 @@ impl Recorder for Metrics { } #[cfg(feature = "gossipsub")] -#[cfg(not(target_os = "unknown"))] impl Recorder for Metrics { fn record(&self, event: &libp2p_gossipsub::Event) { self.gossipsub.record(event) @@ -122,8 +120,8 @@ impl Recorder for Metrics { } #[cfg(feature = "kad")] -impl Recorder for Metrics { - fn record(&self, event: &libp2p_kad::KademliaEvent) { +impl Recorder for Metrics { + fn record(&self, event: &libp2p_kad::Event) { self.kad.record(event) } } @@ -142,8 +140,8 @@ impl Recorder for Metrics { } } -impl Recorder> for Metrics { - fn record(&self, event: &libp2p_swarm::SwarmEvent) { +impl Recorder> for Metrics { + fn record(&self, event: &libp2p_swarm::SwarmEvent) { self.swarm.record(event); #[cfg(feature = "identify")] diff --git a/misc/metrics/src/ping.rs b/misc/metrics/src/ping.rs index 195cb302675..afdd05134a6 100644 --- a/misc/metrics/src/ping.rs +++ b/misc/metrics/src/ping.rs @@ -55,7 +55,6 @@ enum Failure { pub(crate) struct Metrics { rtt: Histogram, failure: Family, - pong_received: Counter, } impl Metrics { @@ -77,28 +76,14 @@ impl Metrics { failure.clone(), ); - let pong_received = Counter::default(); - sub_registry.register( - "pong_received", - "Number of 'pong's received", - pong_received.clone(), - ); - - Self { - rtt, - failure, - pong_received, - } + Self { rtt, failure } } } impl super::Recorder for Metrics { fn record(&self, event: &libp2p_ping::Event) { match &event.result { - 
Ok(libp2p_ping::Success::Pong) => { - self.pong_received.inc(); - } - Ok(libp2p_ping::Success::Ping { rtt }) => { + Ok(rtt) => { self.rtt.observe(rtt.as_secs_f64()); } Err(failure) => { diff --git a/misc/metrics/src/relay.rs b/misc/metrics/src/relay.rs index 4b8f63588fe..607daf3f1e1 100644 --- a/misc/metrics/src/relay.rs +++ b/misc/metrics/src/relay.rs @@ -54,7 +54,6 @@ enum EventType { ReservationReqDenied, ReservationReqDenyFailed, ReservationTimedOut, - CircuitReqReceiveFailed, CircuitReqDenied, CircuitReqDenyFailed, CircuitReqOutboundConnectFailed, @@ -67,23 +66,25 @@ impl From<&libp2p_relay::Event> for EventType { fn from(event: &libp2p_relay::Event) -> Self { match event { libp2p_relay::Event::ReservationReqAccepted { .. } => EventType::ReservationReqAccepted, + #[allow(deprecated)] libp2p_relay::Event::ReservationReqAcceptFailed { .. } => { EventType::ReservationReqAcceptFailed } libp2p_relay::Event::ReservationReqDenied { .. } => EventType::ReservationReqDenied, + #[allow(deprecated)] libp2p_relay::Event::ReservationReqDenyFailed { .. } => { EventType::ReservationReqDenyFailed } libp2p_relay::Event::ReservationTimedOut { .. } => EventType::ReservationTimedOut, - libp2p_relay::Event::CircuitReqReceiveFailed { .. } => { - EventType::CircuitReqReceiveFailed - } libp2p_relay::Event::CircuitReqDenied { .. } => EventType::CircuitReqDenied, + #[allow(deprecated)] libp2p_relay::Event::CircuitReqOutboundConnectFailed { .. } => { EventType::CircuitReqOutboundConnectFailed } + #[allow(deprecated)] libp2p_relay::Event::CircuitReqDenyFailed { .. } => EventType::CircuitReqDenyFailed, libp2p_relay::Event::CircuitReqAccepted { .. } => EventType::CircuitReqAccepted, + #[allow(deprecated)] libp2p_relay::Event::CircuitReqAcceptFailed { .. } => EventType::CircuitReqAcceptFailed, libp2p_relay::Event::CircuitClosed { .. 
} => EventType::CircuitClosed, } diff --git a/misc/metrics/src/swarm.rs b/misc/metrics/src/swarm.rs index d04fd028a00..ad83401f316 100644 --- a/misc/metrics/src/swarm.rs +++ b/misc/metrics/src/swarm.rs @@ -18,30 +18,40 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. +use std::collections::HashMap; +use std::sync::{Arc, Mutex}; + use crate::protocol_stack; +use instant::Instant; +use libp2p_swarm::{ConnectionId, DialError, SwarmEvent}; use prometheus_client::encoding::{EncodeLabelSet, EncodeLabelValue}; use prometheus_client::metrics::counter::Counter; use prometheus_client::metrics::family::Family; use prometheus_client::metrics::histogram::{exponential_buckets, Histogram}; -use prometheus_client::registry::Registry; +use prometheus_client::registry::{Registry, Unit}; pub(crate) struct Metrics { connections_incoming: Family, connections_incoming_error: Family, - connections_established: Family, - connections_establishment_duration: Family, - connections_closed: Family, + connections_established: Family, + connections_establishment_duration: Family, + connections_duration: Family, new_listen_addr: Family, expired_listen_addr: Family, + external_addr_candidates: Family, + external_addr_confirmed: Family, + external_addr_expired: Family, + listener_closed: Family, listener_error: Counter, dial_attempt: Counter, outgoing_connection_error: Family, - connected_to_banned_peer: Family, + + connections: Arc>>, } impl Metrics { @@ -76,6 +86,27 @@ impl Metrics { expired_listen_addr.clone(), ); + let external_addr_candidates = Family::default(); + sub_registry.register( + "external_addr_candidates", + "Number of new external address candidates", + external_addr_candidates.clone(), + ); + + let external_addr_confirmed = Family::default(); + sub_registry.register( + "external_addr_confirmed", + "Number of confirmed external addresses", + external_addr_confirmed.clone(), + ); + + let external_addr_expired = Family::default(); 
+ sub_registry.register( + "external_addr_expired", + "Number of expired external addresses", + external_addr_expired.clone(), + ); + let listener_closed = Family::default(); sub_registry.register( "listener_closed", @@ -104,13 +135,6 @@ impl Metrics { outgoing_connection_error.clone(), ); - let connected_to_banned_peer = Family::default(); - sub_registry.register( - "connected_to_banned_peer", - "Number of connection attempts to banned peer", - connected_to_banned_peer.clone(), - ); - let connections_established = Family::default(); sub_registry.register( "connections_established", @@ -118,49 +142,60 @@ impl Metrics { connections_established.clone(), ); - let connections_closed = Family::default(); - sub_registry.register( - "connections_closed", - "Number of connections closed", - connections_closed.clone(), - ); - - let connections_establishment_duration = Family::new_with_constructor( - create_connection_establishment_duration_histogram as fn() -> Histogram, - ); + let connections_establishment_duration = { + let constructor: fn() -> Histogram = + || Histogram::new(exponential_buckets(0.01, 1.5, 20)); + Family::new_with_constructor(constructor) + }; sub_registry.register( "connections_establishment_duration", "Time it took (locally) to establish connections", connections_establishment_duration.clone(), ); + let connections_duration = { + let constructor: fn() -> Histogram = + || Histogram::new(exponential_buckets(0.01, 3.0, 20)); + Family::new_with_constructor(constructor) + }; + sub_registry.register_with_unit( + "connections_duration", + "Time a connection was alive", + Unit::Seconds, + connections_duration.clone(), + ); + Self { connections_incoming, connections_incoming_error, connections_established, - connections_closed, new_listen_addr, expired_listen_addr, + external_addr_candidates, + external_addr_confirmed, + external_addr_expired, listener_closed, listener_error, dial_attempt, outgoing_connection_error, - connected_to_banned_peer, 
connections_establishment_duration, + connections_duration, + connections: Default::default(), } } } -impl super::Recorder> for Metrics { - fn record(&self, event: &libp2p_swarm::SwarmEvent) { +impl super::Recorder> for Metrics { + fn record(&self, event: &SwarmEvent) { match event { - libp2p_swarm::SwarmEvent::Behaviour(_) => {} - libp2p_swarm::SwarmEvent::ConnectionEstablished { + SwarmEvent::Behaviour(_) => {} + SwarmEvent::ConnectionEstablished { endpoint, established_in: time_taken, + connection_id, .. } => { - let labels = ConnectionEstablishedLabels { + let labels = ConnectionLabels { role: endpoint.into(), protocols: protocol_stack::as_string(endpoint.get_remote_address()), }; @@ -168,23 +203,42 @@ impl super::Recorder { - self.connections_closed - .get_or_create(&ConnectionClosedLabels { + SwarmEvent::ConnectionClosed { + endpoint, + connection_id, + cause, + .. + } => { + let labels = ConnectionClosedLabels { + connection: ConnectionLabels { role: endpoint.into(), protocols: protocol_stack::as_string(endpoint.get_remote_address()), - }) - .inc(); + }, + cause: cause.as_ref().map(Into::into), + }; + self.connections_duration.get_or_create(&labels).observe( + self.connections + .lock() + .expect("lock not to be poisoned") + .remove(connection_id) + .expect("closed connection to previously be established") + .elapsed() + .as_secs_f64(), + ); } - libp2p_swarm::SwarmEvent::IncomingConnection { send_back_addr, .. } => { + SwarmEvent::IncomingConnection { send_back_addr, .. } => { self.connections_incoming .get_or_create(&AddressLabels { protocols: protocol_stack::as_string(send_back_addr), }) .inc(); } - libp2p_swarm::SwarmEvent::IncomingConnectionError { + SwarmEvent::IncomingConnectionError { error, send_back_addr, .. @@ -196,7 +250,7 @@ impl super::Recorder { + SwarmEvent::OutgoingConnectionError { error, peer_id, .. 
} => { let peer = match peer_id { Some(_) => PeerStatus::Known, None => PeerStatus::Unknown, @@ -209,7 +263,7 @@ impl super::Recorder { + DialError::Transport(errors) => { for (_multiaddr, error) in errors { match error { libp2p_core::transport::TransportError::MultiaddrNotSupported( @@ -223,56 +277,31 @@ impl super::Recorder record(OutgoingConnectionError::Banned), - #[allow(deprecated)] - libp2p_swarm::DialError::ConnectionLimit(_) => { - record(OutgoingConnectionError::ConnectionLimit) - } - libp2p_swarm::DialError::LocalPeerId { .. } => { - record(OutgoingConnectionError::LocalPeerId) - } - libp2p_swarm::DialError::NoAddresses => { - record(OutgoingConnectionError::NoAddresses) - } - libp2p_swarm::DialError::DialPeerConditionFalse(_) => { + DialError::LocalPeerId { .. } => record(OutgoingConnectionError::LocalPeerId), + DialError::NoAddresses => record(OutgoingConnectionError::NoAddresses), + DialError::DialPeerConditionFalse(_) => { record(OutgoingConnectionError::DialPeerConditionFalse) } - libp2p_swarm::DialError::Aborted => record(OutgoingConnectionError::Aborted), - libp2p_swarm::DialError::InvalidPeerId { .. } => { - record(OutgoingConnectionError::InvalidPeerId) - } - libp2p_swarm::DialError::WrongPeerId { .. } => { - record(OutgoingConnectionError::WrongPeerId) - } - libp2p_swarm::DialError::Denied { .. } => { - record(OutgoingConnectionError::Denied) - } + DialError::Aborted => record(OutgoingConnectionError::Aborted), + DialError::WrongPeerId { .. } => record(OutgoingConnectionError::WrongPeerId), + DialError::Denied { .. } => record(OutgoingConnectionError::Denied), }; } - #[allow(deprecated)] - libp2p_swarm::SwarmEvent::BannedPeer { endpoint, .. } => { - self.connected_to_banned_peer - .get_or_create(&AddressLabels { - protocols: protocol_stack::as_string(endpoint.get_remote_address()), - }) - .inc(); - } - libp2p_swarm::SwarmEvent::NewListenAddr { address, .. } => { + SwarmEvent::NewListenAddr { address, .. 
} => { self.new_listen_addr .get_or_create(&AddressLabels { protocols: protocol_stack::as_string(address), }) .inc(); } - libp2p_swarm::SwarmEvent::ExpiredListenAddr { address, .. } => { + SwarmEvent::ExpiredListenAddr { address, .. } => { self.expired_listen_addr .get_or_create(&AddressLabels { protocols: protocol_stack::as_string(address), }) .inc(); } - libp2p_swarm::SwarmEvent::ListenerClosed { addresses, .. } => { + SwarmEvent::ListenerClosed { addresses, .. } => { for address in addresses { self.listener_closed .get_or_create(&AddressLabels { @@ -281,28 +310,64 @@ impl super::Recorder { + SwarmEvent::ListenerError { .. } => { self.listener_error.inc(); } - libp2p_swarm::SwarmEvent::Dialing(_) => { + SwarmEvent::Dialing { .. } => { self.dial_attempt.inc(); } + SwarmEvent::NewExternalAddrCandidate { address } => { + self.external_addr_candidates + .get_or_create(&AddressLabels { + protocols: protocol_stack::as_string(address), + }) + .inc(); + } + SwarmEvent::ExternalAddrConfirmed { address } => { + self.external_addr_confirmed + .get_or_create(&AddressLabels { + protocols: protocol_stack::as_string(address), + }) + .inc(); + } + SwarmEvent::ExternalAddrExpired { address } => { + self.external_addr_expired + .get_or_create(&AddressLabels { + protocols: protocol_stack::as_string(address), + }) + .inc(); + } + _ => {} } } } #[derive(EncodeLabelSet, Hash, Clone, Eq, PartialEq, Debug)] -struct ConnectionEstablishedLabels { +struct ConnectionLabels { role: Role, protocols: String, } -type ConnectionEstablishmentDurationLabels = ConnectionEstablishedLabels; - #[derive(EncodeLabelSet, Hash, Clone, Eq, PartialEq, Debug)] struct ConnectionClosedLabels { - role: Role, - protocols: String, + cause: Option, + #[prometheus(flatten)] + connection: ConnectionLabels, +} + +#[derive(EncodeLabelValue, Hash, Clone, Eq, PartialEq, Debug)] +enum ConnectionError { + Io, + KeepAliveTimeout, +} + +impl From<&libp2p_swarm::ConnectionError> for ConnectionError { + fn from(value: 
&libp2p_swarm::ConnectionError) -> Self { + match value { + libp2p_swarm::ConnectionError::IO(_) => ConnectionError::Io, + libp2p_swarm::ConnectionError::KeepAliveTimeout => ConnectionError::KeepAliveTimeout, + } + } } #[derive(EncodeLabelSet, Hash, Clone, Eq, PartialEq, Debug)] @@ -339,13 +404,10 @@ enum PeerStatus { #[derive(EncodeLabelValue, Hash, Clone, Eq, PartialEq, Debug)] enum OutgoingConnectionError { - Banned, - ConnectionLimit, LocalPeerId, NoAddresses, DialPeerConditionFalse, Aborted, - InvalidPeerId, WrongPeerId, TransportMultiaddrNotSupported, TransportOther, @@ -365,7 +427,6 @@ enum IncomingConnectionError { TransportErrorMultiaddrNotSupported, TransportErrorOther, Aborted, - ConnectionLimit, Denied, } @@ -373,10 +434,6 @@ impl From<&libp2p_swarm::ListenError> for IncomingConnectionError { fn from(error: &libp2p_swarm::ListenError) -> Self { match error { libp2p_swarm::ListenError::WrongPeerId { .. } => IncomingConnectionError::WrongPeerId, - #[allow(deprecated)] - libp2p_swarm::ListenError::ConnectionLimit(_) => { - IncomingConnectionError::ConnectionLimit - } libp2p_swarm::ListenError::LocalPeerId { .. } => IncomingConnectionError::LocalPeerId, libp2p_swarm::ListenError::Transport( libp2p_core::transport::TransportError::MultiaddrNotSupported(_), @@ -389,7 +446,3 @@ impl From<&libp2p_swarm::ListenError> for IncomingConnectionError { } } } - -fn create_connection_establishment_duration_histogram() -> Histogram { - Histogram::new(exponential_buckets(0.01, 1.5, 20)) -} diff --git a/misc/multiaddr/README.md b/misc/multiaddr/README.md deleted file mode 100644 index e745a6f601b..00000000000 --- a/misc/multiaddr/README.md +++ /dev/null @@ -1 +0,0 @@ -Moved to https://github.com/multiformats/rust-multiaddr. 
\ No newline at end of file diff --git a/misc/multistream-select/CHANGELOG.md b/misc/multistream-select/CHANGELOG.md index b893c5d19e3..b34eff7b06f 100644 --- a/misc/multistream-select/CHANGELOG.md +++ b/misc/multistream-select/CHANGELOG.md @@ -1,8 +1,13 @@ -## 0.13.0 - unreleased +## 0.13.0 + +- Don't wait for negotiation on `::poll_close`. + This can save one round-trip for protocols that use stream closing as an operation in ones protocol, e.g. using stream closing to signal the end of a request. + See [PR 4019] for details. - Raise MSRV to 1.65. See [PR 3715]. +[PR 4019]: https://github.com/libp2p/rust-libp2p/pull/4019 [PR 3715]: https://github.com/libp2p/rust-libp2p/pull/3715 ## 0.12.1 diff --git a/misc/multistream-select/Cargo.toml b/misc/multistream-select/Cargo.toml index b8ebd0cab68..34b5b6a636c 100644 --- a/misc/multistream-select/Cargo.toml +++ b/misc/multistream-select/Cargo.toml @@ -13,26 +13,25 @@ categories = ["network-programming", "asynchronous"] [dependencies] bytes = "1" futures = "0.3" -log = "0.4" -pin-project = "1.0.0" -smallvec = "1.6.1" -unsigned-varint = "0.7" +tracing = "0.1.37" +pin-project = "1.1.3" +smallvec = "1.11.2" +unsigned-varint = { workspace = true } [dev-dependencies] -async-std = "1.6.2" -env_logger = "0.10" -libp2p-core = { workspace = true } -libp2p-mplex = { workspace = true } -libp2p-plaintext = { workspace = true } -libp2p-swarm = { workspace = true, features = ["async-std"] } -libp2p-identity = { workspace = true, features = ["ed25519"] } +async-std = { version = "1.6.2", features = ["attributes"] } +futures_ringbuf = "0.4.0" quickcheck = { workspace = true } rand = "0.8" rw-stream-sink = { workspace = true } +tracing-subscriber = { version = "0.3", features = ["env-filter"] } -# Passing arguments to the docsrs builder in order to properly document cfg's. +# Passing arguments to the docsrs builder in order to properly document cfg's. 
# More information: https://docs.rs/about/builds#cross-compiling [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs"] rustc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true diff --git a/misc/multistream-select/src/dialer_select.rs b/misc/multistream-select/src/dialer_select.rs index af9f79d876a..83bb4909041 100644 --- a/misc/multistream-select/src/dialer_select.rs +++ b/misc/multistream-select/src/dialer_select.rs @@ -131,7 +131,7 @@ where if let Err(err) = Pin::new(&mut io).start_send(Message::Protocol(p.clone())) { return Poll::Ready(Err(From::from(err))); } - log::debug!("Dialer: Proposed protocol: {}", p); + tracing::debug!(protocol=%p, "Dialer: Proposed protocol"); if this.protocols.peek().is_some() { *this.state = State::FlushProtocol { io, protocol } @@ -143,7 +143,7 @@ where // the dialer supports for this negotiation. Notably, // the dialer expects a regular `V1` response. Version::V1Lazy => { - log::debug!("Dialer: Expecting proposed protocol: {}", p); + tracing::debug!(protocol=%p, "Dialer: Expecting proposed protocol"); let hl = HeaderLine::from(Version::V1Lazy); let io = Negotiated::expecting(io.into_reader(), p, Some(hl)); return Poll::Ready(Ok((protocol, io))); @@ -180,14 +180,14 @@ where *this.state = State::AwaitProtocol { io, protocol }; } Message::Protocol(ref p) if p.as_ref() == protocol.as_ref() => { - log::debug!("Dialer: Received confirmation for protocol: {}", p); + tracing::debug!(protocol=%p, "Dialer: Received confirmation for protocol"); let io = Negotiated::completed(io.into_inner()); return Poll::Ready(Ok((protocol, io))); } Message::NotAvailable => { - log::debug!( - "Dialer: Received rejection of protocol: {}", - protocol.as_ref() + tracing::debug!( + protocol=%protocol.as_ref(), + "Dialer: Received rejection of protocol" ); let protocol = this.protocols.next().ok_or(NegotiationError::Failed)?; *this.state = State::SendProtocol { io, protocol } @@ -201,3 +201,204 @@ where } } } + +#[cfg(test)] +mod 
tests { + use super::*; + use crate::listener_select_proto; + use async_std::future::timeout; + use async_std::net::{TcpListener, TcpStream}; + use quickcheck::{Arbitrary, Gen, GenRange}; + use std::time::Duration; + use tracing::metadata::LevelFilter; + use tracing_subscriber::EnvFilter; + + #[test] + fn select_proto_basic() { + async fn run(version: Version) { + let (client_connection, server_connection) = futures_ringbuf::Endpoint::pair(100, 100); + + let server = async_std::task::spawn(async move { + let protos = vec!["/proto1", "/proto2"]; + let (proto, mut io) = listener_select_proto(server_connection, protos) + .await + .unwrap(); + assert_eq!(proto, "/proto2"); + + let mut out = vec![0; 32]; + let n = io.read(&mut out).await.unwrap(); + out.truncate(n); + assert_eq!(out, b"ping"); + + io.write_all(b"pong").await.unwrap(); + io.flush().await.unwrap(); + }); + + let client = async_std::task::spawn(async move { + let protos = vec!["/proto3", "/proto2"]; + let (proto, mut io) = dialer_select_proto(client_connection, protos, version) + .await + .unwrap(); + assert_eq!(proto, "/proto2"); + + io.write_all(b"ping").await.unwrap(); + io.flush().await.unwrap(); + + let mut out = vec![0; 32]; + let n = io.read(&mut out).await.unwrap(); + out.truncate(n); + assert_eq!(out, b"pong"); + }); + + server.await; + client.await; + } + + async_std::task::block_on(run(Version::V1)); + async_std::task::block_on(run(Version::V1Lazy)); + } + + /// Tests the expected behaviour of failed negotiations. 
+ #[test] + fn negotiation_failed() { + fn prop( + version: Version, + DialerProtos(dial_protos): DialerProtos, + ListenerProtos(listen_protos): ListenerProtos, + DialPayload(dial_payload): DialPayload, + ) { + let _ = tracing_subscriber::fmt() + .with_env_filter( + EnvFilter::builder() + .with_default_directive(LevelFilter::DEBUG.into()) + .from_env_lossy(), + ) + .try_init(); + + async_std::task::block_on(async move { + let listener = TcpListener::bind("0.0.0.0:0").await.unwrap(); + let addr = listener.local_addr().unwrap(); + + let server = async_std::task::spawn(async move { + let server_connection = listener.accept().await.unwrap().0; + + let io = match timeout( + Duration::from_secs(2), + listener_select_proto(server_connection, listen_protos), + ) + .await + .unwrap() + { + Ok((_, io)) => io, + Err(NegotiationError::Failed) => return, + Err(NegotiationError::ProtocolError(e)) => { + panic!("Unexpected protocol error {e}") + } + }; + match io.complete().await { + Err(NegotiationError::Failed) => {} + _ => panic!(), + } + }); + + let client = async_std::task::spawn(async move { + let client_connection = TcpStream::connect(addr).await.unwrap(); + + let mut io = match timeout( + Duration::from_secs(2), + dialer_select_proto(client_connection, dial_protos, version), + ) + .await + .unwrap() + { + Err(NegotiationError::Failed) => return, + Ok((_, io)) => io, + Err(_) => panic!(), + }; + // The dialer may write a payload that is even sent before it + // got confirmation of the last proposed protocol, when `V1Lazy` + // is used. 
+ + tracing::info!("Writing early data"); + + io.write_all(&dial_payload).await.unwrap(); + match io.complete().await { + Err(NegotiationError::Failed) => {} + _ => panic!(), + } + }); + + server.await; + client.await; + + tracing::info!("---------------------------------------") + }); + } + + quickcheck::QuickCheck::new() + .tests(1000) + .quickcheck(prop as fn(_, _, _, _)); + } + + #[async_std::test] + async fn v1_lazy_do_not_wait_for_negotiation_on_poll_close() { + let (client_connection, _server_connection) = + futures_ringbuf::Endpoint::pair(1024 * 1024, 1); + + let client = async_std::task::spawn(async move { + // Single protocol to allow for lazy (or optimistic) protocol negotiation. + let protos = vec!["/proto1"]; + let (proto, mut io) = dialer_select_proto(client_connection, protos, Version::V1Lazy) + .await + .unwrap(); + assert_eq!(proto, "/proto1"); + + // client can close the connection even though protocol negotiation is not yet done, i.e. + // `_server_connection` had been untouched. + io.close().await.unwrap(); + }); + + async_std::future::timeout(Duration::from_secs(10), client) + .await + .unwrap(); + } + + #[derive(Clone, Debug)] + struct DialerProtos(Vec<&'static str>); + + impl Arbitrary for DialerProtos { + fn arbitrary(g: &mut Gen) -> Self { + if bool::arbitrary(g) { + DialerProtos(vec!["/proto1"]) + } else { + DialerProtos(vec!["/proto1", "/proto2"]) + } + } + } + + #[derive(Clone, Debug)] + struct ListenerProtos(Vec<&'static str>); + + impl Arbitrary for ListenerProtos { + fn arbitrary(g: &mut Gen) -> Self { + if bool::arbitrary(g) { + ListenerProtos(vec!["/proto3"]) + } else { + ListenerProtos(vec!["/proto3", "/proto4"]) + } + } + } + + #[derive(Clone, Debug)] + struct DialPayload(Vec); + + impl Arbitrary for DialPayload { + fn arbitrary(g: &mut Gen) -> Self { + DialPayload( + (0..g.gen_range(0..2u8)) + .map(|_| g.gen_range(1..255)) // We can generate 0 as that will produce a different error. 
+ .collect(), + ) + } + } +} diff --git a/misc/multistream-select/src/length_delimited.rs b/misc/multistream-select/src/length_delimited.rs index d1de7cd292e..6515d00c717 100644 --- a/misc/multistream-select/src/length_delimited.rs +++ b/misc/multistream-select/src/length_delimited.rs @@ -170,7 +170,7 @@ where if (buf[*pos - 1] & 0x80) == 0 { // MSB is not set, indicating the end of the length prefix. let (len, _) = unsigned_varint::decode::u16(buf).map_err(|e| { - log::debug!("invalid length prefix: {}", e); + tracing::debug!("invalid length prefix: {e}"); io::Error::new(io::ErrorKind::InvalidData, "invalid length prefix") })?; @@ -385,7 +385,6 @@ where #[cfg(test)] mod tests { use crate::length_delimited::LengthDelimited; - use async_std::net::{TcpListener, TcpStream}; use futures::{io::Cursor, prelude::*}; use quickcheck::*; use std::io::ErrorKind; @@ -412,7 +411,7 @@ mod tests { assert!(len < (1 << 15)); let frame = (0..len).map(|n| (n & 0xff) as u8).collect::>(); let mut data = vec![(len & 0x7f) as u8 | 0x80, (len >> 7) as u8]; - data.extend(frame.clone().into_iter()); + data.extend(frame.clone()); let mut framed = LengthDelimited::new(Cursor::new(data)); let recved = futures::executor::block_on(async move { framed.next().await }).unwrap(); assert_eq!(recved.unwrap(), frame); @@ -488,15 +487,13 @@ mod tests { #[test] fn writing_reading() { fn prop(frames: Vec>) -> TestResult { - async_std::task::block_on(async move { - let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); - let listener_addr = listener.local_addr().unwrap(); + let (client_connection, server_connection) = futures_ringbuf::Endpoint::pair(100, 100); + async_std::task::block_on(async move { let expected_frames = frames.clone(); let server = async_std::task::spawn(async move { - let socket = listener.accept().await.unwrap().0; let mut connec = - rw_stream_sink::RwStreamSink::new(LengthDelimited::new(socket)); + rw_stream_sink::RwStreamSink::new(LengthDelimited::new(server_connection)); 
let mut buf = vec![0u8; 0]; for expected in expected_frames { @@ -512,8 +509,7 @@ mod tests { }); let client = async_std::task::spawn(async move { - let socket = TcpStream::connect(&listener_addr).await.unwrap(); - let mut connec = LengthDelimited::new(socket); + let mut connec = LengthDelimited::new(client_connection); for frame in frames { connec.send(From::from(frame)).await.unwrap(); } diff --git a/misc/multistream-select/src/lib.rs b/misc/multistream-select/src/lib.rs index ec62023a861..5565623f25e 100644 --- a/misc/multistream-select/src/lib.rs +++ b/misc/multistream-select/src/lib.rs @@ -40,13 +40,12 @@ //! echoing the same protocol) or reject (by responding with a message stating //! "not available"). If a suggested protocol is not available, the dialer may //! suggest another protocol. This process continues until a protocol is agreed upon, -//! yielding a [`Negotiated`](self::Negotiated) stream, or the dialer has run out of +//! yielding a [`Negotiated`] stream, or the dialer has run out of //! alternatives. //! -//! See [`dialer_select_proto`](self::dialer_select_proto) and -//! [`listener_select_proto`](self::listener_select_proto). +//! See [`dialer_select_proto`] and [`listener_select_proto`]. //! -//! ## [`Negotiated`](self::Negotiated) +//! ## [`Negotiated`] //! //! A `Negotiated` represents an I/O stream that has settled on a protocol //! to use. By default, with [`Version::V1`], protocol negotiation is always @@ -55,7 +54,7 @@ //! a variant [`Version::V1Lazy`] that permits 0-RTT negotiation if the //! dialer only supports a single protocol. In that case, when a dialer //! settles on a protocol to use, the [`DialerSelectFuture`] yields a -//! [`Negotiated`](self::Negotiated) I/O stream before the negotiation +//! [`Negotiated`] I/O stream before the negotiation //! data has been flushed. It is then expecting confirmation for that protocol //! as the first messages read from the stream. This behaviour allows the dialer //! 
to immediately send data relating to the negotiated protocol together with the @@ -63,8 +62,7 @@ //! multiple 0-RTT negotiations in sequence for different protocols layered on //! top of each other may trigger undesirable behaviour for a listener not //! supporting one of the intermediate protocols. See -//! [`dialer_select_proto`](self::dialer_select_proto) and the documentation -//! of [`Version::V1Lazy`] for further details. +//! [`dialer_select_proto`] and the documentation of [`Version::V1Lazy`] for further details. //! //! ## Examples //! @@ -142,3 +140,11 @@ pub enum Version { // Draft: https://github.com/libp2p/specs/pull/95 // V2, } + +#[cfg(test)] +impl quickcheck::Arbitrary for Version { + fn arbitrary(g: &mut quickcheck::Gen) -> Self { + *g.choose(&[Version::V1, Version::V1Lazy]) + .expect("slice not empty") + } +} diff --git a/misc/multistream-select/src/listener_select.rs b/misc/multistream-select/src/listener_select.rs index 5386114fab8..21c507096e2 100644 --- a/misc/multistream-select/src/listener_select.rs +++ b/misc/multistream-select/src/listener_select.rs @@ -52,7 +52,7 @@ where .filter_map(|n| match Protocol::try_from(n.as_ref()) { Ok(p) => Some((n, p)), Err(e) => { - log::warn!( + tracing::warn!( "Listener: Ignoring invalid protocol: {} due to {}", n.as_ref(), e @@ -124,9 +124,9 @@ where match mem::replace(this.state, State::Done) { State::RecvHeader { mut io } => { match io.poll_next_unpin(cx) { - Poll::Ready(Some(Ok(Message::Header(h)))) => match h { - HeaderLine::V1 => *this.state = State::SendHeader { io }, - }, + Poll::Ready(Some(Ok(Message::Header(HeaderLine::V1)))) => { + *this.state = State::SendHeader { io } + } Poll::Ready(Some(Ok(_))) => { return Poll::Ready(Err(ProtocolError::InvalidMessage.into())) } @@ -186,7 +186,7 @@ where // the dialer also raises `NegotiationError::Failed` when finally // reading the `N/A` response. 
if let ProtocolError::InvalidMessage = &err { - log::trace!( + tracing::trace!( "Listener: Negotiation failed with invalid \ message after protocol rejection." ); @@ -194,7 +194,7 @@ where } if let ProtocolError::IoError(e) = &err { if e.kind() == std::io::ErrorKind::UnexpectedEof { - log::trace!( + tracing::trace!( "Listener: Negotiation failed with EOF \ after protocol rejection." ); @@ -228,10 +228,10 @@ where }); let message = if protocol.is_some() { - log::debug!("Listener: confirming protocol: {}", p); + tracing::debug!(protocol=%p, "Listener: confirming protocol"); Message::Protocol(p.clone()) } else { - log::debug!("Listener: rejecting protocol: {}", p.as_ref()); + tracing::debug!(protocol=%p.as_ref(), "Listener: rejecting protocol"); Message::NotAvailable }; @@ -287,9 +287,9 @@ where // Otherwise expect to receive another message. match protocol { Some(protocol) => { - log::debug!( - "Listener: sent confirmed protocol: {}", - protocol.as_ref() + tracing::debug!( + protocol=%protocol.as_ref(), + "Listener: sent confirmed protocol" ); let io = Negotiated::completed(io.into_inner()); return Poll::Ready(Ok((protocol, io))); diff --git a/misc/multistream-select/src/negotiated.rs b/misc/multistream-select/src/negotiated.rs index dabcec4f605..a24014a4f5f 100644 --- a/misc/multistream-select/src/negotiated.rs +++ b/misc/multistream-select/src/negotiated.rs @@ -171,7 +171,7 @@ impl Negotiated { if let Message::Protocol(p) = &msg { if p.as_ref() == protocol.as_ref() { - log::debug!("Negotiated: Received confirmation for protocol: {}", p); + tracing::debug!(protocol=%p, "Negotiated: Received confirmation for protocol"); *this.state = State::Completed { io: io.into_inner(), }; @@ -305,9 +305,7 @@ where } fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - // Ensure all data has been flushed and expected negotiation messages - // have been received. 
- ready!(self.as_mut().poll(cx).map_err(Into::::into)?); + // Ensure all data has been flushed, including optimistic multistream-select messages. ready!(self .as_mut() .poll_flush(cx) @@ -316,7 +314,13 @@ where // Continue with the shutdown of the underlying I/O stream. match self.project().state.project() { StateProj::Completed { io, .. } => io.poll_close(cx), - StateProj::Expecting { io, .. } => io.poll_close(cx), + StateProj::Expecting { io, .. } => { + let close_poll = io.poll_close(cx); + if let Poll::Ready(Ok(())) = close_poll { + tracing::debug!("Stream closed. Confirmation from remote for optimstic protocol negotiation still pending") + } + close_poll + } StateProj::Invalid => panic!("Negotiated: Invalid state"), } } diff --git a/misc/multistream-select/src/protocol.rs b/misc/multistream-select/src/protocol.rs index be2f3122da0..d5c2bfa773a 100644 --- a/misc/multistream-select/src/protocol.rs +++ b/misc/multistream-select/src/protocol.rs @@ -403,7 +403,7 @@ where return Poll::Ready(None); }; - log::trace!("Received message: {:?}", msg); + tracing::trace!(message=?msg, "Received message"); Poll::Ready(Some(Ok(msg))) } diff --git a/misc/multistream-select/tests/dialer_select.rs b/misc/multistream-select/tests/dialer_select.rs deleted file mode 100644 index f080730b939..00000000000 --- a/misc/multistream-select/tests/dialer_select.rs +++ /dev/null @@ -1,178 +0,0 @@ -// Copyright 2017 Parity Technologies (UK) Ltd. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - -//! Integration tests for protocol negotiation. 
- -use async_std::net::{TcpListener, TcpStream}; -use futures::prelude::*; -use multistream_select::{dialer_select_proto, listener_select_proto, NegotiationError, Version}; - -#[test] -fn select_proto_basic() { - async fn run(version: Version) { - let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); - let listener_addr = listener.local_addr().unwrap(); - - let server = async_std::task::spawn(async move { - let connec = listener.accept().await.unwrap().0; - let protos = vec!["/proto1", "/proto2"]; - let (proto, mut io) = listener_select_proto(connec, protos).await.unwrap(); - assert_eq!(proto, "/proto2"); - - let mut out = vec![0; 32]; - let n = io.read(&mut out).await.unwrap(); - out.truncate(n); - assert_eq!(out, b"ping"); - - io.write_all(b"pong").await.unwrap(); - io.flush().await.unwrap(); - }); - - let client = async_std::task::spawn(async move { - let connec = TcpStream::connect(&listener_addr).await.unwrap(); - let protos = vec!["/proto3", "/proto2"]; - let (proto, mut io) = dialer_select_proto(connec, protos.into_iter(), version) - .await - .unwrap(); - assert_eq!(proto, "/proto2"); - - io.write_all(b"ping").await.unwrap(); - io.flush().await.unwrap(); - - let mut out = vec![0; 32]; - let n = io.read(&mut out).await.unwrap(); - out.truncate(n); - assert_eq!(out, b"pong"); - }); - - server.await; - client.await; - } - - async_std::task::block_on(run(Version::V1)); - async_std::task::block_on(run(Version::V1Lazy)); -} - -/// Tests the expected behaviour of failed negotiations. 
-#[test] -fn negotiation_failed() { - let _ = env_logger::try_init(); - - async fn run( - Test { - version, - listen_protos, - dial_protos, - dial_payload, - }: Test, - ) { - let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); - let listener_addr = listener.local_addr().unwrap(); - - let server = async_std::task::spawn(async move { - let connec = listener.accept().await.unwrap().0; - let io = match listener_select_proto(connec, listen_protos).await { - Ok((_, io)) => io, - Err(NegotiationError::Failed) => return, - Err(NegotiationError::ProtocolError(e)) => { - panic!("Unexpected protocol error {e}") - } - }; - match io.complete().await { - Err(NegotiationError::Failed) => {} - _ => panic!(), - } - }); - - let client = async_std::task::spawn(async move { - let connec = TcpStream::connect(&listener_addr).await.unwrap(); - let mut io = match dialer_select_proto(connec, dial_protos.into_iter(), version).await { - Err(NegotiationError::Failed) => return, - Ok((_, io)) => io, - Err(_) => panic!(), - }; - // The dialer may write a payload that is even sent before it - // got confirmation of the last proposed protocol, when `V1Lazy` - // is used. - io.write_all(&dial_payload).await.unwrap(); - match io.complete().await { - Err(NegotiationError::Failed) => {} - _ => panic!(), - } - }); - - server.await; - client.await; - } - - /// Parameters for a single test run. - #[derive(Clone)] - struct Test { - version: Version, - listen_protos: Vec<&'static str>, - dial_protos: Vec<&'static str>, - dial_payload: Vec, - } - - // Disjunct combinations of listen and dial protocols to test. - // - // The choices here cover the main distinction between a single - // and multiple protocols. 
- let protos = vec![ - (vec!["/proto1"], vec!["/proto2"]), - (vec!["/proto1", "/proto2"], vec!["/proto3", "/proto4"]), - ]; - - // The payloads that the dialer sends after "successful" negotiation, - // which may be sent even before the dialer got protocol confirmation - // when `V1Lazy` is used. - // - // The choices here cover the specific situations that can arise with - // `V1Lazy` and which must nevertheless behave identically to `V1` w.r.t. - // the outcome of the negotiation. - let payloads = vec![ - // No payload, in which case all versions should behave identically - // in any case, i.e. the baseline test. - vec![], - // With this payload and `V1Lazy`, the listener interprets the first - // `1` as a message length and encounters an invalid message (the - // second `1`). The listener is nevertheless expected to fail - // negotiation normally, just like with `V1`. - vec![1, 1], - // With this payload and `V1Lazy`, the listener interprets the first - // `42` as a message length and encounters unexpected EOF trying to - // read a message of that length. The listener is nevertheless expected - // to fail negotiation normally, just like with `V1` - vec![42, 1], - ]; - - for (listen_protos, dial_protos) in protos { - for dial_payload in payloads.clone() { - for &version in &[Version::V1, Version::V1Lazy] { - async_std::task::block_on(run(Test { - version, - listen_protos: listen_protos.clone(), - dial_protos: dial_protos.clone(), - dial_payload: dial_payload.clone(), - })) - } - } - } -} diff --git a/misc/multistream-select/tests/transport.rs b/misc/multistream-select/tests/transport.rs deleted file mode 100644 index e4517e27168..00000000000 --- a/misc/multistream-select/tests/transport.rs +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. 
- -use futures::{channel::oneshot, prelude::*, ready}; -use libp2p_core::{ - multiaddr::Protocol, - muxing::StreamMuxerBox, - transport::{self, MemoryTransport}, - upgrade, Multiaddr, Transport, -}; -use libp2p_identity as identity; -use libp2p_identity::PeerId; -use libp2p_mplex::MplexConfig; -use libp2p_plaintext::PlainText2Config; -use libp2p_swarm::{dummy, SwarmBuilder, SwarmEvent}; -use rand::random; -use std::task::Poll; - -type TestTransport = transport::Boxed<(PeerId, StreamMuxerBox)>; - -fn mk_transport(up: upgrade::Version) -> (PeerId, TestTransport) { - let keys = identity::Keypair::generate_ed25519(); - let id = keys.public().to_peer_id(); - ( - id, - MemoryTransport::default() - .upgrade(up) - .authenticate(PlainText2Config { - local_public_key: keys.public(), - }) - .multiplex(MplexConfig::default()) - .boxed(), - ) -} - -/// Tests the transport upgrade process with all supported -/// upgrade protocol versions. -#[test] -fn transport_upgrade() { - let _ = env_logger::try_init(); - - fn run(up: upgrade::Version) { - let (dialer_id, dialer_transport) = mk_transport(up); - let (listener_id, listener_transport) = mk_transport(up); - - let listen_addr = Multiaddr::from(Protocol::Memory(random::())); - - let mut dialer = - SwarmBuilder::with_async_std_executor(dialer_transport, dummy::Behaviour, dialer_id) - .build(); - let mut listener = SwarmBuilder::with_async_std_executor( - listener_transport, - dummy::Behaviour, - listener_id, - ) - .build(); - - listener.listen_on(listen_addr).unwrap(); - let (addr_sender, addr_receiver) = oneshot::channel(); - - let client = async move { - let addr = addr_receiver.await.unwrap(); - dialer.dial(addr).unwrap(); - futures::future::poll_fn(move |cx| loop { - if let SwarmEvent::ConnectionEstablished { .. 
} = - ready!(dialer.poll_next_unpin(cx)).unwrap() - { - return Poll::Ready(()); - } - }) - .await - }; - - let mut addr_sender = Some(addr_sender); - let server = futures::future::poll_fn(move |cx| loop { - match ready!(listener.poll_next_unpin(cx)).unwrap() { - SwarmEvent::NewListenAddr { address, .. } => { - addr_sender.take().unwrap().send(address).unwrap(); - } - SwarmEvent::IncomingConnection { .. } => {} - SwarmEvent::ConnectionEstablished { .. } => return Poll::Ready(()), - _ => {} - } - }); - - async_std::task::block_on(future::select(Box::pin(server), Box::pin(client))); - } - - run(upgrade::Version::V1); - run(upgrade::Version::V1Lazy); -} diff --git a/misc/quick-protobuf-codec/CHANGELOG.md b/misc/quick-protobuf-codec/CHANGELOG.md index 6591928f82d..a301293621f 100644 --- a/misc/quick-protobuf-codec/CHANGELOG.md +++ b/misc/quick-protobuf-codec/CHANGELOG.md @@ -1,4 +1,14 @@ -## 0.2.0 - unreleased +## 0.3.1 + +- Reduce allocations during encoding. + See [PR 4782](https://github.com/libp2p/rust-libp2p/pull/4782). + +## 0.3.0 + +- Update to `asynchronous-codec` `v0.7.0`. + See [PR 4636](https://github.com/libp2p/rust-libp2p/pull/4636). + +## 0.2.0 - Raise MSRV to 1.65. See [PR 3715]. diff --git a/misc/quick-protobuf-codec/Cargo.toml b/misc/quick-protobuf-codec/Cargo.toml index 37cdd07f225..bc07b86b427 100644 --- a/misc/quick-protobuf-codec/Cargo.toml +++ b/misc/quick-protobuf-codec/Cargo.toml @@ -3,7 +3,7 @@ name = "quick-protobuf-codec" edition = "2021" rust-version = { workspace = true } description = "Asynchronous de-/encoding of Protobuf structs using asynchronous-codec, unsigned-varint and quick-protobuf." 
-version = "0.2.0" +version = "0.3.1" authors = ["Max Inden "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -11,15 +11,27 @@ keywords = ["networking"] categories = ["asynchronous"] [dependencies] -asynchronous-codec = { version = "0.6" } +asynchronous-codec = { workspace = true } bytes = { version = "1" } thiserror = "1.0" -unsigned-varint = { version = "0.7", features = ["asynchronous_codec"] } +unsigned-varint = { workspace = true, features = ["std"] } quick-protobuf = "0.8" +[dev-dependencies] +criterion = "0.5.1" +futures = "0.3.30" +quickcheck = { workspace = true } + +[[bench]] +name = "codec" +harness = false + # Passing arguments to the docsrs builder in order to properly document cfg's. # More information: https://docs.rs/about/builds#cross-compiling [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs"] rustc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true diff --git a/misc/quick-protobuf-codec/benches/codec.rs b/misc/quick-protobuf-codec/benches/codec.rs new file mode 100644 index 00000000000..0f6ce9469c5 --- /dev/null +++ b/misc/quick-protobuf-codec/benches/codec.rs @@ -0,0 +1,28 @@ +use asynchronous_codec::Encoder; +use bytes::BytesMut; +use criterion::{criterion_group, criterion_main, BatchSize, BenchmarkId, Criterion}; +use quick_protobuf_codec::{proto, Codec}; + +pub fn benchmark(c: &mut Criterion) { + for size in [1000, 10_000, 100_000, 1_000_000, 10_000_000] { + c.bench_with_input(BenchmarkId::new("encode", size), &size, |b, i| { + b.iter_batched( + || { + let mut out = BytesMut::new(); + out.reserve(i + 100); + let codec = Codec::::new(i + 100); + let msg = proto::Message { + data: vec![0; size], + }; + + (codec, out, msg) + }, + |(mut codec, mut out, msg)| codec.encode(msg, &mut out).unwrap(), + BatchSize::SmallInput, + ); + }); + } +} + +criterion_group!(benches, benchmark); +criterion_main!(benches); diff --git a/protocols/identify/src/mod.rs 
b/misc/quick-protobuf-codec/src/generated/mod.rs similarity index 66% rename from protocols/identify/src/mod.rs rename to misc/quick-protobuf-codec/src/generated/mod.rs index e52c5a80bc0..b9f982f8dfd 100644 --- a/protocols/identify/src/mod.rs +++ b/misc/quick-protobuf-codec/src/generated/mod.rs @@ -1,2 +1,2 @@ // Automatically generated mod.rs -pub mod structs; +pub mod test; diff --git a/misc/quick-protobuf-codec/src/generated/test.proto b/misc/quick-protobuf-codec/src/generated/test.proto new file mode 100644 index 00000000000..5b1f46c0bfa --- /dev/null +++ b/misc/quick-protobuf-codec/src/generated/test.proto @@ -0,0 +1,7 @@ +syntax = "proto3"; + +package test; + +message Message { + bytes data = 1; +} diff --git a/misc/quick-protobuf-codec/src/generated/test.rs b/misc/quick-protobuf-codec/src/generated/test.rs new file mode 100644 index 00000000000..b353e6d9183 --- /dev/null +++ b/misc/quick-protobuf-codec/src/generated/test.rs @@ -0,0 +1,47 @@ +// Automatically generated rust module for 'test.proto' file + +#![allow(non_snake_case)] +#![allow(non_upper_case_globals)] +#![allow(non_camel_case_types)] +#![allow(unused_imports)] +#![allow(unknown_lints)] +#![allow(clippy::all)] +#![cfg_attr(rustfmt, rustfmt_skip)] + + +use quick_protobuf::{MessageInfo, MessageRead, MessageWrite, BytesReader, Writer, WriterBackend, Result}; +use quick_protobuf::sizeofs::*; +use super::*; + +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Debug, Default, PartialEq, Clone)] +pub struct Message { + pub data: Vec, +} + +impl<'a> MessageRead<'a> for Message { + fn from_reader(r: &mut BytesReader, bytes: &'a [u8]) -> Result { + let mut msg = Self::default(); + while !r.is_eof() { + match r.next_tag(bytes) { + Ok(10) => msg.data = r.read_bytes(bytes)?.to_owned(), + Ok(t) => { r.read_unknown(bytes, t)?; } + Err(e) => return Err(e), + } + } + Ok(msg) + } +} + +impl MessageWrite for Message { + fn get_size(&self) -> usize { + 0 + + if self.data.is_empty() { 0 } else { 1 + 
sizeof_len((&self.data).len()) } + } + + fn write_message(&self, w: &mut Writer) -> Result<()> { + if !self.data.is_empty() { w.write_with_tag(10, |w| w.write_bytes(&**&self.data))?; } + Ok(()) + } +} + diff --git a/misc/quick-protobuf-codec/src/lib.rs b/misc/quick-protobuf-codec/src/lib.rs index 54785fbe209..c50b1264af6 100644 --- a/misc/quick-protobuf-codec/src/lib.rs +++ b/misc/quick-protobuf-codec/src/lib.rs @@ -1,16 +1,21 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] use asynchronous_codec::{Decoder, Encoder}; -use bytes::{Bytes, BytesMut}; -use quick_protobuf::{BytesReader, MessageRead, MessageWrite, Writer}; +use bytes::{Buf, BufMut, BytesMut}; +use quick_protobuf::{BytesReader, MessageRead, MessageWrite, Writer, WriterBackend}; +use std::io; use std::marker::PhantomData; -use unsigned_varint::codec::UviBytes; + +mod generated; + +#[doc(hidden)] // NOT public API. Do not use. +pub use generated::test as proto; /// [`Codec`] implements [`Encoder`] and [`Decoder`], uses [`unsigned_varint`] /// to prefix messages with their length and uses [`quick_protobuf`] and a provided /// `struct` implementing [`MessageRead`] and [`MessageWrite`] to do the encoding. pub struct Codec { - uvi: UviBytes, + max_message_len_bytes: usize, phantom: PhantomData<(In, Out)>, } @@ -21,30 +26,44 @@ impl Codec { /// Protobuf message. The limit does not include the bytes needed for the /// [`unsigned_varint`]. 
pub fn new(max_message_len_bytes: usize) -> Self { - let mut uvi = UviBytes::default(); - uvi.set_max_len(max_message_len_bytes); Self { - uvi, - phantom: PhantomData::default(), + max_message_len_bytes, + phantom: PhantomData, } } } impl Encoder for Codec { - type Item = In; + type Item<'a> = In; type Error = Error; - fn encode(&mut self, item: Self::Item, dst: &mut BytesMut) -> Result<(), Self::Error> { - let mut encoded_msg = Vec::new(); - let mut writer = Writer::new(&mut encoded_msg); - item.write_message(&mut writer) - .expect("Encoding to succeed"); - self.uvi.encode(Bytes::from(encoded_msg), dst)?; + fn encode(&mut self, item: Self::Item<'_>, dst: &mut BytesMut) -> Result<(), Self::Error> { + write_length(&item, dst); + write_message(&item, dst)?; Ok(()) } } +/// Write the message's length (i.e. `size`) to `dst` as a variable-length integer. +fn write_length(message: &impl MessageWrite, dst: &mut BytesMut) { + let message_length = message.get_size(); + + let mut uvi_buf = unsigned_varint::encode::usize_buffer(); + let encoded_length = unsigned_varint::encode::usize(message_length, &mut uvi_buf); + + dst.extend_from_slice(encoded_length); +} + +/// Write the message itself to `dst`. +fn write_message(item: &impl MessageWrite, dst: &mut BytesMut) -> io::Result<()> { + let mut writer = Writer::new(BytesMutWriterBackend::new(dst)); + item.write_message(&mut writer) + .map_err(|e| io::Error::new(io::ErrorKind::Other, e))?; + + Ok(()) +} + impl Decoder for Codec where Out: for<'a> MessageRead<'a>, @@ -53,24 +72,203 @@ where type Error = Error; fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { - let msg = match self.uvi.decode(src)? 
{ - None => return Ok(None), - Some(msg) => msg, + let (message_length, remaining) = match unsigned_varint::decode::usize(src) { + Ok((len, remaining)) => (len, remaining), + Err(unsigned_varint::decode::Error::Insufficient) => return Ok(None), + Err(e) => return Err(Error(io::Error::new(io::ErrorKind::InvalidData, e))), }; - let mut reader = BytesReader::from_bytes(&msg); - let message = Self::Item::from_reader(&mut reader, &msg) - .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))?; + if message_length > self.max_message_len_bytes { + return Err(Error(io::Error::new( + io::ErrorKind::PermissionDenied, + format!( + "message with {message_length}b exceeds maximum of {}b", + self.max_message_len_bytes + ), + ))); + } + + // Compute how many bytes the varint itself consumed. + let varint_length = src.len() - remaining.len(); + + // Ensure we can read an entire message. + if src.len() < (message_length + varint_length) { + return Ok(None); + } + + // Safe to advance buffer now. 
+ src.advance(varint_length); + + let message = src.split_to(message_length); + + let mut reader = BytesReader::from_bytes(&message); + let message = Self::Item::from_reader(&mut reader, &message) + .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?; + Ok(Some(message)) } } +struct BytesMutWriterBackend<'a> { + dst: &'a mut BytesMut, +} + +impl<'a> BytesMutWriterBackend<'a> { + fn new(dst: &'a mut BytesMut) -> Self { + Self { dst } + } +} + +impl<'a> WriterBackend for BytesMutWriterBackend<'a> { + fn pb_write_u8(&mut self, x: u8) -> quick_protobuf::Result<()> { + self.dst.put_u8(x); + + Ok(()) + } + + fn pb_write_u32(&mut self, x: u32) -> quick_protobuf::Result<()> { + self.dst.put_u32_le(x); + + Ok(()) + } + + fn pb_write_i32(&mut self, x: i32) -> quick_protobuf::Result<()> { + self.dst.put_i32_le(x); + + Ok(()) + } + + fn pb_write_f32(&mut self, x: f32) -> quick_protobuf::Result<()> { + self.dst.put_f32_le(x); + + Ok(()) + } + + fn pb_write_u64(&mut self, x: u64) -> quick_protobuf::Result<()> { + self.dst.put_u64_le(x); + + Ok(()) + } + + fn pb_write_i64(&mut self, x: i64) -> quick_protobuf::Result<()> { + self.dst.put_i64_le(x); + + Ok(()) + } + + fn pb_write_f64(&mut self, x: f64) -> quick_protobuf::Result<()> { + self.dst.put_f64_le(x); + + Ok(()) + } + + fn pb_write_all(&mut self, buf: &[u8]) -> quick_protobuf::Result<()> { + self.dst.put_slice(buf); + + Ok(()) + } +} + #[derive(thiserror::Error, Debug)] #[error("Failed to encode/decode message")] -pub struct Error(#[from] std::io::Error); +pub struct Error(#[from] io::Error); -impl From for std::io::Error { +impl From for io::Error { fn from(e: Error) -> Self { e.0 } } + +#[cfg(test)] +mod tests { + use super::*; + use crate::proto; + use asynchronous_codec::FramedRead; + use futures::io::Cursor; + use futures::{FutureExt, StreamExt}; + use quickcheck::{Arbitrary, Gen, QuickCheck}; + use std::error::Error; + + #[test] + fn honors_max_message_length() { + let codec = Codec::::new(1); + let mut src = 
+ /// Constructs a [`BytesMut`] holding a varint-encoded `length` prefix followed by `length` zero bytes.
+ fn varint_zeroes(length: usize) -> BytesMut { + let mut buf = unsigned_varint::encode::usize_buffer(); + let encoded_length = unsigned_varint::encode::usize(length, &mut buf); + + let mut src = BytesMut::new(); + src.extend_from_slice(encoded_length); + src.extend(std::iter::repeat(0).take(length)); + src + } + + impl Arbitrary for proto::Message { + fn arbitrary(g: &mut Gen) -> Self { + Self { + data: Vec::arbitrary(g), + } + } + } + + #[derive(Debug)] + struct Dummy; + + impl<'a> MessageRead<'a> for Dummy { + fn from_reader(_: &mut BytesReader, _: &'a [u8]) -> quick_protobuf::Result { + todo!() + } + } +} diff --git a/misc/quick-protobuf-codec/tests/large_message.rs b/misc/quick-protobuf-codec/tests/large_message.rs new file mode 100644 index 00000000000..65dafe065d1 --- /dev/null +++ b/misc/quick-protobuf-codec/tests/large_message.rs @@ -0,0 +1,16 @@ +use asynchronous_codec::Encoder; +use bytes::BytesMut; +use quick_protobuf_codec::proto; +use quick_protobuf_codec::Codec; + +#[test] +fn encode_large_message() { + let mut codec = Codec::::new(1_001_000); + let mut dst = BytesMut::new(); + dst.reserve(1_001_000); + let message = proto::Message { + data: vec![0; 1_000_000], + }; + + codec.encode(message, &mut dst).unwrap(); +} diff --git a/misc/quickcheck-ext/Cargo.toml b/misc/quickcheck-ext/Cargo.toml index 0c427dc4fc9..9fe3cbf25c1 100644 --- a/misc/quickcheck-ext/Cargo.toml +++ b/misc/quickcheck-ext/Cargo.toml @@ -5,6 +5,12 @@ edition = "2021" publish = false license = "Unlicense/MIT" +[package.metadata.release] +release = false + [dependencies] quickcheck = "1" num-traits = "0.2" + +[lints] +workspace = true diff --git a/misc/rw-stream-sink/CHANGELOG.md b/misc/rw-stream-sink/CHANGELOG.md index 89047a25849..a97fc5f3d14 100644 --- a/misc/rw-stream-sink/CHANGELOG.md +++ b/misc/rw-stream-sink/CHANGELOG.md @@ -1,4 +1,4 @@ -## 0.4.0 - unreleased +## 0.4.0 - Raise MSRV to 1.65. See [PR 3715]. 
diff --git a/misc/rw-stream-sink/Cargo.toml b/misc/rw-stream-sink/Cargo.toml index 85871d3671e..8e093ac3db0 100644 --- a/misc/rw-stream-sink/Cargo.toml +++ b/misc/rw-stream-sink/Cargo.toml @@ -11,8 +11,8 @@ keywords = ["networking"] categories = ["network-programming", "asynchronous"] [dependencies] -futures = "0.3.28" -pin-project = "1.0.10" +futures = "0.3.30" +pin-project = "1.1.3" static_assertions = "1" [dev-dependencies] @@ -24,3 +24,6 @@ async-std = "1.0" all-features = true rustdoc-args = ["--cfg", "docsrs"] rustc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true diff --git a/misc/server/CHANGELOG.md b/misc/server/CHANGELOG.md new file mode 100644 index 00000000000..e4c5dd4a103 --- /dev/null +++ b/misc/server/CHANGELOG.md @@ -0,0 +1,80 @@ +## 0.12.5 + +### Added + +- Add `/wss` support. + See [PR 4937](https://github.com/libp2p/rust-libp2p/pull/4937). + +## 0.12.4 + +### Added + +- Expose `libp2p_bandwidth_bytes` Prometheus metrics. + See [PR 4727](https://github.com/libp2p/rust-libp2p/pull/4727). + +## 0.12.3 + +### Changed +- Add libp2p-lookup to Dockerfile to enable healthchecks. + +### Fixed + +- Disable QUIC `draft-29` support. + Listening on `/quic` and `/quic-v1` addresses with the same port would otherwise result in an "Address already in use" error by the OS. + See [PR 4467]. + +[PR 4467]: https://github.com/libp2p/rust-libp2p/pull/4467 + +## 0.12.2 +### Fixed +- Adhere to `--metrics-path` flag and listen on `0.0.0.0:8888` (default IPFS metrics port). + [PR 4392] + +[PR 4392]: https://github.com/libp2p/rust-libp2p/pull/4392 + +## 0.12.1 +### Changed +- Move to tokio and hyper. + See [PR 4311]. +- Move to distroless Docker base image. + See [PR 4311]. + +[PR 4311]: https://github.com/libp2p/rust-libp2p/pull/4311 + +## 0.8.0 +### Changed +- Remove mplex support. + +## 0.7.0 +### Changed +- Update to libp2p v0.47.0. + +## 0.6.0 - 2022-05-05 +### Changed +- Update to libp2p v0.44.0. 
+ +## 0.5.4 - 2022-01-11 +### Changed +- Pull latest autonat changes. + +## 0.5.3 - 2021-12-25 +### Changed +- Update dependencies. +- Pull in autonat fixes. + +## 0.5.2 - 2021-12-20 +### Added +- Add support for libp2p autonat protocol via `--enable-autonat`. + +## 0.5.1 - 2021-12-20 +### Fixed +- Update dependencies. +- Fix typo in command line flag `--enable-kademlia`. + +## 0.5.0 - 2021-11-18 +### Changed +- Disable Kademlia protocol by default. + +## 0.4.0 - 2021-11-18 +### Fixed +- Update dependencies. diff --git a/misc/server/Cargo.toml b/misc/server/Cargo.toml new file mode 100644 index 00000000000..e23660311bd --- /dev/null +++ b/misc/server/Cargo.toml @@ -0,0 +1,30 @@ +[package] +name = "libp2p-server" +version = "0.12.5" +authors = ["Max Inden "] +edition = "2021" +repository = "https://github.com/libp2p/rust-libp2p" +rust-version = { workspace = true } +description = "A rust-libp2p server binary." +license = "MIT" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +base64 = "0.21" +clap = { version = "4.4.11", features = ["derive"] } +futures = "0.3" +futures-timer = "3" +hyper = { version = "0.14", features = ["server", "tcp", "http1"] } +libp2p = { workspace = true, features = ["autonat", "dns", "tokio", "noise", "tcp", "yamux", "identify", "kad", "ping", "relay", "metrics", "rsa", "macros", "quic", "websocket"] } +prometheus-client = { workspace = true } +serde = "1.0.193" +serde_derive = "1.0.125" +serde_json = "1.0" +tokio = { version = "1", features = ["rt-multi-thread", "macros"] } +tracing = "0.1.37" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } +zeroize = "1" + +[lints] +workspace = true diff --git a/misc/server/Dockerfile b/misc/server/Dockerfile new file mode 100644 index 00000000000..9d2742f97e8 --- /dev/null +++ b/misc/server/Dockerfile @@ -0,0 +1,21 @@ +# syntax=docker/dockerfile:1.5-labs +FROM rust:1.73.0 as chef +RUN wget -q -O- 
https://github.com/LukeMathWalker/cargo-chef/releases/download/v0.1.62/cargo-chef-x86_64-unknown-linux-gnu.tar.gz | tar -zx -C /usr/local/bin +RUN cargo install --locked --root /usr/local libp2p-lookup --version 0.6.4 +WORKDIR /app + +FROM chef AS planner +COPY . . +RUN cargo chef prepare --recipe-path recipe.json + +FROM chef AS builder +COPY --from=planner /app/recipe.json recipe.json +# Build dependencies - this is the caching Docker layer! +RUN cargo chef cook --release --package libp2p-server --recipe-path recipe.json +# Build application +COPY . . +RUN cargo build --release --package libp2p-server + +FROM gcr.io/distroless/cc +COPY --from=builder /usr/local/bin/libp2p-server /usr/local/bin/libp2p-lookup /usr/local/bin/ +CMD ["libp2p-server"] diff --git a/misc/server/README.md b/misc/server/README.md new file mode 100644 index 00000000000..0da1bd8abd9 --- /dev/null +++ b/misc/server/README.md @@ -0,0 +1,41 @@ +# Rust libp2p Server + +A rust-libp2p based server implementation running: + +- the [Kademlia protocol](https://github.com/libp2p/specs/tree/master/kad-dht) + +- the [Circuit Relay v2 protocol](https://github.com/libp2p/specs/blob/master/relay/circuit-v2.md) + +- the [AutoNAT protocol](https://github.com/libp2p/specs/blob/master/autonat/README.md) + +## Usage + +``` +cargo run -- --help + +A rust-libp2p server binary. + +Usage: libp2p-server [OPTIONS] --config + +Options: + --config Path to IPFS config file + --metrics-path Metric endpoint path [default: /metrics] + --enable-kademlia Whether to run the libp2p Kademlia protocol and join the IPFS DHT + --enable-autonat Whether to run the libp2p Autonat protocol + -h, --help Print help +``` + + +``` +cargo run -- --config ~/.ipfs/config + +Local peer id: PeerId("12D3KooWSa1YEeQVSwvoqAMhwjKQ6kqZQckhWPb3RWEGV3sZGU6Z") +Listening on "/ip4/127.0.0.1/udp/4001/quic" +[...] 
+The Docker container includes [libp2p-lookup](https://github.com/mxinden/libp2p-lookup/) to enable adding a proper healthcheck for container startup, e.g. + +``` shell +docker run --health-cmd 'libp2p-lookup direct --address /ip4/127.0.0.1/tcp/4001/p2p/QmQCU2EcMqAqQPR2i9bChDtGNJchTbq5TbXJJ16u19uLTa' -v /home/ipfs/.ipfs:/ipfs ghcr.io/libp2p/rust-libp2p-server --config /ipfs/config +```
+ kademlia_config.set_record_ttl(Some(Duration::from_secs(0))); + kademlia_config.set_provider_record_ttl(Some(Duration::from_secs(0))); + let mut kademlia = kad::Behaviour::with_config( + pub_key.to_peer_id(), + kad::store::MemoryStore::new(pub_key.to_peer_id()), + kademlia_config, + ); + let bootaddr = Multiaddr::from_str("/dnsaddr/bootstrap.libp2p.io").unwrap(); + for peer in &BOOTNODES { + kademlia.add_address(&PeerId::from_str(peer).unwrap(), bootaddr.clone()); + } + kademlia.bootstrap().unwrap(); + Some(kademlia) + } else { + None + } + .into(); + + let autonat = if enable_autonat { + Some(autonat::Behaviour::new( + PeerId::from(pub_key.clone()), + Default::default(), + )) + } else { + None + } + .into(); + + Self { + relay: relay::Behaviour::new(PeerId::from(pub_key.clone()), Default::default()), + ping: ping::Behaviour::new(ping::Config::new()), + identify: identify::Behaviour::new( + identify::Config::new("ipfs/0.1.0".to_string(), pub_key).with_agent_version( + format!("rust-libp2p-server/{}", env!("CARGO_PKG_VERSION")), + ), + ), + kademlia, + autonat, + } + } +} diff --git a/misc/server/src/config.rs b/misc/server/src/config.rs new file mode 100644 index 00000000000..c3e3ec529c1 --- /dev/null +++ b/misc/server/src/config.rs @@ -0,0 +1,39 @@ +use libp2p::Multiaddr; +use serde_derive::Deserialize; +use std::error::Error; +use std::path::Path; + +#[derive(Clone, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub(crate) struct Config { + pub(crate) identity: Identity, + pub(crate) addresses: Addresses, +} + +impl Config { + pub(crate) fn from_file(path: &Path) -> Result> { + Ok(serde_json::from_str(&std::fs::read_to_string(path)?)?) 
+ } +} + +#[derive(Clone, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub(crate) struct Identity { + #[serde(rename = "PeerID")] + pub(crate) peer_id: String, + pub(crate) priv_key: String, +} + +#[derive(Clone, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub(crate) struct Addresses { + pub(crate) swarm: Vec, + pub(crate) append_announce: Vec, +} + +impl zeroize::Zeroize for Config { + fn zeroize(&mut self) { + self.identity.peer_id.zeroize(); + self.identity.priv_key.zeroize(); + } +} diff --git a/misc/server/src/http_service.rs b/misc/server/src/http_service.rs new file mode 100644 index 00000000000..7905933fbf5 --- /dev/null +++ b/misc/server/src/http_service.rs @@ -0,0 +1,132 @@ +// Copyright 2022 Protocol Labs. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. 
+ // Serve on all interfaces at the default IPFS metrics port (8888).
(req_path == self.metrics_path) { + // Encode and serve metrics from registry. + self.respond_with_metrics() + } else { + self.respond_with_404_not_found() + }; + Box::pin(async { Ok(resp) }) + } +} + +pub(crate) struct MakeMetricService { + reg: SharedRegistry, + metrics_path: String, +} + +impl MakeMetricService { + pub(crate) fn new(registry: Registry, metrics_path: String) -> MakeMetricService { + MakeMetricService { + reg: Arc::new(Mutex::new(registry)), + metrics_path, + } + } +} + +impl Service for MakeMetricService { + type Response = MetricService; + type Error = hyper::Error; + type Future = Pin> + Send>>; + + fn poll_ready(&mut self, _: &mut Context) -> Poll> { + Poll::Ready(Ok(())) + } + + fn call(&mut self, _: T) -> Self::Future { + let reg = self.reg.clone(); + let metrics_path = self.metrics_path.clone(); + let fut = async move { Ok(MetricService { reg, metrics_path }) }; + Box::pin(fut) + } +} diff --git a/misc/server/src/main.rs b/misc/server/src/main.rs new file mode 100644 index 00000000000..959e80b60ed --- /dev/null +++ b/misc/server/src/main.rs @@ -0,0 +1,193 @@ +use base64::Engine; +use clap::Parser; +use futures::stream::StreamExt; +use futures_timer::Delay; +use libp2p::identity; +use libp2p::identity::PeerId; +use libp2p::kad; +use libp2p::metrics::{Metrics, Recorder}; +use libp2p::swarm::SwarmEvent; +use libp2p::tcp; +use libp2p::{identify, noise, yamux}; +use prometheus_client::metrics::info::Info; +use prometheus_client::registry::Registry; +use std::error::Error; +use std::path::PathBuf; +use std::str::FromStr; +use std::task::Poll; +use std::time::Duration; +use tracing_subscriber::EnvFilter; +use zeroize::Zeroizing; + +mod behaviour; +mod config; +mod http_service; + +const BOOTSTRAP_INTERVAL: Duration = Duration::from_secs(5 * 60); + +#[derive(Debug, Parser)] +#[clap(name = "libp2p server", about = "A rust-libp2p server binary.")] +struct Opts { + /// Path to IPFS config file. 
+ #[clap(long)] + config: PathBuf, + + /// Metric endpoint path. + #[clap(long, default_value = "/metrics")] + metrics_path: String, + + /// Whether to run the libp2p Kademlia protocol and join the IPFS DHT. + #[clap(long)] + enable_kademlia: bool, + + /// Whether to run the libp2p Autonat protocol. + #[clap(long)] + enable_autonat: bool, +} + +#[tokio::main] +async fn main() -> Result<(), Box> { + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); + + let opt = Opts::parse(); + + let config = Zeroizing::new(config::Config::from_file(opt.config.as_path())?); + + let mut metric_registry = Registry::default(); + + let local_keypair = { + let keypair = identity::Keypair::try_decode_protobuf(&Zeroizing::new( + base64::engine::general_purpose::STANDARD + .decode(config.identity.priv_key.as_bytes())?, + ))?; + + let peer_id = keypair.public().into(); + assert_eq!( + PeerId::from_str(&config.identity.peer_id)?, + peer_id, + "Expect peer id derived from private key and peer id retrieved from config to match." + ); + + keypair + }; + + let mut swarm = libp2p::SwarmBuilder::with_existing_identity(local_keypair) + .with_tokio() + .with_tcp( + tcp::Config::default().port_reuse(true).nodelay(true), + noise::Config::new, + yamux::Config::default, + )? + .with_quic() + .with_dns()? + .with_websocket(noise::Config::new, yamux::Config::default) + .await? + .with_bandwidth_metrics(&mut metric_registry) + .with_behaviour(|key| { + behaviour::Behaviour::new(key.public(), opt.enable_kademlia, opt.enable_autonat) + })? 
+ .build(); + + if config.addresses.swarm.is_empty() { + tracing::warn!("No listen addresses configured"); + } + for address in &config.addresses.swarm { + match swarm.listen_on(address.clone()) { + Ok(_) => {} + Err(e @ libp2p::TransportError::MultiaddrNotSupported(_)) => { + tracing::warn!(%address, "Failed to listen on address, continuing anyways, {e}") + } + Err(e) => return Err(e.into()), + } + } + + if config.addresses.append_announce.is_empty() { + tracing::warn!("No external addresses configured"); + } + for address in &config.addresses.append_announce { + swarm.add_external_address(address.clone()) + } + tracing::info!( + "External addresses: {:?}", + swarm.external_addresses().collect::>() + ); + + let metrics = Metrics::new(&mut metric_registry); + let build_info = Info::new(vec![("version".to_string(), env!("CARGO_PKG_VERSION"))]); + metric_registry.register( + "build", + "A metric with a constant '1' value labeled by version", + build_info, + ); + tokio::spawn(async move { + if let Err(e) = http_service::metrics_server(metric_registry, opt.metrics_path).await { + tracing::error!("Metrics server failed: {e}"); + } + }); + + let mut bootstrap_timer = Delay::new(BOOTSTRAP_INTERVAL); + + loop { + if let Poll::Ready(()) = futures::poll!(&mut bootstrap_timer) { + bootstrap_timer.reset(BOOTSTRAP_INTERVAL); + let _ = swarm + .behaviour_mut() + .kademlia + .as_mut() + .map(|k| k.bootstrap()); + } + + let event = swarm.next().await.expect("Swarm not to terminate."); + metrics.record(&event); + match event { + SwarmEvent::Behaviour(behaviour::BehaviourEvent::Identify(e)) => { + tracing::info!("{:?}", e); + metrics.record(&e); + + if let identify::Event::Received { + peer_id, + info: + identify::Info { + listen_addrs, + protocols, + .. 
+ }, + } = e + { + if protocols.iter().any(|p| *p == kad::PROTOCOL_NAME) { + for addr in listen_addrs { + swarm + .behaviour_mut() + .kademlia + .as_mut() + .map(|k| k.add_address(&peer_id, addr)); + } + } + } + } + SwarmEvent::Behaviour(behaviour::BehaviourEvent::Ping(e)) => { + tracing::debug!("{:?}", e); + metrics.record(&e); + } + SwarmEvent::Behaviour(behaviour::BehaviourEvent::Kademlia(e)) => { + tracing::debug!("{:?}", e); + metrics.record(&e); + } + SwarmEvent::Behaviour(behaviour::BehaviourEvent::Relay(e)) => { + tracing::info!("{:?}", e); + metrics.record(&e) + } + SwarmEvent::Behaviour(behaviour::BehaviourEvent::Autonat(e)) => { + tracing::info!("{:?}", e); + // TODO: Add metric recording for `NatStatus`. + // metrics.record(&e) + } + SwarmEvent::NewListenAddr { address, .. } => { + tracing::info!(%address, "Listening on address"); + } + _ => {} + } + } +} diff --git a/misc/webrtc-utils/CHANGELOG.md b/misc/webrtc-utils/CHANGELOG.md new file mode 100644 index 00000000000..6949113a377 --- /dev/null +++ b/misc/webrtc-utils/CHANGELOG.md @@ -0,0 +1,11 @@ +## 0.2.0 + +- Update to latest version of `libp2p-noise`. + See [PR 4968](https://github.com/libp2p/rust-libp2p/pull/4968). + +## 0.1.0 + +- Initial release. + See [PR 4248]. 
+ +[PR 4248]: https://github.com/libp2p/rust-libp2p/pull/4248 diff --git a/misc/webrtc-utils/Cargo.toml b/misc/webrtc-utils/Cargo.toml new file mode 100644 index 00000000000..7173dedae7b --- /dev/null +++ b/misc/webrtc-utils/Cargo.toml @@ -0,0 +1,34 @@ +[package] +authors = ["Doug Anderson "] +categories = ["network-programming"] +description = "Utilities for WebRTC in libp2p" +edition = "2021" +license = "MIT" +name = "libp2p-webrtc-utils" +repository = "https://github.com/libp2p/rust-libp2p" +rust-version = { workspace = true } +version = "0.2.0" +publish = true + +[dependencies] +asynchronous-codec = { workspace = true } +bytes = "1" +futures = "0.3" +hex = "0.4" +libp2p-core = { workspace = true } +libp2p-identity = { workspace = true } +libp2p-noise = { workspace = true } +quick-protobuf = "0.8" +quick-protobuf-codec = { workspace = true } +rand = "0.8" +serde = { version = "1.0", features = ["derive"] } +sha2 = "0.10.8" +thiserror = "1" +tinytemplate = "1.2" +tracing = "0.1.37" + +[dev-dependencies] +hex-literal = "0.4" + +[lints] +workspace = true diff --git a/misc/webrtc-utils/src/fingerprint.rs b/misc/webrtc-utils/src/fingerprint.rs new file mode 100644 index 00000000000..a02c4d1116d --- /dev/null +++ b/misc/webrtc-utils/src/fingerprint.rs @@ -0,0 +1,109 @@ +// Copyright 2023 Doug Anderson. +// Copyright 2022 Parity Technologies (UK) Ltd. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +use libp2p_core::multihash; +use sha2::Digest as _; +use std::fmt; + +pub const SHA256: &str = "sha-256"; +const MULTIHASH_SHA256_CODE: u64 = 0x12; + +type Multihash = multihash::Multihash<64>; + +/// A certificate fingerprint that is assumed to be created using the SHA256 hash algorithm. +#[derive(Eq, PartialEq, Copy, Clone)] +pub struct Fingerprint([u8; 32]); + +impl Fingerprint { + pub const FF: Fingerprint = Fingerprint([0xFF; 32]); + + pub const fn raw(digest: [u8; 32]) -> Self { + Fingerprint(digest) + } + + /// Creates a new [Fingerprint] from a raw certificate by hashing the given bytes with SHA256. + pub fn from_certificate(bytes: &[u8]) -> Self { + Fingerprint(sha2::Sha256::digest(bytes).into()) + } + + /// Converts [`Multihash`](multihash::Multihash) to [`Fingerprint`]. + pub fn try_from_multihash(hash: Multihash) -> Option { + if hash.code() != MULTIHASH_SHA256_CODE { + // Only support SHA256 for now. + return None; + } + + let bytes = hash.digest().try_into().ok()?; + + Some(Self(bytes)) + } + + /// Converts this fingerprint to [`Multihash`](multihash::Multihash). + pub fn to_multihash(self) -> Multihash { + Multihash::wrap(MULTIHASH_SHA256_CODE, &self.0).expect("fingerprint's len to be 32 bytes") + } + + /// Formats this fingerprint as uppercase hex, separated by colons (`:`). + /// + /// This is the format described in . + pub fn to_sdp_format(self) -> String { + self.0.map(|byte| format!("{byte:02X}")).join(":") + } + + /// Returns the algorithm used (e.g. 
"sha-256"). + /// See + pub fn algorithm(&self) -> String { + SHA256.to_owned() + } +} + +impl fmt::Debug for Fingerprint { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str(&hex::encode(self.0)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + const SDP_FORMAT: &str = "7D:E3:D8:3F:81:A6:80:59:2A:47:1E:6B:6A:BB:07:47:AB:D3:53:85:A8:09:3F:DF:E1:12:C1:EE:BB:6C:C6:AC"; + const REGULAR_FORMAT: [u8; 32] = + hex_literal::hex!("7DE3D83F81A680592A471E6B6ABB0747ABD35385A8093FDFE112C1EEBB6CC6AC"); + + #[test] + fn sdp_format() { + let fp = Fingerprint::raw(REGULAR_FORMAT); + + let formatted = fp.to_sdp_format(); + + assert_eq!(formatted, SDP_FORMAT) + } + + #[test] + fn from_sdp() { + let mut bytes = [0; 32]; + bytes.copy_from_slice(&hex::decode(SDP_FORMAT.replace(':', "")).unwrap()); + + let fp = Fingerprint::raw(bytes); + assert_eq!(fp, Fingerprint::raw(REGULAR_FORMAT)); + } +} diff --git a/transports/webrtc/src/generated/message.proto b/misc/webrtc-utils/src/generated/message.proto similarity index 100% rename from transports/webrtc/src/generated/message.proto rename to misc/webrtc-utils/src/generated/message.proto diff --git a/transports/webrtc/src/generated/mod.rs b/misc/webrtc-utils/src/generated/mod.rs similarity index 100% rename from transports/webrtc/src/generated/mod.rs rename to misc/webrtc-utils/src/generated/mod.rs diff --git a/transports/webrtc/src/generated/webrtc/mod.rs b/misc/webrtc-utils/src/generated/webrtc/mod.rs similarity index 100% rename from transports/webrtc/src/generated/webrtc/mod.rs rename to misc/webrtc-utils/src/generated/webrtc/mod.rs diff --git a/transports/webrtc/src/generated/webrtc/pb.rs b/misc/webrtc-utils/src/generated/webrtc/pb.rs similarity index 100% rename from transports/webrtc/src/generated/webrtc/pb.rs rename to misc/webrtc-utils/src/generated/webrtc/pb.rs diff --git a/misc/webrtc-utils/src/lib.rs b/misc/webrtc-utils/src/lib.rs new file mode 100644 index 00000000000..c744634de30 --- /dev/null +++ 
b/misc/webrtc-utils/src/lib.rs @@ -0,0 +1,15 @@ +mod proto { + #![allow(unreachable_pub)] + include!("generated/mod.rs"); + pub use self::webrtc::pb::{mod_Message::Flag, Message}; +} + +mod fingerprint; +pub mod noise; +pub mod sdp; +mod stream; +mod transport; + +pub use fingerprint::{Fingerprint, SHA256}; +pub use stream::{DropListener, Stream, MAX_MSG_LEN}; +pub use transport::parse_webrtc_dial_addr; diff --git a/transports/webrtc/src/tokio/upgrade/noise.rs b/misc/webrtc-utils/src/noise.rs similarity index 94% rename from transports/webrtc/src/tokio/upgrade/noise.rs rename to misc/webrtc-utils/src/noise.rs index 34e3526a2fe..9180acfc1ca 100644 --- a/transports/webrtc/src/tokio/upgrade/noise.rs +++ b/misc/webrtc-utils/src/noise.rs @@ -19,15 +19,17 @@ // DEALINGS IN THE SOFTWARE. use futures::{AsyncRead, AsyncWrite, AsyncWriteExt}; -use libp2p_core::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}; +use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}; +use libp2p_core::UpgradeInfo; use libp2p_identity as identity; use libp2p_identity::PeerId; use libp2p_noise as noise; -use crate::tokio::fingerprint::Fingerprint; -use crate::tokio::Error; +use crate::fingerprint::Fingerprint; -pub(crate) async fn inbound( +pub use noise::Error; + +pub async fn inbound( id_keys: identity::Keypair, stream: T, client_fingerprint: Fingerprint, @@ -49,7 +51,7 @@ where Ok(peer_id) } -pub(crate) async fn outbound( +pub async fn outbound( id_keys: identity::Keypair, stream: T, server_fingerprint: Fingerprint, diff --git a/misc/webrtc-utils/src/sdp.rs b/misc/webrtc-utils/src/sdp.rs new file mode 100644 index 00000000000..0796548f449 --- /dev/null +++ b/misc/webrtc-utils/src/sdp.rs @@ -0,0 +1,157 @@ +// Copyright 2023 Doug Anderson +// Copyright 2022 Parity Technologies (UK) Ltd. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. +use crate::fingerprint::Fingerprint; +use serde::Serialize; +use std::net::{IpAddr, SocketAddr}; +use tinytemplate::TinyTemplate; + +use rand::distributions::Alphanumeric; +use rand::{thread_rng, Rng}; + +pub fn answer(addr: SocketAddr, server_fingerprint: Fingerprint, client_ufrag: &str) -> String { + let answer = render_description( + SERVER_SESSION_DESCRIPTION, + addr, + server_fingerprint, + client_ufrag, + ); + + tracing::trace!(%answer, "Created SDP answer"); + + answer +} + +// See [`CLIENT_SESSION_DESCRIPTION`]. +// +// a=ice-lite +// +// A lite implementation is only appropriate for devices that will *always* be connected to +// the public Internet and have a public IP address at which it can receive packets from any +// correspondent. ICE will not function when a lite implementation is placed behind a NAT +// (RFC8445). 
+// +// a=tls-id: +// +// "TLS ID" uniquely identifies a TLS association. +// The ICE protocol uses a "TLS ID" system to indicate whether a fresh DTLS connection +// must be reopened in case of ICE renegotiation. Considering that ICE renegotiations +// never happen in our use case, we can simply put a random value and not care about +// it. Note however that the TLS ID in the answer must be present if and only if the +// offer contains one. (RFC8842) +// TODO: is it true that renegotiations never happen? what about a connection closing? +// "tls-id" attribute MUST be present in the initial offer and respective answer (RFC8839). +// XXX: but right now browsers don't send it. +// +// a=setup:passive +// +// "passive" indicates that the remote DTLS server will only listen for incoming +// connections. (RFC5763) +// The answerer (server) MUST not be located behind a NAT (RFC6135). +// +// The answerer MUST use either a setup attribute value of setup:active or setup:passive. +// Note that if the answerer uses setup:passive, then the DTLS handshake will not begin until +// the answerer is received, which adds additional latency. setup:active allows the answer and +// the DTLS handshake to occur in parallel. Thus, setup:active is RECOMMENDED. +// +// a=candidate: +// +// A transport address for a candidate that can be used for connectivity checks (RFC8839). +// +// a=end-of-candidates +const SERVER_SESSION_DESCRIPTION: &str = "v=0 +o=- 0 0 IN {ip_version} {target_ip} +s=- +t=0 0 +a=ice-lite +m=application {target_port} UDP/DTLS/SCTP webrtc-datachannel +c=IN {ip_version} {target_ip} +a=mid:0 +a=ice-options:ice2 +a=ice-ufrag:{ufrag} +a=ice-pwd:{pwd} +a=fingerprint:{fingerprint_algorithm} {fingerprint_value} +a=setup:passive +a=sctp-port:5000 +a=max-message-size:16384 +a=candidate:1467250027 1 UDP 1467250027 {target_ip} {target_port} typ host +a=end-of-candidates +"; + +/// Indicates the IP version used in WebRTC: `IP4` or `IP6`. 
+#[derive(Serialize)] +enum IpVersion { + IP4, + IP6, +} + +/// Context passed to the templating engine, which replaces the above placeholders (e.g. +/// `{IP_VERSION}`) with real values. +#[derive(Serialize)] +struct DescriptionContext { + pub(crate) ip_version: IpVersion, + pub(crate) target_ip: IpAddr, + pub(crate) target_port: u16, + pub(crate) fingerprint_algorithm: String, + pub(crate) fingerprint_value: String, + pub(crate) ufrag: String, + pub(crate) pwd: String, +} + +/// Renders a [`TinyTemplate`] description using the provided arguments. +pub fn render_description( + description: &str, + addr: SocketAddr, + fingerprint: Fingerprint, + ufrag: &str, +) -> String { + let mut tt = TinyTemplate::new(); + tt.add_template("description", description).unwrap(); + + let context = DescriptionContext { + ip_version: { + if addr.is_ipv4() { + IpVersion::IP4 + } else { + IpVersion::IP6 + } + }, + target_ip: addr.ip(), + target_port: addr.port(), + fingerprint_algorithm: fingerprint.algorithm(), + fingerprint_value: fingerprint.to_sdp_format(), + // NOTE: ufrag is equal to pwd. + ufrag: ufrag.to_owned(), + pwd: ufrag.to_owned(), + }; + tt.render("description", &context).unwrap() +} + +/// Generates a random ufrag and adds a prefix according to the spec. +pub fn random_ufrag() -> String { + format!( + "libp2p+webrtc+v1/{}", + thread_rng() + .sample_iter(&Alphanumeric) + .take(64) + .map(char::from) + .collect::() + ) +} diff --git a/transports/webrtc/src/tokio/substream.rs b/misc/webrtc-utils/src/stream.rs similarity index 82% rename from transports/webrtc/src/tokio/substream.rs rename to misc/webrtc-utils/src/stream.rs index 89e52376a48..0e1496eb640 100644 --- a/transports/webrtc/src/tokio/substream.rs +++ b/misc/webrtc-utils/src/stream.rs @@ -1,4 +1,5 @@ // Copyright 2022 Parity Technologies (UK) Ltd. +// Copyright 2023 Protocol Labs. 
// // Permission is hereby granted, free of charge, to any person obtaining a // copy of this software and associated documentation files (the "Software"), @@ -18,24 +19,20 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use asynchronous_codec::Framed; use bytes::Bytes; use futures::{channel::oneshot, prelude::*, ready}; -use tokio_util::compat::Compat; -use webrtc::data::data_channel::{DataChannel, PollDataChannel}; use std::{ io, pin::Pin, - sync::Arc, task::{Context, Poll}, }; use crate::proto::{Flag, Message}; -use crate::tokio::{ - substream::drop_listener::GracefullyClosed, - substream::framed_dc::FramedDc, - substream::state::{Closing, State}, +use crate::{ + stream::drop_listener::GracefullyClosed, + stream::framed_dc::FramedDc, + stream::state::{Closing, State}, }; mod drop_listener; @@ -47,7 +44,7 @@ mod state; /// "As long as message interleaving is not supported, the sender SHOULD limit the maximum message /// size to 16 KB to avoid monopolization." /// Source: -const MAX_MSG_LEN: usize = 16384; // 16kiB +pub const MAX_MSG_LEN: usize = 16 * 1024; /// Length of varint, in bytes. const VARINT_LEN: usize = 2; /// Overhead of the protobuf encoding, in bytes. @@ -55,26 +52,28 @@ const PROTO_OVERHEAD: usize = 5; /// Maximum length of data, in bytes. const MAX_DATA_LEN: usize = MAX_MSG_LEN - VARINT_LEN - PROTO_OVERHEAD; -pub(crate) use drop_listener::DropListener; -/// A substream on top of a WebRTC data channel. +pub use drop_listener::DropListener; +/// A stream backed by a WebRTC data channel. /// -/// To be a proper libp2p substream, we need to implement [`AsyncRead`] and [`AsyncWrite`] as well +/// To be a proper libp2p stream, we need to implement [`AsyncRead`] and [`AsyncWrite`] as well /// as support a half-closed state which we do by framing messages in a protobuf envelope. 
-pub struct Substream { - io: FramedDc, +pub struct Stream { + io: FramedDc, state: State, read_buffer: Bytes, /// Dropping this will close the oneshot and notify the receiver by emitting `Canceled`. drop_notifier: Option>, } -impl Substream { - /// Returns a new `Substream` and a listener, which will notify the receiver when/if the substream - /// is dropped. - pub(crate) fn new(data_channel: Arc) -> (Self, DropListener) { +impl Stream +where + T: AsyncRead + AsyncWrite + Unpin + Clone, +{ + /// Returns a new [`Stream`] and a [`DropListener`], which will notify the receiver when/if the stream is dropped. + pub fn new(data_channel: T) -> (Self, DropListener) { let (sender, receiver) = oneshot::channel(); - let substream = Self { + let stream = Self { io: framed_dc::new(data_channel.clone()), state: State::Open, read_buffer: Bytes::default(), @@ -82,10 +81,10 @@ impl Substream { }; let listener = DropListener::new(framed_dc::new(data_channel), receiver); - (substream, listener) + (stream, listener) } - /// Gracefully closes the "read-half" of the substream. + /// Gracefully closes the "read-half" of the stream. pub fn poll_close_read(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { loop { match self.state.close_read_barrier()? 
{ @@ -113,7 +112,10 @@ impl Substream { } } -impl AsyncRead for Substream { +impl AsyncRead for Stream +where + T: AsyncRead + AsyncWrite + Unpin, +{ fn poll_read( mut self: Pin<&mut Self>, cx: &mut Context<'_>, @@ -157,7 +159,10 @@ impl AsyncRead for Substream { } } -impl AsyncWrite for Substream { +impl AsyncWrite for Stream +where + T: AsyncRead + AsyncWrite + Unpin, +{ fn poll_write( mut self: Pin<&mut Self>, cx: &mut Context<'_>, @@ -236,10 +241,13 @@ impl AsyncWrite for Substream { } } -fn io_poll_next( - io: &mut Framed, quick_protobuf_codec::Codec>, +fn io_poll_next( + io: &mut FramedDc, cx: &mut Context<'_>, -) -> Poll, Option>)>>> { +) -> Poll, Option>)>>> +where + T: AsyncRead + AsyncWrite + Unpin, +{ match ready!(io.poll_next_unpin(cx)) .transpose() .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))? @@ -252,36 +260,27 @@ fn io_poll_next( #[cfg(test)] mod tests { use super::*; + use crate::stream::framed_dc::codec; use asynchronous_codec::Encoder; use bytes::BytesMut; - use quick_protobuf::{MessageWrite, Writer}; - use unsigned_varint::codec::UviBytes; #[test] fn max_data_len() { // Largest possible message. let message = [0; MAX_DATA_LEN]; - let protobuf = crate::proto::Message { - flag: Some(crate::proto::Flag::FIN), + let protobuf = Message { + flag: Some(Flag::FIN), message: Some(message.to_vec()), }; - let mut encoded_msg = Vec::new(); - let mut writer = Writer::new(&mut encoded_msg); - protobuf - .write_message(&mut writer) - .expect("Encoding to succeed"); - assert_eq!(encoded_msg.len(), message.len() + PROTO_OVERHEAD); + let mut codec = codec(); - let mut uvi = UviBytes::default(); let mut dst = BytesMut::new(); - uvi.encode(encoded_msg.as_slice(), &mut dst).unwrap(); + codec.encode(protobuf, &mut dst).unwrap(); // Ensure the varint prefixed and protobuf encoded largest message is no longer than the // maximum limit specified in the libp2p WebRTC specification. 
assert_eq!(dst.len(), MAX_MSG_LEN); - - assert_eq!(dst.len() - encoded_msg.len(), VARINT_LEN); } } diff --git a/transports/webrtc/src/tokio/substream/drop_listener.rs b/misc/webrtc-utils/src/stream/drop_listener.rs similarity index 77% rename from transports/webrtc/src/tokio/substream/drop_listener.rs rename to misc/webrtc-utils/src/stream/drop_listener.rs index 735240456fe..9745e3d4364 100644 --- a/transports/webrtc/src/tokio/substream/drop_listener.rs +++ b/misc/webrtc-utils/src/stream/drop_listener.rs @@ -20,7 +20,7 @@ use futures::channel::oneshot; use futures::channel::oneshot::Canceled; -use futures::{FutureExt, SinkExt}; +use futures::{AsyncRead, AsyncWrite, FutureExt, SinkExt}; use std::future::Future; use std::io; @@ -28,46 +28,42 @@ use std::pin::Pin; use std::task::{Context, Poll}; use crate::proto::{Flag, Message}; -use crate::tokio::substream::framed_dc::FramedDc; +use crate::stream::framed_dc::FramedDc; #[must_use] -pub(crate) struct DropListener { - state: State, +pub struct DropListener { + state: State, } -impl DropListener { - pub(crate) fn new(stream: FramedDc, receiver: oneshot::Receiver) -> Self { - let substream_id = stream.get_ref().stream_identifier(); - +impl DropListener { + pub fn new(stream: FramedDc, receiver: oneshot::Receiver) -> Self { Self { - state: State::Idle { - stream, - receiver, - substream_id, - }, + state: State::Idle { stream, receiver }, } } } -enum State { +enum State { /// The [`DropListener`] is idle and waiting to be activated. Idle { - stream: FramedDc, + stream: FramedDc, receiver: oneshot::Receiver, - substream_id: u16, }, /// The stream got dropped and we are sending a reset flag. SendingReset { - stream: FramedDc, + stream: FramedDc, }, Flushing { - stream: FramedDc, + stream: FramedDc, }, /// Bad state transition. 
Poisoned, } -impl Future for DropListener { +impl Future for DropListener +where + T: AsyncRead + AsyncWrite + Unpin, +{ type Output = io::Result<()>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { @@ -77,23 +73,18 @@ impl Future for DropListener { match std::mem::replace(state, State::Poisoned) { State::Idle { stream, - substream_id, mut receiver, } => match receiver.poll_unpin(cx) { Poll::Ready(Ok(GracefullyClosed {})) => { return Poll::Ready(Ok(())); } Poll::Ready(Err(Canceled)) => { - log::info!("Substream {substream_id} dropped without graceful close, sending Reset"); + tracing::info!("Stream dropped without graceful close, sending Reset"); *state = State::SendingReset { stream }; continue; } Poll::Pending => { - *state = State::Idle { - stream, - substream_id, - receiver, - }; + *state = State::Idle { stream, receiver }; return Poll::Pending; } }, @@ -126,5 +117,5 @@ impl Future for DropListener { } } -/// Indicates that our substream got gracefully closed. -pub(crate) struct GracefullyClosed {} +/// Indicates that our stream got gracefully closed. +pub struct GracefullyClosed {} diff --git a/transports/webrtc/src/tokio/substream/framed_dc.rs b/misc/webrtc-utils/src/stream/framed_dc.rs similarity index 69% rename from transports/webrtc/src/tokio/substream/framed_dc.rs rename to misc/webrtc-utils/src/stream/framed_dc.rs index 1b3860b662b..721178fdcd3 100644 --- a/transports/webrtc/src/tokio/substream/framed_dc.rs +++ b/misc/webrtc-utils/src/stream/framed_dc.rs @@ -19,26 +19,23 @@ // DEALINGS IN THE SOFTWARE. 
use asynchronous_codec::Framed; -use tokio_util::compat::Compat; -use tokio_util::compat::TokioAsyncReadCompatExt; -use webrtc::data::data_channel::{DataChannel, PollDataChannel}; +use futures::{AsyncRead, AsyncWrite}; -use std::sync::Arc; - -use super::{MAX_DATA_LEN, MAX_MSG_LEN, VARINT_LEN}; use crate::proto::Message; +use crate::stream::{MAX_DATA_LEN, MAX_MSG_LEN, VARINT_LEN}; -pub(crate) type FramedDc = Framed, quick_protobuf_codec::Codec>; -pub(crate) fn new(data_channel: Arc) -> FramedDc { - let mut inner = PollDataChannel::new(data_channel); - inner.set_read_buf_capacity(MAX_MSG_LEN); - - let mut framed = Framed::new( - inner.compat(), - quick_protobuf_codec::Codec::new(MAX_MSG_LEN - VARINT_LEN), - ); +pub(crate) type FramedDc = Framed>; +pub(crate) fn new(inner: T) -> FramedDc +where + T: AsyncRead + AsyncWrite, +{ + let mut framed = Framed::new(inner, codec()); // If not set, `Framed` buffers up to 131kB of data before sending, which leads to "outbound // packet larger than maximum message size" error in webrtc-rs. framed.set_send_high_water_mark(MAX_DATA_LEN); framed } + +pub(crate) fn codec() -> quick_protobuf_codec::Codec { + quick_protobuf_codec::Codec::new(MAX_MSG_LEN - VARINT_LEN) +} diff --git a/transports/webrtc/src/tokio/substream/state.rs b/misc/webrtc-utils/src/stream/state.rs similarity index 99% rename from transports/webrtc/src/tokio/substream/state.rs rename to misc/webrtc-utils/src/stream/state.rs index b1768aa2165..082325e4d47 100644 --- a/transports/webrtc/src/tokio/substream/state.rs +++ b/misc/webrtc-utils/src/stream/state.rs @@ -277,7 +277,7 @@ impl State { } } - /// Acts as a "barrier" for [`Substream::poll_close_read`](super::Substream::poll_close_read). + /// Acts as a "barrier" for [`Stream::poll_close_read`](super::Stream::poll_close_read). 
pub(crate) fn close_read_barrier(&mut self) -> io::Result> { loop { match self { diff --git a/misc/webrtc-utils/src/transport.rs b/misc/webrtc-utils/src/transport.rs new file mode 100644 index 00000000000..440ad73ed02 --- /dev/null +++ b/misc/webrtc-utils/src/transport.rs @@ -0,0 +1,101 @@ +use crate::fingerprint::Fingerprint; +use libp2p_core::{multiaddr::Protocol, Multiaddr}; +use std::net::{IpAddr, SocketAddr}; + +/// Parse the given [`Multiaddr`] into a [`SocketAddr`] and a [`Fingerprint`] for dialing. +pub fn parse_webrtc_dial_addr(addr: &Multiaddr) -> Option<(SocketAddr, Fingerprint)> { + let mut iter = addr.iter(); + + let ip = match iter.next()? { + Protocol::Ip4(ip) => IpAddr::from(ip), + Protocol::Ip6(ip) => IpAddr::from(ip), + _ => return None, + }; + + let port = iter.next()?; + let webrtc = iter.next()?; + let certhash = iter.next()?; + + let (port, fingerprint) = match (port, webrtc, certhash) { + (Protocol::Udp(port), Protocol::WebRTCDirect, Protocol::Certhash(cert_hash)) => { + let fingerprint = Fingerprint::try_from_multihash(cert_hash)?; + + (port, fingerprint) + } + _ => return None, + }; + + match iter.next() { + Some(Protocol::P2p(_)) => {} + // peer ID is optional + None => {} + // unexpected protocol + Some(_) => return None, + } + + Some((SocketAddr::new(ip, port), fingerprint)) +} + +#[cfg(test)] +mod tests { + use super::*; + use std::net::{Ipv4Addr, Ipv6Addr}; + + #[test] + fn parse_valid_address_with_certhash_and_p2p() { + let addr = "/ip4/127.0.0.1/udp/39901/webrtc-direct/certhash/uEiDikp5KVUgkLta1EjUN-IKbHk-dUBg8VzKgf5nXxLK46w/p2p/12D3KooWNpDk9w6WrEEcdsEH1y47W71S36yFjw4sd3j7omzgCSMS" + .parse() + .unwrap(); + + let maybe_parsed = parse_webrtc_dial_addr(&addr); + + assert_eq!( + maybe_parsed, + Some(( + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 39901), + Fingerprint::raw(hex_literal::hex!( + "e2929e4a5548242ed6b512350df8829b1e4f9d50183c5732a07f99d7c4b2b8eb" + )) + )) + ); + } + + #[test] + fn peer_id_is_not_required() { + 
let addr = "/ip4/127.0.0.1/udp/39901/webrtc-direct/certhash/uEiDikp5KVUgkLta1EjUN-IKbHk-dUBg8VzKgf5nXxLK46w" + .parse() + .unwrap(); + + let maybe_parsed = parse_webrtc_dial_addr(&addr); + + assert_eq!( + maybe_parsed, + Some(( + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 39901), + Fingerprint::raw(hex_literal::hex!( + "e2929e4a5548242ed6b512350df8829b1e4f9d50183c5732a07f99d7c4b2b8eb" + )) + )) + ); + } + + #[test] + fn parse_ipv6() { + let addr = + "/ip6/::1/udp/12345/webrtc-direct/certhash/uEiDikp5KVUgkLta1EjUN-IKbHk-dUBg8VzKgf5nXxLK46w/p2p/12D3KooWNpDk9w6WrEEcdsEH1y47W71S36yFjw4sd3j7omzgCSMS" + .parse() + .unwrap(); + + let maybe_parsed = parse_webrtc_dial_addr(&addr); + + assert_eq!( + maybe_parsed, + Some(( + SocketAddr::new(IpAddr::V6(Ipv6Addr::LOCALHOST), 12345), + Fingerprint::raw(hex_literal::hex!( + "e2929e4a5548242ed6b512350df8829b1e4f9d50183c5732a07f99d7c4b2b8eb" + )) + )) + ); + } +} diff --git a/muxers/mplex/CHANGELOG.md b/muxers/mplex/CHANGELOG.md index 1c28d2828ae..48ab616e131 100644 --- a/muxers/mplex/CHANGELOG.md +++ b/muxers/mplex/CHANGELOG.md @@ -1,4 +1,9 @@ -## 0.40.0 - unreleased +## 0.41.0 + +- Migrate to `{In,Out}boundConnectionUpgrade` traits. + See [PR 4695](https://github.com/libp2p/rust-libp2p/pull/4695). + +## 0.40.0 - Raise MSRV to 1.65. See [PR 3715]. 
diff --git a/muxers/mplex/Cargo.toml b/muxers/mplex/Cargo.toml index c51e3956adc..726e8a8434f 100644 --- a/muxers/mplex/Cargo.toml +++ b/muxers/mplex/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-mplex" edition = "2021" rust-version = { workspace = true } description = "Mplex multiplexing protocol for libp2p" -version = "0.40.0" +version = "0.41.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -12,26 +12,27 @@ categories = ["network-programming", "asynchronous"] [dependencies] bytes = "1" -futures = "0.3.28" -asynchronous-codec = "0.6" +futures = "0.3.30" +asynchronous-codec = { workspace = true } libp2p-core = { workspace = true } libp2p-identity = { workspace = true } -log = "0.4" nohash-hasher = "0.2" parking_lot = "0.12" rand = "0.8" -smallvec = "1.6.1" -unsigned-varint = { version = "0.7", features = ["asynchronous_codec"] } +smallvec = "1.11.2" +tracing = "0.1.37" +unsigned-varint = { workspace = true, features = ["asynchronous_codec"] } [dev-dependencies] async-std = { version = "1.7.0", features = ["attributes"] } -criterion = "0.4" -env_logger = "0.10" +criterion = "0.5" futures = "0.3" -libp2p-muxer-test-harness = { workspace = true } +libp2p-identity = { workspace = true, features = ["rand"] } +libp2p-muxer-test-harness = { path = "../test-harness" } libp2p-plaintext = { workspace = true } libp2p-tcp = { workspace = true, features = ["async-io"] } quickcheck = { workspace = true } +tracing-subscriber = { version = "0.3", features = ["env-filter"] } [[bench]] name = "split_send_size" @@ -43,3 +44,6 @@ harness = false all-features = true rustdoc-args = ["--cfg", "docsrs"] rustc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true diff --git a/muxers/mplex/benches/split_send_size.rs b/muxers/mplex/benches/split_send_size.rs index 56bd934f302..9a9814d2f2a 100644 --- a/muxers/mplex/benches/split_send_size.rs +++ b/muxers/mplex/benches/split_send_size.rs @@ -27,13 +27,15 @@ use 
futures::future::poll_fn; use futures::prelude::*; use futures::{channel::oneshot, future::join}; use libp2p_core::muxing::StreamMuxerExt; +use libp2p_core::transport::ListenerId; use libp2p_core::{multiaddr::multiaddr, muxing, transport, upgrade, Multiaddr, Transport}; use libp2p_identity as identity; use libp2p_identity::PeerId; use libp2p_mplex as mplex; -use libp2p_plaintext::PlainText2Config; +use libp2p_plaintext as plaintext; use std::pin::Pin; use std::time::Duration; +use tracing_subscriber::EnvFilter; type BenchTransport = transport::Boxed<(PeerId, muxing::StreamMuxerBox)>; @@ -50,7 +52,9 @@ const BENCH_SIZES: [usize; 8] = [ ]; fn prepare(c: &mut Criterion) { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let payload: Vec = vec![1; 1024 * 1024]; @@ -100,7 +104,9 @@ fn run( payload: &Vec, listen_addr: &Multiaddr, ) { - receiver_trans.listen_on(listen_addr.clone()).unwrap(); + receiver_trans + .listen_on(ListenerId::next(), listen_addr.clone()) + .unwrap(); let (addr_sender, addr_receiver) = oneshot::channel(); let mut addr_sender = Some(addr_sender); let payload_len = payload.len(); @@ -163,30 +169,28 @@ fn run( } fn tcp_transport(split_send_size: usize) -> BenchTransport { - let key = identity::Keypair::generate_ed25519(); - let local_public_key = key.public(); - let mut mplex = mplex::MplexConfig::default(); mplex.set_split_send_size(split_send_size); libp2p_tcp::async_io::Transport::new(libp2p_tcp::Config::default().nodelay(true)) .upgrade(upgrade::Version::V1) - .authenticate(PlainText2Config { local_public_key }) + .authenticate(plaintext::Config::new( + &identity::Keypair::generate_ed25519(), + )) .multiplex(mplex) .timeout(Duration::from_secs(5)) .boxed() } fn mem_transport(split_send_size: usize) -> BenchTransport { - let key = identity::Keypair::generate_ed25519(); - let local_public_key = key.public(); - let mut mplex = mplex::MplexConfig::default(); 
mplex.set_split_send_size(split_send_size); transport::MemoryTransport::default() .upgrade(upgrade::Version::V1) - .authenticate(PlainText2Config { local_public_key }) + .authenticate(plaintext::Config::new( + &identity::Keypair::generate_ed25519(), + )) .multiplex(mplex) .timeout(Duration::from_secs(5)) .boxed() diff --git a/muxers/mplex/src/codec.rs b/muxers/mplex/src/codec.rs index ec605edc6a7..014ee899280 100644 --- a/muxers/mplex/src/codec.rs +++ b/muxers/mplex/src/codec.rs @@ -285,10 +285,10 @@ impl Decoder for Codec { } impl Encoder for Codec { - type Item = Frame; + type Item<'a> = Frame; type Error = io::Error; - fn encode(&mut self, item: Self::Item, dst: &mut BytesMut) -> Result<(), Self::Error> { + fn encode(&mut self, item: Self::Item<'_>, dst: &mut BytesMut) -> Result<(), Self::Error> { let (header, data) = match item { Frame::Open { stream_id } => (stream_id.num << 3, Bytes::new()), Frame::Data { diff --git a/muxers/mplex/src/io.rs b/muxers/mplex/src/io.rs index 85b58820823..753294a7845 100644 --- a/muxers/mplex/src/io.rs +++ b/muxers/mplex/src/io.rs @@ -24,7 +24,6 @@ use asynchronous_codec::Framed; use bytes::Bytes; use futures::task::{waker_ref, ArcWake, AtomicWaker, WakerRef}; use futures::{prelude::*, ready, stream::Fuse}; -use log::{debug, trace}; use nohash_hasher::{IntMap, IntSet}; use parking_lot::Mutex; use smallvec::SmallVec; @@ -117,7 +116,7 @@ where /// Creates a new multiplexed I/O stream. pub(crate) fn new(io: C, config: MplexConfig) -> Self { let id = ConnectionId(rand::random()); - debug!("New multiplexed connection: {}", id); + tracing::debug!(connection=%id, "New multiplexed connection"); Multiplexed { id, config, @@ -143,7 +142,7 @@ where } /// Flushes the underlying I/O stream. 
- pub(crate) fn poll_flush(&mut self, cx: &mut Context<'_>) -> Poll> { + pub(crate) fn poll_flush(&mut self, cx: &Context<'_>) -> Poll> { match &self.status { Status::Closed => return Poll::Ready(Ok(())), Status::Err(e) => return Poll::Ready(Err(io::Error::new(e.kind(), e.to_string()))), @@ -169,7 +168,7 @@ where /// > **Note**: No `Close` or `Reset` frames are sent on open substreams /// > before closing the underlying connection. However, the connection /// > close implies a flush of any frames already sent. - pub(crate) fn poll_close(&mut self, cx: &mut Context<'_>) -> Poll> { + pub(crate) fn poll_close(&mut self, cx: &Context<'_>) -> Poll> { match &self.status { Status::Closed => return Poll::Ready(Ok(())), Status::Err(e) => return Poll::Ready(Err(io::Error::new(e.kind(), e.to_string()))), @@ -208,10 +207,7 @@ where /// [`MaxBufferBehaviour::Block`] is used, this method is blocked /// (i.e. `Pending`) on some task reading from the substream whose /// buffer is full. - pub(crate) fn poll_next_stream( - &mut self, - cx: &mut Context<'_>, - ) -> Poll> { + pub(crate) fn poll_next_stream(&mut self, cx: &Context<'_>) -> Poll> { self.guard_open()?; // Try to read from the buffer first. @@ -252,17 +248,16 @@ where } /// Creates a new (outbound) substream, returning the allocated stream ID. - pub(crate) fn poll_open_stream( - &mut self, - cx: &mut Context<'_>, - ) -> Poll> { + pub(crate) fn poll_open_stream(&mut self, cx: &Context<'_>) -> Poll> { self.guard_open()?; // Check the stream limits. 
if self.substreams.len() >= self.config.max_substreams { - debug!( - "{}: Maximum number of substreams reached ({})", - self.id, self.config.max_substreams + tracing::debug!( + connection=%self.id, + total_substreams=%self.substreams.len(), + max_substreams=%self.config.max_substreams, + "Maximum number of substreams reached" ); self.notifier_open.register(cx.waker()); return Poll::Pending; @@ -282,11 +277,11 @@ where buf: Default::default(), }, ); - debug!( - "{}: New outbound substream: {} (total {})", - self.id, - stream_id, - self.substreams.len() + tracing::debug!( + connection=%self.id, + substream=%stream_id, + total_substreams=%self.substreams.len(), + "New outbound substream" ); // The flush is delayed and the `Open` frame may be sent // together with other frames in the same transport packet. @@ -354,7 +349,11 @@ where if self.check_max_pending_frames().is_err() { return; } - trace!("{}: Pending close for stream {}", self.id, id); + tracing::trace!( + connection=%self.id, + substream=%id, + "Pending close for substream" + ); self.pending_frames .push_front(Frame::Close { stream_id: id }); } @@ -362,7 +361,11 @@ where if self.check_max_pending_frames().is_err() { return; } - trace!("{}: Pending reset for stream {}", self.id, id); + tracing::trace!( + connection=%self.id, + substream=%id, + "Pending reset for substream" + ); self.pending_frames .push_front(Frame::Reset { stream_id: id }); } @@ -374,7 +377,7 @@ where /// Writes data to a substream. pub(crate) fn poll_write_stream( &mut self, - cx: &mut Context<'_>, + cx: &Context<'_>, id: LocalStreamId, buf: &[u8], ) -> Poll> { @@ -424,7 +427,7 @@ where /// Inbound substreams received in excess of that limit are immediately reset. pub(crate) fn poll_read_stream( &mut self, - cx: &mut Context<'_>, + cx: &Context<'_>, id: LocalStreamId, ) -> Poll>> { self.guard_open()?; @@ -482,11 +485,11 @@ where frame @ Frame::Open { .. } => { if let Some(id) = self.on_open(frame.remote_id())? 
{ self.open_buffer.push_front(id); - trace!( - "{}: Buffered new inbound stream {} (total: {})", - self.id, - id, - self.open_buffer.len() + tracing::trace!( + connection=%self.id, + inbound_stream=%id, + inbound_buffer_len=%self.open_buffer.len(), + "Buffered new inbound stream" ); self.notifier_read.wake_next_stream(); } @@ -516,13 +519,17 @@ where /// > the underlying I/O stream is already closed. pub(crate) fn poll_flush_stream( &mut self, - cx: &mut Context<'_>, + cx: &Context<'_>, id: LocalStreamId, ) -> Poll> { self.guard_open()?; ready!(self.poll_flush(cx))?; - trace!("{}: Flushed substream {}", self.id, id); + tracing::trace!( + connection=%self.id, + substream=%id, + "Flushed substream" + ); Poll::Ready(Ok(())) } @@ -532,7 +539,7 @@ where /// > **Note**: As opposed to `poll_close()`, a flush it not implied. pub(crate) fn poll_close_stream( &mut self, - cx: &mut Context<'_>, + cx: &Context<'_>, id: LocalStreamId, ) -> Poll> { self.guard_open()?; @@ -560,7 +567,11 @@ where self.substreams.insert(id, SubstreamState::Open { buf }); Poll::Pending } else { - debug!("{}: Closed substream {} (half-close)", self.id, id); + tracing::debug!( + connection=%self.id, + substream=%id, + "Closed substream (half-close)" + ); self.substreams .insert(id, SubstreamState::SendClosed { buf }); Poll::Ready(Ok(())) @@ -575,7 +586,11 @@ where .insert(id, SubstreamState::RecvClosed { buf }); Poll::Pending } else { - debug!("{}: Closed substream {}", self.id, id); + tracing::debug!( + connection=%self.id, + substream=%id, + "Closed substream" + ); self.substreams.insert(id, SubstreamState::Closed { buf }); Poll::Ready(Ok(())) } @@ -587,7 +602,7 @@ where /// /// The frame is only constructed if the underlying sink is ready to /// send another frame. 
- fn poll_send_frame(&mut self, cx: &mut Context<'_>, frame: F) -> Poll> + fn poll_send_frame(&mut self, cx: &Context<'_>, frame: F) -> Poll> where F: FnOnce() -> Frame, { @@ -595,7 +610,7 @@ where match ready!(self.io.poll_ready_unpin(&mut Context::from_waker(&waker))) { Ok(()) => { let frame = frame(); - trace!("{}: Sending {:?}", self.id, frame); + tracing::trace!(connection=%self.id, ?frame, "Sending frame"); match self.io.start_send_unpin(frame) { Ok(()) => Poll::Ready(Ok(())), Err(e) => Poll::Ready(self.on_error(e)), @@ -613,7 +628,7 @@ where /// frames for any substream. fn poll_read_frame( &mut self, - cx: &mut Context<'_>, + cx: &Context<'_>, stream_id: Option, ) -> Poll>> { // Try to send pending frames, if there are any, without blocking, @@ -624,7 +639,11 @@ where // Perform any pending flush before reading. if let Some(id) = &stream_id { if self.pending_flush_open.contains(id) { - trace!("{}: Executing pending flush for {}.", self.id, id); + tracing::trace!( + connection=%self.id, + substream=%id, + "Executing pending flush for substream" + ); ready!(self.poll_flush(cx))?; self.pending_flush_open = Default::default(); } @@ -640,9 +659,9 @@ where if !self.notifier_read.wake_read_stream(*blocked_id) { // No task dedicated to the blocked stream woken, so schedule // this task again to have a chance at progress. - trace!( - "{}: No task to read from blocked stream. Waking current task.", - self.id + tracing::trace!( + connection=%self.id, + "No task to read from blocked stream. Waking current task." 
); cx.waker().clone().wake(); } else if let Some(id) = stream_id { @@ -670,7 +689,7 @@ where }; match ready!(self.io.poll_next_unpin(&mut Context::from_waker(&waker))) { Some(Ok(frame)) => { - trace!("{}: Received {:?}", self.id, frame); + tracing::trace!(connection=%self.id, ?frame, "Received frame"); Poll::Ready(Ok(frame)) } Some(Err(e)) => Poll::Ready(self.on_error(e)), @@ -683,9 +702,10 @@ where let id = id.into_local(); if self.substreams.contains_key(&id) { - debug!( - "{}: Received unexpected `Open` frame for open substream {}", - self.id, id + tracing::debug!( + connection=%self.id, + substream=%id, + "Received unexpected `Open` frame for open substream", ); return self.on_error(io::Error::new( io::ErrorKind::Other, @@ -694,12 +714,17 @@ where } if self.substreams.len() >= self.config.max_substreams { - debug!( - "{}: Maximum number of substreams exceeded: {}", - self.id, self.config.max_substreams + tracing::debug!( + connection=%self.id, + max_substreams=%self.config.max_substreams, + "Maximum number of substreams exceeded" ); self.check_max_pending_frames()?; - debug!("{}: Pending reset for new stream {}", self.id, id); + tracing::debug!( + connection=%self.id, + substream=%id, + "Pending reset for new substream" + ); self.pending_frames .push_front(Frame::Reset { stream_id: id }); return Ok(None); @@ -712,11 +737,11 @@ where }, ); - debug!( - "{}: New inbound substream: {} (total {})", - self.id, - id, - self.substreams.len() + tracing::debug!( + connection=%self.id, + substream=%id, + total_substreams=%self.substreams.len(), + "New inbound substream" ); Ok(Some(id)) @@ -727,23 +752,27 @@ where if let Some(state) = self.substreams.remove(&id) { match state { SubstreamState::Closed { .. } => { - trace!( - "{}: Ignoring reset for mutually closed substream {}.", - self.id, - id + tracing::trace!( + connection=%self.id, + substream=%id, + "Ignoring reset for mutually closed substream" ); } SubstreamState::Reset { .. 
} => { - trace!( - "{}: Ignoring redundant reset for already reset substream {}", - self.id, - id + tracing::trace!( + connection=%self.id, + substream=%id, + "Ignoring redundant reset for already reset substream" ); } SubstreamState::RecvClosed { buf } | SubstreamState::SendClosed { buf } | SubstreamState::Open { buf } => { - debug!("{}: Substream {} reset by remote.", self.id, id); + tracing::debug!( + connection=%self.id, + substream=%id, + "Substream reset by remote" + ); self.substreams.insert(id, SubstreamState::Reset { buf }); // Notify tasks interested in reading from that stream, // so they may read the EOF. @@ -751,10 +780,10 @@ where } } } else { - trace!( - "{}: Ignoring `Reset` for unknown substream {}. Possibly dropped earlier.", - self.id, - id + tracing::trace!( + connection=%self.id, + substream=%id, + "Ignoring `Reset` for unknown substream, possibly dropped earlier" ); } } @@ -764,32 +793,36 @@ where if let Some(state) = self.substreams.remove(&id) { match state { SubstreamState::RecvClosed { .. } | SubstreamState::Closed { .. } => { - debug!( - "{}: Ignoring `Close` frame for closed substream {}", - self.id, id + tracing::debug!( + connection=%self.id, + substream=%id, + "Ignoring `Close` frame for closed substream" ); self.substreams.insert(id, state); } SubstreamState::Reset { buf } => { - debug!( - "{}: Ignoring `Close` frame for already reset substream {}", - self.id, id + tracing::debug!( + connection=%self.id, + substream=%id, + "Ignoring `Close` frame for already reset substream" ); self.substreams.insert(id, SubstreamState::Reset { buf }); } SubstreamState::SendClosed { buf } => { - debug!( - "{}: Substream {} closed by remote (SendClosed -> Closed).", - self.id, id + tracing::debug!( + connection=%self.id, + substream=%id, + "Substream closed by remote (SendClosed -> Closed)" ); self.substreams.insert(id, SubstreamState::Closed { buf }); // Notify tasks interested in reading, so they may read the EOF. 
self.notifier_read.wake_read_stream(id); } SubstreamState::Open { buf } => { - debug!( - "{}: Substream {} closed by remote (Open -> RecvClosed)", - self.id, id + tracing::debug!( + connection=%self.id, + substream=%id, + "Substream closed by remote (Open -> RecvClosed)" ); self.substreams .insert(id, SubstreamState::RecvClosed { buf }); @@ -798,10 +831,10 @@ where } } } else { - trace!( - "{}: Ignoring `Close` for unknown substream {}. Possibly dropped earlier.", - self.id, - id + tracing::trace!( + connection=%self.id, + substream=%id, + "Ignoring `Close` for unknown substream, possibly dropped earlier." ); } } @@ -822,7 +855,7 @@ where } /// Sends pending frames, without flushing. - fn send_pending_frames(&mut self, cx: &mut Context<'_>) -> Poll> { + fn send_pending_frames(&mut self, cx: &Context<'_>) -> Poll> { while let Some(frame) = self.pending_frames.pop_back() { if self.poll_send_frame(cx, || frame.clone())?.is_pending() { self.pending_frames.push_back(frame); @@ -835,7 +868,11 @@ where /// Records a fatal error for the multiplexed I/O stream. fn on_error(&mut self, e: io::Error) -> io::Result { - debug!("{}: Multiplexed connection failed: {:?}", self.id, e); + tracing::debug!( + connection=%self.id, + "Multiplexed connection failed: {:?}", + e + ); self.status = Status::Err(io::Error::new(e.kind(), e.to_string())); self.pending_frames = Default::default(); self.substreams = Default::default(); @@ -875,48 +912,52 @@ where /// Fails the entire multiplexed stream if too many pending `Reset` /// frames accumulate when using [`MaxBufferBehaviour::ResetStream`]. 
fn buffer(&mut self, id: LocalStreamId, data: Bytes) -> io::Result<()> { - let state = if let Some(state) = self.substreams.get_mut(&id) { - state - } else { - trace!( - "{}: Dropping data {:?} for unknown substream {}", - self.id, - data, - id + let Some(state) = self.substreams.get_mut(&id) else { + tracing::trace!( + connection=%self.id, + substream=%id, + data=?data, + "Dropping data for unknown substream" ); return Ok(()); }; - let buf = if let Some(buf) = state.recv_buf_open() { - buf - } else { - trace!( - "{}: Dropping data {:?} for closed or reset substream {}", - self.id, - data, - id + let Some(buf) = state.recv_buf_open() else { + tracing::trace!( + connection=%self.id, + substream=%id, + data=?data, + "Dropping data for closed or reset substream", ); return Ok(()); }; debug_assert!(buf.len() <= self.config.max_buffer_len); - trace!( - "{}: Buffering {:?} for stream {} (total: {})", - self.id, - data, - id, - buf.len() + 1 + tracing::trace!( + connection=%self.id, + substream=%id, + data=?data, + data_buffer=%buf.len() + 1, + "Buffering data for substream" ); buf.push(data); self.notifier_read.wake_read_stream(id); if buf.len() > self.config.max_buffer_len { - debug!("{}: Frame buffer of stream {} is full.", self.id, id); + tracing::debug!( + connection=%self.id, + substream=%id, + "Frame buffer of substream is full" + ); match self.config.max_buffer_behaviour { MaxBufferBehaviour::ResetStream => { let buf = buf.clone(); self.check_max_pending_frames()?; self.substreams.insert(id, SubstreamState::Reset { buf }); - debug!("{}: Pending reset for stream {}", self.id, id); + tracing::debug!( + connection=%self.id, + substream=%id, + "Pending reset for stream" + ); self.pending_frames .push_front(Frame::Reset { stream_id: id }); } @@ -1185,7 +1226,10 @@ mod tests { #[test] fn max_buffer_behaviour() { - let _ = env_logger::try_init(); + use tracing_subscriber::EnvFilter; + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + 
.try_init(); fn prop(cfg: MplexConfig, overflow: NonZeroU8) { let mut r_buf = BytesMut::new(); @@ -1320,7 +1364,10 @@ mod tests { #[test] fn close_on_error() { - let _ = env_logger::try_init(); + use tracing_subscriber::EnvFilter; + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); fn prop(cfg: MplexConfig, num_streams: NonZeroU8) { let num_streams = cmp::min(cfg.max_substreams, num_streams.get() as usize); diff --git a/muxers/mplex/src/lib.rs b/muxers/mplex/src/lib.rs index fa36fecfefb..c67e0e3baec 100644 --- a/muxers/mplex/src/lib.rs +++ b/muxers/mplex/src/lib.rs @@ -32,7 +32,7 @@ use bytes::Bytes; use codec::LocalStreamId; use futures::{future, prelude::*, ready}; use libp2p_core::muxing::{StreamMuxer, StreamMuxerEvent}; -use libp2p_core::upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}; +use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade, UpgradeInfo}; use parking_lot::Mutex; use std::{cmp, iter, pin::Pin, sync::Arc, task::Context, task::Poll}; @@ -45,7 +45,7 @@ impl UpgradeInfo for MplexConfig { } } -impl InboundUpgrade for MplexConfig +impl InboundConnectionUpgrade for MplexConfig where C: AsyncRead + AsyncWrite + Unpin, { @@ -55,12 +55,13 @@ where fn upgrade_inbound(self, socket: C, _: Self::Info) -> Self::Future { future::ready(Ok(Multiplex { + #[allow(unknown_lints, clippy::arc_with_non_send_sync)] // `T` is not enforced to be `Send` but we don't want to constrain it either. io: Arc::new(Mutex::new(io::Multiplexed::new(socket, self))), })) } } -impl OutboundUpgrade for MplexConfig +impl OutboundConnectionUpgrade for MplexConfig where C: AsyncRead + AsyncWrite + Unpin, { @@ -70,6 +71,7 @@ where fn upgrade_outbound(self, socket: C, _: Self::Info) -> Self::Future { future::ready(Ok(Multiplex { + #[allow(unknown_lints, clippy::arc_with_non_send_sync)] // `T` is not enforced to be `Send` but we don't want to constrain it either. 
io: Arc::new(Mutex::new(io::Multiplexed::new(socket, self))), })) } diff --git a/muxers/test-harness/Cargo.toml b/muxers/test-harness/Cargo.toml index 02e9bdd2372..7aad5f1985a 100644 --- a/muxers/test-harness/Cargo.toml +++ b/muxers/test-harness/Cargo.toml @@ -5,11 +5,17 @@ edition = "2021" publish = false license = "MIT" +[package.metadata.release] +release = false + # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] libp2p-core = { workspace = true } -futures = "0.3.28" -log = "0.4" +futures = "0.3.30" futures-timer = "3.0.2" -futures_ringbuf = "0.3.1" +futures_ringbuf = "0.4.0" +tracing = "0.1.37" + +[lints] +workspace = true diff --git a/muxers/test-harness/src/lib.rs b/muxers/test-harness/src/lib.rs index 544e057c108..16c71f414f0 100644 --- a/muxers/test-harness/src/lib.rs +++ b/muxers/test-harness/src/lib.rs @@ -3,7 +3,8 @@ use futures::{future, AsyncRead, AsyncWrite}; use futures::{AsyncReadExt, Stream}; use futures::{AsyncWriteExt, StreamExt}; use libp2p_core::muxing::StreamMuxerExt; -use libp2p_core::{InboundUpgrade, OutboundUpgrade, StreamMuxer, UpgradeInfo}; +use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}; +use libp2p_core::{StreamMuxer, UpgradeInfo}; use std::future::Future; use std::pin::Pin; use std::task::{Context, Poll}; @@ -12,15 +13,15 @@ use std::{fmt, mem}; pub async fn connected_muxers_on_memory_ring_buffer() -> (M, M) where - MC: InboundUpgrade - + OutboundUpgrade + MC: InboundConnectionUpgrade + + OutboundConnectionUpgrade + Send + 'static + Default, ::Info: Send, <::InfoIter as IntoIterator>::IntoIter: Send, - >::Future: Send, - >::Future: Send, + >::Future: Send, + >::Future: Send, E: std::error::Error + Send + Sync + 'static, { let (alice, bob) = futures_ringbuf::Endpoint::pair(100, 100); @@ -148,20 +149,20 @@ async fn run( loop { match futures::future::select(dialer.next(), listener.next()).await { Either::Left((Some(Event::SetupComplete), _)) => 
{ - log::info!("Dialer opened outbound stream"); + tracing::info!("Dialer opened outbound stream"); } Either::Left((Some(Event::ProtocolComplete), _)) => { - log::info!("Dialer completed protocol"); + tracing::info!("Dialer completed protocol"); dialer_complete = true } Either::Left((Some(Event::Timeout), _)) => { panic!("Dialer protocol timed out"); } Either::Right((Some(Event::SetupComplete), _)) => { - log::info!("Listener received inbound stream"); + tracing::info!("Listener received inbound stream"); } Either::Right((Some(Event::ProtocolComplete), _)) => { - log::info!("Listener completed protocol"); + tracing::info!("Listener completed protocol"); listener_complete = true } Either::Right((Some(Event::Timeout), _)) => { diff --git a/muxers/yamux/CHANGELOG.md b/muxers/yamux/CHANGELOG.md index 4d0015aad4a..de608b195f8 100644 --- a/muxers/yamux/CHANGELOG.md +++ b/muxers/yamux/CHANGELOG.md @@ -1,9 +1,43 @@ -## 0.44.0 - unreleased +## 0.45.1 + +- Deprecate `WindowUpdateMode::on_receive`. + It does not enforce flow-control, i.e. breaks backpressure. + Use `WindowUpdateMode::on_read` instead. + See `yamux` crate version `v0.12.1` and [Yamux PR #177](https://github.com/libp2p/rust-yamux/pull/177). +- `yamux` `v0.13` enables auto-tuning for the Yamux stream receive window. + While preserving small buffers on low-latency and/or low-bandwidth connections, this change allows for high-latency and/or high-bandwidth connections to exhaust the available bandwidth on a single stream. + Have `libp2p-yamux` use `yamux` `v0.13` (new version) by default and fall back to `yamux` `v0.12` (old version) when setting any configuration options. + Thus default users benefit from the increased performance, while power users with custom configurations maintain the old behavior. + `libp2p-yamux` will switch over to `yamux` `v0.13` entirely with the next breaking release. + See [PR 4970](https://github.com/libp2p/rust-libp2p/pull/4970). 
+ +## 0.45.0 + +- Migrate to `{In,Out}boundConnectionUpgrade` traits. + See [PR 4695](https://github.com/libp2p/rust-libp2p/pull/4695). + +## 0.44.1 + +- Update to `yamux` `v0.12` which brings performance improvements and introduces an ACK backlog of 256 inbound streams. + When interacting with other libp2p nodes that are also running this or a newer version, the creation of inbound streams will be backpressured once the ACK backlog is hit. + See [PR 3013]. + +[PR 3013]: https://github.com/libp2p/rust-libp2p/pull/3013 + +## 0.44.0 - Raise MSRV to 1.65. See [PR 3715]. +- Remove deprecated items. + See [PR 3897]. + +- Remove `Incoming`, `LocalIncoming` and `LocalConfig` as well as anything from the underlying `yamux` crate from the public API. + See [PR 3908]. + [PR 3715]: https://github.com/libp2p/rust-libp2p/pull/3715 +[PR 3897]: https://github.com/libp2p/rust-libp2p/pull/3897 +[PR 3908]: https://github.com/libp2p/rust-libp2p/pull/3908 ## 0.43.1 diff --git a/muxers/yamux/Cargo.toml b/muxers/yamux/Cargo.toml index 7a024e3af3b..14a5c0fe145 100644 --- a/muxers/yamux/Cargo.toml +++ b/muxers/yamux/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-yamux" edition = "2021" rust-version = { workspace = true } description = "Yamux multiplexing protocol for libp2p" -version = "0.44.0" +version = "0.45.1" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -11,15 +11,17 @@ keywords = ["peer-to-peer", "libp2p", "networking"] categories = ["network-programming", "asynchronous"] [dependencies] -futures = "0.3.28" +either = "1" +futures = "0.3.30" libp2p-core = { workspace = true } thiserror = "1.0" -yamux = "0.10.0" -log = "0.4" +yamux012 = { version = "0.12.1", package = "yamux" } +yamux013 = { version = "0.13.1", package = "yamux" } +tracing = "0.1.37" [dev-dependencies] async-std = { version = "1.7.0", features = ["attributes"] } -libp2p-muxer-test-harness = { workspace = true } +libp2p-muxer-test-harness = { path = 
"../test-harness" } # Passing arguments to the docsrs builder in order to properly document cfg's. # More information: https://docs.rs/about/builds#cross-compiling @@ -27,3 +29,6 @@ libp2p-muxer-test-harness = { workspace = true } all-features = true rustdoc-args = ["--cfg", "docsrs"] rustc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true diff --git a/muxers/yamux/src/lib.rs b/muxers/yamux/src/lib.rs index 64ae8de436a..2b5eb52a11e 100644 --- a/muxers/yamux/src/lib.rs +++ b/muxers/yamux/src/lib.rs @@ -22,107 +22,66 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -use futures::{ - future, - prelude::*, - ready, - stream::{BoxStream, LocalBoxStream}, -}; +use either::Either; +use futures::{future, prelude::*, ready}; use libp2p_core::muxing::{StreamMuxer, StreamMuxerEvent}; -use libp2p_core::upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}; +use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade, UpgradeInfo}; use std::collections::VecDeque; +use std::io::{IoSlice, IoSliceMut}; use std::task::Waker; use std::{ - fmt, io, iter, mem, + io, iter, pin::Pin, task::{Context, Poll}, }; use thiserror::Error; -use yamux::ConnectionError; - -#[deprecated(note = "Import the `yamux` module instead and refer to this type as `yamux::Muxer`.")] -pub type Yamux = Muxer; /// A Yamux connection. -pub struct Muxer { - /// The [`futures::stream::Stream`] of incoming substreams. - incoming: S, - /// Handle to control the connection. - control: yamux::Control, +#[derive(Debug)] +pub struct Muxer { + connection: Either, yamux013::Connection>, /// Temporarily buffers inbound streams in case our node is performing backpressure on the remote. /// - /// The only way how yamux can make progress is by driving the [`Incoming`] stream. However, the + /// The only way how yamux can make progress is by calling [`yamux013::Connection::poll_next_inbound`]. 
However, the /// [`StreamMuxer`] interface is designed to allow a caller to selectively make progress via /// [`StreamMuxer::poll_inbound`] and [`StreamMuxer::poll_outbound`] whilst the more general /// [`StreamMuxer::poll`] is designed to make progress on existing streams etc. /// /// This buffer stores inbound streams that are created whilst [`StreamMuxer::poll`] is called. /// Once the buffer is full, new inbound streams are dropped. - inbound_stream_buffer: VecDeque, + inbound_stream_buffer: VecDeque, /// Waker to be called when new inbound streams are available. inbound_stream_waker: Option, } -const MAX_BUFFERED_INBOUND_STREAMS: usize = 25; - -impl fmt::Debug for Muxer { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str("Yamux") - } -} +/// How many streams to buffer before we start resetting them. +/// +/// This is equal to the ACK BACKLOG in `rust-yamux`. +/// Thus, for peers running on a recent version of `rust-libp2p`, we should never need to reset streams because they'll voluntarily stop opening them once they hit the ACK backlog. +const MAX_BUFFERED_INBOUND_STREAMS: usize = 256; -impl Muxer> +impl Muxer where C: AsyncRead + AsyncWrite + Send + Unpin + 'static, { /// Create a new Yamux connection. - fn new(io: C, cfg: yamux::Config, mode: yamux::Mode) -> Self { - let conn = yamux::Connection::new(io, cfg, mode); - let ctrl = conn.control(); - - Self { - incoming: Incoming { - stream: yamux::into_stream(conn).err_into().boxed(), - _marker: std::marker::PhantomData, - }, - control: ctrl, + fn new(connection: Either, yamux013::Connection>) -> Self { + Muxer { + connection, inbound_stream_buffer: VecDeque::default(), inbound_stream_waker: None, } } } -impl Muxer> +impl StreamMuxer for Muxer where C: AsyncRead + AsyncWrite + Unpin + 'static, { - /// Create a new Yamux connection (which is ![`Send`]). 
- fn local(io: C, cfg: yamux::Config, mode: yamux::Mode) -> Self { - let conn = yamux::Connection::new(io, cfg, mode); - let ctrl = conn.control(); - - Self { - incoming: LocalIncoming { - stream: yamux::into_stream(conn).err_into().boxed_local(), - _marker: std::marker::PhantomData, - }, - control: ctrl, - inbound_stream_buffer: VecDeque::default(), - inbound_stream_waker: None, - } - } -} - -#[deprecated(note = "Use `Result` instead.")] -pub type YamuxResult = Result; - -impl StreamMuxer for Muxer -where - S: Stream> + Unpin, -{ - type Substream = yamux::Stream; + type Substream = Stream; type Error = Error; + #[tracing::instrument(level = "trace", name = "StreamMuxer::poll_inbound", skip(self, cx))] fn poll_inbound( mut self: Pin<&mut Self>, cx: &mut Context<'_>, @@ -131,20 +90,39 @@ where return Poll::Ready(Ok(stream)); } - self.inbound_stream_waker = Some(cx.waker().clone()); + if let Poll::Ready(res) = self.poll_inner(cx) { + return Poll::Ready(res); + } - self.poll_inner(cx) + self.inbound_stream_waker = Some(cx.waker().clone()); + Poll::Pending } + #[tracing::instrument(level = "trace", name = "StreamMuxer::poll_outbound", skip(self, cx))] fn poll_outbound( mut self: Pin<&mut Self>, cx: &mut Context<'_>, ) -> Poll> { - Pin::new(&mut self.control) - .poll_open_stream(cx) - .map_err(Error) + let stream = match self.connection.as_mut() { + Either::Left(c) => ready!(c.poll_new_outbound(cx)) + .map_err(|e| Error(Either::Left(e))) + .map(|s| Stream(Either::Left(s))), + Either::Right(c) => ready!(c.poll_new_outbound(cx)) + .map_err(|e| Error(Either::Right(e))) + .map(|s| Stream(Either::Right(s))), + }?; + Poll::Ready(Ok(stream)) } + #[tracing::instrument(level = "trace", name = "StreamMuxer::poll_close", skip(self, cx))] + fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + match self.connection.as_mut() { + Either::Left(c) => c.poll_close(cx).map_err(|e| Error(Either::Left(e))), + Either::Right(c) => c.poll_close(cx).map_err(|e| 
Error(Either::Right(e))), + } + } + + #[tracing::instrument(level = "trace", name = "StreamMuxer::poll", skip(self, cx))] fn poll( self: Pin<&mut Self>, cx: &mut Context<'_>, @@ -154,7 +132,10 @@ where let inbound_stream = ready!(this.poll_inner(cx))?; if this.inbound_stream_buffer.len() >= MAX_BUFFERED_INBOUND_STREAMS { - log::warn!("dropping {inbound_stream} because buffer is full"); + tracing::warn!( + stream=%inbound_stream.0, + "dropping stream because buffer is full" + ); drop(inbound_stream); } else { this.inbound_stream_buffer.push_back(inbound_stream); @@ -168,51 +149,105 @@ where cx.waker().wake_by_ref(); Poll::Pending } +} - fn poll_close(mut self: Pin<&mut Self>, c: &mut Context<'_>) -> Poll> { - if let Poll::Ready(()) = Pin::new(&mut self.control).poll_close(c).map_err(Error)? { - return Poll::Ready(Ok(())); - } +/// A stream produced by the yamux multiplexer. +#[derive(Debug)] +pub struct Stream(Either); - while let Poll::Ready(maybe_inbound_stream) = self.incoming.poll_next_unpin(c)? 
{ - match maybe_inbound_stream { - Some(inbound_stream) => mem::drop(inbound_stream), - None => return Poll::Ready(Ok(())), - } - } +impl AsyncRead for Stream { + fn poll_read( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &mut [u8], + ) -> Poll> { + either::for_both!(self.0.as_mut(), s => Pin::new(s).poll_read(cx, buf)) + } - Poll::Pending + fn poll_read_vectored( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + bufs: &mut [IoSliceMut<'_>], + ) -> Poll> { + either::for_both!(self.0.as_mut(), s => Pin::new(s).poll_read_vectored(cx, bufs)) } } -impl Muxer +impl AsyncWrite for Stream { + fn poll_write( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &[u8], + ) -> Poll> { + either::for_both!(self.0.as_mut(), s => Pin::new(s).poll_write(cx, buf)) + } + + fn poll_write_vectored( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + bufs: &[IoSlice<'_>], + ) -> Poll> { + either::for_both!(self.0.as_mut(), s => Pin::new(s).poll_write_vectored(cx, bufs)) + } + + fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + either::for_both!(self.0.as_mut(), s => Pin::new(s).poll_flush(cx)) + } + + fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + either::for_both!(self.0.as_mut(), s => Pin::new(s).poll_close(cx)) + } +} + +impl Muxer where - S: Stream> + Unpin, + C: AsyncRead + AsyncWrite + Unpin + 'static, { - fn poll_inner(&mut self, cx: &mut Context<'_>) -> Poll> { - self.incoming.poll_next_unpin(cx).map(|maybe_stream| { - let stream = maybe_stream - .transpose()? - .ok_or(Error(ConnectionError::Closed))?; - - Ok(stream) - }) + fn poll_inner(&mut self, cx: &mut Context<'_>) -> Poll> { + let stream = match self.connection.as_mut() { + Either::Left(c) => ready!(c.poll_next_inbound(cx)) + .ok_or(Error(Either::Left(yamux012::ConnectionError::Closed)))? 
+ .map_err(|e| Error(Either::Left(e))) + .map(|s| Stream(Either::Left(s)))?, + Either::Right(c) => ready!(c.poll_next_inbound(cx)) + .ok_or(Error(Either::Right(yamux013::ConnectionError::Closed)))? + .map_err(|e| Error(Either::Right(e))) + .map(|s| Stream(Either::Right(s)))?, + }; + + Poll::Ready(Ok(stream)) } } -#[deprecated(note = "Import the `yamux` module and refer to this type as `yamux::Config` instead.")] -pub type YamuxConfig = Config; - /// The yamux configuration. #[derive(Debug, Clone)] -pub struct Config { - inner: yamux::Config, - mode: Option, +pub struct Config(Either); + +impl Default for Config { + fn default() -> Self { + Self(Either::Right(Config013::default())) + } +} + +#[derive(Debug, Clone)] +struct Config012 { + inner: yamux012::Config, + mode: Option, +} + +impl Default for Config012 { + fn default() -> Self { + let mut inner = yamux012::Config::default(); + // For conformity with mplex, read-after-close on a multiplexed + // connection is never permitted and not configurable. + inner.set_read_after_close(false); + Self { inner, mode: None } + } } /// The window update mode determines when window updates are /// sent to the remote, giving it new credit to send more data. -pub struct WindowUpdateMode(yamux::WindowUpdateMode); +pub struct WindowUpdateMode(yamux012::WindowUpdateMode); impl WindowUpdateMode { /// The window update mode whereby the remote is given @@ -227,8 +262,10 @@ impl WindowUpdateMode { /// > size must be tuned appropriately for the desired /// > throughput and level of tolerance for (temporarily) /// > slow receivers. 
+ #[deprecated(note = "Use `WindowUpdateMode::on_read` instead.")] pub fn on_receive() -> Self { - WindowUpdateMode(yamux::WindowUpdateMode::OnReceive) + #[allow(deprecated)] + WindowUpdateMode(yamux012::WindowUpdateMode::OnReceive) } /// The window update mode whereby the remote is given new @@ -246,90 +283,75 @@ impl WindowUpdateMode { /// > **Note**: With this strategy, there is usually no point in the /// > receive buffer being larger than the window size. pub fn on_read() -> Self { - WindowUpdateMode(yamux::WindowUpdateMode::OnRead) + WindowUpdateMode(yamux012::WindowUpdateMode::OnRead) } } -#[deprecated( - note = "Import the `yamux` module and refer to this type as `yamux::LocalConfig` instead." -)] -pub type YamuxLocalConfig = LocalConfig; - -/// The yamux configuration for upgrading I/O resources which are ![`Send`]. -#[derive(Clone)] -pub struct LocalConfig(Config); - impl Config { /// Creates a new `YamuxConfig` in client mode, regardless of whether /// it will be used for an inbound or outbound upgrade. + #[deprecated(note = "Will be removed with the next breaking release.")] pub fn client() -> Self { - Self { - mode: Some(yamux::Mode::Client), + Self(Either::Left(Config012 { + mode: Some(yamux012::Mode::Client), ..Default::default() - } + })) } /// Creates a new `YamuxConfig` in server mode, regardless of whether /// it will be used for an inbound or outbound upgrade. + #[deprecated(note = "Will be removed with the next breaking release.")] pub fn server() -> Self { - Self { - mode: Some(yamux::Mode::Server), + Self(Either::Left(Config012 { + mode: Some(yamux012::Mode::Server), ..Default::default() - } + })) } /// Sets the size (in bytes) of the receive window per substream. + #[deprecated( + note = "Will be replaced in the next breaking release with a connection receive window size limit." 
+ )] pub fn set_receive_window_size(&mut self, num_bytes: u32) -> &mut Self { - self.inner.set_receive_window(num_bytes); - self + self.set(|cfg| cfg.set_receive_window(num_bytes)) } /// Sets the maximum size (in bytes) of the receive buffer per substream. + #[deprecated(note = "Will be removed with the next breaking release.")] pub fn set_max_buffer_size(&mut self, num_bytes: usize) -> &mut Self { - self.inner.set_max_buffer_size(num_bytes); - self + self.set(|cfg| cfg.set_max_buffer_size(num_bytes)) } /// Sets the maximum number of concurrent substreams. pub fn set_max_num_streams(&mut self, num_streams: usize) -> &mut Self { - self.inner.set_max_num_streams(num_streams); - self + self.set(|cfg| cfg.set_max_num_streams(num_streams)) } /// Sets the window update mode that determines when the remote /// is given new credit for sending more data. + #[deprecated( + note = "`WindowUpdate::OnRead` is the default. `WindowUpdate::OnReceive` breaks backpressure, is thus not recommended, and will be removed in the next breaking release. Thus this method becomes obsolete and will be removed with the next breaking release." + )] pub fn set_window_update_mode(&mut self, mode: WindowUpdateMode) -> &mut Self { - self.inner.set_window_update_mode(mode.0); - self + self.set(|cfg| cfg.set_window_update_mode(mode.0)) } - /// Converts the config into a [`LocalConfig`] for use with upgrades - /// of I/O streams that are ![`Send`]. - pub fn into_local(self) -> LocalConfig { - LocalConfig(self) - } -} - -impl Default for Config { - fn default() -> Self { - let mut inner = yamux::Config::default(); - // For conformity with mplex, read-after-close on a multiplexed - // connection is never permitted and not configurable. 
- inner.set_read_after_close(false); - Config { inner, mode: None } - } -} + fn set(&mut self, f: impl FnOnce(&mut yamux012::Config) -> &mut yamux012::Config) -> &mut Self { + let cfg012 = match self.0.as_mut() { + Either::Left(c) => &mut c.inner, + Either::Right(_) => { + self.0 = Either::Left(Config012::default()); + &mut self.0.as_mut().unwrap_left().inner + } + }; -impl UpgradeInfo for Config { - type Info = &'static str; - type InfoIter = iter::Once; + f(cfg012); - fn protocol_info(&self) -> Self::InfoIter { - iter::once("/yamux/1.0.0") + self } } -impl UpgradeInfo for LocalConfig { +impl UpgradeInfo for Config { type Info = &'static str; type InfoIter = iter::Once; @@ -338,129 +360,102 @@ impl UpgradeInfo for LocalConfig { } } -impl InboundUpgrade for Config +impl InboundConnectionUpgrade for Config where C: AsyncRead + AsyncWrite + Send + Unpin + 'static, { - type Output = Muxer>; + type Output = Muxer; type Error = io::Error; type Future = future::Ready>; fn upgrade_inbound(self, io: C, _: Self::Info) -> Self::Future { - let mode = self.mode.unwrap_or(yamux::Mode::Server); - future::ready(Ok(Muxer::new(io, self.inner, mode))) - } -} - -impl InboundUpgrade for LocalConfig -where - C: AsyncRead + AsyncWrite + Unpin + 'static, -{ - type Output = Muxer>; - type Error = io::Error; - type Future = future::Ready>; + let connection = match self.0 { + Either::Left(Config012 { inner, mode }) => Either::Left(yamux012::Connection::new( + io, + inner, + mode.unwrap_or(yamux012::Mode::Server), + )), + Either::Right(Config013(cfg)) => { + Either::Right(yamux013::Connection::new(io, cfg, yamux013::Mode::Server)) + } + }; - fn upgrade_inbound(self, io: C, _: Self::Info) -> Self::Future { - let cfg = self.0; - let mode = cfg.mode.unwrap_or(yamux::Mode::Server); - future::ready(Ok(Muxer::local(io, cfg.inner, mode))) + future::ready(Ok(Muxer::new(connection))) } } -impl OutboundUpgrade for Config +impl OutboundConnectionUpgrade for Config where C: AsyncRead + AsyncWrite + Send 
+ Unpin + 'static, { - type Output = Muxer>; + type Output = Muxer; type Error = io::Error; type Future = future::Ready>; fn upgrade_outbound(self, io: C, _: Self::Info) -> Self::Future { - let mode = self.mode.unwrap_or(yamux::Mode::Client); - future::ready(Ok(Muxer::new(io, self.inner, mode))) + let connection = match self.0 { + Either::Left(Config012 { inner, mode }) => Either::Left(yamux012::Connection::new( + io, + inner, + mode.unwrap_or(yamux012::Mode::Client), + )), + Either::Right(Config013(cfg)) => { + Either::Right(yamux013::Connection::new(io, cfg, yamux013::Mode::Client)) + } + }; + + future::ready(Ok(Muxer::new(connection))) } } -impl OutboundUpgrade for LocalConfig -where - C: AsyncRead + AsyncWrite + Unpin + 'static, -{ - type Output = Muxer>; - type Error = io::Error; - type Future = future::Ready>; +#[derive(Debug, Clone)] +struct Config013(yamux013::Config); - fn upgrade_outbound(self, io: C, _: Self::Info) -> Self::Future { - let cfg = self.0; - let mode = cfg.mode.unwrap_or(yamux::Mode::Client); - future::ready(Ok(Muxer::local(io, cfg.inner, mode))) +impl Default for Config013 { + fn default() -> Self { + let mut cfg = yamux013::Config::default(); + // For conformity with mplex, read-after-close on a multiplexed + // connection is never permitted and not configurable. + cfg.set_read_after_close(false); + Self(cfg) } } -#[deprecated(note = "Import the `yamux` module and refer to this type as `yamux::Error` instead.")] -pub type YamuxError = Error; - /// The Yamux [`StreamMuxer`] error type. 
#[derive(Debug, Error)] -#[error("yamux error: {0}")] -pub struct Error(#[from] yamux::ConnectionError); +#[error(transparent)] +pub struct Error(Either); impl From for io::Error { fn from(err: Error) -> Self { match err.0 { - yamux::ConnectionError::Io(e) => e, - e => io::Error::new(io::ErrorKind::Other, e), + Either::Left(err) => match err { + yamux012::ConnectionError::Io(e) => e, + e => io::Error::new(io::ErrorKind::Other, e), + }, + Either::Right(err) => match err { + yamux013::ConnectionError::Io(e) => e, + e => io::Error::new(io::ErrorKind::Other, e), + }, } } } -/// The [`futures::stream::Stream`] of incoming substreams. -pub struct Incoming { - stream: BoxStream<'static, Result>, - _marker: std::marker::PhantomData, -} - -impl fmt::Debug for Incoming { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str("Incoming") +#[cfg(test)] +mod test { + use super::*; + #[test] + fn config_set_switches_to_v012() { + // By default we use yamux v0.13. Thus we provide the benefits of yamux v0.13 to all users + // that do not depend on any of the behaviors (i.e. configuration options) of v0.12. + let mut cfg = Config::default(); + assert!(matches!( + cfg, + Config(Either::Right(Config013(yamux013::Config { .. }))) + )); + + // In case a user makes any configurations, use yamux v0.12 instead. + cfg.set_max_num_streams(42); + assert!(matches!(cfg, Config(Either::Left(Config012 { .. })))); } } - -/// The [`futures::stream::Stream`] of incoming substreams (`!Send`). 
-pub struct LocalIncoming { - stream: LocalBoxStream<'static, Result>, - _marker: std::marker::PhantomData, -} - -impl fmt::Debug for LocalIncoming { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str("LocalIncoming") - } -} - -impl Stream for Incoming { - type Item = Result; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.stream.as_mut().poll_next_unpin(cx) - } - - fn size_hint(&self) -> (usize, Option) { - self.stream.size_hint() - } -} - -impl Unpin for Incoming {} - -impl Stream for LocalIncoming { - type Item = Result; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.stream.as_mut().poll_next_unpin(cx) - } - - fn size_hint(&self) -> (usize, Option) { - self.stream.size_hint() - } -} - -impl Unpin for LocalIncoming {} diff --git a/protocols/autonat/CHANGELOG.md b/protocols/autonat/CHANGELOG.md index d7047e4b43b..1259dd01fd4 100644 --- a/protocols/autonat/CHANGELOG.md +++ b/protocols/autonat/CHANGELOG.md @@ -1,4 +1,10 @@ -## 0.11.0 - unreleased +## 0.12.0 + +- Remove `Clone`, `PartialEq` and `Eq` implementations on `Event` and its sub-structs. + The `Event` also contains errors which are not clonable or comparable. + See [PR 3914](https://github.com/libp2p/rust-libp2p/pull/3914). + +## 0.11.0 - Raise MSRV to 1.65. See [PR 3715]. 
diff --git a/protocols/autonat/Cargo.toml b/protocols/autonat/Cargo.toml index 27cfcde0758..a1ecae7ccab 100644 --- a/protocols/autonat/Cargo.toml +++ b/protocols/autonat/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-autonat" edition = "2021" rust-version = { workspace = true } description = "NAT and firewall detection for libp2p" -version = "0.11.0" +version = "0.12.0" authors = ["David Craven ", "Elena Frank "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -19,14 +19,16 @@ libp2p-core = { workspace = true } libp2p-swarm = { workspace = true } libp2p-request-response = { workspace = true } libp2p-identity = { workspace = true } -log = "0.4" -rand = "0.8" quick-protobuf = "0.8" +rand = "0.8" +tracing = "0.1.37" +quick-protobuf-codec = { workspace = true } +asynchronous-codec = { workspace = true } [dev-dependencies] async-std = { version = "1.10", features = ["attributes"] } -env_logger = "0.10" -libp2p-swarm-test = { workspace = true } +libp2p-swarm-test = { path = "../../swarm-test" } +tracing-subscriber = { version = "0.3", features = ["env-filter"] } # Passing arguments to the docsrs builder in order to properly document cfg's. 
# More information: https://docs.rs/about/builds#cross-compiling @@ -34,3 +36,6 @@ libp2p-swarm-test = { workspace = true } all-features = true rustdoc-args = ["--cfg", "docsrs"] rustc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true diff --git a/protocols/autonat/src/behaviour.rs b/protocols/autonat/src/behaviour.rs index c46d8044989..e95163ab23f 100644 --- a/protocols/autonat/src/behaviour.rs +++ b/protocols/autonat/src/behaviour.rs @@ -32,15 +32,12 @@ use instant::Instant; use libp2p_core::{multiaddr::Protocol, ConnectedPoint, Endpoint, Multiaddr}; use libp2p_identity::PeerId; use libp2p_request_response::{ - self as request_response, ProtocolSupport, RequestId, ResponseChannel, + self as request_response, InboundRequestId, OutboundRequestId, ProtocolSupport, ResponseChannel, }; use libp2p_swarm::{ - behaviour::{ - AddressChange, ConnectionClosed, ConnectionEstablished, DialFailure, ExpiredExternalAddr, - ExpiredListenAddr, FromSwarm, - }, - ConnectionDenied, ConnectionId, ExternalAddresses, ListenAddresses, NetworkBehaviour, - PollParameters, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, + behaviour::{AddressChange, ConnectionClosed, ConnectionEstablished, DialFailure, FromSwarm}, + ConnectionDenied, ConnectionId, ListenAddresses, NetworkBehaviour, THandler, THandlerInEvent, + THandlerOutEvent, ToSwarm, }; use std::{ collections::{HashMap, HashSet, VecDeque}, @@ -133,7 +130,7 @@ impl ProbeId { } /// Event produced by [`Behaviour`]. -#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Debug)] pub enum Event { /// Event on an inbound probe. InboundProbe(InboundProbeEvent), @@ -187,14 +184,14 @@ pub struct Behaviour { PeerId, ( ProbeId, - RequestId, + InboundRequestId, Vec, ResponseChannel, ), >, // Ongoing outbound probes and mapped to the inner request id. - ongoing_outbound: HashMap, + ongoing_outbound: HashMap, // Connected peers with the observed address of each connection. 
// If the endpoint of a connection is relayed or not global (in case of Config::only_global_ips), @@ -209,20 +206,22 @@ pub struct Behaviour { last_probe: Option, - pending_actions: VecDeque::OutEvent, THandlerInEvent>>, + pending_actions: VecDeque::ToSwarm, THandlerInEvent>>, probe_id: ProbeId, listen_addresses: ListenAddresses, - external_addresses: ExternalAddresses, + other_candidates: HashSet, } impl Behaviour { pub fn new(local_peer_id: PeerId, config: Config) -> Self { let protocols = iter::once((DEFAULT_PROTOCOL_NAME, ProtocolSupport::Full)); - let mut cfg = request_response::Config::default(); - cfg.set_request_timeout(config.timeout); - let inner = request_response::Behaviour::new(AutoNatCodec, protocols, cfg); + let inner = request_response::Behaviour::with_codec( + AutoNatCodec, + protocols, + request_response::Config::default().with_request_timeout(config.timeout), + ); Self { local_peer_id, inner, @@ -240,7 +239,7 @@ impl Behaviour { pending_actions: VecDeque::new(), probe_id: ProbeId(0), listen_addresses: Default::default(), - external_addresses: Default::default(), + other_candidates: Default::default(), } } @@ -279,6 +278,12 @@ impl Behaviour { self.servers.retain(|p| p != peer); } + /// Explicitly probe the provided address for external reachability. + pub fn probe_address(&mut self, candidate: Multiaddr) { + self.other_candidates.insert(candidate); + self.as_client().on_new_address(); + } + fn as_client(&mut self) -> AsClient { AsClient { inner: &mut self.inner, @@ -294,7 +299,7 @@ impl Behaviour { last_probe: &mut self.last_probe, schedule_probe: &mut self.schedule_probe, listen_addresses: &self.listen_addresses, - external_addresses: &self.external_addresses, + other_candidates: &self.other_candidates, } } @@ -355,20 +360,10 @@ impl Behaviour { ConnectionClosed { peer_id, connection_id, - endpoint, - handler, remaining_established, - }: ConnectionClosed<::ConnectionHandler>, + .. 
+ }: ConnectionClosed, ) { - self.inner - .on_swarm_event(FromSwarm::ConnectionClosed(ConnectionClosed { - peer_id, - connection_id, - endpoint, - handler, - remaining_established, - })); - if remaining_established == 0 { self.connected.remove(&peer_id); } else { @@ -380,20 +375,7 @@ impl Behaviour { } } - fn on_dial_failure( - &mut self, - DialFailure { - peer_id, - connection_id, - error, - }: DialFailure, - ) { - self.inner - .on_swarm_event(FromSwarm::DialFailure(DialFailure { - peer_id, - connection_id, - error, - })); + fn on_dial_failure(&mut self, DialFailure { peer_id, error, .. }: DialFailure) { if let Some(event) = self.as_server().on_outbound_dial_error(peer_id, error) { self.pending_actions .push_back(ToSwarm::GenerateEvent(Event::InboundProbe(event))); @@ -427,15 +409,19 @@ impl Behaviour { impl NetworkBehaviour for Behaviour { type ConnectionHandler = as NetworkBehaviour>::ConnectionHandler; - type OutEvent = Event; + type ToSwarm = Event; - fn poll(&mut self, cx: &mut Context<'_>, params: &mut impl PollParameters) -> Poll { + #[tracing::instrument(level = "trace", name = "NetworkBehaviour::poll", skip(self, cx))] + fn poll( + &mut self, + cx: &mut Context<'_>, + ) -> Poll>> { loop { if let Some(event) = self.pending_actions.pop_front() { return Poll::Ready(event); } - match self.inner.poll(cx, params) { + match self.inner.poll(cx) { Poll::Ready(ToSwarm::GenerateEvent(event)) => { let actions = match event { request_response::Event::Message { @@ -443,14 +429,14 @@ impl NetworkBehaviour for Behaviour { .. } | request_response::Event::OutboundFailure { .. } => { - self.as_client().handle_event(params, event) + self.as_client().handle_event(event) } request_response::Event::Message { message: request_response::Message::Request { .. }, .. } | request_response::Event::InboundFailure { .. } => { - self.as_server().handle_event(params, event) + self.as_server().handle_event(event) } request_response::Event::ResponseSent { .. 
} => VecDeque::new(), }; @@ -530,56 +516,28 @@ impl NetworkBehaviour for Behaviour { .handle_established_outbound_connection(connection_id, peer, addr, role_override) } - fn on_swarm_event(&mut self, event: FromSwarm) { + fn on_swarm_event(&mut self, event: FromSwarm) { self.listen_addresses.on_swarm_event(&event); - self.external_addresses.on_swarm_event(&event); + self.inner.on_swarm_event(event); match event { - FromSwarm::ConnectionEstablished(connection_established) => { - self.inner - .on_swarm_event(FromSwarm::ConnectionEstablished(connection_established)); - self.on_connection_established(connection_established) - } - FromSwarm::ConnectionClosed(connection_closed) => { - self.on_connection_closed(connection_closed) - } - FromSwarm::DialFailure(dial_failure) => self.on_dial_failure(dial_failure), - FromSwarm::AddressChange(address_change) => { - self.inner - .on_swarm_event(FromSwarm::AddressChange(address_change)); - self.on_address_change(address_change) - } - listen_addr @ FromSwarm::NewListenAddr(_) => { - self.inner.on_swarm_event(listen_addr); - self.as_client().on_new_address(); - } - FromSwarm::ExpiredListenAddr(ExpiredListenAddr { listener_id, addr }) => { - self.inner - .on_swarm_event(FromSwarm::ExpiredListenAddr(ExpiredListenAddr { - listener_id, - addr, - })); - self.as_client().on_expired_address(addr); - } - FromSwarm::ExpiredExternalAddr(ExpiredExternalAddr { addr }) => { - self.inner - .on_swarm_event(FromSwarm::ExpiredExternalAddr(ExpiredExternalAddr { addr })); - self.as_client().on_expired_address(addr); - } - external_addr @ FromSwarm::NewExternalAddr(_) => { - self.inner.on_swarm_event(external_addr); + FromSwarm::ConnectionEstablished(e) => self.on_connection_established(e), + FromSwarm::ConnectionClosed(e) => self.on_connection_closed(e), + FromSwarm::DialFailure(e) => self.on_dial_failure(e), + FromSwarm::AddressChange(e) => self.on_address_change(e), + FromSwarm::NewListenAddr(_) => { self.as_client().on_new_address(); } - 
listen_failure @ FromSwarm::ListenFailure(_) => { - self.inner.on_swarm_event(listen_failure) + FromSwarm::ExpiredListenAddr(e) => { + self.as_client().on_expired_address(e.addr); } - new_listener @ FromSwarm::NewListener(_) => self.inner.on_swarm_event(new_listener), - listener_error @ FromSwarm::ListenerError(_) => { - self.inner.on_swarm_event(listener_error) + FromSwarm::ExternalAddrExpired(e) => { + self.as_client().on_expired_address(e.addr); } - listener_closed @ FromSwarm::ListenerClosed(_) => { - self.inner.on_swarm_event(listener_closed) + FromSwarm::NewExternalAddrCandidate(e) => { + self.probe_address(e.addr.to_owned()); } + _ => {} } } @@ -594,13 +552,12 @@ impl NetworkBehaviour for Behaviour { } } -type Action = ToSwarm<::OutEvent, THandlerInEvent>; +type Action = ToSwarm<::ToSwarm, THandlerInEvent>; // Trait implemented for `AsClient` and `AsServer` to handle events from the inner [`request_response::Behaviour`] Protocol. trait HandleInnerEvent { fn handle_event( &mut self, - params: &mut impl PollParameters, event: request_response::Event, ) -> VecDeque; } diff --git a/protocols/autonat/src/behaviour/as_client.rs b/protocols/autonat/src/behaviour/as_client.rs index e0c0b2e9e0a..668f3b93719 100644 --- a/protocols/autonat/src/behaviour/as_client.rs +++ b/protocols/autonat/src/behaviour/as_client.rs @@ -29,10 +29,8 @@ use futures_timer::Delay; use instant::Instant; use libp2p_core::Multiaddr; use libp2p_identity::PeerId; -use libp2p_request_response::{self as request_response, OutboundFailure, RequestId}; -use libp2p_swarm::{ - AddressScore, ConnectionId, ExternalAddresses, ListenAddresses, PollParameters, ToSwarm, -}; +use libp2p_request_response::{self as request_response, OutboundFailure, OutboundRequestId}; +use libp2p_swarm::{ConnectionId, ListenAddresses, ToSwarm}; use rand::{seq::SliceRandom, thread_rng}; use std::{ collections::{HashMap, HashSet, VecDeque}, @@ -41,7 +39,7 @@ use std::{ }; /// Outbound probe failed or was aborted. 
-#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Debug)] pub enum OutboundProbeError { /// Probe was aborted because no server is known, or all servers /// are throttled through [`Config::throttle_server_period`]. @@ -55,7 +53,7 @@ pub enum OutboundProbeError { Response(ResponseError), } -#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Debug)] pub enum OutboundProbeEvent { /// A dial-back request was sent to a remote peer. Request { @@ -93,17 +91,16 @@ pub(crate) struct AsClient<'a> { pub(crate) throttled_servers: &'a mut Vec<(PeerId, Instant)>, pub(crate) nat_status: &'a mut NatStatus, pub(crate) confidence: &'a mut usize, - pub(crate) ongoing_outbound: &'a mut HashMap, + pub(crate) ongoing_outbound: &'a mut HashMap, pub(crate) last_probe: &'a mut Option, pub(crate) schedule_probe: &'a mut Delay, pub(crate) listen_addresses: &'a ListenAddresses, - pub(crate) external_addresses: &'a ExternalAddresses, + pub(crate) other_candidates: &'a HashSet, } impl<'a> HandleInnerEvent for AsClient<'a> { fn handle_event( &mut self, - params: &mut impl PollParameters, event: request_response::Event, ) -> VecDeque { match event { @@ -115,12 +112,12 @@ impl<'a> HandleInnerEvent for AsClient<'a> { response, }, } => { - log::debug!("Outbound dial-back request returned {:?}.", response); + tracing::debug!(?response, "Outbound dial-back request returned response"); let probe_id = self .ongoing_outbound .remove(&request_id) - .expect("RequestId exists."); + .expect("OutboundRequestId exists."); let event = match response.result.clone() { Ok(address) => OutboundProbeEvent::Response { @@ -147,19 +144,7 @@ impl<'a> HandleInnerEvent for AsClient<'a> { } if let Ok(address) = response.result { - // Update observed address score if it is finite. - #[allow(deprecated)] - // TODO: Fix once we report `AddressScore` through `FromSwarm` event. 
- let score = params - .external_addresses() - .find_map(|r| (r.addr == address).then_some(r.score)) - .unwrap_or(AddressScore::Finite(0)); - if let AddressScore::Finite(finite_score) = score { - actions.push_back(ToSwarm::ReportObservedAddr { - address, - score: AddressScore::Finite(finite_score + 1), - }); - } + actions.push_back(ToSwarm::ExternalAddrConfirmed(address)); } actions @@ -169,10 +154,10 @@ impl<'a> HandleInnerEvent for AsClient<'a> { error, request_id, } => { - log::debug!( - "Outbound Failure {} when on dial-back request to peer {}.", + tracing::debug!( + %peer, + "Outbound Failure {} when on dial-back request to peer.", error, - peer ); let probe_id = self .ongoing_outbound @@ -201,7 +186,7 @@ impl<'a> AsClient<'a> { self.schedule_probe.reset(self.config.retry_interval); let addresses = self - .external_addresses + .other_candidates .iter() .chain(self.listen_addresses.iter()) .cloned() @@ -290,16 +275,12 @@ impl<'a> AsClient<'a> { ) -> Result { let _ = self.last_probe.insert(Instant::now()); if addresses.is_empty() { - log::debug!("Outbound dial-back request aborted: No dial-back addresses."); + tracing::debug!("Outbound dial-back request aborted: No dial-back addresses"); return Err(OutboundProbeError::NoAddresses); } - let server = match self.random_server() { - Some(s) => s, - None => { - log::debug!("Outbound dial-back request aborted: No qualified server."); - return Err(OutboundProbeError::NoServer); - } - }; + + let server = self.random_server().ok_or(OutboundProbeError::NoServer)?; + let request_id = self.inner.send_request( &server, DialRequest { @@ -308,7 +289,7 @@ impl<'a> AsClient<'a> { }, ); self.throttled_servers.push((server, Instant::now())); - log::debug!("Send dial-back request to peer {}.", server); + tracing::debug!(peer=%server, "Send dial-back request to peer"); self.ongoing_outbound.insert(request_id, probe_id); Ok(server) } @@ -316,11 +297,8 @@ impl<'a> AsClient<'a> { // Set the delay to the next probe based on the time of 
our last probe // and the specified delay. fn schedule_next_probe(&mut self, delay: Duration) { - let last_probe_instant = match self.last_probe { - Some(instant) => instant, - None => { - return; - } + let Some(last_probe_instant) = self.last_probe else { + return; }; let schedule_next = *last_probe_instant + delay; self.schedule_probe @@ -359,10 +337,10 @@ impl<'a> AsClient<'a> { return None; } - log::debug!( - "Flipped assumed NAT status from {:?} to {:?}", - self.nat_status, - reported_status + tracing::debug!( + old_status=?self.nat_status, + new_status=?reported_status, + "Flipped assumed NAT status" ); let old_status = self.nat_status.clone(); diff --git a/protocols/autonat/src/behaviour/as_server.rs b/protocols/autonat/src/behaviour/as_server.rs index 063943392f3..878fd713dda 100644 --- a/protocols/autonat/src/behaviour/as_server.rs +++ b/protocols/autonat/src/behaviour/as_server.rs @@ -26,11 +26,11 @@ use instant::Instant; use libp2p_core::{multiaddr::Protocol, Multiaddr}; use libp2p_identity::PeerId; use libp2p_request_response::{ - self as request_response, InboundFailure, RequestId, ResponseChannel, + self as request_response, InboundFailure, InboundRequestId, ResponseChannel, }; use libp2p_swarm::{ dial_opts::{DialOpts, PeerCondition}, - ConnectionId, DialError, PollParameters, ToSwarm, + ConnectionId, DialError, ToSwarm, }; use std::{ collections::{HashMap, HashSet, VecDeque}, @@ -38,7 +38,7 @@ use std::{ }; /// Inbound probe failed. -#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Debug)] pub enum InboundProbeError { /// Receiving the dial-back request or sending a response failed. InboundRequest(InboundFailure), @@ -46,7 +46,7 @@ pub enum InboundProbeError { Response(ResponseError), } -#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Debug)] pub enum InboundProbeEvent { /// A dial-back request was received from a remote peer. 
Request { @@ -85,7 +85,7 @@ pub(crate) struct AsServer<'a> { PeerId, ( ProbeId, - RequestId, + InboundRequestId, Vec, ResponseChannel, ), @@ -95,7 +95,6 @@ pub(crate) struct AsServer<'a> { impl<'a> HandleInnerEvent for AsServer<'a> { fn handle_event( &mut self, - _params: &mut impl PollParameters, event: request_response::Event, ) -> VecDeque { match event { @@ -111,9 +110,9 @@ impl<'a> HandleInnerEvent for AsServer<'a> { let probe_id = self.probe_id.next(); match self.resolve_inbound_request(peer, request) { Ok(addrs) => { - log::debug!( - "Inbound dial request from Peer {} with dial-back addresses {:?}.", - peer, + tracing::debug!( + %peer, + "Inbound dial request from peer with dial-back addresses {:?}", addrs ); @@ -141,10 +140,10 @@ impl<'a> HandleInnerEvent for AsServer<'a> { ]) } Err((status_text, error)) => { - log::debug!( - "Reject inbound dial request from peer {}: {}.", - peer, - status_text + tracing::debug!( + %peer, + status=%status_text, + "Reject inbound dial request from peer" ); let response = DialResponse { @@ -168,10 +167,10 @@ impl<'a> HandleInnerEvent for AsServer<'a> { error, request_id, } => { - log::debug!( - "Inbound Failure {} when on dial-back request from peer {}.", - error, - peer + tracing::debug!( + %peer, + "Inbound Failure {} when on dial-back request from peer", + error ); let probe_id = match self.ongoing_inbound.get(&peer) { @@ -207,10 +206,10 @@ impl<'a> AsServer<'a> { return None; } - log::debug!( - "Dial-back to peer {} succeeded at addr {:?}.", - peer, - address + tracing::debug!( + %peer, + %address, + "Dial-back to peer succeeded" ); let (probe_id, _, _, channel) = self.ongoing_inbound.remove(peer).unwrap(); @@ -233,11 +232,19 @@ impl<'a> AsServer<'a> { error: &DialError, ) -> Option { let (probe_id, _, _, channel) = peer.and_then(|p| self.ongoing_inbound.remove(&p))?; - log::debug!( - "Dial-back to peer {} failed with error {:?}.", - peer.unwrap(), - error - ); + + match peer { + Some(p) => tracing::debug!( + peer=%p, + 
"Dial-back to peer failed with error {:?}", + error + ), + None => tracing::debug!( + "Dial-back to non existent peer failed with error {:?}", + error + ), + }; + let response_error = ResponseError::DialError; let response = DialResponse { result: Err(response_error.clone()), @@ -319,13 +326,13 @@ impl<'a> AsServer<'a> { demanded: Vec, observed_remote_at: &Multiaddr, ) -> Vec { - let observed_ip = match observed_remote_at + let Some(observed_ip) = observed_remote_at .into_iter() .find(|p| matches!(p, Protocol::Ip4(_) | Protocol::Ip6(_))) - { - Some(ip) => ip, - None => return Vec::new(), + else { + return Vec::new(); }; + let mut distinct = HashSet::new(); demanded .into_iter() @@ -338,7 +345,7 @@ impl<'a> AsServer<'a> { let is_valid = addr.iter().all(|proto| match proto { Protocol::P2pCircuit => false, - Protocol::P2p(hash) => hash == peer.into(), + Protocol::P2p(peer_id) => peer_id == peer, _ => true, }); @@ -346,7 +353,7 @@ impl<'a> AsServer<'a> { return None; } if !addr.iter().any(|p| matches!(p, Protocol::P2p(_))) { - addr.push(Protocol::P2p(peer.into())) + addr.push(Protocol::P2p(peer)) } // Only collect distinct addresses. 
distinct.insert(addr.clone()).then_some(addr) @@ -380,26 +387,26 @@ mod test { let observed_addr = Multiaddr::empty() .with(observed_ip.clone()) .with(random_port()) - .with(Protocol::P2p(peer_id.into())); + .with(Protocol::P2p(peer_id)); // Valid address with matching peer-id let demanded_1 = Multiaddr::empty() .with(random_ip()) .with(random_port()) - .with(Protocol::P2p(peer_id.into())); + .with(Protocol::P2p(peer_id)); // Invalid because peer_id does not match let demanded_2 = Multiaddr::empty() .with(random_ip()) .with(random_port()) - .with(Protocol::P2p(PeerId::random().into())); + .with(Protocol::P2p(PeerId::random())); // Valid address without peer-id let demanded_3 = Multiaddr::empty().with(random_ip()).with(random_port()); // Invalid because relayed let demanded_4 = Multiaddr::empty() .with(random_ip()) .with(random_port()) - .with(Protocol::P2p(PeerId::random().into())) + .with(Protocol::P2p(PeerId::random())) .with(Protocol::P2pCircuit) - .with(Protocol::P2p(peer_id.into())); + .with(Protocol::P2p(peer_id)); let demanded = vec![ demanded_1.clone(), demanded_2, @@ -413,7 +420,7 @@ mod test { let expected_2 = demanded_3 .replace(0, |_| Some(observed_ip)) .unwrap() - .with(Protocol::P2p(peer_id.into())); + .with(Protocol::P2p(peer_id)); assert_eq!(filtered, vec![expected_1, expected_2]); } } diff --git a/protocols/autonat/src/protocol.rs b/protocols/autonat/src/protocol.rs index a63fd8cdf4d..c1862058400 100644 --- a/protocols/autonat/src/protocol.rs +++ b/protocols/autonat/src/protocol.rs @@ -20,12 +20,13 @@ use crate::proto; use async_trait::async_trait; -use futures::io::{AsyncRead, AsyncWrite, AsyncWriteExt}; -use libp2p_core::{upgrade, Multiaddr}; +use asynchronous_codec::{FramedRead, FramedWrite}; +use futures::io::{AsyncRead, AsyncWrite}; +use futures::{SinkExt, StreamExt}; +use libp2p_core::Multiaddr; use libp2p_identity::PeerId; use libp2p_request_response::{self as request_response}; use libp2p_swarm::StreamProtocol; -use 
quick_protobuf::{BytesReader, Writer}; use std::{convert::TryFrom, io}; /// The protocol name used for negotiating with multistream-select. @@ -44,8 +45,12 @@ impl request_response::Codec for AutoNatCodec { where T: AsyncRead + Send + Unpin, { - let bytes = upgrade::read_length_prefixed(io, 1024).await?; - let request = DialRequest::from_bytes(&bytes)?; + let message = FramedRead::new(io, codec()) + .next() + .await + .ok_or(io::ErrorKind::UnexpectedEof)??; + let request = DialRequest::from_proto(message)?; + Ok(request) } @@ -57,8 +62,12 @@ impl request_response::Codec for AutoNatCodec { where T: AsyncRead + Send + Unpin, { - let bytes = upgrade::read_length_prefixed(io, 1024).await?; - let response = DialResponse::from_bytes(&bytes)?; + let message = FramedRead::new(io, codec()) + .next() + .await + .ok_or(io::ErrorKind::UnexpectedEof)??; + let response = DialResponse::from_proto(message)?; + Ok(response) } @@ -71,8 +80,11 @@ impl request_response::Codec for AutoNatCodec { where T: AsyncWrite + Send + Unpin, { - upgrade::write_length_prefixed(io, data.into_bytes()).await?; - io.close().await + let mut framed = FramedWrite::new(io, codec()); + framed.send(data.into_proto()).await?; + framed.close().await?; + + Ok(()) } async fn write_response( @@ -84,11 +96,18 @@ impl request_response::Codec for AutoNatCodec { where T: AsyncWrite + Send + Unpin, { - upgrade::write_length_prefixed(io, data.into_bytes()).await?; - io.close().await + let mut framed = FramedWrite::new(io, codec()); + framed.send(data.into_proto()).await?; + framed.close().await?; + + Ok(()) } } +fn codec() -> quick_protobuf_codec::Codec { + quick_protobuf_codec::Codec::::new(1024) +} + #[derive(Clone, Debug, Eq, PartialEq)] pub struct DialRequest { pub peer_id: PeerId, @@ -96,31 +115,22 @@ pub struct DialRequest { } impl DialRequest { - pub fn from_bytes(bytes: &[u8]) -> Result { - use quick_protobuf::MessageRead; - - let mut reader = BytesReader::from_bytes(bytes); - let msg = 
proto::Message::from_reader(&mut reader, bytes) - .map_err(|err| io::Error::new(io::ErrorKind::InvalidData, err))?; + pub fn from_proto(msg: proto::Message) -> Result { if msg.type_pb != Some(proto::MessageType::DIAL) { return Err(io::Error::new(io::ErrorKind::InvalidData, "invalid type")); } - let (peer_id, addrs) = if let Some(proto::Dial { - peer: - Some(proto::PeerInfo { - id: Some(peer_id), - addrs, - }), - }) = msg.dial - { - (peer_id, addrs) - } else { - log::debug!("Received malformed dial message."); - return Err(io::Error::new( - io::ErrorKind::InvalidData, - "invalid dial message", - )); - }; + + let peer_id_result = msg.dial.and_then(|dial| { + dial.peer.and_then(|peer_info| { + let Some(peer_id) = peer_info.id else { + return None; + }; + Some((peer_id, peer_info.addrs)) + }) + }); + + let (peer_id, addrs) = peer_id_result + .ok_or_else(|| io::Error::new(io::ErrorKind::InvalidData, "invalid dial message"))?; let peer_id = { PeerId::try_from(peer_id.to_vec()) @@ -132,7 +142,7 @@ impl DialRequest { .filter_map(|a| match Multiaddr::try_from(a.to_vec()) { Ok(a) => Some(a), Err(e) => { - log::debug!("Unable to parse multiaddr: {e}"); + tracing::debug!("Unable to parse multiaddr: {e}"); None } }) @@ -143,9 +153,7 @@ impl DialRequest { }) } - pub fn into_bytes(self) -> Vec { - use quick_protobuf::MessageWrite; - + pub fn into_proto(self) -> proto::Message { let peer_id = self.peer_id.to_bytes(); let addrs = self .addresses @@ -153,7 +161,7 @@ impl DialRequest { .map(|addr| addr.to_vec()) .collect(); - let msg = proto::Message { + proto::Message { type_pb: Some(proto::MessageType::DIAL), dial: Some(proto::Dial { peer: Some(proto::PeerInfo { @@ -162,12 +170,7 @@ impl DialRequest { }), }), dialResponse: None, - }; - - let mut buf = Vec::with_capacity(msg.get_size()); - let mut writer = Writer::new(&mut buf); - msg.write_message(&mut writer).expect("Encoding to succeed"); - buf + } } } @@ -200,7 +203,7 @@ impl TryFrom for ResponseError { 
proto::ResponseStatus::E_BAD_REQUEST => Ok(ResponseError::BadRequest), proto::ResponseStatus::E_INTERNAL_ERROR => Ok(ResponseError::InternalError), proto::ResponseStatus::OK => { - log::debug!("Received response with status code OK but expected error."); + tracing::debug!("Received response with status code OK but expected error"); Err(io::Error::new( io::ErrorKind::InvalidData, "invalid response error type", @@ -217,12 +220,7 @@ pub struct DialResponse { } impl DialResponse { - pub fn from_bytes(bytes: &[u8]) -> Result { - use quick_protobuf::MessageRead; - - let mut reader = BytesReader::from_bytes(bytes); - let msg = proto::Message::from_reader(&mut reader, bytes) - .map_err(|err| io::Error::new(io::ErrorKind::InvalidData, err))?; + pub fn from_proto(msg: proto::Message) -> Result { if msg.type_pb != Some(proto::MessageType::DIAL_RESPONSE) { return Err(io::Error::new(io::ErrorKind::InvalidData, "invalid type")); } @@ -249,7 +247,7 @@ impl DialResponse { result: Err(ResponseError::try_from(status)?), }, _ => { - log::debug!("Received malformed response message."); + tracing::debug!("Received malformed response message"); return Err(io::Error::new( io::ErrorKind::InvalidData, "invalid dial response message", @@ -258,9 +256,7 @@ impl DialResponse { }) } - pub fn into_bytes(self) -> Vec { - use quick_protobuf::MessageWrite; - + pub fn into_proto(self) -> proto::Message { let dial_response = match self.result { Ok(addr) => proto::DialResponse { status: Some(proto::ResponseStatus::OK), @@ -274,23 +270,17 @@ impl DialResponse { }, }; - let msg = proto::Message { + proto::Message { type_pb: Some(proto::MessageType::DIAL_RESPONSE), dial: None, dialResponse: Some(dial_response), - }; - - let mut buf = Vec::with_capacity(msg.get_size()); - let mut writer = Writer::new(&mut buf); - msg.write_message(&mut writer).expect("Encoding to succeed"); - buf + } } } #[cfg(test)] mod tests { use super::*; - use quick_protobuf::MessageWrite; #[test] fn test_request_encode_decode() { @@ 
-301,8 +291,8 @@ mod tests { "/ip4/192.168.1.42/tcp/30333".parse().unwrap(), ], }; - let bytes = request.clone().into_bytes(); - let request2 = DialRequest::from_bytes(&bytes).unwrap(); + let proto = request.clone().into_proto(); + let request2 = DialRequest::from_proto(proto).unwrap(); assert_eq!(request, request2); } @@ -312,8 +302,8 @@ mod tests { result: Ok("/ip4/8.8.8.8/tcp/30333".parse().unwrap()), status_text: None, }; - let bytes = response.clone().into_bytes(); - let response2 = DialResponse::from_bytes(&bytes).unwrap(); + let proto = response.clone().into_proto(); + let response2 = DialResponse::from_proto(proto).unwrap(); assert_eq!(response, response2); } @@ -323,8 +313,8 @@ mod tests { result: Err(ResponseError::DialError), status_text: Some("dial failed".to_string()), }; - let bytes = response.clone().into_bytes(); - let response2 = DialResponse::from_bytes(&bytes).unwrap(); + let proto = response.clone().into_proto(); + let response2 = DialResponse::from_proto(proto).unwrap(); assert_eq!(response, response2); } @@ -350,11 +340,7 @@ mod tests { dialResponse: None, }; - let mut bytes = Vec::with_capacity(msg.get_size()); - let mut writer = Writer::new(&mut bytes); - msg.write_message(&mut writer).expect("Encoding to succeed"); - - let request = DialRequest::from_bytes(&bytes).expect("not to fail"); + let request = DialRequest::from_proto(msg).expect("not to fail"); assert_eq!(request.addresses, vec![valid_multiaddr]) } diff --git a/protocols/autonat/tests/test_client.rs b/protocols/autonat/tests/test_client.rs index 221833c9377..7509d3ef425 100644 --- a/protocols/autonat/tests/test_client.rs +++ b/protocols/autonat/tests/test_client.rs @@ -24,7 +24,7 @@ use libp2p_autonat::{ }; use libp2p_core::Multiaddr; use libp2p_identity::PeerId; -use libp2p_swarm::{AddressScore, Swarm, SwarmEvent}; +use libp2p_swarm::{Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt as _; use std::time::Duration; @@ -61,7 +61,7 @@ async fn test_auto_probe() { match 
client.next_behaviour_event().await { Event::OutboundProbe(OutboundProbeEvent::Error { peer, error, .. }) => { assert!(peer.is_none()); - assert_eq!(error, OutboundProbeError::NoAddresses); + assert!(matches!(error, OutboundProbeError::NoAddresses)); } other => panic!("Unexpected behaviour event: {other:?}."), } @@ -70,48 +70,6 @@ async fn test_auto_probe() { assert!(client.behaviour().public_address().is_none()); assert_eq!(client.behaviour().confidence(), 0); - // Test Private NAT Status - - // Artificially add a faulty address. - let unreachable_addr: Multiaddr = "/ip4/127.0.0.1/tcp/42".parse().unwrap(); - client.add_external_address(unreachable_addr.clone(), AddressScore::Infinite); - - let id = match client.next_behaviour_event().await { - Event::OutboundProbe(OutboundProbeEvent::Request { probe_id, peer }) => { - assert_eq!(peer, server_id); - probe_id - } - other => panic!("Unexpected behaviour event: {other:?}."), - }; - - match client.next_behaviour_event().await { - Event::OutboundProbe(OutboundProbeEvent::Error { - probe_id, - peer, - error, - }) => { - assert_eq!(peer.unwrap(), server_id); - assert_eq!(probe_id, id); - assert_eq!( - error, - OutboundProbeError::Response(ResponseError::DialError) - ); - } - other => panic!("Unexpected behaviour event: {other:?}."), - } - - match client.next_behaviour_event().await { - Event::StatusChanged { old, new } => { - assert_eq!(old, NatStatus::Unknown); - assert_eq!(new, NatStatus::Private); - } - other => panic!("Unexpected behaviour event: {other:?}."), - } - - assert_eq!(client.behaviour().confidence(), 0); - assert_eq!(client.behaviour().nat_status(), NatStatus::Private); - assert!(client.behaviour().public_address().is_none()); - // Test new public listening address client.listen().await; @@ -142,12 +100,14 @@ async fn test_auto_probe() { } SwarmEvent::Behaviour(Event::StatusChanged { old, new }) => { // Expect to flip status to public - assert_eq!(old, NatStatus::Private); + assert_eq!(old, 
NatStatus::Unknown); assert!(matches!(new, NatStatus::Public(_))); assert!(new.is_public()); break; } SwarmEvent::IncomingConnection { .. } + | SwarmEvent::ConnectionEstablished { .. } + | SwarmEvent::Dialing { .. } | SwarmEvent::NewListenAddr { .. } | SwarmEvent::ExpiredListenAddr { .. } => {} other => panic!("Unexpected swarm event: {other:?}."), @@ -195,10 +155,10 @@ async fn test_confidence() { // Randomly test either for public or for private status the confidence. let test_public = rand::random::(); if test_public { - client.listen().await; + client.listen().with_memory_addr_external().await; } else { let unreachable_addr = "/ip4/127.0.0.1/tcp/42".parse().unwrap(); - client.add_external_address(unreachable_addr, AddressScore::Infinite); + client.behaviour_mut().probe_address(unreachable_addr); } for i in 0..MAX_CONFIDENCE + 1 { @@ -221,10 +181,10 @@ async fn test_confidence() { peer, error, } if !test_public => { - assert_eq!( + assert!(matches!( error, OutboundProbeError::Response(ResponseError::DialError) - ); + )); (peer.unwrap(), probe_id) } other => panic!("Unexpected Outbound Event: {other:?}"), @@ -301,7 +261,7 @@ async fn test_throttle_server_period() { match client.next_behaviour_event().await { Event::OutboundProbe(OutboundProbeEvent::Error { peer, error, .. 
}) => { assert!(peer.is_none()); - assert_eq!(error, OutboundProbeError::NoServer); + assert!(matches!(error, OutboundProbeError::NoServer)); } other => panic!("Unexpected behaviour event: {other:?}."), } diff --git a/protocols/autonat/tests/test_server.rs b/protocols/autonat/tests/test_server.rs index 319fd84865d..b0610ef59a4 100644 --- a/protocols/autonat/tests/test_server.rs +++ b/protocols/autonat/tests/test_server.rs @@ -24,7 +24,7 @@ use libp2p_autonat::{ use libp2p_core::{multiaddr::Protocol, ConnectedPoint, Endpoint, Multiaddr}; use libp2p_identity::PeerId; use libp2p_swarm::DialError; -use libp2p_swarm::{AddressScore, Swarm, SwarmEvent}; +use libp2p_swarm::{Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt as _; use std::{num::NonZeroU32, time::Duration}; @@ -69,7 +69,7 @@ async fn test_dial_back() { let expect_addr = Multiaddr::empty() .with(Protocol::Ip4(observed_client_ip)) .with(Protocol::Tcp(client_port)) - .with(Protocol::P2p(client_id.into())); + .with(Protocol::P2p(client_id)); let request_probe_id = match server.next_behaviour_event().await { Event::InboundProbe(InboundProbeEvent::Request { peer, @@ -96,6 +96,7 @@ async fn test_dial_back() { num_established, concurrent_dial_errors, established_in: _, + connection_id: _, } => { assert_eq!(peer_id, client_id); assert_eq!(num_established, NonZeroU32::new(2).unwrap()); @@ -103,7 +104,10 @@ async fn test_dial_back() { assert_eq!(address, expect_addr); break; } - SwarmEvent::Dialing(peer) => assert_eq!(peer, client_id), + SwarmEvent::Dialing { + peer_id: Some(peer), + .. + } => assert_eq!(peer, client_id), SwarmEvent::NewListenAddr { .. } | SwarmEvent::ExpiredListenAddr { .. 
} => {} other => panic!("Unexpected swarm event: {other:?}."), } @@ -127,10 +131,9 @@ async fn test_dial_back() { async fn test_dial_error() { let (mut server, server_id, server_addr) = new_server_swarm(None).await; let (mut client, client_id) = new_client_swarm(server_id, server_addr).await; - client.add_external_address( - "/ip4/127.0.0.1/tcp/12345".parse().unwrap(), - AddressScore::Infinite, - ); + client + .behaviour_mut() + .probe_address("/ip4/127.0.0.1/tcp/12345".parse().unwrap()); async_std::task::spawn(client.loop_on_next()); let request_probe_id = match server.next_behaviour_event().await { @@ -143,12 +146,15 @@ async fn test_dial_error() { loop { match server.next_swarm_event().await { - SwarmEvent::OutgoingConnectionError { peer_id, error } => { + SwarmEvent::OutgoingConnectionError { peer_id, error, .. } => { assert_eq!(peer_id.unwrap(), client_id); assert!(matches!(error, DialError::Transport(_))); break; } - SwarmEvent::Dialing(peer) => assert_eq!(peer, client_id), + SwarmEvent::Dialing { + peer_id: Some(peer), + .. + } => assert_eq!(peer, client_id), SwarmEvent::NewListenAddr { .. } | SwarmEvent::ExpiredListenAddr { .. 
} => {} other => panic!("Unexpected swarm event: {other:?}."), } @@ -162,7 +168,10 @@ async fn test_dial_error() { }) => { assert_eq!(probe_id, request_probe_id); assert_eq!(peer, client_id); - assert_eq!(error, InboundProbeError::Response(ResponseError::DialError)); + assert!(matches!( + error, + InboundProbeError::Response(ResponseError::DialError) + )); } other => panic!("Unexpected behaviour event: {other:?}."), } @@ -246,10 +255,10 @@ async fn test_throttle_peer_max() { }) => { assert_eq!(client_id, peer); assert_ne!(first_probe_id, probe_id); - assert_eq!( + assert!(matches!( error, InboundProbeError::Response(ResponseError::DialRefused) - ) + )); } other => panic!("Unexpected behaviour event: {other:?}."), }; @@ -267,10 +276,9 @@ async fn test_dial_multiple_addr() { let (mut client, client_id) = new_client_swarm(server_id, server_addr.clone()).await; client.listen().await; - client.add_external_address( - "/ip4/127.0.0.1/tcp/12345".parse().unwrap(), - AddressScore::Infinite, - ); + client + .behaviour_mut() + .probe_address("/ip4/127.0.0.1/tcp/12345".parse().unwrap()); async_std::task::spawn(client.loop_on_next()); let dial_addresses = match server.next_behaviour_event().await { @@ -300,14 +308,17 @@ async fn test_dial_multiple_addr() { let dial_errors = concurrent_dial_errors.unwrap(); // The concurrent dial might not be fast enough to produce a dial error. - if let Some((addr, _)) = dial_errors.get(0) { + if let Some((addr, _)) = dial_errors.first() { assert_eq!(addr, &dial_addresses[0]); } assert_eq!(address, dial_addresses[1]); break; } - SwarmEvent::Dialing(peer) => assert_eq!(peer, client_id), + SwarmEvent::Dialing { + peer_id: Some(peer), + .. + } => assert_eq!(peer, client_id), SwarmEvent::NewListenAddr { .. } | SwarmEvent::ExpiredListenAddr { .. 
} => {} other => panic!("Unexpected swarm event: {other:?}."), } diff --git a/protocols/dcutr/CHANGELOG.md b/protocols/dcutr/CHANGELOG.md index 5fb84593488..d3857373658 100644 --- a/protocols/dcutr/CHANGELOG.md +++ b/protocols/dcutr/CHANGELOG.md @@ -1,11 +1,25 @@ -## 0.10.0 - unreleased +## 0.11.0 + +- Add `ConnectionId` to `Event::DirectConnectionUpgradeSucceeded` and `Event::DirectConnectionUpgradeFailed`. + See [PR 4558](https://github.com/libp2p/rust-libp2p/pull/4558). +- Exchange address _candidates_ instead of external addresses in `CONNECT`. + If hole-punching wasn't working properly for you until now, this might be the reason why. + See [PR 4624](https://github.com/libp2p/rust-libp2p/pull/4624). +- Simplify public API. + We now only emit a single event: whether the hole-punch was successful or not. + See [PR 4749](https://github.com/libp2p/rust-libp2p/pull/4749). + +## 0.10.0 - Raise MSRV to 1.65. See [PR 3715]. - Remove deprecated items. See [PR 3700]. +- Keep connection alive while we are using it. See [PR 3960]. 
+ [PR 3715]: https://github.com/libp2p/rust-libp2p/pull/3715 [PR 3700]: https://github.com/libp2p/rust-libp2p/pull/3700 +[PR 3960]: https://github.com/libp2p/rust-libp2p/pull/3960 ## 0.9.1 diff --git a/protocols/dcutr/Cargo.toml b/protocols/dcutr/Cargo.toml index 05cf2a62ec6..cf353f5aef6 100644 --- a/protocols/dcutr/Cargo.toml +++ b/protocols/dcutr/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-dcutr" edition = "2021" rust-version = { workspace = true } description = "Direct connection upgrade through relay" -version = "0.10.0" +version = "0.11.0" authors = ["Max Inden "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -11,24 +11,25 @@ keywords = ["peer-to-peer", "libp2p", "networking"] categories = ["network-programming", "asynchronous"] [dependencies] -asynchronous-codec = "0.6" -either = "1.6.0" -futures = "0.3.28" +asynchronous-codec = { workspace = true } +either = "1.9.0" +futures = "0.3.30" futures-timer = "3.0" -instant = "0.1.11" +instant = "0.1.12" libp2p-core = { workspace = true } libp2p-swarm = { workspace = true } libp2p-identity = { workspace = true } -log = "0.4" quick-protobuf = "0.8" quick-protobuf-codec = { workspace = true } thiserror = "1.0" +tracing = "0.1.37" void = "1" +lru = "0.12.1" +futures-bounded = { workspace = true } [dev-dependencies] async-std = { version = "1.12.0", features = ["attributes"] } -clap = { version = "4.2.7", features = ["derive"] } -env_logger = "0.10.0" +clap = { version = "4.4.11", features = ["derive"] } libp2p-dns = { workspace = true, features = ["async-std"] } libp2p-identify = { workspace = true } libp2p-noise = { workspace = true } @@ -36,10 +37,11 @@ libp2p-ping = { workspace = true } libp2p-plaintext = { workspace = true } libp2p-relay = { workspace = true } libp2p-swarm = { workspace = true, features = ["macros"] } -libp2p-swarm-test = { workspace = true } +libp2p-swarm-test = { path = "../../swarm-test" } libp2p-tcp = { workspace = true, features = ["async-io"] } libp2p-yamux = { 
workspace = true } rand = "0.8" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } # Passing arguments to the docsrs builder in order to properly document cfg's. # More information: https://docs.rs/about/builds#cross-compiling @@ -47,3 +49,6 @@ rand = "0.8" all-features = true rustdoc-args = ["--cfg", "docsrs"] rustc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true diff --git a/protocols/dcutr/src/behaviour.rs b/protocols/dcutr/src/behaviour.rs new file mode 100644 index 00000000000..3742eb512f5 --- /dev/null +++ b/protocols/dcutr/src/behaviour.rs @@ -0,0 +1,379 @@ +// Copyright 2021 Protocol Labs. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +//! [`NetworkBehaviour`] to act as a direct connection upgrade through relay node. 
+ +use crate::{handler, protocol}; +use either::Either; +use libp2p_core::connection::ConnectedPoint; +use libp2p_core::multiaddr::Protocol; +use libp2p_core::{Endpoint, Multiaddr}; +use libp2p_identity::PeerId; +use libp2p_swarm::behaviour::{ConnectionClosed, DialFailure, FromSwarm}; +use libp2p_swarm::dial_opts::{self, DialOpts}; +use libp2p_swarm::{ + dummy, ConnectionDenied, ConnectionHandler, ConnectionId, NewExternalAddrCandidate, THandler, + THandlerOutEvent, +}; +use libp2p_swarm::{NetworkBehaviour, NotifyHandler, THandlerInEvent, ToSwarm}; +use lru::LruCache; +use std::collections::{HashMap, HashSet, VecDeque}; +use std::num::NonZeroUsize; +use std::task::{Context, Poll}; +use thiserror::Error; +use void::Void; + +pub(crate) const MAX_NUMBER_OF_UPGRADE_ATTEMPTS: u8 = 3; + +/// The events produced by the [`Behaviour`]. +#[derive(Debug)] +pub struct Event { + pub remote_peer_id: PeerId, + pub result: Result, +} + +#[derive(Debug, Error)] +#[error("Failed to hole-punch connection: {inner}")] +pub struct Error { + inner: InnerError, +} + +#[derive(Debug, Error)] +enum InnerError { + #[error("Giving up after {0} dial attempts")] + AttemptsExceeded(u8), + #[error("Inbound stream error: {0}")] + InboundError(protocol::inbound::Error), + #[error("Outbound stream error: {0}")] + OutboundError(protocol::outbound::Error), +} + +pub struct Behaviour { + /// Queue of actions to return when polled. + queued_events: VecDeque>>, + + /// All direct (non-relayed) connections. + direct_connections: HashMap>, + + address_candidates: Candidates, + + direct_to_relayed_connections: HashMap, + + /// Indexed by the [`ConnectionId`] of the relayed connection and + /// the [`PeerId`] we are trying to establish a direct connection to. 
+ outgoing_direct_connection_attempts: HashMap<(ConnectionId, PeerId), u8>, +} + +impl Behaviour { + pub fn new(local_peer_id: PeerId) -> Self { + Behaviour { + queued_events: Default::default(), + direct_connections: Default::default(), + address_candidates: Candidates::new(local_peer_id), + direct_to_relayed_connections: Default::default(), + outgoing_direct_connection_attempts: Default::default(), + } + } + + fn observed_addresses(&self) -> Vec { + self.address_candidates.iter().cloned().collect() + } + + fn on_dial_failure( + &mut self, + DialFailure { + peer_id, + connection_id: failed_direct_connection, + .. + }: DialFailure, + ) { + let Some(peer_id) = peer_id else { + return; + }; + + let Some(relayed_connection_id) = self + .direct_to_relayed_connections + .get(&failed_direct_connection) + else { + return; + }; + + let Some(attempt) = self + .outgoing_direct_connection_attempts + .get(&(*relayed_connection_id, peer_id)) + else { + return; + }; + + if *attempt < MAX_NUMBER_OF_UPGRADE_ATTEMPTS { + self.queued_events.push_back(ToSwarm::NotifyHandler { + handler: NotifyHandler::One(*relayed_connection_id), + peer_id, + event: Either::Left(handler::relayed::Command::Connect), + }) + } else { + self.queued_events.extend([ToSwarm::GenerateEvent(Event { + remote_peer_id: peer_id, + result: Err(Error { + inner: InnerError::AttemptsExceeded(MAX_NUMBER_OF_UPGRADE_ATTEMPTS), + }), + })]); + } + } + + fn on_connection_closed( + &mut self, + ConnectionClosed { + peer_id, + connection_id, + endpoint: connected_point, + .. 
+ }: ConnectionClosed, + ) { + if !connected_point.is_relayed() { + let connections = self + .direct_connections + .get_mut(&peer_id) + .expect("Peer of direct connection to be tracked."); + connections + .remove(&connection_id) + .then_some(()) + .expect("Direct connection to be tracked."); + if connections.is_empty() { + self.direct_connections.remove(&peer_id); + } + } + } +} + +impl NetworkBehaviour for Behaviour { + type ConnectionHandler = Either; + type ToSwarm = Event; + + fn handle_established_inbound_connection( + &mut self, + connection_id: ConnectionId, + peer: PeerId, + local_addr: &Multiaddr, + remote_addr: &Multiaddr, + ) -> Result, ConnectionDenied> { + if is_relayed(local_addr) { + let connected_point = ConnectedPoint::Listener { + local_addr: local_addr.clone(), + send_back_addr: remote_addr.clone(), + }; + let mut handler = + handler::relayed::Handler::new(connected_point, self.observed_addresses()); + handler.on_behaviour_event(handler::relayed::Command::Connect); + + return Ok(Either::Left(handler)); // TODO: We could make two `handler::relayed::Handler` here, one inbound one outbound. + } + self.direct_connections + .entry(peer) + .or_default() + .insert(connection_id); + + assert!( + self.direct_to_relayed_connections + .get(&connection_id) + .is_none(), + "state mismatch" + ); + + Ok(Either::Right(dummy::ConnectionHandler)) + } + + fn handle_established_outbound_connection( + &mut self, + connection_id: ConnectionId, + peer: PeerId, + addr: &Multiaddr, + role_override: Endpoint, + ) -> Result, ConnectionDenied> { + if is_relayed(addr) { + return Ok(Either::Left(handler::relayed::Handler::new( + ConnectedPoint::Dialer { + address: addr.clone(), + role_override, + }, + self.observed_addresses(), + ))); // TODO: We could make two `handler::relayed::Handler` here, one inbound one outbound. + } + + self.direct_connections + .entry(peer) + .or_default() + .insert(connection_id); + + // Whether this is a connection requested by this behaviour. 
+ if let Some(&relayed_connection_id) = self.direct_to_relayed_connections.get(&connection_id) + { + if role_override == Endpoint::Listener { + assert!( + self.outgoing_direct_connection_attempts + .remove(&(relayed_connection_id, peer)) + .is_some(), + "state mismatch" + ); + } + + self.queued_events.extend([ToSwarm::GenerateEvent(Event { + remote_peer_id: peer, + result: Ok(connection_id), + })]); + } + Ok(Either::Right(dummy::ConnectionHandler)) + } + + fn on_connection_handler_event( + &mut self, + event_source: PeerId, + connection_id: ConnectionId, + handler_event: THandlerOutEvent, + ) { + let relayed_connection_id = match handler_event.as_ref() { + Either::Left(_) => connection_id, + Either::Right(_) => match self.direct_to_relayed_connections.get(&connection_id) { + None => { + // If the connection ID is unknown to us, it means we didn't create it so ignore any event coming from it. + return; + } + Some(relayed_connection_id) => *relayed_connection_id, + }, + }; + + match handler_event { + Either::Left(handler::relayed::Event::InboundConnectNegotiated { remote_addrs }) => { + tracing::debug!(target=%event_source, addresses=?remote_addrs, "Attempting to hole-punch as dialer"); + + let opts = DialOpts::peer_id(event_source) + .addresses(remote_addrs) + .condition(dial_opts::PeerCondition::Always) + .build(); + + let maybe_direct_connection_id = opts.connection_id(); + + self.direct_to_relayed_connections + .insert(maybe_direct_connection_id, relayed_connection_id); + self.queued_events.push_back(ToSwarm::Dial { opts }); + } + Either::Left(handler::relayed::Event::InboundConnectFailed { error }) => { + self.queued_events.push_back(ToSwarm::GenerateEvent(Event { + remote_peer_id: event_source, + result: Err(Error { + inner: InnerError::InboundError(error), + }), + })); + } + Either::Left(handler::relayed::Event::OutboundConnectFailed { error }) => { + self.queued_events.push_back(ToSwarm::GenerateEvent(Event { + remote_peer_id: event_source, + result: 
Err(Error { + inner: InnerError::OutboundError(error), + }), + })); + + // Maybe treat these as transient and retry? + } + Either::Left(handler::relayed::Event::OutboundConnectNegotiated { remote_addrs }) => { + tracing::debug!(target=%event_source, addresses=?remote_addrs, "Attempting to hole-punch as listener"); + + let opts = DialOpts::peer_id(event_source) + .condition(dial_opts::PeerCondition::Always) + .addresses(remote_addrs) + .override_role() + .build(); + + let maybe_direct_connection_id = opts.connection_id(); + + self.direct_to_relayed_connections + .insert(maybe_direct_connection_id, relayed_connection_id); + *self + .outgoing_direct_connection_attempts + .entry((relayed_connection_id, event_source)) + .or_default() += 1; + self.queued_events.push_back(ToSwarm::Dial { opts }); + } + Either::Right(never) => void::unreachable(never), + }; + } + + #[tracing::instrument(level = "trace", name = "NetworkBehaviour::poll", skip(self))] + fn poll(&mut self, _: &mut Context<'_>) -> Poll>> { + if let Some(event) = self.queued_events.pop_front() { + return Poll::Ready(event); + } + + Poll::Pending + } + + fn on_swarm_event(&mut self, event: FromSwarm) { + match event { + FromSwarm::ConnectionClosed(connection_closed) => { + self.on_connection_closed(connection_closed) + } + FromSwarm::DialFailure(dial_failure) => self.on_dial_failure(dial_failure), + FromSwarm::NewExternalAddrCandidate(NewExternalAddrCandidate { addr }) => { + self.address_candidates.add(addr.clone()); + } + _ => {} + } + } +} + +/// Stores our address candidates. +/// +/// We use an [`LruCache`] to favor addresses that are reported more often. +/// When attempting a hole-punch, we will try more frequent addresses first. +/// Most of these addresses will come from observations by other nodes (via e.g. the identify protocol). +/// More common observations mean a more likely stable port-mapping and thus a higher chance of a successful hole-punch. 
+struct Candidates { + inner: LruCache, + me: PeerId, +} + +impl Candidates { + fn new(me: PeerId) -> Self { + Self { + inner: LruCache::new(NonZeroUsize::new(20).expect("20 > 0")), + me, + } + } + + fn add(&mut self, mut address: Multiaddr) { + if is_relayed(&address) { + return; + } + + if address.iter().last() != Some(Protocol::P2p(self.me)) { + address.push(Protocol::P2p(self.me)); + } + + self.inner.push(address, ()); + } + + fn iter(&self) -> impl Iterator { + self.inner.iter().map(|(a, _)| a) + } +} + +fn is_relayed(addr: &Multiaddr) -> bool { + addr.iter().any(|p| p == Protocol::P2pCircuit) +} diff --git a/protocols/dcutr/src/behaviour_impl.rs b/protocols/dcutr/src/behaviour_impl.rs deleted file mode 100644 index 42920b2f001..00000000000 --- a/protocols/dcutr/src/behaviour_impl.rs +++ /dev/null @@ -1,452 +0,0 @@ -// Copyright 2021 Protocol Labs. -// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - -//! 
[`NetworkBehaviour`] to act as a direct connection upgrade through relay node. - -use crate::handler; -use either::Either; -use libp2p_core::connection::ConnectedPoint; -use libp2p_core::multiaddr::Protocol; -use libp2p_core::{Endpoint, Multiaddr}; -use libp2p_identity::PeerId; -use libp2p_swarm::behaviour::{ConnectionClosed, ConnectionEstablished, DialFailure, FromSwarm}; -use libp2p_swarm::dial_opts::{self, DialOpts}; -use libp2p_swarm::{dummy, ConnectionDenied, ConnectionId, THandler, THandlerOutEvent}; -use libp2p_swarm::{ - ConnectionHandlerUpgrErr, ExternalAddresses, NetworkBehaviour, NotifyHandler, PollParameters, - THandlerInEvent, ToSwarm, -}; -use std::collections::{HashMap, HashSet, VecDeque}; -use std::task::{Context, Poll}; -use thiserror::Error; -use void::Void; - -const MAX_NUMBER_OF_UPGRADE_ATTEMPTS: u8 = 3; - -/// The events produced by the [`Behaviour`]. -#[derive(Debug)] -pub enum Event { - InitiatedDirectConnectionUpgrade { - remote_peer_id: PeerId, - local_relayed_addr: Multiaddr, - }, - RemoteInitiatedDirectConnectionUpgrade { - remote_peer_id: PeerId, - remote_relayed_addr: Multiaddr, - }, - DirectConnectionUpgradeSucceeded { - remote_peer_id: PeerId, - }, - DirectConnectionUpgradeFailed { - remote_peer_id: PeerId, - error: Error, - }, -} - -#[derive(Debug, Error)] -pub enum Error { - #[error("Failed to dial peer.")] - Dial, - #[error("Failed to establish substream: {0}.")] - Handler(ConnectionHandlerUpgrErr), -} - -pub struct Behaviour { - /// Queue of actions to return when polled. - queued_events: VecDeque>>>, - - /// All direct (non-relayed) connections. - direct_connections: HashMap>, - - external_addresses: ExternalAddresses, - - local_peer_id: PeerId, - - direct_to_relayed_connections: HashMap, - - /// Indexed by the [`ConnectionId`] of the relayed connection and - /// the [`PeerId`] we are trying to establish a direct connection to. 
- outgoing_direct_connection_attempts: HashMap<(ConnectionId, PeerId), u8>, -} - -impl Behaviour { - pub fn new(local_peer_id: PeerId) -> Self { - Behaviour { - queued_events: Default::default(), - direct_connections: Default::default(), - external_addresses: Default::default(), - local_peer_id, - direct_to_relayed_connections: Default::default(), - outgoing_direct_connection_attempts: Default::default(), - } - } - - fn observed_addreses(&self) -> Vec { - self.external_addresses - .iter() - .cloned() - .filter(|a| !a.iter().any(|p| p == Protocol::P2pCircuit)) - .map(|a| a.with(Protocol::P2p(self.local_peer_id.into()))) - .collect() - } - - fn on_connection_established( - &mut self, - ConnectionEstablished { - peer_id, - connection_id, - endpoint: connected_point, - .. - }: ConnectionEstablished, - ) { - if connected_point.is_relayed() { - if connected_point.is_listener() && !self.direct_connections.contains_key(&peer_id) { - // TODO: Try dialing the remote peer directly. Specification: - // - // > The protocol starts with the completion of a relay connection from A to B. Upon - // observing the new connection, the inbound peer (here B) checks the addresses - // advertised by A via identify. If that set includes public addresses, then A may - // be reachable by a direct connection, in which case B attempts a unilateral - // connection upgrade by initiating a direct connection to A. - // - // https://github.com/libp2p/specs/blob/master/relay/DCUtR.md#the-protocol - self.queued_events.extend([ - ToSwarm::NotifyHandler { - peer_id, - handler: NotifyHandler::One(connection_id), - event: Either::Left(handler::relayed::Command::Connect { - obs_addrs: self.observed_addreses(), - }), - }, - ToSwarm::GenerateEvent(Event::InitiatedDirectConnectionUpgrade { - remote_peer_id: peer_id, - local_relayed_addr: match connected_point { - ConnectedPoint::Listener { local_addr, .. } => local_addr.clone(), - ConnectedPoint::Dialer { .. 
} => unreachable!("Due to outer if."), - }, - }), - ]); - } - } else { - self.direct_connections - .entry(peer_id) - .or_default() - .insert(connection_id); - } - } - - fn on_dial_failure( - &mut self, - DialFailure { - peer_id, - connection_id: failed_direct_connection, - .. - }: DialFailure, - ) { - let peer_id = if let Some(peer_id) = peer_id { - peer_id - } else { - return; - }; - - let relayed_connection_id = if let Some(relayed_connection_id) = self - .direct_to_relayed_connections - .get(&failed_direct_connection) - { - *relayed_connection_id - } else { - return; - }; - - let attempt = if let Some(attempt) = self - .outgoing_direct_connection_attempts - .get(&(relayed_connection_id, peer_id)) - { - *attempt - } else { - return; - }; - - if attempt < MAX_NUMBER_OF_UPGRADE_ATTEMPTS { - self.queued_events.push_back(ToSwarm::NotifyHandler { - handler: NotifyHandler::One(relayed_connection_id), - peer_id, - event: Either::Left(handler::relayed::Command::Connect { - obs_addrs: self.observed_addreses(), - }), - }) - } else { - self.queued_events.extend([ - ToSwarm::NotifyHandler { - peer_id, - handler: NotifyHandler::One(relayed_connection_id), - event: Either::Left(handler::relayed::Command::UpgradeFinishedDontKeepAlive), - }, - ToSwarm::GenerateEvent(Event::DirectConnectionUpgradeFailed { - remote_peer_id: peer_id, - error: Error::Dial, - }), - ]); - } - } - - fn on_connection_closed( - &mut self, - ConnectionClosed { - peer_id, - connection_id, - endpoint: connected_point, - .. 
- }: ConnectionClosed<::ConnectionHandler>, - ) { - if !connected_point.is_relayed() { - let connections = self - .direct_connections - .get_mut(&peer_id) - .expect("Peer of direct connection to be tracked."); - connections - .remove(&connection_id) - .then_some(()) - .expect("Direct connection to be tracked."); - if connections.is_empty() { - self.direct_connections.remove(&peer_id); - } - } - } -} - -impl NetworkBehaviour for Behaviour { - type ConnectionHandler = Either< - handler::relayed::Handler, - Either, - >; - type OutEvent = Event; - - fn handle_established_inbound_connection( - &mut self, - connection_id: ConnectionId, - peer: PeerId, - local_addr: &Multiaddr, - remote_addr: &Multiaddr, - ) -> Result, ConnectionDenied> { - match self - .outgoing_direct_connection_attempts - .remove(&(connection_id, peer)) - { - None => { - let handler = if is_relayed(local_addr) { - Either::Left(handler::relayed::Handler::new(ConnectedPoint::Listener { - local_addr: local_addr.clone(), - send_back_addr: remote_addr.clone(), - })) // TODO: We could make two `handler::relayed::Handler` here, one inbound one outbound. - } else { - Either::Right(Either::Right(dummy::ConnectionHandler)) - }; - - Ok(handler) - } - Some(_) => { - assert!( - !is_relayed(local_addr), - "`Prototype::DirectConnection` is never created for relayed connection." - ); - - Ok(Either::Right(Either::Left( - handler::direct::Handler::default(), - ))) - } - } - } - - fn handle_established_outbound_connection( - &mut self, - connection_id: ConnectionId, - peer: PeerId, - addr: &Multiaddr, - role_override: Endpoint, - ) -> Result, ConnectionDenied> { - match self - .outgoing_direct_connection_attempts - .remove(&(connection_id, peer)) - { - None => { - let handler = if is_relayed(addr) { - Either::Left(handler::relayed::Handler::new(ConnectedPoint::Dialer { - address: addr.clone(), - role_override, - })) // TODO: We could make two `handler::relayed::Handler` here, one inbound one outbound. 
- } else { - Either::Right(Either::Right(dummy::ConnectionHandler)) - }; - - Ok(handler) - } - Some(_) => { - assert!( - !is_relayed(addr), - "`Prototype::DirectConnection` is never created for relayed connection." - ); - - Ok(Either::Right(Either::Left( - handler::direct::Handler::default(), - ))) - } - } - } - - fn on_connection_handler_event( - &mut self, - event_source: PeerId, - connection_id: ConnectionId, - handler_event: THandlerOutEvent, - ) { - let relayed_connection_id = match handler_event.as_ref() { - Either::Left(_) => connection_id, - Either::Right(_) => match self.direct_to_relayed_connections.get(&connection_id) { - None => { - // If the connection ID is unknown to us, it means we didn't create it so ignore any event coming from it. - return; - } - Some(relayed_connection_id) => *relayed_connection_id, - }, - }; - - match handler_event { - Either::Left(handler::relayed::Event::InboundConnectRequest { - inbound_connect, - remote_addr, - }) => { - self.queued_events.extend([ - ToSwarm::NotifyHandler { - handler: NotifyHandler::One(relayed_connection_id), - peer_id: event_source, - event: Either::Left(handler::relayed::Command::AcceptInboundConnect { - inbound_connect, - obs_addrs: self.observed_addreses(), - }), - }, - ToSwarm::GenerateEvent(Event::RemoteInitiatedDirectConnectionUpgrade { - remote_peer_id: event_source, - remote_relayed_addr: remote_addr, - }), - ]); - } - Either::Left(handler::relayed::Event::InboundNegotiationFailed { error }) => { - self.queued_events.push_back(ToSwarm::GenerateEvent( - Event::DirectConnectionUpgradeFailed { - remote_peer_id: event_source, - error: Error::Handler(error), - }, - )); - } - Either::Left(handler::relayed::Event::InboundConnectNegotiated(remote_addrs)) => { - let opts = DialOpts::peer_id(event_source) - .addresses(remote_addrs) - .condition(dial_opts::PeerCondition::Always) - .build(); - - let maybe_direct_connection_id = opts.connection_id(); - - self.direct_to_relayed_connections - 
.insert(maybe_direct_connection_id, relayed_connection_id); - self.queued_events.push_back(ToSwarm::Dial { opts }); - } - Either::Left(handler::relayed::Event::OutboundNegotiationFailed { error }) => { - self.queued_events.push_back(ToSwarm::GenerateEvent( - Event::DirectConnectionUpgradeFailed { - remote_peer_id: event_source, - error: Error::Handler(error), - }, - )); - } - Either::Left(handler::relayed::Event::OutboundConnectNegotiated { remote_addrs }) => { - let opts = DialOpts::peer_id(event_source) - .condition(dial_opts::PeerCondition::Always) - .addresses(remote_addrs) - .override_role() - .build(); - - let maybe_direct_connection_id = opts.connection_id(); - - self.direct_to_relayed_connections - .insert(maybe_direct_connection_id, relayed_connection_id); - *self - .outgoing_direct_connection_attempts - .entry((relayed_connection_id, event_source)) - .or_default() += 1; - self.queued_events.push_back(ToSwarm::Dial { opts }); - } - Either::Right(Either::Left(handler::direct::Event::DirectConnectionEstablished)) => { - self.queued_events.extend([ - ToSwarm::NotifyHandler { - peer_id: event_source, - handler: NotifyHandler::One(relayed_connection_id), - event: Either::Left( - handler::relayed::Command::UpgradeFinishedDontKeepAlive, - ), - }, - ToSwarm::GenerateEvent(Event::DirectConnectionUpgradeSucceeded { - remote_peer_id: event_source, - }), - ]); - } - Either::Right(Either::Right(never)) => void::unreachable(never), - }; - } - - fn poll( - &mut self, - _cx: &mut Context<'_>, - _: &mut impl PollParameters, - ) -> Poll>> { - if let Some(event) = self.queued_events.pop_front() { - return Poll::Ready(event); - } - - Poll::Pending - } - - fn on_swarm_event(&mut self, event: FromSwarm) { - self.external_addresses.on_swarm_event(&event); - - match event { - FromSwarm::ConnectionEstablished(connection_established) => { - self.on_connection_established(connection_established) - } - FromSwarm::ConnectionClosed(connection_closed) => { - 
self.on_connection_closed(connection_closed) - } - FromSwarm::DialFailure(dial_failure) => self.on_dial_failure(dial_failure), - FromSwarm::AddressChange(_) - | FromSwarm::ListenFailure(_) - | FromSwarm::NewListener(_) - | FromSwarm::NewListenAddr(_) - | FromSwarm::ExpiredListenAddr(_) - | FromSwarm::ListenerError(_) - | FromSwarm::ListenerClosed(_) - | FromSwarm::NewExternalAddr(_) - | FromSwarm::ExpiredExternalAddr(_) => {} - } - } -} - -fn is_relayed(addr: &Multiaddr) -> bool { - addr.iter().any(|p| p == Protocol::P2pCircuit) -} diff --git a/protocols/dcutr/src/handler.rs b/protocols/dcutr/src/handler.rs index 0339b9654d3..d679c1d9d22 100644 --- a/protocols/dcutr/src/handler.rs +++ b/protocols/dcutr/src/handler.rs @@ -18,5 +18,4 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -pub(crate) mod direct; pub(crate) mod relayed; diff --git a/protocols/dcutr/src/handler/direct.rs b/protocols/dcutr/src/handler/direct.rs deleted file mode 100644 index aab212483eb..00000000000 --- a/protocols/dcutr/src/handler/direct.rs +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2021 Protocol Labs. -// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - -//! [`ConnectionHandler`] handling direct connection upgraded through a relayed connection. - -use libp2p_core::upgrade::DeniedUpgrade; -use libp2p_swarm::handler::ConnectionEvent; -use libp2p_swarm::{ - ConnectionHandler, ConnectionHandlerEvent, ConnectionHandlerUpgrErr, KeepAlive, - SubstreamProtocol, -}; -use std::task::{Context, Poll}; -use void::Void; - -#[derive(Debug)] -pub enum Event { - DirectConnectionEstablished, -} - -#[derive(Default)] -pub struct Handler { - reported: bool, -} - -impl ConnectionHandler for Handler { - type InEvent = void::Void; - type OutEvent = Event; - type Error = ConnectionHandlerUpgrErr; - type InboundProtocol = DeniedUpgrade; - type OutboundProtocol = DeniedUpgrade; - type OutboundOpenInfo = Void; - type InboundOpenInfo = (); - - fn listen_protocol(&self) -> SubstreamProtocol { - SubstreamProtocol::new(DeniedUpgrade, ()) - } - - fn on_behaviour_event(&mut self, _: Self::InEvent) {} - - fn connection_keep_alive(&self) -> KeepAlive { - KeepAlive::No - } - - fn poll( - &mut self, - _: &mut Context<'_>, - ) -> Poll< - ConnectionHandlerEvent< - Self::OutboundProtocol, - Self::OutboundOpenInfo, - Self::OutEvent, - Self::Error, - >, - > { - if !self.reported { - self.reported = true; - return Poll::Ready(ConnectionHandlerEvent::Custom( - Event::DirectConnectionEstablished, - )); - } - Poll::Pending - } - - fn on_connection_event( - &mut self, - event: ConnectionEvent< - Self::InboundProtocol, - Self::OutboundProtocol, - Self::InboundOpenInfo, - Self::OutboundOpenInfo, - >, - ) { - match event { - ConnectionEvent::FullyNegotiatedInbound(_) - | ConnectionEvent::FullyNegotiatedOutbound(_) - | ConnectionEvent::DialUpgradeError(_) - | 
ConnectionEvent::ListenUpgradeError(_) - | ConnectionEvent::AddressChange(_) => {} - } - } -} diff --git a/protocols/dcutr/src/handler/relayed.rs b/protocols/dcutr/src/handler/relayed.rs index 3b48303d2e9..eba58f89313 100644 --- a/protocols/dcutr/src/handler/relayed.rs +++ b/protocols/dcutr/src/handler/relayed.rs @@ -20,140 +20,72 @@ //! [`ConnectionHandler`] handling relayed connection potentially upgraded to a direct connection. -use crate::protocol; +use crate::behaviour::MAX_NUMBER_OF_UPGRADE_ATTEMPTS; +use crate::{protocol, PROTOCOL_NAME}; use either::Either; use futures::future; -use futures::future::{BoxFuture, FutureExt}; -use instant::Instant; use libp2p_core::multiaddr::Multiaddr; -use libp2p_core::upgrade::{DeniedUpgrade, NegotiationError, UpgradeError}; +use libp2p_core::upgrade::{DeniedUpgrade, ReadyUpgrade}; use libp2p_core::ConnectedPoint; use libp2p_swarm::handler::{ ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound, ListenUpgradeError, }; use libp2p_swarm::{ - ConnectionHandler, ConnectionHandlerEvent, ConnectionHandlerUpgrErr, KeepAlive, + ConnectionHandler, ConnectionHandlerEvent, StreamProtocol, StreamUpgradeError, SubstreamProtocol, }; +use protocol::{inbound, outbound}; use std::collections::VecDeque; -use std::fmt; +use std::io; use std::task::{Context, Poll}; use std::time::Duration; +#[derive(Debug)] pub enum Command { - Connect { - obs_addrs: Vec, - }, - AcceptInboundConnect { - obs_addrs: Vec, - inbound_connect: Box, - }, - /// Upgrading the relayed connection to a direct connection either failed for good or succeeded. - /// There is no need to keep the relayed connection alive for the sake of upgrading to a direct - /// connection. 
- UpgradeFinishedDontKeepAlive, -} - -impl fmt::Debug for Command { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - Command::Connect { obs_addrs } => f - .debug_struct("Command::Connect") - .field("obs_addrs", obs_addrs) - .finish(), - Command::AcceptInboundConnect { - obs_addrs, - inbound_connect: _, - } => f - .debug_struct("Command::AcceptInboundConnect") - .field("obs_addrs", obs_addrs) - .finish(), - Command::UpgradeFinishedDontKeepAlive => f - .debug_struct("Command::UpgradeFinishedDontKeepAlive") - .finish(), - } - } + Connect, } +#[derive(Debug)] pub enum Event { - InboundConnectRequest { - inbound_connect: Box, - remote_addr: Multiaddr, - }, - InboundNegotiationFailed { - error: ConnectionHandlerUpgrErr, - }, - InboundConnectNegotiated(Vec), - OutboundNegotiationFailed { - error: ConnectionHandlerUpgrErr, - }, - OutboundConnectNegotiated { - remote_addrs: Vec, - }, -} - -impl fmt::Debug for Event { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - Event::InboundConnectRequest { - inbound_connect: _, - remote_addr, - } => f - .debug_struct("Event::InboundConnectRequest") - .field("remote_addrs", remote_addr) - .finish(), - Event::InboundNegotiationFailed { error } => f - .debug_struct("Event::InboundNegotiationFailed") - .field("error", error) - .finish(), - Event::InboundConnectNegotiated(addrs) => f - .debug_tuple("Event::InboundConnectNegotiated") - .field(addrs) - .finish(), - Event::OutboundNegotiationFailed { error } => f - .debug_struct("Event::OutboundNegotiationFailed") - .field("error", error) - .finish(), - Event::OutboundConnectNegotiated { remote_addrs } => f - .debug_struct("Event::OutboundConnectNegotiated") - .field("remote_addrs", remote_addrs) - .finish(), - } - } + InboundConnectNegotiated { remote_addrs: Vec }, + OutboundConnectNegotiated { remote_addrs: Vec }, + InboundConnectFailed { error: inbound::Error }, + OutboundConnectFailed { error: outbound::Error }, } pub struct 
Handler { endpoint: ConnectedPoint, - /// A pending fatal error that results in the connection being closed. - pending_error: Option< - ConnectionHandlerUpgrErr< - Either, - >, - >, /// Queue of events to return when polled. queued_events: VecDeque< ConnectionHandlerEvent< ::OutboundProtocol, ::OutboundOpenInfo, - ::OutEvent, - ::Error, + ::ToBehaviour, >, >, - /// Inbound connect, accepted by the behaviour, pending completion. - inbound_connect: - Option, protocol::inbound::UpgradeError>>>, - keep_alive: KeepAlive, + + // Inbound DCUtR handshakes + inbound_stream: futures_bounded::FuturesSet, inbound::Error>>, + + // Outbound DCUtR handshake. + outbound_stream: futures_bounded::FuturesSet, outbound::Error>>, + + /// The addresses we will send to the other party for hole-punching attempts. + holepunch_candidates: Vec, + + attempts: u8, } impl Handler { - pub fn new(endpoint: ConnectedPoint) -> Self { + pub fn new(endpoint: ConnectedPoint, holepunch_candidates: Vec) -> Self { Self { endpoint, - pending_error: Default::default(), queued_events: Default::default(), - inbound_connect: Default::default(), - keep_alive: KeepAlive::Until(Instant::now() + Duration::from_secs(30)), + inbound_stream: futures_bounded::FuturesSet::new(Duration::from_secs(10), 1), + outbound_stream: futures_bounded::FuturesSet::new(Duration::from_secs(10), 1), + holepunch_candidates, + attempts: 0, } } @@ -167,17 +99,20 @@ impl Handler { >, ) { match output { - future::Either::Left(inbound_connect) => { - let remote_addr = match &self.endpoint { - ConnectedPoint::Dialer { address, role_override: _ } => address.clone(), - ConnectedPoint::Listener { ..} => unreachable!("`::listen_protocol` denies all incoming substreams as a listener."), - }; - self.queued_events.push_back(ConnectionHandlerEvent::Custom( - Event::InboundConnectRequest { - inbound_connect: Box::new(inbound_connect), - remote_addr, - }, - )); + future::Either::Left(stream) => { + if self + .inbound_stream + 
.try_push(inbound::handshake( + stream, + self.holepunch_candidates.clone(), + )) + .is_err() + { + tracing::warn!( + "New inbound connect stream while still upgrading previous one. Replacing previous with new.", + ); + } + self.attempts += 1; } // A connection listener denies all incoming substreams, thus none can ever be fully negotiated. future::Either::Right(output) => void::unreachable(output), @@ -187,8 +122,7 @@ impl Handler { fn on_fully_negotiated_outbound( &mut self, FullyNegotiatedOutbound { - protocol: protocol::outbound::Connect { obs_addrs }, - .. + protocol: stream, .. }: FullyNegotiatedOutbound< ::OutboundProtocol, ::OutboundOpenInfo, @@ -198,11 +132,18 @@ impl Handler { self.endpoint.is_listener(), "A connection dialer never initiates a connection upgrade." ); - self.queued_events.push_back(ConnectionHandlerEvent::Custom( - Event::OutboundConnectNegotiated { - remote_addrs: obs_addrs, - }, - )); + if self + .outbound_stream + .try_push(outbound::handshake( + stream, + self.holepunch_candidates.clone(), + )) + .is_err() + { + tracing::warn!( + "New outbound connect stream while still upgrading previous one. Replacing previous with new.", + ); + } } fn on_listen_upgrade_error( @@ -212,45 +153,7 @@ impl Handler { ::InboundProtocol, >, ) { - match error { - ConnectionHandlerUpgrErr::Timeout => { - self.queued_events.push_back(ConnectionHandlerEvent::Custom( - Event::InboundNegotiationFailed { - error: ConnectionHandlerUpgrErr::Timeout, - }, - )); - } - ConnectionHandlerUpgrErr::Timer => { - self.queued_events.push_back(ConnectionHandlerEvent::Custom( - Event::InboundNegotiationFailed { - error: ConnectionHandlerUpgrErr::Timer, - }, - )); - } - ConnectionHandlerUpgrErr::Upgrade(UpgradeError::Select(NegotiationError::Failed)) => { - // The remote merely doesn't support the DCUtR protocol. - // This is no reason to close the connection, which may - // successfully communicate with other protocols already. 
- self.keep_alive = KeepAlive::No; - self.queued_events.push_back(ConnectionHandlerEvent::Custom( - Event::InboundNegotiationFailed { - error: ConnectionHandlerUpgrErr::Upgrade(UpgradeError::Select( - NegotiationError::Failed, - )), - }, - )); - } - _ => { - // Anything else is considered a fatal error or misbehaviour of - // the remote peer and results in closing the connection. - self.pending_error = Some(error.map_upgrade_err(|e| { - e.map_err(|e| match e { - Either::Left(e) => Either::Left(e), - Either::Right(v) => void::unreachable(v), - }) - })); - } - } + void::unreachable(error.into_inner()); } fn on_dial_upgrade_error( @@ -260,52 +163,32 @@ impl Handler { ::OutboundProtocol, >, ) { - self.keep_alive = KeepAlive::No; + let error = match error { + StreamUpgradeError::Apply(v) => void::unreachable(v), + StreamUpgradeError::NegotiationFailed => outbound::Error::Unsupported, + StreamUpgradeError::Io(e) => outbound::Error::Io(e), + StreamUpgradeError::Timeout => outbound::Error::Io(io::ErrorKind::TimedOut.into()), + }; - match error { - ConnectionHandlerUpgrErr::Timeout => { - self.queued_events.push_back(ConnectionHandlerEvent::Custom( - Event::OutboundNegotiationFailed { - error: ConnectionHandlerUpgrErr::Timeout, - }, - )); - } - ConnectionHandlerUpgrErr::Upgrade(UpgradeError::Select(NegotiationError::Failed)) => { - // The remote merely doesn't support the DCUtR protocol. - // This is no reason to close the connection, which may - // successfully communicate with other protocols already. - self.queued_events.push_back(ConnectionHandlerEvent::Custom( - Event::OutboundNegotiationFailed { - error: ConnectionHandlerUpgrErr::Upgrade(UpgradeError::Select( - NegotiationError::Failed, - )), - }, - )); - } - _ => { - // Anything else is considered a fatal error or misbehaviour of - // the remote peer and results in closing the connection. 
- self.pending_error = Some(error.map_upgrade_err(|e| e.map_err(Either::Right))); - } - } + self.queued_events + .push_back(ConnectionHandlerEvent::NotifyBehaviour( + Event::OutboundConnectFailed { error }, + )) } } impl ConnectionHandler for Handler { - type InEvent = Command; - type OutEvent = Event; - type Error = ConnectionHandlerUpgrErr< - Either, - >; - type InboundProtocol = Either; - type OutboundProtocol = protocol::outbound::Upgrade; + type FromBehaviour = Command; + type ToBehaviour = Event; + type InboundProtocol = Either, DeniedUpgrade>; + type OutboundProtocol = ReadyUpgrade; type OutboundOpenInfo = (); type InboundOpenInfo = (); fn listen_protocol(&self) -> SubstreamProtocol { match self.endpoint { ConnectedPoint::Dialer { .. } => { - SubstreamProtocol::new(Either::Left(protocol::inbound::Upgrade {}), ()) + SubstreamProtocol::new(Either::Left(ReadyUpgrade::new(PROTOCOL_NAME)), ()) } ConnectedPoint::Listener { .. } => { // By the protocol specification the listening side of a relayed connection @@ -318,78 +201,82 @@ impl ConnectionHandler for Handler { } } - fn on_behaviour_event(&mut self, event: Self::InEvent) { + fn on_behaviour_event(&mut self, event: Self::FromBehaviour) { match event { - Command::Connect { obs_addrs } => { + Command::Connect => { self.queued_events .push_back(ConnectionHandlerEvent::OutboundSubstreamRequest { - protocol: SubstreamProtocol::new( - protocol::outbound::Upgrade::new(obs_addrs), - (), - ), + protocol: SubstreamProtocol::new(ReadyUpgrade::new(PROTOCOL_NAME), ()), }); - } - Command::AcceptInboundConnect { - inbound_connect, - obs_addrs, - } => { - if self - .inbound_connect - .replace(inbound_connect.accept(obs_addrs).boxed()) - .is_some() - { - log::warn!( - "New inbound connect stream while still upgrading previous one. 
\ - Replacing previous with new.", - ); - } - } - Command::UpgradeFinishedDontKeepAlive => { - self.keep_alive = KeepAlive::No; + self.attempts += 1; } } } - fn connection_keep_alive(&self) -> KeepAlive { - self.keep_alive + fn connection_keep_alive(&self) -> bool { + if self.attempts < MAX_NUMBER_OF_UPGRADE_ATTEMPTS { + return true; + } + + false } + #[tracing::instrument(level = "trace", name = "ConnectionHandler::poll", skip(self, cx))] fn poll( &mut self, cx: &mut Context<'_>, ) -> Poll< - ConnectionHandlerEvent< - Self::OutboundProtocol, - Self::OutboundOpenInfo, - Self::OutEvent, - Self::Error, - >, + ConnectionHandlerEvent, > { - // Check for a pending (fatal) error. - if let Some(err) = self.pending_error.take() { - // The handler will not be polled again by the `Swarm`. - return Poll::Ready(ConnectionHandlerEvent::Close(err)); - } - // Return queued events. if let Some(event) = self.queued_events.pop_front() { return Poll::Ready(event); } - if let Some(Poll::Ready(result)) = self.inbound_connect.as_mut().map(|f| f.poll_unpin(cx)) { - self.inbound_connect = None; - match result { - Ok(addresses) => { - return Poll::Ready(ConnectionHandlerEvent::Custom( - Event::InboundConnectNegotiated(addresses), - )); - } - Err(e) => { - return Poll::Ready(ConnectionHandlerEvent::Close( - ConnectionHandlerUpgrErr::Upgrade(UpgradeError::Apply(Either::Left(e))), - )) - } + match self.inbound_stream.poll_unpin(cx) { + Poll::Ready(Ok(Ok(addresses))) => { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + Event::InboundConnectNegotiated { + remote_addrs: addresses, + }, + )) + } + Poll::Ready(Ok(Err(error))) => { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + Event::InboundConnectFailed { error }, + )) + } + Poll::Ready(Err(futures_bounded::Timeout { .. 
})) => { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + Event::InboundConnectFailed { + error: inbound::Error::Io(io::ErrorKind::TimedOut.into()), + }, + )) + } + Poll::Pending => {} + } + + match self.outbound_stream.poll_unpin(cx) { + Poll::Ready(Ok(Ok(addresses))) => { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + Event::OutboundConnectNegotiated { + remote_addrs: addresses, + }, + )) + } + Poll::Ready(Ok(Err(error))) => { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + Event::OutboundConnectFailed { error }, + )) + } + Poll::Ready(Err(futures_bounded::Timeout { .. })) => { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + Event::OutboundConnectFailed { + error: outbound::Error::Io(io::ErrorKind::TimedOut.into()), + }, + )) } + Poll::Pending => {} } Poll::Pending @@ -417,7 +304,7 @@ impl ConnectionHandler for Handler { ConnectionEvent::DialUpgradeError(dial_upgrade_error) => { self.on_dial_upgrade_error(dial_upgrade_error) } - ConnectionEvent::AddressChange(_) => {} + _ => {} } } } diff --git a/protocols/dcutr/src/lib.rs b/protocols/dcutr/src/lib.rs index 6001c9144e7..7c5d28aba19 100644 --- a/protocols/dcutr/src/lib.rs +++ b/protocols/dcutr/src/lib.rs @@ -23,7 +23,7 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -mod behaviour_impl; // TODO: Rename back `behaviour` once deprecation symbols are removed. 
+mod behaviour; mod handler; mod protocol; @@ -33,13 +33,11 @@ mod proto { pub(crate) use self::holepunch::pb::{mod_HolePunch::*, HolePunch}; } -pub use behaviour_impl::Behaviour; -pub use behaviour_impl::Error; -pub use behaviour_impl::Event; +pub use behaviour::{Behaviour, Error, Event}; pub use protocol::PROTOCOL_NAME; pub mod inbound { - pub use crate::protocol::inbound::UpgradeError; + pub use crate::protocol::inbound::ProtocolViolation; } pub mod outbound { - pub use crate::protocol::outbound::UpgradeError; + pub use crate::protocol::outbound::ProtocolViolation; } diff --git a/protocols/dcutr/src/protocol/inbound.rs b/protocols/dcutr/src/protocol/inbound.rs index 83fa926a550..b8f90daf3a1 100644 --- a/protocols/dcutr/src/protocol/inbound.rs +++ b/protocols/dcutr/src/protocol/inbound.rs @@ -20,114 +20,91 @@ use crate::proto; use asynchronous_codec::Framed; -use futures::{future::BoxFuture, prelude::*}; -use libp2p_core::{multiaddr::Protocol, upgrade, Multiaddr}; -use libp2p_swarm::{NegotiatedSubstream, StreamProtocol}; +use futures::prelude::*; +use libp2p_core::{multiaddr::Protocol, Multiaddr}; +use libp2p_swarm::Stream; use std::convert::TryFrom; -use std::iter; +use std::io; use thiserror::Error; -pub struct Upgrade {} - -impl upgrade::UpgradeInfo for Upgrade { - type Info = StreamProtocol; - type InfoIter = iter::Once; +pub(crate) async fn handshake( + stream: Stream, + candidates: Vec, +) -> Result, Error> { + let mut stream = Framed::new( + stream, + quick_protobuf_codec::Codec::new(super::MAX_MESSAGE_SIZE_BYTES), + ); + + let proto::HolePunch { type_pb, ObsAddrs } = stream + .next() + .await + .ok_or(io::Error::from(io::ErrorKind::UnexpectedEof))??; + + if ObsAddrs.is_empty() { + return Err(Error::Protocol(ProtocolViolation::NoAddresses)); + }; + + let obs_addrs = ObsAddrs + .into_iter() + .filter_map(|a| match Multiaddr::try_from(a.to_vec()) { + Ok(a) => Some(a), + Err(e) => { + tracing::debug!("Unable to parse multiaddr: {e}"); + None + } + }) + // 
Filter out relayed addresses. + .filter(|a| { + if a.iter().any(|p| p == Protocol::P2pCircuit) { + tracing::debug!(address=%a, "Dropping relayed address"); + false + } else { + true + } + }) + .collect(); - fn protocol_info(&self) -> Self::InfoIter { - iter::once(super::PROTOCOL_NAME) + if !matches!(type_pb, proto::Type::CONNECT) { + return Err(Error::Protocol(ProtocolViolation::UnexpectedTypeSync)); } -} - -impl upgrade::InboundUpgrade for Upgrade { - type Output = PendingConnect; - type Error = UpgradeError; - type Future = BoxFuture<'static, Result>; - fn upgrade_inbound(self, substream: NegotiatedSubstream, _: Self::Info) -> Self::Future { - let mut substream = Framed::new( - substream, - quick_protobuf_codec::Codec::new(super::MAX_MESSAGE_SIZE_BYTES), - ); + let msg = proto::HolePunch { + type_pb: proto::Type::CONNECT, + ObsAddrs: candidates.into_iter().map(|a| a.to_vec()).collect(), + }; - async move { - let proto::HolePunch { type_pb, ObsAddrs } = - substream.next().await.ok_or(UpgradeError::StreamClosed)??; - - let obs_addrs = if ObsAddrs.is_empty() { - return Err(UpgradeError::NoAddresses); - } else { - ObsAddrs - .into_iter() - .filter_map(|a| match Multiaddr::try_from(a.to_vec()) { - Ok(a) => Some(a), - Err(e) => { - log::debug!("Unable to parse multiaddr: {e}"); - None - } - }) - // Filter out relayed addresses. - .filter(|a| { - if a.iter().any(|p| p == Protocol::P2pCircuit) { - log::debug!("Dropping relayed address {a}"); - false - } else { - true - } - }) - .collect::>() - }; + stream.send(msg).await?; + let proto::HolePunch { type_pb, .. 
} = stream + .next() + .await + .ok_or(io::Error::from(io::ErrorKind::UnexpectedEof))??; - match type_pb { - proto::Type::CONNECT => {} - proto::Type::SYNC => return Err(UpgradeError::UnexpectedTypeSync), - } - - Ok(PendingConnect { - substream, - remote_obs_addrs: obs_addrs, - }) - } - .boxed() + if !matches!(type_pb, proto::Type::SYNC) { + return Err(Error::Protocol(ProtocolViolation::UnexpectedTypeConnect)); } -} -pub struct PendingConnect { - substream: Framed>, - remote_obs_addrs: Vec, + Ok(obs_addrs) } -impl PendingConnect { - pub async fn accept( - mut self, - local_obs_addrs: Vec, - ) -> Result, UpgradeError> { - let msg = proto::HolePunch { - type_pb: proto::Type::CONNECT, - ObsAddrs: local_obs_addrs.into_iter().map(|a| a.to_vec()).collect(), - }; - - self.substream.send(msg).await?; - let proto::HolePunch { type_pb, .. } = self - .substream - .next() - .await - .ok_or(UpgradeError::StreamClosed)??; - - match type_pb { - proto::Type::CONNECT => return Err(UpgradeError::UnexpectedTypeConnect), - proto::Type::SYNC => {} - } +#[derive(Debug, Error)] +pub enum Error { + #[error("IO error")] + Io(#[from] io::Error), + #[error("Protocol error")] + Protocol(#[from] ProtocolViolation), +} - Ok(self.remote_obs_addrs) +impl From for Error { + fn from(e: quick_protobuf_codec::Error) -> Self { + Error::Protocol(ProtocolViolation::Codec(e)) } } #[derive(Debug, Error)] -pub enum UpgradeError { +pub enum ProtocolViolation { #[error(transparent)] Codec(#[from] quick_protobuf_codec::Error), - #[error("Stream closed")] - StreamClosed, #[error("Expected at least one address in reservation.")] NoAddresses, #[error("Failed to parse response type field.")] diff --git a/protocols/dcutr/src/protocol/outbound.rs b/protocols/dcutr/src/protocol/outbound.rs index 00b16e20617..d9cb60a01f6 100644 --- a/protocols/dcutr/src/protocol/outbound.rs +++ b/protocols/dcutr/src/protocol/outbound.rs @@ -19,115 +19,102 @@ // DEALINGS IN THE SOFTWARE. 
use crate::proto; +use crate::PROTOCOL_NAME; use asynchronous_codec::Framed; -use futures::{future::BoxFuture, prelude::*}; +use futures::prelude::*; use futures_timer::Delay; use instant::Instant; -use libp2p_core::{multiaddr::Protocol, upgrade, Multiaddr}; -use libp2p_swarm::{NegotiatedSubstream, StreamProtocol}; +use libp2p_core::{multiaddr::Protocol, Multiaddr}; +use libp2p_swarm::Stream; use std::convert::TryFrom; -use std::iter; +use std::io; use thiserror::Error; -pub struct Upgrade { - obs_addrs: Vec, -} +pub(crate) async fn handshake( + stream: Stream, + candidates: Vec, +) -> Result, Error> { + let mut stream = Framed::new( + stream, + quick_protobuf_codec::Codec::new(super::MAX_MESSAGE_SIZE_BYTES), + ); -impl upgrade::UpgradeInfo for Upgrade { - type Info = StreamProtocol; - type InfoIter = iter::Once; + let msg = proto::HolePunch { + type_pb: proto::Type::CONNECT, + ObsAddrs: candidates.into_iter().map(|a| a.to_vec()).collect(), + }; - fn protocol_info(&self) -> Self::InfoIter { - iter::once(super::PROTOCOL_NAME) - } -} + stream.send(msg).await?; -impl Upgrade { - pub fn new(obs_addrs: Vec) -> Self { - Self { obs_addrs } - } -} + let sent_time = Instant::now(); -impl upgrade::OutboundUpgrade for Upgrade { - type Output = Connect; - type Error = UpgradeError; - type Future = BoxFuture<'static, Result>; + let proto::HolePunch { type_pb, ObsAddrs } = stream + .next() + .await + .ok_or(io::Error::from(io::ErrorKind::UnexpectedEof))??; - fn upgrade_outbound(self, substream: NegotiatedSubstream, _: Self::Info) -> Self::Future { - let mut substream = Framed::new( - substream, - quick_protobuf_codec::Codec::new(super::MAX_MESSAGE_SIZE_BYTES), - ); + let rtt = sent_time.elapsed(); - let msg = proto::HolePunch { - type_pb: proto::Type::CONNECT, - ObsAddrs: self.obs_addrs.into_iter().map(|a| a.to_vec()).collect(), - }; + if !matches!(type_pb, proto::Type::CONNECT) { + return Err(Error::Protocol(ProtocolViolation::UnexpectedTypeSync)); + } - async move { - 
substream.send(msg).await?; + if ObsAddrs.is_empty() { + return Err(Error::Protocol(ProtocolViolation::NoAddresses)); + } - let sent_time = Instant::now(); + let obs_addrs = ObsAddrs + .into_iter() + .filter_map(|a| match Multiaddr::try_from(a.to_vec()) { + Ok(a) => Some(a), + Err(e) => { + tracing::debug!("Unable to parse multiaddr: {e}"); + None + } + }) + // Filter out relayed addresses. + .filter(|a| { + if a.iter().any(|p| p == Protocol::P2pCircuit) { + tracing::debug!(address=%a, "Dropping relayed address"); + false + } else { + true + } + }) + .collect(); - let proto::HolePunch { type_pb, ObsAddrs } = - substream.next().await.ok_or(UpgradeError::StreamClosed)??; + let msg = proto::HolePunch { + type_pb: proto::Type::SYNC, + ObsAddrs: vec![], + }; - let rtt = sent_time.elapsed(); + stream.send(msg).await?; - match type_pb { - proto::Type::CONNECT => {} - proto::Type::SYNC => return Err(UpgradeError::UnexpectedTypeSync), - } + Delay::new(rtt / 2).await; - let obs_addrs = if ObsAddrs.is_empty() { - return Err(UpgradeError::NoAddresses); - } else { - ObsAddrs - .into_iter() - .filter_map(|a| match Multiaddr::try_from(a.to_vec()) { - Ok(a) => Some(a), - Err(e) => { - log::debug!("Unable to parse multiaddr: {e}"); - None - } - }) - // Filter out relayed addresses. 
- .filter(|a| { - if a.iter().any(|p| p == Protocol::P2pCircuit) { - log::debug!("Dropping relayed address {a}"); - false - } else { - true - } - }) - .collect::>() - }; - - let msg = proto::HolePunch { - type_pb: proto::Type::SYNC, - ObsAddrs: vec![], - }; - - substream.send(msg).await?; - - Delay::new(rtt / 2).await; - - Ok(Connect { obs_addrs }) - } - .boxed() - } + Ok(obs_addrs) } -pub struct Connect { - pub obs_addrs: Vec, +#[derive(Debug, Error)] +pub enum Error { + #[error("IO error")] + Io(#[from] io::Error), + #[error("Remote does not support the `{PROTOCOL_NAME}` protocol")] + Unsupported, + #[error("Protocol error")] + Protocol(#[from] ProtocolViolation), +} + +impl From for Error { + fn from(e: quick_protobuf_codec::Error) -> Self { + Error::Protocol(ProtocolViolation::Codec(e)) + } } #[derive(Debug, Error)] -pub enum UpgradeError { +pub enum ProtocolViolation { #[error(transparent)] Codec(#[from] quick_protobuf_codec::Error), - #[error("Stream closed")] - StreamClosed, #[error("Expected 'status' field to be set.")] MissingStatusField, #[error("Expected 'reservation' field to be set.")] diff --git a/protocols/dcutr/tests/lib.rs b/protocols/dcutr/tests/lib.rs index ba190782b12..9e1f0591e6d 100644 --- a/protocols/dcutr/tests/lib.rs +++ b/protocols/dcutr/tests/lib.rs @@ -22,36 +22,43 @@ use libp2p_core::multiaddr::{Multiaddr, Protocol}; use libp2p_core::transport::upgrade::Version; use libp2p_core::transport::{MemoryTransport, Transport}; use libp2p_dcutr as dcutr; +use libp2p_identify as identify; use libp2p_identity as identity; use libp2p_identity::PeerId; -use libp2p_plaintext::PlainText2Config; +use libp2p_plaintext as plaintext; use libp2p_relay as relay; -use libp2p_swarm::{NetworkBehaviour, Swarm, SwarmBuilder, SwarmEvent}; +use libp2p_swarm::{Config, NetworkBehaviour, Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt as _; use std::time::Duration; +use tracing_subscriber::EnvFilter; #[async_std::test] async fn connect() { - let _ = 
env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let mut relay = build_relay(); let mut dst = build_client(); let mut src = build_client(); - // Have all swarms listen on a local memory address. - let (relay_addr, _) = relay.listen().await; - let (dst_addr, _) = dst.listen().await; + // Have all swarms listen on a local TCP address. + let (_, relay_tcp_addr) = relay.listen().with_tcp_addr_external().await; + let (_, dst_tcp_addr) = dst.listen().await; src.listen().await; + assert!(src.external_addresses().next().is_none()); + assert!(dst.external_addresses().next().is_none()); + let relay_peer_id = *relay.local_peer_id(); let dst_peer_id = *dst.local_peer_id(); async_std::task::spawn(relay.loop_on_next()); - let dst_relayed_addr = relay_addr - .with(Protocol::P2p(relay_peer_id.into())) + let dst_relayed_addr = relay_tcp_addr + .with(Protocol::P2p(relay_peer_id)) .with(Protocol::P2pCircuit) - .with(Protocol::P2p(dst_peer_id.into())); + .with(Protocol::P2p(dst_peer_id)); dst.listen_on(dst_relayed_addr.clone()).unwrap(); wait_for_reservation( @@ -65,54 +72,62 @@ async fn connect() { src.dial_and_wait(dst_relayed_addr.clone()).await; - loop { - match src - .next_swarm_event() - .await - .try_into_behaviour_event() - .unwrap() - { - ClientEvent::Dcutr(dcutr::Event::RemoteInitiatedDirectConnectionUpgrade { - remote_peer_id, - remote_relayed_addr, - }) => { - if remote_peer_id == dst_peer_id && remote_relayed_addr == dst_relayed_addr { - break; - } - } - other => panic!("Unexpected event: {other:?}."), - } - } - - let dst_addr = dst_addr.with(Protocol::P2p(dst_peer_id.into())); - - src.wait(move |e| match e { - SwarmEvent::ConnectionEstablished { endpoint, .. 
} => { - (*endpoint.get_remote_address() == dst_addr).then_some(()) - } - _ => None, - }) - .await; + let dst_addr = dst_tcp_addr.with(Protocol::P2p(dst_peer_id)); + + let established_conn_id = src + .wait(move |e| match e { + SwarmEvent::ConnectionEstablished { + endpoint, + connection_id, + .. + } => (*endpoint.get_remote_address() == dst_addr).then_some(connection_id), + _ => None, + }) + .await; + + let reported_conn_id = src + .wait(move |e| match e { + SwarmEvent::Behaviour(ClientEvent::Dcutr(dcutr::Event { + result: Ok(connection_id), + .. + })) => Some(connection_id), + _ => None, + }) + .await; + + assert_eq!(established_conn_id, reported_conn_id); } -fn build_relay() -> Swarm { +fn build_relay() -> Swarm { Swarm::new_ephemeral(|identity| { let local_peer_id = identity.public().to_peer_id(); - relay::Behaviour::new( - local_peer_id, - relay::Config { - reservation_duration: Duration::from_secs(2), - ..Default::default() - }, - ) + Relay { + relay: relay::Behaviour::new( + local_peer_id, + relay::Config { + reservation_duration: Duration::from_secs(2), + ..Default::default() + }, + ), + identify: identify::Behaviour::new(identify::Config::new( + "/relay".to_owned(), + identity.public(), + )), + } }) } +#[derive(NetworkBehaviour)] +#[behaviour(prelude = "libp2p_swarm::derive_prelude")] +struct Relay { + relay: relay::Behaviour, + identify: identify::Behaviour, +} + fn build_client() -> Swarm { let local_key = identity::Keypair::generate_ed25519(); - let local_public_key = local_key.public(); - let local_peer_id = local_public_key.to_peer_id(); + let local_peer_id = local_key.public().to_peer_id(); let (relay_transport, behaviour) = relay::client::new(local_peer_id); @@ -120,48 +135,31 @@ fn build_client() -> Swarm { .or_transport(MemoryTransport::default()) .or_transport(libp2p_tcp::async_io::Transport::default()) .upgrade(Version::V1) - .authenticate(PlainText2Config { local_public_key }) + .authenticate(plaintext::Config::new(&local_key)) 
.multiplex(libp2p_yamux::Config::default()) .boxed(); - SwarmBuilder::without_executor( + Swarm::new( transport, Client { relay: behaviour, dcutr: dcutr::Behaviour::new(local_peer_id), + identify: identify::Behaviour::new(identify::Config::new( + "/client".to_owned(), + local_key.public(), + )), }, local_peer_id, + Config::with_async_std_executor(), ) - .build() } #[derive(NetworkBehaviour)] -#[behaviour( - out_event = "ClientEvent", - event_process = false, - prelude = "libp2p_swarm::derive_prelude" -)] +#[behaviour(prelude = "libp2p_swarm::derive_prelude")] struct Client { relay: relay::client::Behaviour, dcutr: dcutr::Behaviour, -} - -#[derive(Debug)] -enum ClientEvent { - Relay(relay::client::Event), - Dcutr(dcutr::Event), -} - -impl From for ClientEvent { - fn from(event: relay::client::Event) -> Self { - ClientEvent::Relay(event) - } -} - -impl From for ClientEvent { - fn from(event: dcutr::Event) -> Self { - ClientEvent::Dcutr(event) - } + identify: identify::Behaviour, } async fn wait_for_reservation( @@ -172,14 +170,16 @@ async fn wait_for_reservation( ) { let mut new_listen_addr_for_relayed_addr = false; let mut reservation_req_accepted = false; + let mut addr_observed = false; + loop { + if new_listen_addr_for_relayed_addr && reservation_req_accepted && addr_observed { + break; + } + match client.next_swarm_event().await { - SwarmEvent::NewListenAddr { address, .. } if address != client_addr => {} SwarmEvent::NewListenAddr { address, .. 
} if address == client_addr => { new_listen_addr_for_relayed_addr = true; - if reservation_req_accepted { - break; - } } SwarmEvent::Behaviour(ClientEvent::Relay( relay::client::Event::ReservationReqAccepted { @@ -189,12 +189,20 @@ async fn wait_for_reservation( }, )) if relay_peer_id == peer_id && renewal == is_renewal => { reservation_req_accepted = true; - if new_listen_addr_for_relayed_addr { - break; - } } - SwarmEvent::Dialing(peer_id) if peer_id == relay_peer_id => {} + SwarmEvent::Dialing { + peer_id: Some(peer_id), + .. + } if peer_id == relay_peer_id => {} SwarmEvent::ConnectionEstablished { peer_id, .. } if peer_id == relay_peer_id => {} + SwarmEvent::Behaviour(ClientEvent::Identify(identify::Event::Received { .. })) => { + addr_observed = true; + } + SwarmEvent::Behaviour(ClientEvent::Identify(_)) => {} + SwarmEvent::NewExternalAddrCandidate { .. } => {} + SwarmEvent::ExternalAddrConfirmed { address } if !is_renewal => { + assert_eq!(address, client_addr); + } e => panic!("{e:?}"), } } diff --git a/protocols/floodsub/CHANGELOG.md b/protocols/floodsub/CHANGELOG.md index 06059ff1dbb..8e3cb70ddf1 100644 --- a/protocols/floodsub/CHANGELOG.md +++ b/protocols/floodsub/CHANGELOG.md @@ -1,4 +1,9 @@ -## 0.43.0 - unreleased +## 0.44.0 + +- Change publish to require `data: impl Into` to internally avoid any costly cloning / allocation. + See [PR 4754](https://github.com/libp2p/rust-libp2p/pull/4754). + +## 0.43.0 - Raise MSRV to 1.65. See [PR 3715]. 
diff --git a/protocols/floodsub/Cargo.toml b/protocols/floodsub/Cargo.toml index 00fa57c0272..9376a91cf9a 100644 --- a/protocols/floodsub/Cargo.toml +++ b/protocols/floodsub/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-floodsub" edition = "2021" rust-version = { workspace = true } description = "Floodsub protocol for libp2p" -version = "0.43.0" +version = "0.44.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -11,19 +11,20 @@ keywords = ["peer-to-peer", "libp2p", "networking"] categories = ["network-programming", "asynchronous"] [dependencies] -asynchronous-codec = "0.6" +asynchronous-codec = { workspace = true } cuckoofilter = "0.5.0" fnv = "1.0" -futures = "0.3.28" +bytes = "1.5" +futures = "0.3.30" libp2p-core = { workspace = true } libp2p-swarm = { workspace = true } libp2p-identity = { workspace = true } -log = "0.4" quick-protobuf = "0.8" quick-protobuf-codec = { workspace = true } rand = "0.8" -smallvec = "1.6.1" -thiserror = "1.0.40" +smallvec = "1.11.2" +thiserror = "1.0.51" +tracing = "0.1.37" # Passing arguments to the docsrs builder in order to properly document cfg's. 
# More information: https://docs.rs/about/builds#cross-compiling @@ -31,3 +32,6 @@ thiserror = "1.0.40" all-features = true rustdoc-args = ["--cfg", "docsrs"] rustc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true diff --git a/protocols/floodsub/src/layer.rs b/protocols/floodsub/src/layer.rs index a3673a13ed1..35711408a8d 100644 --- a/protocols/floodsub/src/layer.rs +++ b/protocols/floodsub/src/layer.rs @@ -24,16 +24,16 @@ use crate::protocol::{ }; use crate::topic::Topic; use crate::FloodsubConfig; +use bytes::Bytes; use cuckoofilter::{CuckooError, CuckooFilter}; use fnv::FnvHashSet; use libp2p_core::{Endpoint, Multiaddr}; use libp2p_identity::PeerId; use libp2p_swarm::behaviour::{ConnectionClosed, ConnectionEstablished, FromSwarm}; use libp2p_swarm::{ - dial_opts::DialOpts, ConnectionDenied, ConnectionId, NetworkBehaviour, NotifyHandler, - OneShotHandler, PollParameters, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, + dial_opts::DialOpts, CloseConnection, ConnectionDenied, ConnectionId, NetworkBehaviour, + NotifyHandler, OneShotHandler, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }; -use log::warn; use smallvec::SmallVec; use std::collections::hash_map::{DefaultHasher, HashMap}; use std::task::{Context, Poll}; @@ -146,9 +146,8 @@ impl Floodsub { /// /// Returns true if we were subscribed to this topic. pub fn unsubscribe(&mut self, topic: Topic) -> bool { - let pos = match self.subscribed_topics.iter().position(|t| *t == topic) { - Some(pos) => pos, - None => return false, + let Some(pos) = self.subscribed_topics.iter().position(|t| *t == topic) else { + return false; }; self.subscribed_topics.remove(pos); @@ -171,12 +170,12 @@ impl Floodsub { } /// Publishes a message to the network, if we're subscribed to the topic only. 
- pub fn publish(&mut self, topic: impl Into, data: impl Into>) { + pub fn publish(&mut self, topic: impl Into, data: impl Into) { self.publish_many(iter::once(topic), data) } /// Publishes a message to the network, even if we're not subscribed to the topic. - pub fn publish_any(&mut self, topic: impl Into, data: impl Into>) { + pub fn publish_any(&mut self, topic: impl Into, data: impl Into) { self.publish_many_any(iter::once(topic), data) } @@ -187,7 +186,7 @@ impl Floodsub { pub fn publish_many( &mut self, topic: impl IntoIterator>, - data: impl Into>, + data: impl Into, ) { self.publish_many_inner(topic, data, true) } @@ -196,7 +195,7 @@ impl Floodsub { pub fn publish_many_any( &mut self, topic: impl IntoIterator>, - data: impl Into>, + data: impl Into, ) { self.publish_many_inner(topic, data, false) } @@ -204,7 +203,7 @@ impl Floodsub { fn publish_many_inner( &mut self, topic: impl IntoIterator>, - data: impl Into>, + data: impl Into, check_self_subscriptions: bool, ) { let message = FloodsubMessage { @@ -223,7 +222,7 @@ impl Floodsub { .any(|t| message.topics.iter().any(|u| t == u)); if self_subscribed { if let Err(e @ CuckooError::NotEnoughSpace) = self.received.add(&message) { - warn!( + tracing::warn!( "Message was added to 'received' Cuckoofilter but some \ other message was removed as a consequence: {}", e, @@ -307,7 +306,7 @@ impl Floodsub { peer_id, remaining_established, .. 
- }: ConnectionClosed<::ConnectionHandler>, + }: ConnectionClosed, ) { if remaining_established > 0 { // we only care about peer disconnections @@ -329,7 +328,7 @@ impl Floodsub { impl NetworkBehaviour for Floodsub { type ConnectionHandler = OneShotHandler; - type OutEvent = FloodsubEvent; + type ToSwarm = FloodsubEvent; fn handle_established_inbound_connection( &mut self, @@ -354,13 +353,21 @@ impl NetworkBehaviour for Floodsub { fn on_connection_handler_event( &mut self, propagation_source: PeerId, - _connection_id: ConnectionId, + connection_id: ConnectionId, event: THandlerOutEvent, ) { // We ignore successful sends or timeouts. let event = match event { - InnerMessage::Rx(event) => event, - InnerMessage::Sent => return, + Ok(InnerMessage::Rx(event)) => event, + Ok(InnerMessage::Sent) => return, + Err(e) => { + tracing::debug!("Failed to send floodsub message: {e}"); + self.events.push_back(ToSwarm::CloseConnection { + peer_id: propagation_source, + connection: CloseConnection::One(connection_id), + }); + return; + } }; // Update connected peers topics @@ -406,7 +413,7 @@ impl NetworkBehaviour for Floodsub { Ok(false) => continue, // Message already existed. Err(e @ CuckooError::NotEnoughSpace) => { // Message added, but some other removed. 
- warn!( + tracing::warn!( "Message was added to 'received' Cuckoofilter but some \ other message was removed as a consequence: {}", e, @@ -466,11 +473,8 @@ impl NetworkBehaviour for Floodsub { } } - fn poll( - &mut self, - _: &mut Context<'_>, - _: &mut impl PollParameters, - ) -> Poll>> { + #[tracing::instrument(level = "trace", name = "NetworkBehaviour::poll", skip(self))] + fn poll(&mut self, _: &mut Context<'_>) -> Poll>> { if let Some(event) = self.events.pop_front() { return Poll::Ready(event); } @@ -478,7 +482,7 @@ impl NetworkBehaviour for Floodsub { Poll::Pending } - fn on_swarm_event(&mut self, event: FromSwarm) { + fn on_swarm_event(&mut self, event: FromSwarm) { match event { FromSwarm::ConnectionEstablished(connection_established) => { self.on_connection_established(connection_established) @@ -486,16 +490,7 @@ impl NetworkBehaviour for Floodsub { FromSwarm::ConnectionClosed(connection_closed) => { self.on_connection_closed(connection_closed) } - FromSwarm::AddressChange(_) - | FromSwarm::DialFailure(_) - | FromSwarm::ListenFailure(_) - | FromSwarm::NewListener(_) - | FromSwarm::NewListenAddr(_) - | FromSwarm::ExpiredListenAddr(_) - | FromSwarm::ListenerError(_) - | FromSwarm::ListenerClosed(_) - | FromSwarm::NewExternalAddr(_) - | FromSwarm::ExpiredExternalAddr(_) => {} + _ => {} } } } diff --git a/protocols/floodsub/src/protocol.rs b/protocols/floodsub/src/protocol.rs index ebd3d8b3bc8..edc842be8ce 100644 --- a/protocols/floodsub/src/protocol.rs +++ b/protocols/floodsub/src/protocol.rs @@ -21,6 +21,7 @@ use crate::proto; use crate::topic::Topic; use asynchronous_codec::Framed; +use bytes::Bytes; use futures::{ io::{AsyncRead, AsyncWrite}, Future, @@ -81,7 +82,7 @@ where messages.push(FloodsubMessage { source: PeerId::from_bytes(&publish.from.unwrap_or_default()) .map_err(|_| FloodsubError::InvalidPeerId)?, - data: publish.data.unwrap_or_default(), + data: publish.data.unwrap_or_default().into(), sequence_number: publish.seqno.unwrap_or_default(), 
topics: publish.topic_ids.into_iter().map(Topic::new).collect(),
         });
 
@@ -172,7 +173,7 @@ impl FloodsubRpc {
             .into_iter()
             .map(|msg| proto::Message {
                 from: Some(msg.source.to_bytes()),
-                data: Some(msg.data),
+                data: Some(msg.data.to_vec()),
                 seqno: Some(msg.sequence_number),
                 topic_ids: msg.topics.into_iter().map(|topic| topic.into()).collect(),
             })
@@ -197,7 +198,7 @@ pub struct FloodsubMessage {
     pub source: PeerId,
 
     /// Content of the message. Its meaning is out of scope of this library.
-    pub data: Vec,
+    pub data: Bytes,
 
     /// An incrementing sequence number.
     pub sequence_number: Vec,
diff --git a/protocols/gossipsub/CHANGELOG.md b/protocols/gossipsub/CHANGELOG.md
index b3885d66600..5ff4cfa27d6 100644
--- a/protocols/gossipsub/CHANGELOG.md
+++ b/protocols/gossipsub/CHANGELOG.md
@@ -1,4 +1,46 @@
-## 0.45.0 - unreleased
+## 0.46.1
+
+- Deprecate `Rpc` in preparation for removing it from the public API because it is an internal type.
+  See [PR 4833](https://github.com/libp2p/rust-libp2p/pull/4833).
+
+## 0.46.0
+
+- Remove `fast_message_id_fn` mechanism from `Config`.
+  See [PR 4285](https://github.com/libp2p/rust-libp2p/pull/4285).
+- Remove deprecated `gossipsub::Config::idle_timeout` in favor of `SwarmBuilder::idle_connection_timeout`.
+  See [PR 4642](https://github.com/libp2p/rust-libp2p/pull/4642).
+- Return typed error from config builder.
+  See [PR 4445](https://github.com/libp2p/rust-libp2p/pull/4445).
+- Process outbound stream before inbound stream in `EnabledHandler::poll(..)`.
+  See [PR 4778](https://github.com/libp2p/rust-libp2p/pull/4778).
+
+## 0.45.2
+
+- Deprecate `gossipsub::Config::idle_timeout` in favor of `SwarmBuilder::idle_connection_timeout`.
+  See [PR 4648].
+
+
+
+[PR 4648]: (https://github.com/libp2p/rust-libp2p/pull/4648)
+
+
+
+## 0.45.1
+
+- Add getter function to obtain `TopicScoreParams`.
+  See [PR 4231].
+
+[PR 4231]: https://github.com/libp2p/rust-libp2p/pull/4231
+
+## 0.45.0
 
 - Raise MSRV to 1.65. See [PR 3715].
diff --git a/protocols/gossipsub/Cargo.toml b/protocols/gossipsub/Cargo.toml index 9cc46dc3664..9264a866482 100644 --- a/protocols/gossipsub/Cargo.toml +++ b/protocols/gossipsub/Cargo.toml @@ -3,49 +3,53 @@ name = "libp2p-gossipsub" edition = "2021" rust-version = { workspace = true } description = "Gossipsub protocol for libp2p" -version = "0.45.0" +version = "0.46.1" authors = ["Age Manning "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" keywords = ["peer-to-peer", "libp2p", "networking"] categories = ["network-programming", "asynchronous"] +[features] +wasm-bindgen = ["getrandom/js", "instant/wasm-bindgen"] + [dependencies] -either = "1.5" -libp2p-swarm = { workspace = true } -libp2p-core = { workspace = true } -libp2p-identity = { workspace = true } -bytes = "1.4" -byteorder = "1.3.4" +asynchronous-codec = { workspace = true } +base64 = "0.21.5" +byteorder = "1.5.0" +bytes = "1.5" +either = "1.9" fnv = "1.0.7" -futures = "0.3.28" -rand = "0.8" -asynchronous-codec = "0.6" -unsigned-varint = { version = "0.7.0", features = ["asynchronous_codec"] } -log = "0.4.11" -sha2 = "0.10.0" -base64 = "0.21.0" -smallvec = "1.6.1" +futures = "0.3.30" +futures-ticker = "0.0.3" +getrandom = "0.2.11" +hex_fmt = "0.3.0" +instant = "0.1.12" +libp2p-core = { workspace = true } +libp2p-identity = { workspace = true, features = ["rand"] } +libp2p-swarm = { workspace = true } quick-protobuf = "0.8" quick-protobuf-codec = { workspace = true } -hex_fmt = "0.3.0" -regex = "1.8.1" +rand = "0.8" +regex = "1.10.2" serde = { version = "1", optional = true, features = ["derive"] } -wasm-timer = "0.2.5" -instant = "0.1.11" +sha2 = "0.10.8" +smallvec = "1.11.2" +tracing = "0.1.37" void = "1.0.2" + # Metrics dependencies -prometheus-client = "0.20.0" +prometheus-client = { workspace = true } [dev-dependencies] async-std = { version = "1.6.3", features = ["unstable"] } -env_logger = "0.10.0" hex = "0.4.2" libp2p-core = { workspace = true } -libp2p-mplex = { workspace = 
true } +libp2p-yamux = { workspace = true } libp2p-noise = { workspace = true } -libp2p-swarm-test = { workspace = true } +libp2p-swarm-test = { path = "../../swarm-test" } quickcheck = { workspace = true } +tracing-subscriber = { version = "0.3", features = ["env-filter"] } # Passing arguments to the docsrs builder in order to properly document cfg's. # More information: https://docs.rs/about/builds#cross-compiling @@ -53,3 +57,6 @@ quickcheck = { workspace = true } all-features = true rustdoc-args = ["--cfg", "docsrs"] rustc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true diff --git a/protocols/gossipsub/src/backoff.rs b/protocols/gossipsub/src/backoff.rs index e6f05047a3e..b4a40b91a74 100644 --- a/protocols/gossipsub/src/backoff.rs +++ b/protocols/gossipsub/src/backoff.rs @@ -20,13 +20,13 @@ //! Data structure for efficiently storing known back-off's when pruning peers. use crate::topic::TopicHash; +use instant::Instant; use libp2p_identity::PeerId; use std::collections::{ hash_map::{Entry, HashMap}, HashSet, }; use std::time::Duration; -use wasm_timer::Instant; #[derive(Copy, Clone)] struct HeartbeatIndex(usize); @@ -86,12 +86,7 @@ impl BackoffStorage { backoffs_by_heartbeat[index].insert(pair); HeartbeatIndex(index) }; - match self - .backoffs - .entry(topic.clone()) - .or_insert_with(HashMap::new) - .entry(*peer) - { + match self.backoffs.entry(topic.clone()).or_default().entry(*peer) { Entry::Occupied(mut o) => { let (backoff, index) = o.get(); if backoff < &instant { diff --git a/protocols/gossipsub/src/behaviour.rs b/protocols/gossipsub/src/behaviour.rs index c2d6b8da3fe..24a32de4cc7 100644 --- a/protocols/gossipsub/src/behaviour.rs +++ b/protocols/gossipsub/src/behaviour.rs @@ -30,20 +30,20 @@ use std::{ }; use futures::StreamExt; -use log::{debug, error, trace, warn}; +use futures_ticker::Ticker; use prometheus_client::registry::Registry; use rand::{seq::SliceRandom, thread_rng}; +use instant::Instant; use libp2p_core::{multiaddr::Protocol::Ip4, 
multiaddr::Protocol::Ip6, Endpoint, Multiaddr}; use libp2p_identity::Keypair; use libp2p_identity::PeerId; use libp2p_swarm::{ behaviour::{AddressChange, ConnectionClosed, ConnectionEstablished, FromSwarm}, dial_opts::DialOpts, - ConnectionDenied, ConnectionId, NetworkBehaviour, NotifyHandler, PollParameters, THandler, - THandlerInEvent, THandlerOutEvent, ToSwarm, + ConnectionDenied, ConnectionId, NetworkBehaviour, NotifyHandler, THandler, THandlerInEvent, + THandlerOutEvent, ToSwarm, }; -use wasm_timer::Instant; use crate::backoff::BackoffStorage; use crate::config::{Config, ValidationMode}; @@ -54,20 +54,19 @@ use crate::metrics::{Churn, Config as MetricsConfig, Inclusion, Metrics, Penalty use crate::peer_score::{PeerScore, PeerScoreParams, PeerScoreThresholds, RejectReason}; use crate::protocol::SIGNING_PREFIX; use crate::subscription_filter::{AllowAllSubscriptionFilter, TopicSubscriptionFilter}; -use crate::time_cache::{DuplicateCache, TimeCache}; +use crate::time_cache::DuplicateCache; use crate::topic::{Hasher, Topic, TopicHash}; use crate::transform::{DataTransform, IdentityTransform}; use crate::types::{ - ControlAction, FastMessageId, Message, MessageAcceptance, MessageId, PeerInfo, RawMessage, - Subscription, SubscriptionAction, + ControlAction, Message, MessageAcceptance, MessageId, PeerInfo, RawMessage, Subscription, + SubscriptionAction, }; -use crate::types::{PeerConnections, PeerKind, Rpc}; +use crate::types::{PeerConnections, PeerKind, RpcOut}; use crate::{rpc_proto::proto, TopicScoreParams}; use crate::{PublishError, SubscriptionError, ValidationError}; use instant::SystemTime; use quick_protobuf::{MessageWrite, Writer}; use std::{cmp::Ordering::Equal, fmt::Debug}; -use wasm_timer::Interval; #[cfg(test)] mod tests; @@ -289,7 +288,7 @@ pub struct Behaviour { mcache: MessageCache, /// Heartbeat interval stream. 
- heartbeat: Interval, + heartbeat: Ticker, /// Number of heartbeats since the beginning of time; this allows us to amortize some resource /// clean up -- eg backoff clean up. @@ -307,7 +306,7 @@ pub struct Behaviour { /// Stores optional peer score data together with thresholds, decay interval and gossip /// promises. - peer_score: Option<(PeerScore, PeerScoreThresholds, Interval, GossipPromises)>, + peer_score: Option<(PeerScore, PeerScoreThresholds, Ticker, GossipPromises)>, /// Counts the number of `IHAVE` received from each peer since the last heartbeat. count_received_ihave: HashMap, @@ -323,9 +322,6 @@ pub struct Behaviour { /// our own messages back if the messages are anonymous or use a random author. published_message_ids: DuplicateCache, - /// Short term cache for fast message ids mapping them to the real message ids - fast_message_id_cache: TimeCache, - /// The filter used to handle message subscriptions. subscription_filter: F, @@ -446,7 +442,6 @@ where control_pool: HashMap::new(), publish_config: privacy.into(), duplicate_cache: DuplicateCache::new(config.duplicate_cache_time()), - fast_message_id_cache: TimeCache::new(config.duplicate_cache_time()), topic_peers: HashMap::new(), peer_topics: HashMap::new(), explicit_peers: HashSet::new(), @@ -460,9 +455,9 @@ where config.backoff_slack(), ), mcache: MessageCache::new(config.history_gossip(), config.history_length()), - heartbeat: Interval::new_at( - Instant::now() + config.heartbeat_initial_delay(), + heartbeat: Ticker::new_with_next( config.heartbeat_interval(), + config.heartbeat_initial_delay(), ), heartbeat_ticks: 0, px_peers: HashSet::new(), @@ -527,41 +522,28 @@ where /// Returns [`Ok(true)`] if the subscription worked. Returns [`Ok(false)`] if we were already /// subscribed. 
pub fn subscribe(&mut self, topic: &Topic) -> Result { - debug!("Subscribing to topic: {}", topic); + tracing::debug!(%topic, "Subscribing to topic"); let topic_hash = topic.hash(); if !self.subscription_filter.can_subscribe(&topic_hash) { return Err(SubscriptionError::NotAllowed); } if self.mesh.get(&topic_hash).is_some() { - debug!("Topic: {} is already in the mesh.", topic); + tracing::debug!(%topic, "Topic is already in the mesh"); return Ok(false); } // send subscription request to all peers - let peer_list = self.peer_topics.keys().cloned().collect::>(); - if !peer_list.is_empty() { - let event = Rpc { - messages: Vec::new(), - subscriptions: vec![Subscription { - topic_hash: topic_hash.clone(), - action: SubscriptionAction::Subscribe, - }], - control_msgs: Vec::new(), - } - .into_protobuf(); - - for peer in peer_list { - debug!("Sending SUBSCRIBE to peer: {:?}", peer); - self.send_message(peer, event.clone()) - .map_err(SubscriptionError::PublishError)?; - } + for peer in self.peer_topics.keys().copied().collect::>() { + tracing::debug!(%peer, "Sending SUBSCRIBE to peer"); + let event = RpcOut::Subscribe(topic_hash.clone()); + self.send_message(peer, event); } // call JOIN(topic) // this will add new peers to the mesh for the topic self.join(&topic_hash); - debug!("Subscribed to topic: {}", topic); + tracing::debug!(%topic, "Subscribed to topic"); Ok(true) } @@ -569,39 +551,27 @@ where /// /// Returns [`Ok(true)`] if we were subscribed to this topic. 
pub fn unsubscribe(&mut self, topic: &Topic) -> Result { - debug!("Unsubscribing from topic: {}", topic); + tracing::debug!(%topic, "Unsubscribing from topic"); let topic_hash = topic.hash(); if self.mesh.get(&topic_hash).is_none() { - debug!("Already unsubscribed from topic: {:?}", topic_hash); + tracing::debug!(topic=%topic_hash, "Already unsubscribed from topic"); // we are not subscribed return Ok(false); } // announce to all peers - let peer_list = self.peer_topics.keys().cloned().collect::>(); - if !peer_list.is_empty() { - let event = Rpc { - messages: Vec::new(), - subscriptions: vec![Subscription { - topic_hash: topic_hash.clone(), - action: SubscriptionAction::Unsubscribe, - }], - control_msgs: Vec::new(), - } - .into_protobuf(); - - for peer in peer_list { - debug!("Sending UNSUBSCRIBE to peer: {}", peer.to_string()); - self.send_message(peer, event.clone())?; - } + for peer in self.peer_topics.keys().copied().collect::>() { + tracing::debug!(%peer, "Sending UNSUBSCRIBE to peer"); + let event = RpcOut::Unsubscribe(topic_hash.clone()); + self.send_message(peer, event); } // call LEAVE(topic) // this will remove the topic from the mesh self.leave(&topic_hash); - debug!("Unsubscribed from topic: {:?}", topic_hash); + tracing::debug!(topic=%topic_hash, "Unsubscribed from topic"); Ok(true) } @@ -629,15 +599,8 @@ where topic: raw_message.topic.clone(), }); - let event = Rpc { - subscriptions: Vec::new(), - messages: vec![raw_message.clone()], - control_msgs: Vec::new(), - } - .into_protobuf(); - // check that the size doesn't exceed the max transmission size - if event.get_size() > self.config.max_transmit_size() { + if raw_message.raw_protobuf_len() > self.config.max_transmit_size() { return Err(PublishError::MessageTooLarge); } @@ -645,34 +608,71 @@ where if self.duplicate_cache.contains(&msg_id) { // This message has already been seen. We don't re-publish messages that have already // been published on the network. 
- warn!( - "Not publishing a message that has already been published. Msg-id {}", - msg_id + tracing::warn!( + message=%msg_id, + "Not publishing a message that has already been published" ); return Err(PublishError::Duplicate); } - trace!("Publishing message: {:?}", msg_id); + tracing::trace!(message=%msg_id, "Publishing message"); let topic_hash = raw_message.topic.clone(); - // If we are not flood publishing forward the message to mesh peers. - let mesh_peers_sent = !self.config.flood_publish() - && self.forward_msg(&msg_id, raw_message.clone(), None, HashSet::new())?; - let mut recipient_peers = HashSet::new(); if let Some(set) = self.topic_peers.get(&topic_hash) { if self.config.flood_publish() { // Forward to all peers above score and all explicit peers - recipient_peers.extend( - set.iter() - .filter(|p| { - self.explicit_peers.contains(*p) - || !self.score_below_threshold(p, |ts| ts.publish_threshold).0 - }) - .cloned(), - ); + recipient_peers.extend(set.iter().filter(|p| { + self.explicit_peers.contains(*p) + || !self.score_below_threshold(p, |ts| ts.publish_threshold).0 + })); } else { + match self.mesh.get(&raw_message.topic) { + // Mesh peers + Some(mesh_peers) => { + recipient_peers.extend(mesh_peers); + } + // Gossipsub peers + None => { + tracing::debug!(topic=%topic_hash, "Topic not in the mesh"); + // If we have fanout peers add them to the map. 
+ if self.fanout.contains_key(&topic_hash) { + for peer in self.fanout.get(&topic_hash).expect("Topic must exist") { + recipient_peers.insert(*peer); + } + } else { + // We have no fanout peers, select mesh_n of them and add them to the fanout + let mesh_n = self.config.mesh_n(); + let new_peers = get_random_peers( + &self.topic_peers, + &self.connected_peers, + &topic_hash, + mesh_n, + { + |p| { + !self.explicit_peers.contains(p) + && !self + .score_below_threshold(p, |pst| { + pst.publish_threshold + }) + .0 + } + }, + ); + // Add the new peers to the fanout and recipient peers + self.fanout.insert(topic_hash.clone(), new_peers.clone()); + for peer in new_peers { + tracing::debug!(%peer, "Peer added to fanout"); + recipient_peers.insert(peer); + } + } + // We are publishing to fanout peers - update the time we published + self.fanout_last_pub + .insert(topic_hash.clone(), Instant::now()); + } + } + // Explicit peers for peer in &self.explicit_peers { if set.contains(peer) { @@ -690,54 +690,17 @@ where recipient_peers.insert(*peer); } } - - // Gossipsub peers - if self.mesh.get(&topic_hash).is_none() { - debug!("Topic: {:?} not in the mesh", topic_hash); - // If we have fanout peers add them to the map. 
- if self.fanout.contains_key(&topic_hash) { - for peer in self.fanout.get(&topic_hash).expect("Topic must exist") { - recipient_peers.insert(*peer); - } - } else { - // We have no fanout peers, select mesh_n of them and add them to the fanout - let mesh_n = self.config.mesh_n(); - let new_peers = get_random_peers( - &self.topic_peers, - &self.connected_peers, - &topic_hash, - mesh_n, - { - |p| { - !self.explicit_peers.contains(p) - && !self - .score_below_threshold(p, |pst| pst.publish_threshold) - .0 - } - }, - ); - // Add the new peers to the fanout and recipient peers - self.fanout.insert(topic_hash.clone(), new_peers.clone()); - for peer in new_peers { - debug!("Peer added to fanout: {:?}", peer); - recipient_peers.insert(peer); - } - } - // We are publishing to fanout peers - update the time we published - self.fanout_last_pub - .insert(topic_hash.clone(), Instant::now()); - } } } - if recipient_peers.is_empty() && !mesh_peers_sent { + if recipient_peers.is_empty() { return Err(PublishError::InsufficientPeers); } // If the message isn't a duplicate and we have sent it to some peers add it to the // duplicate cache and memcache. self.duplicate_cache.insert(msg_id.clone()); - self.mcache.put(&msg_id, raw_message); + self.mcache.put(&msg_id, raw_message.clone()); // If the message is anonymous or has a random author add it to the published message ids // cache. @@ -748,17 +711,12 @@ where } // Send to peers we know are subscribed to the topic. 
- let msg_bytes = event.get_size(); for peer_id in recipient_peers.iter() { - trace!("Sending message to peer: {:?}", peer_id); - self.send_message(*peer_id, event.clone())?; - - if let Some(m) = self.metrics.as_mut() { - m.msg_sent(&topic_hash, msg_bytes); - } + tracing::trace!(peer=%peer_id, "Sending message to peer"); + self.send_message(*peer_id, RpcOut::Publish(raw_message.clone())); } - debug!("Published message: {:?}", &msg_id); + tracing::debug!(message=%msg_id, "Published message"); if let Some(metrics) = self.metrics.as_mut() { metrics.register_published_message(&topic_hash); @@ -799,9 +757,9 @@ where (raw_message.clone(), originating_peers) } None => { - warn!( - "Message not in cache. Ignoring forwarding. Message Id: {}", - msg_id + tracing::warn!( + message=%msg_id, + "Message not in cache. Ignoring forwarding" ); if let Some(metrics) = self.metrics.as_mut() { metrics.memcache_miss(); @@ -846,14 +804,14 @@ where } Ok(true) } else { - warn!("Rejected message not in cache. Message Id: {}", msg_id); + tracing::warn!(message=%msg_id, "Rejected message not in cache"); Ok(false) } } /// Adds a new peer to the list of explicitly connected peers. pub fn add_explicit_peer(&mut self, peer_id: &PeerId) { - debug!("Adding explicit peer {}", peer_id); + tracing::debug!(peer=%peer_id, "Adding explicit peer"); self.explicit_peers.insert(*peer_id); @@ -863,7 +821,7 @@ where /// This removes the peer from explicitly connected peers, note that this does not disconnect /// the peer. pub fn remove_explicit_peer(&mut self, peer_id: &PeerId) { - debug!("Removing explicit peer {}", peer_id); + tracing::debug!(peer=%peer_id, "Removing explicit peer"); self.explicit_peers.remove(peer_id); } @@ -871,14 +829,14 @@ where /// created by this peer will be rejected. 
pub fn blacklist_peer(&mut self, peer_id: &PeerId) {
        if self.blacklisted_peers.insert(*peer_id) {
-            debug!("Peer has been blacklisted: {}", peer_id);
+            tracing::debug!(peer=%peer_id, "Peer has been blacklisted");
        }
    }

    /// Removes a peer from the blacklist if it has previously been blacklisted.
    pub fn remove_blacklisted_peer(&mut self, peer_id: &PeerId) {
        if self.blacklisted_peers.remove(peer_id) {
-            debug!("Peer has been removed from the blacklist: {}", peer_id);
+            tracing::debug!(peer=%peer_id, "Peer has been removed from the blacklist");
        }
    }

@@ -908,7 +866,7 @@ where
            return Err("Peer score set twice".into());
        }

-        let interval = Interval::new(params.decay_interval);
+        let interval = Ticker::new(params.decay_interval);
        let peer_score = PeerScore::new_with_message_delivery_time_callback(params, callback);
        self.peer_score = Some((peer_score, threshold, interval, GossipPromises::default()));
        Ok(())
@@ -930,6 +888,11 @@ where
        }
    }

+    /// Returns the scoring parameters for a topic if they exist.
+    pub fn get_topic_params(&self, topic: &Topic) -> Option<&TopicScoreParams> {
+        self.peer_score.as_ref()?.0.get_topic_params(&topic.hash())
+    }
+
    /// Sets the application specific score for a peer. Returns true if scoring is active and
    /// the peer is connected or if the score of the peer is not yet expired, false otherwise.
    pub fn set_application_score(&mut self, peer_id: &PeerId, new_score: f64) -> bool {
@@ -942,11 +905,11 @@ where

    /// Gossipsub JOIN(topic) - adds topic peers to mesh and sends them GRAFT messages.
fn join(&mut self, topic_hash: &TopicHash) { - debug!("Running JOIN for topic: {:?}", topic_hash); + tracing::debug!(topic=%topic_hash, "Running JOIN for topic"); // if we are already in the mesh, return if self.mesh.contains_key(topic_hash) { - debug!("JOIN: The topic is already in the mesh, ignoring JOIN"); + tracing::debug!(topic=%topic_hash, "JOIN: The topic is already in the mesh, ignoring JOIN"); return; } @@ -959,9 +922,9 @@ where // check if we have mesh_n peers in fanout[topic] and add them to the mesh if we do, // removing the fanout entry. if let Some((_, mut peers)) = self.fanout.remove_entry(topic_hash) { - debug!( - "JOIN: Removing peers from the fanout for topic: {:?}", - topic_hash + tracing::debug!( + topic=%topic_hash, + "JOIN: Removing peers from the fanout for topic" ); // remove explicit peers, peers with negative scores, and backoffed peers @@ -974,11 +937,12 @@ where // Add up to mesh_n of them them to the mesh // NOTE: These aren't randomly added, currently FIFO let add_peers = std::cmp::min(peers.len(), self.config.mesh_n()); - debug!( - "JOIN: Adding {:?} peers from the fanout for topic: {:?}", - add_peers, topic_hash + tracing::debug!( + topic=%topic_hash, + "JOIN: Adding {:?} peers from the fanout for topic", + add_peers ); - added_peers.extend(peers.iter().cloned().take(add_peers)); + added_peers.extend(peers.iter().take(add_peers)); self.mesh.insert( topic_hash.clone(), @@ -1011,14 +975,11 @@ where ); added_peers.extend(new_peers.clone()); // add them to the mesh - debug!( + tracing::debug!( "JOIN: Inserting {:?} random peers into the mesh", new_peers.len() ); - let mesh_peers = self - .mesh - .entry(topic_hash.clone()) - .or_insert_with(Default::default); + let mesh_peers = self.mesh.entry(topic_hash.clone()).or_default(); mesh_peers.extend(new_peers); } @@ -1029,7 +990,7 @@ where for peer_id in added_peers { // Send a GRAFT control message - debug!("JOIN: Sending Graft message to peer: {:?}", peer_id); + 
tracing::debug!(peer=%peer_id, "JOIN: Sending Graft message to peer"); if let Some((peer_score, ..)) = &mut self.peer_score { peer_score.graft(&peer_id, topic_hash.clone()); } @@ -1057,7 +1018,7 @@ where m.set_mesh_peers(topic_hash, mesh_peers) } - debug!("Completed JOIN for topic: {:?}", topic_hash); + tracing::debug!(topic=%topic_hash, "Completed JOIN for topic"); } /// Creates a PRUNE gossipsub action. @@ -1074,7 +1035,7 @@ where match self.connected_peers.get(peer).map(|v| &v.kind) { Some(PeerKind::Floodsub) => { - error!("Attempted to prune a Floodsub peer"); + tracing::error!("Attempted to prune a Floodsub peer"); } Some(PeerKind::Gossipsub) => { // GossipSub v1.0 -- no peer exchange, the peer won't be able to parse it anyway @@ -1085,7 +1046,7 @@ where }; } None => { - error!("Attempted to Prune an unknown peer"); + tracing::error!("Attempted to Prune an unknown peer"); } _ => {} // Gossipsub 1.1 peer perform the `Prune` } @@ -1124,7 +1085,7 @@ where /// Gossipsub LEAVE(topic) - Notifies mesh\[topic\] peers with PRUNE messages. fn leave(&mut self, topic_hash: &TopicHash) { - debug!("Running LEAVE for topic {:?}", topic_hash); + tracing::debug!(topic=%topic_hash, "Running LEAVE for topic"); // If our mesh contains the topic, send prune to peers and delete it from the mesh if let Some((_, peers)) = self.mesh.remove_entry(topic_hash) { @@ -1133,7 +1094,7 @@ where } for peer in peers { // Send a PRUNE control message - debug!("LEAVE: Sending PRUNE to peer: {:?}", peer); + tracing::debug!(%peer, "LEAVE: Sending PRUNE to peer"); let on_unsubscribe = true; let control = self.make_prune(topic_hash, &peer, self.config.do_px(), on_unsubscribe); @@ -1150,14 +1111,14 @@ where ); } } - debug!("Completed LEAVE for topic: {:?}", topic_hash); + tracing::debug!(topic=%topic_hash, "Completed LEAVE for topic"); } /// Checks if the given peer is still connected and if not dials the peer again. 
fn check_explicit_peer_connection(&mut self, peer_id: &PeerId) { if !self.peer_topics.contains_key(peer_id) { // Connect to peer - debug!("Connecting to explicit peer {:?}", peer_id); + tracing::debug!(peer=%peer_id, "Connecting to explicit peer"); self.events.push_back(ToSwarm::Dial { opts: DialOpts::peer_id(*peer_id).build(), }); @@ -1175,7 +1136,7 @@ where } fn score_below_threshold_from_scores( - peer_score: &Option<(PeerScore, PeerScoreThresholds, Interval, GossipPromises)>, + peer_score: &Option<(PeerScore, PeerScoreThresholds, Ticker, GossipPromises)>, peer_id: &PeerId, threshold: impl Fn(&PeerScoreThresholds) -> f64, ) -> (bool, f64) { @@ -1195,9 +1156,10 @@ where fn handle_ihave(&mut self, peer_id: &PeerId, ihave_msgs: Vec<(TopicHash, Vec)>) { // We ignore IHAVE gossip from any peer whose score is below the gossip threshold if let (true, score) = self.score_below_threshold(peer_id, |pst| pst.gossip_threshold) { - debug!( - "IHAVE: ignoring peer {:?} with score below threshold [score = {}]", - peer_id, score + tracing::debug!( + peer=%peer_id, + %score, + "IHAVE: ignoring peer with score below threshold" ); return; } @@ -1206,25 +1168,27 @@ where let peer_have = self.count_received_ihave.entry(*peer_id).or_insert(0); *peer_have += 1; if *peer_have > self.config.max_ihave_messages() { - debug!( - "IHAVE: peer {} has advertised too many times ({}) within this heartbeat \ + tracing::debug!( + peer=%peer_id, + "IHAVE: peer has advertised too many times ({}) within this heartbeat \ interval; ignoring", - peer_id, *peer_have + *peer_have ); return; } if let Some(iasked) = self.count_sent_iwant.get(peer_id) { if *iasked >= self.config.max_ihave_length() { - debug!( - "IHAVE: peer {} has already advertised too many messages ({}); ignoring", - peer_id, *iasked + tracing::debug!( + peer=%peer_id, + "IHAVE: peer has already advertised too many messages ({}); ignoring", + *iasked ); return; } } - trace!("Handling IHAVE for peer: {:?}", peer_id); + 
tracing::trace!(peer=%peer_id, "Handling IHAVE for peer"); let mut iwant_ids = HashSet::new(); @@ -1246,9 +1210,9 @@ where for (topic, ids) in ihave_msgs { // only process the message if we are subscribed if !self.mesh.contains_key(&topic) { - debug!( - "IHAVE: Ignoring IHAVE - Not subscribed to topic: {:?}", - topic + tracing::debug!( + %topic, + "IHAVE: Ignoring IHAVE - Not subscribed to topic" ); continue; } @@ -1272,11 +1236,11 @@ where } // Send the list of IWANT control messages - debug!( - "IHAVE: Asking for {} out of {} messages from {}", + tracing::debug!( + peer=%peer_id, + "IHAVE: Asking for {} out of {} messages from peer", iask, - iwant_ids.len(), - peer_id + iwant_ids.len() ); // Ask in random order @@ -1299,9 +1263,9 @@ where Instant::now() + self.config.iwant_followup_time(), ); } - trace!( - "IHAVE: Asking for the following messages from {}: {:?}", - peer_id, + tracing::trace!( + peer=%peer_id, + "IHAVE: Asking for the following messages from peer: {:?}", iwant_ids_vec ); @@ -1313,7 +1277,7 @@ where }, ); } - trace!("Completed IHAVE handling for peer: {:?}", peer_id); + tracing::trace!(peer=%peer_id, "Completed IHAVE handling for peer"); } /// Handles an IWANT control message. Checks our cache of messages. 
If the message exists it is
@@ -1321,68 +1285,43 @@ where
    fn handle_iwant(&mut self, peer_id: &PeerId, iwant_msgs: Vec) {
        // We ignore IWANT gossip from any peer whose score is below the gossip threshold
        if let (true, score) = self.score_below_threshold(peer_id, |pst| pst.gossip_threshold) {
-            debug!(
-                "IWANT: ignoring peer {:?} with score below threshold [score = {}]",
-                peer_id, score
+            tracing::debug!(
+                peer=%peer_id,
+                "IWANT: ignoring peer with score below threshold [score = {}]",
+                score
            );
            return;
        }

-        debug!("Handling IWANT for peer: {:?}", peer_id);
-        // build a hashmap of available messages
-        let mut cached_messages = HashMap::new();
+        tracing::debug!(peer=%peer_id, "Handling IWANT for peer");

        for id in iwant_msgs {
-            // If we have it and the IHAVE count is not above the threshold, add it do the
-            // cached_messages mapping
-            if let Some((msg, count)) = self.mcache.get_with_iwant_counts(&id, peer_id) {
+            // If we have it and the IHAVE count is not above the threshold,
+            // forward the message.
+ if let Some((msg, count)) = self + .mcache + .get_with_iwant_counts(&id, peer_id) + .map(|(msg, count)| (msg.clone(), count)) + { if count > self.config.gossip_retransimission() { - debug!( - "IWANT: Peer {} has asked for message {} too many times; ignoring \ - request", - peer_id, &id + tracing::debug!( + peer=%peer_id, + message=%id, + "IWANT: Peer has asked for message too many times; ignoring request" ); } else { - cached_messages.insert(id.clone(), msg.clone()); + tracing::debug!(peer=%peer_id, "IWANT: Sending cached messages to peer"); + self.send_message(*peer_id, RpcOut::Forward(msg)); } } } - - if !cached_messages.is_empty() { - debug!("IWANT: Sending cached messages to peer: {:?}", peer_id); - // Send the messages to the peer - let message_list: Vec<_> = cached_messages.into_iter().map(|entry| entry.1).collect(); - - let topics = message_list - .iter() - .map(|message| message.topic.clone()) - .collect::>(); - - let message = Rpc { - subscriptions: Vec::new(), - messages: message_list, - control_msgs: Vec::new(), - } - .into_protobuf(); - - let msg_bytes = message.get_size(); - - if self.send_message(*peer_id, message).is_err() { - error!("Failed to send cached messages. Messages too large"); - } else if let Some(m) = self.metrics.as_mut() { - // Sending of messages succeeded, register them on the internal metrics. - for topic in topics.iter() { - m.msg_sent(topic, msg_bytes); - } - } - } - debug!("Completed IWANT handling for peer: {}", peer_id); + tracing::debug!(peer=%peer_id, "Completed IWANT handling for peer"); } /// Handles GRAFT control messages. If subscribed to the topic, adds the peer to mesh, if not, /// responds with PRUNE messages. 
fn handle_graft(&mut self, peer_id: &PeerId, topics: Vec) { - debug!("Handling GRAFT message for peer: {}", peer_id); + tracing::debug!(peer=%peer_id, "Handling GRAFT message for peer"); let mut to_prune_topics = HashSet::new(); @@ -1403,7 +1342,7 @@ where // we don't GRAFT to/from explicit peers; complain loudly if this happens if self.explicit_peers.contains(peer_id) { - warn!("GRAFT: ignoring request from direct peer {}", peer_id); + tracing::warn!(peer=%peer_id, "GRAFT: ignoring request from direct peer"); // this is possibly a bug from non-reciprocal configuration; send a PRUNE for all topics to_prune_topics = topics.into_iter().collect(); // but don't PX @@ -1415,9 +1354,10 @@ where if let Some(peers) = self.mesh.get_mut(&topic_hash) { // if the peer is already in the mesh ignore the graft if peers.contains(peer_id) { - debug!( - "GRAFT: Received graft for peer {:?} that is already in topic {:?}", - peer_id, &topic_hash + tracing::debug!( + peer=%peer_id, + topic=%&topic_hash, + "GRAFT: Received graft for peer that is already in topic" ); continue; } @@ -1426,9 +1366,9 @@ where if let Some(backoff_time) = self.backoffs.get_backoff_time(&topic_hash, peer_id) { if backoff_time > now { - warn!( - "[Penalty] Peer attempted graft within backoff time, penalizing {}", - peer_id + tracing::warn!( + peer=%peer_id, + "[Penalty] Peer attempted graft within backoff time, penalizing" ); // add behavioural penalty if let Some((peer_score, ..)) = &mut self.peer_score { @@ -1459,10 +1399,11 @@ where // check the score if below_zero { // we don't GRAFT peers with negative score - debug!( - "GRAFT: ignoring peer {:?} with negative score [score = {}, \ - topic = {}]", - peer_id, score, topic_hash + tracing::debug!( + peer=%peer_id, + %score, + topic=%topic_hash, + "GRAFT: ignoring peer with negative score" ); // we do send them PRUNE however, because it's a matter of protocol correctness to_prune_topics.insert(topic_hash.clone()); @@ -1481,9 +1422,10 @@ where } // add peer to 
the mesh - debug!( - "GRAFT: Mesh link added for peer: {:?} in topic: {:?}", - peer_id, &topic_hash + tracing::debug!( + peer=%peer_id, + topic=%topic_hash, + "GRAFT: Mesh link added for peer in topic" ); if peers.insert(*peer_id) { @@ -1508,9 +1450,10 @@ where } else { // don't do PX when there is an unknown topic to avoid leaking our peers do_px = false; - debug!( - "GRAFT: Received graft for unknown topic {:?} from peer {:?}", - &topic_hash, peer_id + tracing::debug!( + peer=%peer_id, + topic=%topic_hash, + "GRAFT: Received graft for unknown topic from peer" ); // spam hardening: ignore GRAFTs for unknown topics continue; @@ -1521,29 +1464,20 @@ where if !to_prune_topics.is_empty() { // build the prune messages to send let on_unsubscribe = false; - let prune_messages = to_prune_topics + for action in to_prune_topics .iter() .map(|t| self.make_prune(t, peer_id, do_px, on_unsubscribe)) - .collect(); + .collect::>() + { + self.send_message(*peer_id, RpcOut::Control(action)); + } // Send the prune messages to the peer - debug!( - "GRAFT: Not subscribed to topics - Sending PRUNE to peer: {}", - peer_id + tracing::debug!( + peer=%peer_id, + "GRAFT: Not subscribed to topics - Sending PRUNE to peer" ); - - if let Err(e) = self.send_message( - *peer_id, - Rpc { - subscriptions: Vec::new(), - messages: Vec::new(), - control_msgs: prune_messages, - } - .into_protobuf(), - ) { - error!("Failed to send PRUNE: {:?}", e); - } } - debug!("Completed GRAFT handling for peer: {}", peer_id); + tracing::debug!(peer=%peer_id, "Completed GRAFT handling for peer"); } fn remove_peer_from_mesh( @@ -1558,10 +1492,10 @@ where if let Some(peers) = self.mesh.get_mut(topic_hash) { // remove the peer if it exists in the mesh if peers.remove(peer_id) { - debug!( - "PRUNE: Removing peer: {} from the mesh for topic: {}", - peer_id.to_string(), - topic_hash + tracing::debug!( + peer=%peer_id, + topic=%topic_hash, + "PRUNE: Removing peer from the mesh for topic" ); if let Some(m) = 
self.metrics.as_mut() { m.peers_removed(topic_hash, reason, 1) @@ -1601,7 +1535,7 @@ where peer_id: &PeerId, prune_data: Vec<(TopicHash, Vec, Option)>, ) { - debug!("Handling PRUNE message for peer: {}", peer_id); + tracing::debug!(peer=%peer_id, "Handling PRUNE message for peer"); let (below_threshold, score) = self.score_below_threshold(peer_id, |pst| pst.accept_px_threshold); for (topic_hash, px, backoff) in prune_data { @@ -1612,10 +1546,11 @@ where if !px.is_empty() { // we ignore PX from peers with insufficient score if below_threshold { - debug!( - "PRUNE: ignoring PX from peer {:?} with insufficient score \ - [score ={} topic = {}]", - peer_id, score, topic_hash + tracing::debug!( + peer=%peer_id, + %score, + topic=%topic_hash, + "PRUNE: ignoring PX from peer with insufficient score" ); continue; } @@ -1632,7 +1567,7 @@ where } } } - debug!("Completed PRUNE handling for peer: {}", peer_id.to_string()); + tracing::debug!(peer=%peer_id, "Completed PRUNE handling for peer"); } fn px_connect(&mut self, mut px: Vec) { @@ -1672,17 +1607,17 @@ where raw_message: &mut RawMessage, propagation_source: &PeerId, ) -> bool { - debug!( - "Handling message: {:?} from peer: {}", - msg_id, - propagation_source.to_string() + tracing::debug!( + peer=%propagation_source, + message=%msg_id, + "Handling message from peer" ); // Reject any message from a blacklisted peer if self.blacklisted_peers.contains(propagation_source) { - debug!( - "Rejecting message from blacklisted peer: {}", - propagation_source + tracing::debug!( + peer=%propagation_source, + "Rejecting message from blacklisted peer" ); if let Some((peer_score, .., gossip_promises)) = &mut self.peer_score { peer_score.reject_message( @@ -1699,9 +1634,10 @@ where // Also reject any message that originated from a blacklisted peer if let Some(source) = raw_message.source.as_ref() { if self.blacklisted_peers.contains(source) { - debug!( - "Rejecting message from peer {} because of blacklisted source: {}", - 
propagation_source, source + tracing::debug!( + peer=%propagation_source, + %source, + "Rejecting message from peer because of blacklisted source" ); self.handle_invalid_message( propagation_source, @@ -1729,9 +1665,10 @@ where }; if self_published { - debug!( - "Dropping message {} claiming to be from self but forwarded from {}", - msg_id, propagation_source + tracing::debug!( + message=%msg_id, + source=%propagation_source, + "Dropping message claiming to be from self but forwarded from source" ); self.handle_invalid_message(propagation_source, raw_message, RejectReason::SelfOrigin); return false; @@ -1753,36 +1690,11 @@ where metrics.msg_recvd_unfiltered(&raw_message.topic, raw_message.raw_protobuf_len()); } - let fast_message_id = self.config.fast_message_id(&raw_message); - - if let Some(fast_message_id) = fast_message_id.as_ref() { - if let Some(msg_id) = self.fast_message_id_cache.get(fast_message_id) { - let msg_id = msg_id.clone(); - // Report the duplicate - if self.message_is_valid(&msg_id, &mut raw_message, propagation_source) { - if let Some((peer_score, ..)) = &mut self.peer_score { - peer_score.duplicated_message( - propagation_source, - &msg_id, - &raw_message.topic, - ); - } - // Update the cache, informing that we have received a duplicate from another peer. - // The peers in this cache are used to prevent us forwarding redundant messages onto - // these peers. - self.mcache.observe_duplicate(&msg_id, propagation_source); - } - - // This message has been seen previously. Ignore it - return; - } - } - // Try and perform the data transform to the message. If it fails, consider it invalid. let message = match self.data_transform.inbound_transform(raw_message.clone()) { Ok(message) => message, Err(e) => { - debug!("Invalid message. Transform error: {:?}", e); + tracing::debug!("Invalid message. 
Transform error: {:?}", e); // Reject the message and return self.handle_invalid_message( propagation_source, @@ -1803,25 +1715,17 @@ where return; } - // Add the message to the duplicate caches - if let Some(fast_message_id) = fast_message_id { - // add id to cache - self.fast_message_id_cache - .entry(fast_message_id) - .or_insert_with(|| msg_id.clone()); - } - if !self.duplicate_cache.insert(msg_id.clone()) { - debug!("Message already received, ignoring. Message: {}", msg_id); + tracing::debug!(message=%msg_id, "Message already received, ignoring"); if let Some((peer_score, ..)) = &mut self.peer_score { peer_score.duplicated_message(propagation_source, &msg_id, &message.topic); } self.mcache.observe_duplicate(&msg_id, propagation_source); return; } - debug!( - "Put message {:?} in duplicate_cache and resolve promises", - msg_id + tracing::debug!( + message=%msg_id, + "Put message in duplicate_cache and resolve promises" ); // Record the received message with the metrics @@ -1841,7 +1745,7 @@ where // Dispatch the message to the user if we are subscribed to any of the topics if self.mesh.contains_key(&message.topic) { - debug!("Sending received message to user"); + tracing::debug!("Sending received message to user"); self.events .push_back(ToSwarm::GenerateEvent(Event::Message { propagation_source: *propagation_source, @@ -1849,9 +1753,9 @@ where message, })); } else { - debug!( - "Received message on a topic we are not subscribed to: {:?}", - message.topic + tracing::debug!( + topic=%message.topic, + "Received message on a topic we are not subscribed to" ); return; } @@ -1867,9 +1771,9 @@ where ) .is_err() { - error!("Failed to forward message. Too large"); + tracing::error!("Failed to forward message. 
Too large"); } - debug!("Completed message handling for message: {:?}", msg_id); + tracing::debug!(message=%msg_id, "Completed message handling for message"); } } @@ -1885,20 +1789,17 @@ where metrics.register_invalid_message(&raw_message.topic); } - let fast_message_id_cache = &self.fast_message_id_cache; + if let Ok(message) = self.data_transform.inbound_transform(raw_message.clone()) { + let message_id = self.config.message_id(&message); - if let Some(msg_id) = self - .config - .fast_message_id(raw_message) - .and_then(|id| fast_message_id_cache.get(&id)) - { peer_score.reject_message( propagation_source, - msg_id, - &raw_message.topic, + &message_id, + &message.topic, reject_reason, ); - gossip_promises.reject_message(msg_id, &reject_reason); + + gossip_promises.reject_message(&message_id, &reject_reason); } else { // The message is invalid, we reject it ignoring any gossip promises. If a peer is // advertising this message via an IHAVE and it's invalid it will be double @@ -1914,23 +1815,20 @@ where subscriptions: &[Subscription], propagation_source: &PeerId, ) { - debug!( - "Handling subscriptions: {:?}, from source: {}", + tracing::debug!( + source=%propagation_source, + "Handling subscriptions: {:?}", subscriptions, - propagation_source.to_string() ); let mut unsubscribed_peers = Vec::new(); - let subscribed_topics = match self.peer_topics.get_mut(propagation_source) { - Some(topics) => topics, - None => { - error!( - "Subscription by unknown peer: {}", - propagation_source.to_string() - ); - return; - } + let Some(subscribed_topics) = self.peer_topics.get_mut(propagation_source) else { + tracing::error!( + peer=%propagation_source, + "Subscription by unknown peer" + ); + return; }; // Collect potential graft topics for the peer. 
@@ -1945,10 +1843,10 @@ where { Ok(topics) => topics, Err(s) => { - error!( - "Subscription filter error: {}; ignoring RPC from peer {}", - s, - propagation_source.to_string() + tracing::error!( + peer=%propagation_source, + "Subscription filter error: {}; ignoring RPC from peer", + s ); return; } @@ -1957,18 +1855,15 @@ where for subscription in filtered_topics { // get the peers from the mapping, or insert empty lists if the topic doesn't exist let topic_hash = &subscription.topic_hash; - let peer_list = self - .topic_peers - .entry(topic_hash.clone()) - .or_insert_with(Default::default); + let peer_list = self.topic_peers.entry(topic_hash.clone()).or_default(); match subscription.action { SubscriptionAction::Subscribe => { if peer_list.insert(*propagation_source) { - debug!( - "SUBSCRIPTION: Adding gossip peer: {} to topic: {:?}", - propagation_source.to_string(), - topic_hash + tracing::debug!( + peer=%propagation_source, + topic=%topic_hash, + "SUBSCRIPTION: Adding gossip peer to topic" ); } @@ -1997,19 +1892,19 @@ where if peers.len() < self.config.mesh_n_low() && peers.insert(*propagation_source) { - debug!( - "SUBSCRIPTION: Adding peer {} to the mesh for topic {:?}", - propagation_source.to_string(), - topic_hash + tracing::debug!( + peer=%propagation_source, + topic=%topic_hash, + "SUBSCRIPTION: Adding peer to the mesh for topic" ); if let Some(m) = self.metrics.as_mut() { m.peers_included(topic_hash, Inclusion::Subscribed, 1) } // send graft to the peer - debug!( - "Sending GRAFT to peer {} for topic {:?}", - propagation_source.to_string(), - topic_hash + tracing::debug!( + peer=%propagation_source, + topic=%topic_hash, + "Sending GRAFT to peer for topic" ); if let Some((peer_score, ..)) = &mut self.peer_score { peer_score.graft(propagation_source, topic_hash.clone()); @@ -2026,10 +1921,10 @@ where } SubscriptionAction::Unsubscribe => { if peer_list.remove(propagation_source) { - debug!( - "SUBSCRIPTION: Removing gossip peer: {} from topic: {:?}", - 
propagation_source.to_string(), - topic_hash + tracing::debug!( + peer=%propagation_source, + topic=%topic_hash, + "SUBSCRIPTION: Removing gossip peer from topic" ); } @@ -2069,23 +1964,12 @@ where // If we need to send grafts to peer, do so immediately, rather than waiting for the // heartbeat. - if !topics_to_graft.is_empty() - && self - .send_message( - *propagation_source, - Rpc { - subscriptions: Vec::new(), - messages: Vec::new(), - control_msgs: topics_to_graft - .into_iter() - .map(|topic_hash| ControlAction::Graft { topic_hash }) - .collect(), - } - .into_protobuf(), - ) - .is_err() + for action in topics_to_graft + .into_iter() + .map(|topic_hash| ControlAction::Graft { topic_hash }) + .collect::>() { - error!("Failed sending grafts. Message too large"); + self.send_message(*propagation_source, RpcOut::Control(action)) } // Notify the application of the subscriptions @@ -2093,9 +1977,9 @@ where self.events.push_back(event); } - trace!( - "Completed handling subscriptions from source: {:?}", - propagation_source + tracing::trace!( + source=%propagation_source, + "Completed handling subscriptions from source" ); } @@ -2113,7 +1997,7 @@ where /// Heartbeat function which shifts the memcache and updates the mesh. fn heartbeat(&mut self) { - debug!("Starting heartbeat"); + tracing::debug!("Starting heartbeat"); let start = Instant::now(); self.heartbeat_ticks += 1; @@ -2169,10 +2053,11 @@ where } if peer_score < 0.0 { - debug!( - "HEARTBEAT: Prune peer {:?} with negative score [score = {}, topic = \ - {}]", - peer_id, peer_score, topic_hash + tracing::debug!( + peer=%peer_id, + score=%peer_score, + topic=%topic_hash, + "HEARTBEAT: Prune peer with negative score" ); let current_topic = to_prune.entry(*peer_id).or_insert_with(Vec::new); @@ -2192,9 +2077,9 @@ where // too little peers - add some if peers.len() < self.config.mesh_n_low() { - debug!( - "HEARTBEAT: Mesh low. 
Topic: {} Contains: {} needs: {}", - topic_hash, + tracing::debug!( + topic=%topic_hash, + "HEARTBEAT: Mesh low. Topic contains: {} needs: {}", peers.len(), self.config.mesh_n_low() ); @@ -2217,7 +2102,7 @@ where current_topic.push(topic_hash.clone()); } // update the mesh - debug!("Updating mesh, new mesh: {:?}", peer_list); + tracing::debug!("Updating mesh, new mesh: {:?}", peer_list); if let Some(m) = self.metrics.as_mut() { m.peers_included(topic_hash, Inclusion::Random, peer_list.len()) } @@ -2226,9 +2111,9 @@ where // too many peers - remove some if peers.len() > self.config.mesh_n_high() { - debug!( - "HEARTBEAT: Mesh high. Topic: {} Contains: {} needs: {}", - topic_hash, + tracing::debug!( + topic=%topic_hash, + "HEARTBEAT: Mesh high. Topic contains: {} needs: {}", peers.len(), self.config.mesh_n_high() ); @@ -2236,7 +2121,7 @@ where // shuffle the peers and then sort by score ascending beginning with the worst let mut rng = thread_rng(); - let mut shuffled = peers.iter().cloned().collect::>(); + let mut shuffled = peers.iter().copied().collect::>(); shuffled.shuffle(&mut rng); shuffled.sort_by(|p1, p2| { let score_p1 = *scores.get(p1).unwrap_or(&0.0); @@ -2311,7 +2196,7 @@ where current_topic.push(topic_hash.clone()); } // update the mesh - debug!("Updating mesh, new mesh: {:?}", peer_list); + tracing::debug!("Updating mesh, new mesh: {:?}", peer_list); if let Some(m) = self.metrics.as_mut() { m.peers_included(topic_hash, Inclusion::Outbound, peer_list.len()) } @@ -2378,9 +2263,10 @@ where current_topic.push(topic_hash.clone()); } // update the mesh - debug!( - "Opportunistically graft in topic {} with peers {:?}", - topic_hash, peer_list + tracing::debug!( + topic=%topic_hash, + "Opportunistically graft in topic with peers {:?}", + peer_list ); if let Some(m) = self.metrics.as_mut() { m.peers_included(topic_hash, Inclusion::Random, peer_list.len()) @@ -2401,9 +2287,9 @@ where let fanout_ttl = self.config.fanout_ttl(); 
self.fanout_last_pub.retain(|topic_hash, last_pub_time| { if *last_pub_time + fanout_ttl < Instant::now() { - debug!( - "HEARTBEAT: Fanout topic removed due to timeout. Topic: {:?}", - topic_hash + tracing::debug!( + topic=%topic_hash, + "HEARTBEAT: Fanout topic removed due to timeout" ); fanout.remove(topic_hash); return false; @@ -2426,9 +2312,9 @@ where match self.peer_topics.get(peer) { Some(topics) => { if !topics.contains(topic_hash) || peer_score < publish_threshold { - debug!( - "HEARTBEAT: Peer removed from fanout for topic: {:?}", - topic_hash + tracing::debug!( + topic=%topic_hash, + "HEARTBEAT: Peer removed from fanout for topic" ); to_remove_peers.push(*peer); } @@ -2445,7 +2331,7 @@ where // not enough peers if peers.len() < self.config.mesh_n() { - debug!( + tracing::debug!( "HEARTBEAT: Fanout low. Contains: {:?} needs: {:?}", peers.len(), self.config.mesh_n() @@ -2468,7 +2354,7 @@ where } if self.peer_score.is_some() { - trace!("Mesh message deliveries: {:?}", { + tracing::trace!("Mesh message deliveries: {:?}", { self.mesh .iter() .map(|(t, peers)| { @@ -2507,7 +2393,7 @@ where // shift the memcache self.mcache.shift(); - debug!("Completed Heartbeat"); + tracing::debug!("Completed Heartbeat"); if let Some(metrics) = self.metrics.as_mut() { let duration = u64::try_from(start.elapsed().as_millis()).unwrap_or(u64::MAX); metrics.observe_heartbeat_duration(duration); @@ -2527,7 +2413,7 @@ where // if we are emitting more than GossipSubMaxIHaveLength message_ids, truncate the list if message_ids.len() > self.config.max_ihave_length() { // we do the truncation (with shuffling) per peer below - debug!( + tracing::debug!( "too many messages for gossip; will truncate IHAVE list ({} messages)", message_ids.len() ); @@ -2556,7 +2442,7 @@ where }, ); - debug!("Gossiping IHAVE to {} peers.", to_msg_peers.len()); + tracing::debug!("Gossiping IHAVE to {} peers", to_msg_peers.len()); for peer in to_msg_peers { let mut peer_message_ids = message_ids.clone(); @@ 
-2609,12 +2495,9 @@ where &self.connected_peers, ); } - let mut control_msgs: Vec = topics - .iter() - .map(|topic_hash| ControlAction::Graft { - topic_hash: topic_hash.clone(), - }) - .collect(); + let control_msgs = topics.iter().map(|topic_hash| ControlAction::Graft { + topic_hash: topic_hash.clone(), + }); // If there are prunes associated with the same peer add them. // NOTE: In this case a peer has been added to a topic mesh, and removed from another. @@ -2622,52 +2505,37 @@ where // of its removal from another. // The following prunes are not due to unsubscribing. - let on_unsubscribe = false; - if let Some(topics) = to_prune.remove(&peer) { - let mut prunes = topics - .iter() - .map(|topic_hash| { - self.make_prune( - topic_hash, - &peer, - self.config.do_px() && !no_px.contains(&peer), - on_unsubscribe, - ) - }) - .collect::>(); - control_msgs.append(&mut prunes); - } + let prunes = to_prune + .remove(&peer) + .into_iter() + .flatten() + .map(|topic_hash| { + self.make_prune( + &topic_hash, + &peer, + self.config.do_px() && !no_px.contains(&peer), + false, + ) + }); // send the control messages - if self - .send_message( - peer, - Rpc { - subscriptions: Vec::new(), - messages: Vec::new(), - control_msgs, - } - .into_protobuf(), - ) - .is_err() - { - error!("Failed to send control messages. Message too large"); + for msg in control_msgs.chain(prunes).collect::>() { + self.send_message(peer, RpcOut::Control(msg)); } } // handle the remaining prunes // The following prunes are not due to unsubscribing. 
- let on_unsubscribe = false; for (peer, topics) in to_prune.iter() { - let mut remaining_prunes = Vec::new(); for topic_hash in topics { let prune = self.make_prune( topic_hash, peer, self.config.do_px() && !no_px.contains(peer), - on_unsubscribe, + false, ); - remaining_prunes.push(prune); + self.send_message(*peer, RpcOut::Control(prune)); + // inform the handler peer_removed_from_mesh( *peer, @@ -2678,21 +2546,6 @@ where &self.connected_peers, ); } - - if self - .send_message( - *peer, - Rpc { - subscriptions: Vec::new(), - messages: Vec::new(), - control_msgs: remaining_prunes, - } - .into_protobuf(), - ) - .is_err() - { - error!("Failed to send prune messages. Message too large"); - } } } @@ -2713,7 +2566,7 @@ where } } - debug!("Forwarding message: {:?}", msg_id); + tracing::debug!(message=%msg_id, "Forwarding message"); let mut recipient_peers = HashSet::new(); { @@ -2749,22 +2602,13 @@ where // forward the message to peers if !recipient_peers.is_empty() { - let event = Rpc { - subscriptions: Vec::new(), - messages: vec![message.clone()], - control_msgs: Vec::new(), - } - .into_protobuf(); + let event = RpcOut::Forward(message.clone()); - let msg_bytes = event.get_size(); for peer in recipient_peers.iter() { - debug!("Sending message: {:?} to peer {:?}", msg_id, peer); - self.send_message(*peer, event.clone())?; - if let Some(m) = self.metrics.as_mut() { - m.msg_sent(&message.topic, msg_bytes); - } + tracing::debug!(%peer, message=%msg_id, "Sending message to peer"); + self.send_message(*peer, event.clone()); } - debug!("Completed forwarding message"); + tracing::debug!("Completed forwarding message"); Ok(true) } else { Ok(false) @@ -2788,7 +2632,7 @@ where let signature = { let message = proto::Message { - from: Some(author.clone().to_bytes()), + from: Some(author.to_bytes()), data: Some(data.clone()), seqno: Some(sequence_number.to_be_bytes().to_vec()), topic: topic.clone().into_string(), @@ -2869,28 +2713,14 @@ where peer: PeerId, control: ControlAction, 
) { - control_pool - .entry(peer) - .or_insert_with(Vec::new) - .push(control); + control_pool.entry(peer).or_default().push(control); } /// Takes each control action mapping and turns it into a message fn flush_control_pool(&mut self) { for (peer, controls) in self.control_pool.drain().collect::>() { - if self - .send_message( - peer, - Rpc { - subscriptions: Vec::new(), - messages: Vec::new(), - control_msgs: controls, - } - .into_protobuf(), - ) - .is_err() - { - error!("Failed to flush control pool. Message too large"); + for msg in controls { + self.send_message(peer, RpcOut::Control(msg)); } } @@ -2898,144 +2728,21 @@ where self.pending_iwant_msgs.clear(); } - /// Send a [`Rpc`] message to a peer. This will wrap the message in an arc if it + /// Send a [`RpcOut`] message to a peer. This will wrap the message in an arc if it /// is not already an arc. - fn send_message(&mut self, peer_id: PeerId, message: proto::RPC) -> Result<(), PublishError> { - // If the message is oversized, try and fragment it. If it cannot be fragmented, log an - // error and drop the message (all individual messages should be small enough to fit in the - // max_transmit_size) - - let messages = self.fragment_message(message)?; - - for message in messages { - self.events.push_back(ToSwarm::NotifyHandler { - peer_id, - event: HandlerIn::Message(message), - handler: NotifyHandler::Any, - }) - } - Ok(()) - } - - // If a message is too large to be sent as-is, this attempts to fragment it into smaller RPC - // messages to be sent. - fn fragment_message(&self, rpc: proto::RPC) -> Result, PublishError> { - if rpc.get_size() < self.config.max_transmit_size() { - return Ok(vec![rpc]); - } - - let new_rpc = proto::RPC { - subscriptions: Vec::new(), - publish: Vec::new(), - control: None, - }; - - let mut rpc_list = vec![new_rpc.clone()]; - - // Gets an RPC if the object size will fit, otherwise create a new RPC. The last element - // will be the RPC to add an object. - macro_rules! 
create_or_add_rpc { - ($object_size: ident ) => { - let list_index = rpc_list.len() - 1; // the list is never empty - - // create a new RPC if the new object plus 5% of its size (for length prefix - // buffers) exceeds the max transmit size. - if rpc_list[list_index].get_size() + (($object_size as f64) * 1.05) as usize - > self.config.max_transmit_size() - && rpc_list[list_index] != new_rpc - { - // create a new rpc and use this as the current - rpc_list.push(new_rpc.clone()); - } - }; - } - - macro_rules! add_item { - ($object: ident, $type: ident ) => { - let object_size = $object.get_size(); - - if object_size + 2 > self.config.max_transmit_size() { - // This should not be possible. All received and published messages have already - // been vetted to fit within the size. - error!("Individual message too large to fragment"); - return Err(PublishError::MessageTooLarge); - } - - create_or_add_rpc!(object_size); - rpc_list - .last_mut() - .expect("Must have at least one element") - .$type - .push($object.clone()); - }; - } - - // Add messages until the limit - for message in &rpc.publish { - add_item!(message, publish); - } - for subscription in &rpc.subscriptions { - add_item!(subscription, subscriptions); - } - - // handle the control messages. 
If all are within the max_transmit_size, send them without - // fragmenting, otherwise, fragment the control messages - let empty_control = proto::ControlMessage::default(); - if let Some(control) = rpc.control.as_ref() { - if control.get_size() + 2 > self.config.max_transmit_size() { - // fragment the RPC - for ihave in &control.ihave { - let len = ihave.get_size(); - create_or_add_rpc!(len); - rpc_list - .last_mut() - .expect("Always an element") - .control - .get_or_insert_with(|| empty_control.clone()) - .ihave - .push(ihave.clone()); - } - for iwant in &control.iwant { - let len = iwant.get_size(); - create_or_add_rpc!(len); - rpc_list - .last_mut() - .expect("Always an element") - .control - .get_or_insert_with(|| empty_control.clone()) - .iwant - .push(iwant.clone()); - } - for graft in &control.graft { - let len = graft.get_size(); - create_or_add_rpc!(len); - rpc_list - .last_mut() - .expect("Always an element") - .control - .get_or_insert_with(|| empty_control.clone()) - .graft - .push(graft.clone()); - } - for prune in &control.prune { - let len = prune.get_size(); - create_or_add_rpc!(len); - rpc_list - .last_mut() - .expect("Always an element") - .control - .get_or_insert_with(|| empty_control.clone()) - .prune - .push(prune.clone()); - } - } else { - let len = control.get_size(); - create_or_add_rpc!(len); - rpc_list.last_mut().expect("Always an element").control = Some(control.clone()); + fn send_message(&mut self, peer_id: PeerId, rpc: RpcOut) { + if let Some(m) = self.metrics.as_mut() { + if let RpcOut::Publish(ref message) | RpcOut::Forward(ref message) = rpc { + // register bytes sent on the internal metrics. 
+ m.msg_sent(&message.topic, message.raw_protobuf_len()); } } - Ok(rpc_list) + self.events.push_back(ToSwarm::NotifyHandler { + peer_id, + event: HandlerIn::Message(rpc), + handler: NotifyHandler::Any, + }); } fn on_connection_established( @@ -3062,9 +2769,9 @@ where if let Some(ip) = get_ip_addr(endpoint.get_remote_address()) { peer_score.add_ip(&peer_id, ip); } else { - trace!( - "Couldn't extract ip from endpoint of peer {} with endpoint {:?}", - peer_id, + tracing::trace!( + peer=%peer_id, + "Couldn't extract ip from endpoint of peer with endpoint {:?}", endpoint ) } @@ -3084,46 +2791,27 @@ where .connections .push(connection_id); - if other_established == 0 { - // Ignore connections from blacklisted peers. - if self.blacklisted_peers.contains(&peer_id) { - debug!("Ignoring connection from blacklisted peer: {}", peer_id); - } else { - debug!("New peer connected: {}", peer_id); - // We need to send our subscriptions to the newly-connected node. - let mut subscriptions = vec![]; - for topic_hash in self.mesh.keys() { - subscriptions.push(Subscription { - topic_hash: topic_hash.clone(), - action: SubscriptionAction::Subscribe, - }); - } + if other_established > 0 { + return; // Not our first connection to this peer, hence nothing to do. + } - if !subscriptions.is_empty() { - // send our subscriptions to the peer - if self - .send_message( - peer_id, - Rpc { - messages: Vec::new(), - subscriptions, - control_msgs: Vec::new(), - } - .into_protobuf(), - ) - .is_err() - { - error!("Failed to send subscriptions, message too large"); - } - } - } + // Insert an empty set of the topics of this peer until known. + self.peer_topics.insert(peer_id, Default::default()); + + if let Some((peer_score, ..)) = &mut self.peer_score { + peer_score.add_peer(peer_id); + } - // Insert an empty set of the topics of this peer until known. - self.peer_topics.insert(peer_id, Default::default()); + // Ignore connections from blacklisted peers. 
+ if self.blacklisted_peers.contains(&peer_id) { + tracing::debug!(peer=%peer_id, "Ignoring connection from blacklisted peer"); + return; + } - if let Some((peer_score, ..)) = &mut self.peer_score { - peer_score.add_peer(peer_id); - } + tracing::debug!(peer=%peer_id, "New peer connected"); + // We need to send our subscriptions to the newly-connected node. + for topic_hash in self.mesh.clone().into_keys() { + self.send_message(peer_id, RpcOut::Subscribe(topic_hash)); } } @@ -3135,16 +2823,16 @@ where endpoint, remaining_established, .. - }: ConnectionClosed<::ConnectionHandler>, + }: ConnectionClosed, ) { // Remove IP from peer scoring system if let Some((peer_score, ..)) = &mut self.peer_score { if let Some(ip) = get_ip_addr(endpoint.get_remote_address()) { peer_score.remove_ip(&peer_id, &ip); } else { - trace!( - "Couldn't extract ip from endpoint of peer {} with endpoint {:?}", - peer_id, + tracing::trace!( + peer=%peer_id, + "Couldn't extract ip from endpoint of peer with endpoint {:?}", endpoint ) } @@ -3181,17 +2869,14 @@ where } } else { // remove from mesh, topic_peers, peer_topic and the fanout - debug!("Peer disconnected: {}", peer_id); + tracing::debug!(peer=%peer_id, "Peer disconnected"); { - let topics = match self.peer_topics.get(&peer_id) { - Some(topics) => topics, - None => { - debug_assert!( - self.blacklisted_peers.contains(&peer_id), - "Disconnected node not in connected list" - ); - return; - } + let Some(topics) = self.peer_topics.get(&peer_id) else { + debug_assert!( + self.blacklisted_peers.contains(&peer_id), + "Disconnected node not in connected list" + ); + return; }; // remove peer from all mappings @@ -3211,18 +2896,19 @@ where if let Some(peer_list) = self.topic_peers.get_mut(topic) { if !peer_list.remove(&peer_id) { // debugging purposes - warn!( - "Disconnected node: {} not in topic_peers peer list", - peer_id + tracing::warn!( + peer=%peer_id, + "Disconnected node: peer not in topic_peers" ); } if let Some(m) = self.metrics.as_mut() 
{ m.set_topic_peers(topic, peer_list.len()) } } else { - warn!( - "Disconnected node: {} with topic: {:?} not in topic_peers", - &peer_id, &topic + tracing::warn!( + peer=%peer_id, + topic=%topic, + "Disconnected node: peer with topic not in topic_peers" ); } @@ -3274,18 +2960,18 @@ where if let Some(ip) = get_ip_addr(endpoint_old.get_remote_address()) { peer_score.remove_ip(&peer_id, &ip); } else { - trace!( - "Couldn't extract ip from endpoint of peer {} with endpoint {:?}", - &peer_id, + tracing::trace!( + peer=%&peer_id, + "Couldn't extract ip from endpoint of peer with endpoint {:?}", endpoint_old ) } if let Some(ip) = get_ip_addr(endpoint_new.get_remote_address()) { peer_score.add_ip(&peer_id, ip); } else { - trace!( - "Couldn't extract ip from endpoint of peer {} with endpoint {:?}", - peer_id, + tracing::trace!( + peer=%peer_id, + "Couldn't extract ip from endpoint of peer with endpoint {:?}", endpoint_new ) } @@ -3307,7 +2993,7 @@ where F: Send + 'static + TopicSubscriptionFilter, { type ConnectionHandler = Handler; - type OutEvent = Event; + type ToSwarm = Event; fn handle_established_inbound_connection( &mut self, @@ -3316,10 +3002,7 @@ where _: &Multiaddr, _: &Multiaddr, ) -> Result, ConnectionDenied> { - Ok(Handler::new( - self.config.protocol_config(), - self.config.idle_timeout(), - )) + Ok(Handler::new(self.config.protocol_config())) } fn handle_established_outbound_connection( @@ -3329,10 +3012,7 @@ where _: &Multiaddr, _: Endpoint, ) -> Result, ConnectionDenied> { - Ok(Handler::new( - self.config.protocol_config(), - self.config.idle_timeout(), - )) + Ok(Handler::new(self.config.protocol_config())) } fn on_connection_handler_event( @@ -3350,9 +3030,9 @@ where } if let PeerKind::NotSupported = kind { - debug!( - "Peer does not support gossipsub protocols. 
{}", - propagation_source + tracing::debug!( + peer=%propagation_source, + "Peer does not support gossipsub protocols" ); self.events .push_back(ToSwarm::GenerateEvent(Event::GossipsubNotSupported { @@ -3362,9 +3042,10 @@ where // Only change the value if the old value is Floodsub (the default set in // `NetworkBehaviour::on_event` with FromSwarm::ConnectionEstablished). // All other PeerKind changes are ignored. - debug!( - "New peer type found: {} for peer: {}", - kind, propagation_source + tracing::debug!( + peer=%propagation_source, + peer_type=%kind, + "New peer type found for peer" ); if let PeerKind::Floodsub = conn.kind { conn.kind = kind; @@ -3387,7 +3068,7 @@ where if let (true, _) = self.score_below_threshold(&propagation_source, |pst| pst.graylist_threshold) { - debug!("RPC Dropped from greylisted peer {}", propagation_source); + tracing::debug!(peer=%propagation_source, "RPC Dropped from greylisted peer"); return; } @@ -3403,11 +3084,11 @@ where } else { // log the invalid messages for (message, validation_error) in invalid_messages { - warn!( - "Invalid message. Reason: {:?} propagation_peer {} source {:?}", + tracing::warn!( + peer=%propagation_source, + source=?message.source, + "Invalid message from peer. Reason: {:?}", validation_error, - propagation_source.to_string(), - message.source ); } } @@ -3418,7 +3099,7 @@ where if self.config.max_messages_per_rpc().is_some() && Some(count) >= self.config.max_messages_per_rpc() { - warn!("Received more messages than permitted. Ignoring further messages. Processed: {}", count); + tracing::warn!("Received more messages than permitted. Ignoring further messages. 
Processed: {}", count); break; } self.handle_received_message(raw_message, &propagation_source); @@ -3461,30 +3142,30 @@ where } } + #[tracing::instrument(level = "trace", name = "NetworkBehaviour::poll", skip(self, cx))] fn poll( &mut self, cx: &mut Context<'_>, - _: &mut impl PollParameters, - ) -> Poll>> { + ) -> Poll>> { if let Some(event) = self.events.pop_front() { return Poll::Ready(event); } // update scores if let Some((peer_score, _, interval, _)) = &mut self.peer_score { - while let Poll::Ready(Some(())) = interval.poll_next_unpin(cx) { + while let Poll::Ready(Some(_)) = interval.poll_next_unpin(cx) { peer_score.refresh_scores(); } } - while let Poll::Ready(Some(())) = self.heartbeat.poll_next_unpin(cx) { + while let Poll::Ready(Some(_)) = self.heartbeat.poll_next_unpin(cx) { self.heartbeat(); } Poll::Pending } - fn on_swarm_event(&mut self, event: FromSwarm) { + fn on_swarm_event(&mut self, event: FromSwarm) { match event { FromSwarm::ConnectionEstablished(connection_established) => { self.on_connection_established(connection_established) @@ -3493,15 +3174,7 @@ where self.on_connection_closed(connection_closed) } FromSwarm::AddressChange(address_change) => self.on_address_change(address_change), - FromSwarm::DialFailure(_) - | FromSwarm::ListenFailure(_) - | FromSwarm::NewListener(_) - | FromSwarm::NewListenAddr(_) - | FromSwarm::ExpiredListenAddr(_) - | FromSwarm::ListenerError(_) - | FromSwarm::ListenerClosed(_) - | FromSwarm::NewExternalAddr(_) - | FromSwarm::ExpiredExternalAddr(_) => {} + _ => {} } } } @@ -3563,7 +3236,7 @@ fn peer_removed_from_mesh( .get(&peer_id) .expect("To be connected to peer.") .connections - .get(0) + .first() .expect("There should be at least one connection to a peer."); if let Some(topics) = known_topics { @@ -3601,7 +3274,7 @@ fn get_random_peers_dynamic( // if they exist, filter the peers by `f` Some(peer_list) => peer_list .iter() - .cloned() + .copied() .filter(|p| { f(p) && match connected_peers.get(p) { 
Some(connections) if connections.kind == PeerKind::Gossipsub => true, @@ -3616,7 +3289,7 @@ fn get_random_peers_dynamic( // if we have less than needed, return them let n = n_map(gossip_peers.len()); if gossip_peers.len() <= n { - debug!("RANDOM PEERS: Got {:?} peers", gossip_peers.len()); + tracing::debug!("RANDOM PEERS: Got {:?} peers", gossip_peers.len()); return gossip_peers.into_iter().collect(); } @@ -3624,7 +3297,7 @@ fn get_random_peers_dynamic( let mut rng = thread_rng(); gossip_peers.partial_shuffle(&mut rng, n); - debug!("RANDOM PEERS: Got {:?} peers", n); + tracing::debug!("RANDOM PEERS: Got {:?} peers", n); gossip_peers.into_iter().take(n).collect() } @@ -3708,17 +3381,8 @@ impl fmt::Debug for PublishConfig { mod local_test { use super::*; use crate::IdentTopic; - use asynchronous_codec::Encoder; use quickcheck::*; - fn empty_rpc() -> Rpc { - Rpc { - subscriptions: Vec::new(), - messages: Vec::new(), - control_msgs: Vec::new(), - } - } - fn test_message() -> RawMessage { RawMessage { source: Some(PeerId::random()), @@ -3731,13 +3395,6 @@ mod local_test { } } - fn test_subscription() -> Subscription { - Subscription { - action: SubscriptionAction::Subscribe, - topic_hash: IdentTopic::new("TestTopic").hash(), - } - } - fn test_control() -> ControlAction { ControlAction::IHave { topic_hash: IdentTopic::new("TestTopic").hash(), @@ -3745,119 +3402,16 @@ mod local_test { } } - impl Arbitrary for Rpc { + impl Arbitrary for RpcOut { fn arbitrary(g: &mut Gen) -> Self { - let mut rpc = empty_rpc(); - - for _ in 0..g.gen_range(0..10u8) { - rpc.subscriptions.push(test_subscription()); - } - for _ in 0..g.gen_range(0..10u8) { - rpc.messages.push(test_message()); - } - for _ in 0..g.gen_range(0..10u8) { - rpc.control_msgs.push(test_control()); - } - rpc - } - } - - #[test] - /// Tests RPC message fragmentation - fn test_message_fragmentation_deterministic() { - let max_transmit_size = 500; - let config = crate::config::ConfigBuilder::default() - 
.max_transmit_size(max_transmit_size) - .validation_mode(ValidationMode::Permissive) - .build() - .unwrap(); - let gs: Behaviour = Behaviour::new(MessageAuthenticity::RandomAuthor, config).unwrap(); - - // Message under the limit should be fine. - let mut rpc = empty_rpc(); - rpc.messages.push(test_message()); - - let mut rpc_proto = rpc.clone().into_protobuf(); - let fragmented_messages = gs.fragment_message(rpc_proto.clone()).unwrap(); - assert_eq!( - fragmented_messages, - vec![rpc_proto.clone()], - "Messages under the limit shouldn't be fragmented" - ); - - // Messages over the limit should be split - - while rpc_proto.get_size() < max_transmit_size { - rpc.messages.push(test_message()); - rpc_proto = rpc.clone().into_protobuf(); - } - - let fragmented_messages = gs - .fragment_message(rpc_proto) - .expect("Should be able to fragment the messages"); - - assert!( - fragmented_messages.len() > 1, - "the message should be fragmented" - ); - - // all fragmented messages should be under the limit - for message in fragmented_messages { - assert!( - message.get_size() < max_transmit_size, - "all messages should be less than the transmission size" - ); - } - } - - #[test] - fn test_message_fragmentation() { - fn prop(rpc: Rpc) { - let max_transmit_size = 500; - let config = crate::config::ConfigBuilder::default() - .max_transmit_size(max_transmit_size) - .validation_mode(ValidationMode::Permissive) - .build() - .unwrap(); - let gs: Behaviour = Behaviour::new(MessageAuthenticity::RandomAuthor, config).unwrap(); - - let mut length_codec = unsigned_varint::codec::UviBytes::default(); - length_codec.set_max_len(max_transmit_size); - let mut codec = - crate::protocol::GossipsubCodec::new(length_codec, ValidationMode::Permissive); - - let rpc_proto = rpc.into_protobuf(); - let fragmented_messages = gs - .fragment_message(rpc_proto.clone()) - .expect("Messages must be valid"); - - if rpc_proto.get_size() < max_transmit_size { - assert_eq!( - fragmented_messages.len(), - 1, - 
"the message should not be fragmented" - ); - } else { - assert!( - fragmented_messages.len() > 1, - "the message should be fragmented" - ); - } - - // all fragmented messages should be under the limit - for message in fragmented_messages { - assert!( - message.get_size() < max_transmit_size, - "all messages should be less than the transmission size: list size {} max size{}", message.get_size(), max_transmit_size - ); - - // ensure they can all be encoded - let mut buf = bytes::BytesMut::with_capacity(message.get_size()); - codec.encode(message, &mut buf).unwrap() + match u8::arbitrary(g) % 5 { + 0 => RpcOut::Subscribe(IdentTopic::new("TestTopic").hash()), + 1 => RpcOut::Unsubscribe(IdentTopic::new("TestTopic").hash()), + 2 => RpcOut::Publish(test_message()), + 3 => RpcOut::Forward(test_message()), + 4 => RpcOut::Control(test_control()), + _ => panic!("outside range"), } } - QuickCheck::new() - .max_tests(100) - .quickcheck(prop as fn(_) -> _) } } diff --git a/protocols/gossipsub/src/behaviour/tests.rs b/protocols/gossipsub/src/behaviour/tests.rs index 29262e9c8f6..570cdf43f90 100644 --- a/protocols/gossipsub/src/behaviour/tests.rs +++ b/protocols/gossipsub/src/behaviour/tests.rs @@ -21,20 +21,16 @@ // Collection of tests for the gossipsub network behaviour use super::*; -use crate::protocol::ProtocolConfig; use crate::subscription_filter::WhitelistSubscriptionFilter; use crate::transform::{DataTransform, IdentityTransform}; -use crate::types::FastMessageId; use crate::ValidationError; use crate::{ - config::Config, config::ConfigBuilder, IdentTopic as Topic, Message, TopicScoreParams, + config::Config, config::ConfigBuilder, types::Rpc, IdentTopic as Topic, TopicScoreParams, }; use async_std::net::Ipv4Addr; use byteorder::{BigEndian, ByteOrder}; use libp2p_core::{ConnectedPoint, Endpoint}; use rand::Rng; -use std::collections::hash_map::DefaultHasher; -use std::hash::{Hash, Hasher}; use std::thread::sleep; use std::time::Duration; @@ -272,13 +268,10 @@ where for 
connection_id in peer_connections.connections.clone() { active_connections = active_connections.checked_sub(1).unwrap(); - let dummy_handler = Handler::new(ProtocolConfig::default(), Duration::ZERO); - gs.on_swarm_event(FromSwarm::ConnectionClosed(ConnectionClosed { peer_id: *peer_id, connection_id, endpoint: &fake_endpoint, - handler: dummy_handler, remaining_established: active_connections, })); } @@ -411,26 +404,19 @@ fn test_subscribe() { let subscriptions = gs .events .iter() - .fold(vec![], |mut collected_subscriptions, e| match e { - ToSwarm::NotifyHandler { - event: HandlerIn::Message(ref message), - .. - } => { - for s in &message.subscriptions { - if let Some(true) = s.subscribe { - collected_subscriptions.push(s.clone()) - }; + .filter(|e| { + matches!( + e, + ToSwarm::NotifyHandler { + event: HandlerIn::Message(RpcOut::Subscribe(_)), + .. } - collected_subscriptions - } - _ => collected_subscriptions, - }); + ) + }) + .count(); // we sent a subscribe to all known peers - assert!( - subscriptions.len() == 20, - "Should send a subscription to all known peers" - ); + assert_eq!(subscriptions, 20); } #[test] @@ -479,26 +465,16 @@ fn test_unsubscribe() { let subscriptions = gs .events .iter() - .fold(vec![], |mut collected_subscriptions, e| match e { + .fold(0, |collected_subscriptions, e| match e { ToSwarm::NotifyHandler { - event: HandlerIn::Message(ref message), + event: HandlerIn::Message(RpcOut::Subscribe(_)), .. 
- } => { - for s in &message.subscriptions { - if let Some(true) = s.subscribe { - collected_subscriptions.push(s.clone()) - }; - } - collected_subscriptions - } + } => collected_subscriptions + 1, _ => collected_subscriptions, }); // we sent a unsubscribe to all known peers, for two topics - assert!( - subscriptions.len() == 40, - "Should send an unsubscribe event to all known peers" - ); + assert_eq!(subscriptions, 40); // check we clean up internal structures for topic_hash in &topic_hashes { @@ -666,16 +642,13 @@ fn test_publish_without_flood_publishing() { // Collect all publish messages let publishes = gs .events - .iter() + .into_iter() .fold(vec![], |mut collected_publish, e| match e { ToSwarm::NotifyHandler { - event: HandlerIn::Message(ref message), + event: HandlerIn::Message(RpcOut::Publish(message)), .. } => { - let event = proto_to_message(message); - for s in &event.messages { - collected_publish.push(s.clone()); - } + collected_publish.push(message); collected_publish } _ => collected_publish, @@ -756,16 +729,13 @@ fn test_fanout() { // Collect all publish messages let publishes = gs .events - .iter() + .into_iter() .fold(vec![], |mut collected_publish, e| match e { ToSwarm::NotifyHandler { - event: HandlerIn::Message(ref message), + event: HandlerIn::Message(RpcOut::Publish(message)), .. } => { - let event = proto_to_message(message); - for s in &event.messages { - collected_publish.push(s.clone()); - } + collected_publish.push(message); collected_publish } _ => collected_publish, @@ -807,37 +777,36 @@ fn test_inject_connected() { // check that our subscriptions are sent to each of the peers // collect all the SendEvents - let send_events: Vec<_> = gs + let subscriptions = gs .events - .iter() - .filter(|e| match e { + .into_iter() + .filter_map(|e| match e { ToSwarm::NotifyHandler { - event: HandlerIn::Message(ref m), + event: HandlerIn::Message(RpcOut::Subscribe(topic)), + peer_id, .. 
- } => !m.subscriptions.is_empty(), - _ => false, + } => Some((peer_id, topic)), + _ => None, }) - .collect(); + .fold( + HashMap::>::new(), + |mut subs, (peer, sub)| { + let mut peer_subs = subs.remove(&peer).unwrap_or_default(); + peer_subs.push(sub.into_string()); + subs.insert(peer, peer_subs); + subs + }, + ); // check that there are two subscriptions sent to each peer - for sevent in send_events.clone() { - if let ToSwarm::NotifyHandler { - event: HandlerIn::Message(ref m), - .. - } = sevent - { - assert!( - m.subscriptions.len() == 2, - "There should be two subscriptions sent to each peer (1 for each topic)." - ); - }; + for peer_subs in subscriptions.values() { + assert!(peer_subs.contains(&String::from("topic1"))); + assert!(peer_subs.contains(&String::from("topic2"))); + assert_eq!(peer_subs.len(), 2); } // check that there are 20 send events created - assert!( - send_events.len() == 20, - "There should be a subscription event sent to each peer." - ); + assert_eq!(subscriptions.len(), 20); // should add the new peers to `peer_topics` with an empty vec as a gossipsub node for peer in peers { @@ -858,7 +827,7 @@ fn test_handle_received_subscriptions() { // UNSUBSCRIBE - Remove topic from peer_topics for peer. // - Remove peer from topic_peers. - let topics = vec!["topic1", "topic2", "topic3", "topic4"] + let topics = ["topic1", "topic2", "topic3", "topic4"] .iter() .map(|&t| String::from(t)) .collect(); @@ -1050,21 +1019,18 @@ fn test_handle_iwant_msg_cached() { gs.handle_iwant(&peers[7], vec![msg_id.clone()]); // the messages we are sending - let sent_messages = gs - .events - .iter() - .fold(vec![], |mut collected_messages, e| match e { + let sent_messages = gs.events.into_iter().fold( + Vec::::new(), + |mut collected_messages, e| match e { ToSwarm::NotifyHandler { event, .. 
} => { - if let HandlerIn::Message(ref m) = event { - let event = proto_to_message(m); - for c in &event.messages { - collected_messages.push(c.clone()) - } + if let HandlerIn::Message(RpcOut::Forward(message)) = event { + collected_messages.push(message); } collected_messages } _ => collected_messages, - }); + }, + ); assert!( sent_messages @@ -1113,15 +1079,14 @@ fn test_handle_iwant_msg_cached_shifted() { // is the message is being sent? let message_exists = gs.events.iter().any(|e| match e { ToSwarm::NotifyHandler { - event: HandlerIn::Message(ref m), + event: HandlerIn::Message(RpcOut::Forward(message)), .. } => { - let event = proto_to_message(m); - event - .messages - .iter() - .map(|msg| gs.data_transform.inbound_transform(msg.clone()).unwrap()) - .any(|msg| gs.config.message_id(&msg) == msg_id) + gs.config.message_id( + &gs.data_transform + .inbound_transform(message.clone()) + .unwrap(), + ) == msg_id } _ => false, }); @@ -1280,7 +1245,7 @@ fn test_handle_graft_is_not_subscribed() { #[test] // tests multiple topics in a single graft message fn test_handle_graft_multiple_topics() { - let topics: Vec = vec!["topic1", "topic2", "topic3", "topic4"] + let topics: Vec = ["topic1", "topic2", "topic3", "topic4"] .iter() .map(|&t| String::from(t)) .collect(); @@ -1352,22 +1317,15 @@ fn count_control_msgs( .sum::() + gs.events .iter() - .map(|e| match e { + .filter(|e| match e { ToSwarm::NotifyHandler { peer_id, - event: HandlerIn::Message(ref m), + event: HandlerIn::Message(RpcOut::Control(action)), .. 
- } => { - let event = proto_to_message(m); - event - .control_msgs - .iter() - .filter(|m| filter(peer_id, m)) - .count() - } - _ => 0, + } => filter(peer_id, action), + _ => false, }) - .sum::() + .count() } fn flush_events(gs: &mut Behaviour) { @@ -1418,7 +1376,7 @@ fn test_explicit_peer_reconnects() { .gs_config(config) .create_network(); - let peer = others.get(0).unwrap(); + let peer = others.first().unwrap(); //add peer as explicit peer gs.add_explicit_peer(peer); @@ -1469,7 +1427,7 @@ fn test_handle_graft_explicit_peer() { .explicit(1) .create_network(); - let peer = peers.get(0).unwrap(); + let peer = peers.first().unwrap(); gs.handle_graft(peer, topic_hashes.clone()); @@ -1576,17 +1534,10 @@ fn do_forward_messages_to_explicit_peers() { .filter(|e| match e { ToSwarm::NotifyHandler { peer_id, - event: HandlerIn::Message(ref m), + event: HandlerIn::Message(RpcOut::Forward(m)), .. } => { - let event = proto_to_message(m); - peer_id == &peers[0] - && event - .messages - .iter() - .filter(|m| m.data == message.data) - .count() - > 0 + peer_id == &peers[0] && m.data == message.data } _ => false, }) @@ -2120,14 +2071,11 @@ fn test_flood_publish() { // Collect all publish messages let publishes = gs .events - .iter() + .into_iter() .fold(vec![], |mut collected_publish, e| match e { ToSwarm::NotifyHandler { event, .. } => { - if let HandlerIn::Message(ref m) = event { - let event = proto_to_message(m); - for s in &event.messages { - collected_publish.push(s.clone()); - } + if let HandlerIn::Message(RpcOut::Publish(message)) = event { + collected_publish.push(message); } collected_publish } @@ -2681,14 +2629,11 @@ fn test_iwant_msg_from_peer_below_gossip_threshold_gets_ignored() { // the messages we are sending let sent_messages = gs .events - .iter() + .into_iter() .fold(vec![], |mut collected_messages, e| match e { ToSwarm::NotifyHandler { event, peer_id, .. 
} => { - if let HandlerIn::Message(ref m) = event { - let event = proto_to_message(m); - for c in &event.messages { - collected_messages.push((*peer_id, c.clone())) - } + if let HandlerIn::Message(RpcOut::Forward(message)) = event { + collected_messages.push((peer_id, message)); } collected_messages } @@ -2829,14 +2774,11 @@ fn test_do_not_publish_to_peer_below_publish_threshold() { // Collect all publish messages let publishes = gs .events - .iter() + .into_iter() .fold(vec![], |mut collected_publish, e| match e { ToSwarm::NotifyHandler { event, peer_id, .. } => { - if let HandlerIn::Message(ref m) = event { - let event = proto_to_message(m); - for s in &event.messages { - collected_publish.push((*peer_id, s.clone())); - } + if let HandlerIn::Message(RpcOut::Publish(message)) = event { + collected_publish.push((peer_id, message)); } collected_publish } @@ -2886,14 +2828,11 @@ fn test_do_not_flood_publish_to_peer_below_publish_threshold() { // Collect all publish messages let publishes = gs .events - .iter() + .into_iter() .fold(vec![], |mut collected_publish, e| match e { ToSwarm::NotifyHandler { event, peer_id, .. } => { - if let HandlerIn::Message(ref m) = event { - let event = proto_to_message(m); - for s in &event.messages { - collected_publish.push((*peer_id, s.clone())); - } + if let HandlerIn::Message(RpcOut::Publish(message)) = event { + collected_publish.push((peer_id, message)); } collected_publish } @@ -4412,17 +4351,14 @@ fn test_ignore_too_many_iwants_from_same_peer_for_same_message() { assert_eq!( gs.events .iter() - .map(|e| match e { + .filter(|e| matches!( + e, ToSwarm::NotifyHandler { - event: HandlerIn::Message(ref m), + event: HandlerIn::Message(RpcOut::Forward(_)), .. 
- } => { - let event = proto_to_message(m); - event.messages.len() } - _ => 0, - }) - .sum::(), + )) + .count(), config.gossip_retransimission() as usize, "not more then gossip_retransmission many messages get sent back" ); @@ -4665,7 +4601,10 @@ fn test_limit_number_of_message_ids_inside_ihave() { #[test] fn test_iwant_penalties() { - let _ = env_logger::try_init(); + use tracing_subscriber::EnvFilter; + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let config = ConfigBuilder::default() .iwant_followup_time(Duration::from_secs(4)) @@ -4821,11 +4760,8 @@ fn test_publish_to_floodsub_peers_without_flood_publish() { .fold(vec![], |mut collected_publish, e| match e { ToSwarm::NotifyHandler { peer_id, event, .. } => { if peer_id == &p1 || peer_id == &p2 { - if let HandlerIn::Message(ref m) = event { - let event = proto_to_message(m); - for s in &event.messages { - collected_publish.push(s.clone()); - } + if let HandlerIn::Message(RpcOut::Publish(message)) = event { + collected_publish.push(message); } } collected_publish @@ -4878,11 +4814,8 @@ fn test_do_not_use_floodsub_in_fanout() { .fold(vec![], |mut collected_publish, e| match e { ToSwarm::NotifyHandler { peer_id, event, .. } => { if peer_id == &p1 || peer_id == &p2 { - if let HandlerIn::Message(ref m) = event { - let event = proto_to_message(m); - for s in &event.messages { - collected_publish.push(s.clone()); - } + if let HandlerIn::Message(RpcOut::Publish(message)) = event { + collected_publish.push(message); } } collected_publish @@ -5064,86 +4997,6 @@ fn test_public_api() { ); } -#[test] -fn test_msg_id_fn_only_called_once_with_fast_message_ids() { - struct Pointers { - slow_counter: u32, - fast_counter: u32, - } - - let mut counters = Pointers { - slow_counter: 0, - fast_counter: 0, - }; - - let counters_pointer: *mut Pointers = &mut counters; - - let counters_address = counters_pointer as u64; - - macro_rules! 
get_counters_pointer { - ($m: expr) => {{ - let mut address_bytes: [u8; 8] = Default::default(); - address_bytes.copy_from_slice($m.as_slice()); - let address = u64::from_be_bytes(address_bytes); - address as *mut Pointers - }}; - } - - macro_rules! get_counters_and_hash { - ($m: expr) => {{ - let mut hasher = DefaultHasher::new(); - $m.hash(&mut hasher); - let id = hasher.finish().to_be_bytes().into(); - (id, get_counters_pointer!($m)) - }}; - } - - let message_id_fn = |m: &Message| -> MessageId { - let (mut id, mut counters_pointer): (MessageId, *mut Pointers) = - get_counters_and_hash!(&m.data); - unsafe { - (*counters_pointer).slow_counter += 1; - } - id.0.reverse(); - id - }; - let fast_message_id_fn = |m: &RawMessage| -> FastMessageId { - let (id, mut counters_pointer) = get_counters_and_hash!(&m.data); - unsafe { - (*counters_pointer).fast_counter += 1; - } - id - }; - let config = ConfigBuilder::default() - .message_id_fn(message_id_fn) - .fast_message_id_fn(fast_message_id_fn) - .build() - .unwrap(); - let (mut gs, _, topic_hashes) = inject_nodes1() - .peer_no(0) - .topics(vec![String::from("topic1")]) - .to_subscribe(true) - .gs_config(config) - .create_network(); - - let message = RawMessage { - source: None, - data: counters_address.to_be_bytes().to_vec(), - sequence_number: None, - topic: topic_hashes[0].clone(), - signature: None, - key: None, - validated: true, - }; - - for _ in 0..5 { - gs.handle_received_message(message.clone(), &PeerId::random()); - } - - assert_eq!(counters.fast_counter, 5); - assert_eq!(counters.slow_counter, 1); -} - #[test] fn test_subscribe_to_invalid_topic() { let t1 = Topic::new("t1"); @@ -5208,7 +5061,7 @@ fn test_subscribe_and_graft_with_negative_score() { p2, connection_id, HandlerEvent::Message { - rpc: proto_to_message(&message), + rpc: proto_to_message(&message.into_protobuf()), invalid_messages: vec![], }, ); diff --git a/protocols/gossipsub/src/config.rs b/protocols/gossipsub/src/config.rs index 
a5d31071538..7e79912cc4a 100644 --- a/protocols/gossipsub/src/config.rs +++ b/protocols/gossipsub/src/config.rs @@ -22,8 +22,9 @@ use std::borrow::Cow; use std::sync::Arc; use std::time::Duration; +use crate::error::ConfigBuilderError; use crate::protocol::{ProtocolConfig, ProtocolId, FLOODSUB_PROTOCOL}; -use crate::types::{FastMessageId, Message, MessageId, PeerKind, RawMessage}; +use crate::types::{Message, MessageId, PeerKind}; use libp2p_identity::PeerId; use libp2p_swarm::StreamProtocol; @@ -74,11 +75,9 @@ pub struct Config { heartbeat_interval: Duration, fanout_ttl: Duration, check_explicit_peers_ticks: u64, - idle_timeout: Duration, duplicate_cache_time: Duration, validate_messages: bool, message_id_fn: Arc MessageId + Send + Sync + 'static>, - fast_message_id_fn: Option FastMessageId + Send + Sync + 'static>>, allow_self_origin: bool, do_px: bool, prune_peers: usize, @@ -183,13 +182,6 @@ impl Config { self.protocol.max_transmit_size } - /// The time a connection is maintained to a peer without being in the mesh and without - /// send/receiving a message from. Connections that idle beyond this timeout are disconnected. - /// Default is 120 seconds. - pub fn idle_timeout(&self) -> Duration { - self.idle_timeout - } - /// Duplicates are prevented by storing message id's of known messages in an LRU time cache. /// This settings sets the time period that messages are stored in the cache. Duplicates can be /// received if duplicate messages are sent at a time greater than this setting apart. The @@ -225,20 +217,6 @@ impl Config { (self.message_id_fn)(message) } - /// A user-defined optional function that computes fast ids from raw messages. This can be used - /// to avoid possibly expensive transformations from [`RawMessage`] to - /// [`Message`] for duplicates. 
Two semantically different messages must always - /// have different fast message ids, but it is allowed that two semantically identical messages - /// have different fast message ids as long as the message_id_fn produces the same id for them. - /// - /// The function takes a [`RawMessage`] as input and outputs a String to be - /// interpreted as the fast message id. Default is None. - pub fn fast_message_id(&self, message: &RawMessage) -> Option { - self.fast_message_id_fn - .as_ref() - .map(|fast_message_id_fn| fast_message_id_fn(message)) - } - /// By default, gossipsub will reject messages that are sent to us that have the same message /// source as we have specified locally. Enabling this, allows these messages and prevents /// penalizing the peer that sent us the message. Default is false. @@ -406,7 +384,6 @@ impl Default for ConfigBuilder { heartbeat_interval: Duration::from_secs(1), fanout_ttl: Duration::from_secs(60), check_explicit_peers_ticks: 300, - idle_timeout: Duration::from_secs(120), duplicate_cache_time: Duration::from_secs(60), validate_messages: false, message_id_fn: Arc::new(|message| { @@ -423,7 +400,6 @@ impl Default for ConfigBuilder { .push_str(&message.sequence_number.unwrap_or_default().to_string()); MessageId::from(source_string) }), - fast_message_id_fn: None, allow_self_origin: false, do_px: false, prune_peers: 0, // NOTE: Increasing this currently has little effect until Signed records are implemented. @@ -601,14 +577,6 @@ impl ConfigBuilder { self } - /// The time a connection is maintained to a peer without being in the mesh and without - /// send/receiving a message from. Connections that idle beyond this timeout are disconnected. - /// Default is 120 seconds. - pub fn idle_timeout(&mut self, idle_timeout: Duration) -> &mut Self { - self.config.idle_timeout = idle_timeout; - self - } - /// Duplicates are prevented by storing message id's of known messages in an LRU time cache. 
/// This settings sets the time period that messages are stored in the cache. Duplicates can be /// received if duplicate messages are sent at a time greater than this setting apart. The @@ -650,22 +618,6 @@ impl ConfigBuilder { self } - /// A user-defined optional function that computes fast ids from raw messages. This can be used - /// to avoid possibly expensive transformations from [`RawMessage`] to - /// [`Message`] for duplicates. Two semantically different messages must always - /// have different fast message ids, but it is allowed that two semantically identical messages - /// have different fast message ids as long as the message_id_fn produces the same id for them. - /// - /// The function takes a [`Message`] as input and outputs a String to be interpreted - /// as the fast message id. Default is None. - pub fn fast_message_id_fn(&mut self, fast_id_fn: F) -> &mut Self - where - F: Fn(&RawMessage) -> FastMessageId + Send + Sync + 'static, - { - self.config.fast_message_id_fn = Some(Arc::new(fast_id_fn)); - self - } - /// Enables Peer eXchange. This should be enabled in bootstrappers and other well /// connected/trusted nodes. The default is false. /// @@ -831,40 +783,34 @@ impl ConfigBuilder { } /// Constructs a [`Config`] from the given configuration and validates the settings. 
- pub fn build(&self) -> Result { + pub fn build(&self) -> Result { // check all constraints on config if self.config.protocol.max_transmit_size < 100 { - return Err("The maximum transmission size must be greater than 100 to permit basic control messages"); + return Err(ConfigBuilderError::MaxTransmissionSizeTooSmall); } if self.config.history_length < self.config.history_gossip { - return Err( - "The history_length must be greater than or equal to the history_gossip \ - length", - ); + return Err(ConfigBuilderError::HistoryLengthTooSmall); } if !(self.config.mesh_outbound_min <= self.config.mesh_n_low && self.config.mesh_n_low <= self.config.mesh_n && self.config.mesh_n <= self.config.mesh_n_high) { - return Err("The following inequality doesn't hold \ - mesh_outbound_min <= mesh_n_low <= mesh_n <= mesh_n_high"); + return Err(ConfigBuilderError::MeshParametersInvalid); } if self.config.mesh_outbound_min * 2 > self.config.mesh_n { - return Err( - "The following inequality doesn't hold mesh_outbound_min <= self.config.mesh_n / 2", - ); + return Err(ConfigBuilderError::MeshOutboundInvalid); } if self.config.unsubscribe_backoff.as_millis() == 0 { - return Err("The unsubscribe_backoff parameter should be positive."); + return Err(ConfigBuilderError::UnsubscribeBackoffIsZero); } if self.invalid_protocol { - return Err("The provided protocol is invalid, it must start with a forward-slash"); + return Err(ConfigBuilderError::InvalidProtocol); } Ok(self.config.clone()) @@ -886,7 +832,6 @@ impl std::fmt::Debug for Config { let _ = builder.field("heartbeat_initial_delay", &self.heartbeat_initial_delay); let _ = builder.field("heartbeat_interval", &self.heartbeat_interval); let _ = builder.field("fanout_ttl", &self.fanout_ttl); - let _ = builder.field("idle_timeout", &self.idle_timeout); let _ = builder.field("duplicate_cache_time", &self.duplicate_cache_time); let _ = builder.field("validate_messages", &self.validate_messages); let _ = builder.field("allow_self_origin", 
&self.allow_self_origin); diff --git a/protocols/gossipsub/src/error.rs b/protocols/gossipsub/src/error.rs index 1d8d6c052a8..c461abc0d17 100644 --- a/protocols/gossipsub/src/error.rs +++ b/protocols/gossipsub/src/error.rs @@ -20,7 +20,7 @@ //! Error types that can result from gossipsub. -use libp2p_core::identity::error::SigningError; +use libp2p_identity::SigningError; /// Error associated with publishing a gossipsub message. #[derive(Debug)] @@ -120,3 +120,37 @@ impl From for PublishError { PublishError::TransformFailed(error) } } + +/// Error associated with Config building. +#[derive(Debug)] +pub enum ConfigBuilderError { + /// Maximum transmission size is too small. + MaxTransmissionSizeTooSmall, + /// Histroy length less than history gossip length. + HistoryLengthTooSmall, + /// The ineauality doesn't hold mesh_outbound_min <= mesh_n_low <= mesh_n <= mesh_n_high + MeshParametersInvalid, + /// The inequality doesn't hold mesh_outbound_min <= self.config.mesh_n / 2 + MeshOutboundInvalid, + /// unsubscribe_backoff is zero + UnsubscribeBackoffIsZero, + /// Invalid protocol + InvalidProtocol, +} + +impl std::error::Error for ConfigBuilderError {} + +impl std::fmt::Display for ConfigBuilderError { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + match self { + Self::MaxTransmissionSizeTooSmall => { + write!(f, "Maximum transmission size is too small") + } + Self::HistoryLengthTooSmall => write!(f, "Histroy length less than history gossip length"), + Self::MeshParametersInvalid => write!(f, "The ineauality doesn't hold mesh_outbound_min <= mesh_n_low <= mesh_n <= mesh_n_high"), + Self::MeshOutboundInvalid => write!(f, "The inequality doesn't hold mesh_outbound_min <= self.config.mesh_n / 2"), + Self::UnsubscribeBackoffIsZero => write!(f, "unsubscribe_backoff is zero"), + Self::InvalidProtocol => write!(f, "Invalid protocol"), + } + } +} diff --git a/protocols/gossipsub/src/gossip_promises.rs b/protocols/gossipsub/src/gossip_promises.rs index 
cae3b169033..9538622c0dc 100644 --- a/protocols/gossipsub/src/gossip_promises.rs +++ b/protocols/gossipsub/src/gossip_promises.rs @@ -21,10 +21,9 @@ use crate::peer_score::RejectReason; use crate::MessageId; use crate::ValidationError; +use instant::Instant; use libp2p_identity::PeerId; -use log::debug; use std::collections::HashMap; -use wasm_timer::Instant; /// Tracks recently sent `IWANT` messages and checks if peers respond to them. #[derive(Default)] @@ -48,7 +47,7 @@ impl GossipPromises { // If a promise for this message id and peer already exists we don't update the expiry! self.promises .entry(message_id.clone()) - .or_insert_with(HashMap::new) + .or_default() .entry(peer) .or_insert(expires); } @@ -85,9 +84,10 @@ impl GossipPromises { if *expires < now { let count = result.entry(*peer_id).or_insert(0); *count += 1; - debug!( - "[Penalty] The peer {} broke the promise to deliver message {} in time!", - peer_id, msg + tracing::debug!( + peer=%peer_id, + message=%msg, + "[Penalty] The peer broke the promise to deliver message in time!" 
); false } else { diff --git a/protocols/gossipsub/src/handler.rs b/protocols/gossipsub/src/handler.rs index 9e93a53b279..e91f81776e7 100644 --- a/protocols/gossipsub/src/handler.rs +++ b/protocols/gossipsub/src/handler.rs @@ -20,27 +20,24 @@ use crate::protocol::{GossipsubCodec, ProtocolConfig}; use crate::rpc_proto::proto; -use crate::types::{PeerKind, RawMessage, Rpc}; +use crate::types::{PeerKind, RawMessage, Rpc, RpcOut}; use crate::ValidationError; use asynchronous_codec::Framed; use futures::future::Either; use futures::prelude::*; use futures::StreamExt; use instant::Instant; -use libp2p_core::upgrade::{DeniedUpgrade, NegotiationError, UpgradeError}; +use libp2p_core::upgrade::DeniedUpgrade; use libp2p_swarm::handler::{ - ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, ConnectionHandlerUpgrErr, - DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound, KeepAlive, - SubstreamProtocol, + ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, DialUpgradeError, + FullyNegotiatedInbound, FullyNegotiatedOutbound, StreamUpgradeError, SubstreamProtocol, }; -use libp2p_swarm::NegotiatedSubstream; +use libp2p_swarm::Stream; use smallvec::SmallVec; use std::{ pin::Pin, task::{Context, Poll}, - time::Duration, }; -use void::Void; /// The event emitted by the Handler. This informs the behaviour of various events created /// by the handler. @@ -61,10 +58,11 @@ pub enum HandlerEvent { } /// A message sent from the behaviour to the handler. +#[allow(clippy::large_enum_variant)] #[derive(Debug)] pub enum HandlerIn { /// A gossipsub message to send. - Message(proto::RPC), + Message(RpcOut), /// The peer has joined the mesh. JoinedMesh, /// The peer has left the mesh. @@ -119,9 +117,6 @@ pub struct EnabledHandler { last_io_activity: Instant, - /// The amount of time we keep an idle connection alive. - idle_timeout: Duration, - /// Keeps track of whether this connection is for a peer in the mesh. 
This is used to make /// decisions about the keep alive state for this connection. in_mesh: bool, @@ -143,9 +138,9 @@ pub enum DisabledHandler { /// State of the inbound substream, opened either by us or by the remote. enum InboundSubstreamState { /// Waiting for a message from the remote. The idle state for an inbound substream. - WaitingInput(Framed), + WaitingInput(Framed), /// The substream is being closed. - Closing(Framed), + Closing(Framed), /// An error occurred during processing. Poisoned, } @@ -153,18 +148,18 @@ enum InboundSubstreamState { /// State of the outbound substream, opened either by us or by the remote. enum OutboundSubstreamState { /// Waiting for the user to send a message. The idle state for an outbound substream. - WaitingOutput(Framed), + WaitingOutput(Framed), /// Waiting to send a message to the remote. - PendingSend(Framed, proto::RPC), + PendingSend(Framed, proto::RPC), /// Waiting to flush the substream so that the data arrives to the remote. - PendingFlush(Framed), + PendingFlush(Framed), /// An error occurred during processing. Poisoned, } impl Handler { /// Builds a new [`Handler`]. - pub fn new(protocol_config: ProtocolConfig, idle_timeout: Duration) -> Self { + pub fn new(protocol_config: ProtocolConfig) -> Self { Handler::Enabled(EnabledHandler { listen_protocol: protocol_config, inbound_substream: None, @@ -176,7 +171,6 @@ impl Handler { peer_kind: None, peer_kind_sent: false, last_io_activity: Instant::now(), - idle_timeout, in_mesh: false, }) } @@ -185,7 +179,7 @@ impl Handler { impl EnabledHandler { fn on_fully_negotiated_inbound( &mut self, - (substream, peer_kind): (Framed, PeerKind), + (substream, peer_kind): (Framed, PeerKind), ) { // update the known kind of peer if self.peer_kind.is_none() { @@ -193,7 +187,7 @@ impl EnabledHandler { } // new inbound substream. Replace the current one, if it exists. 
- log::trace!("New inbound substream request"); + tracing::trace!("New inbound substream request"); self.inbound_substream = Some(InboundSubstreamState::WaitingInput(substream)); } @@ -225,16 +219,15 @@ impl EnabledHandler { ConnectionHandlerEvent< ::OutboundProtocol, ::OutboundOpenInfo, - ::OutEvent, - ::Error, + ::ToBehaviour, >, > { if !self.peer_kind_sent { if let Some(peer_kind) = self.peer_kind.as_ref() { self.peer_kind_sent = true; - return Poll::Ready(ConnectionHandlerEvent::Custom(HandlerEvent::PeerKind( - peer_kind.clone(), - ))); + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + HandlerEvent::PeerKind(peer_kind.clone()), + )); } } @@ -249,70 +242,6 @@ impl EnabledHandler { }); } - loop { - match std::mem::replace( - &mut self.inbound_substream, - Some(InboundSubstreamState::Poisoned), - ) { - // inbound idle state - Some(InboundSubstreamState::WaitingInput(mut substream)) => { - match substream.poll_next_unpin(cx) { - Poll::Ready(Some(Ok(message))) => { - self.last_io_activity = Instant::now(); - self.inbound_substream = - Some(InboundSubstreamState::WaitingInput(substream)); - return Poll::Ready(ConnectionHandlerEvent::Custom(message)); - } - Poll::Ready(Some(Err(error))) => { - log::debug!("Failed to read from inbound stream: {error}"); - // Close this side of the stream. If the - // peer is still around, they will re-establish their - // outbound stream i.e. our inbound stream. 
- self.inbound_substream = - Some(InboundSubstreamState::Closing(substream)); - } - // peer closed the stream - Poll::Ready(None) => { - log::debug!("Inbound stream closed by remote"); - self.inbound_substream = - Some(InboundSubstreamState::Closing(substream)); - } - Poll::Pending => { - self.inbound_substream = - Some(InboundSubstreamState::WaitingInput(substream)); - break; - } - } - } - Some(InboundSubstreamState::Closing(mut substream)) => { - match Sink::poll_close(Pin::new(&mut substream), cx) { - Poll::Ready(res) => { - if let Err(e) = res { - // Don't close the connection but just drop the inbound substream. - // In case the remote has more to send, they will open up a new - // substream. - log::debug!("Inbound substream error while closing: {e}"); - } - self.inbound_substream = None; - break; - } - Poll::Pending => { - self.inbound_substream = - Some(InboundSubstreamState::Closing(substream)); - break; - } - } - } - None => { - self.inbound_substream = None; - break; - } - Some(InboundSubstreamState::Poisoned) => { - unreachable!("Error occurred during inbound stream processing") - } - } - } - // process outbound stream loop { match std::mem::replace( @@ -341,14 +270,16 @@ impl EnabledHandler { Some(OutboundSubstreamState::PendingFlush(substream)) } Err(e) => { - log::debug!("Failed to send message on outbound stream: {e}"); + tracing::debug!( + "Failed to send message on outbound stream: {e}" + ); self.outbound_substream = None; break; } } } Poll::Ready(Err(e)) => { - log::debug!("Failed to send message on outbound stream: {e}"); + tracing::debug!("Failed to send message on outbound stream: {e}"); self.outbound_substream = None; break; } @@ -367,7 +298,7 @@ impl EnabledHandler { Some(OutboundSubstreamState::WaitingOutput(substream)) } Poll::Ready(Err(e)) => { - log::debug!("Failed to flush outbound stream: {e}"); + tracing::debug!("Failed to flush outbound stream: {e}"); self.outbound_substream = None; break; } @@ -388,14 +319,77 @@ impl EnabledHandler { 
} } + loop { + match std::mem::replace( + &mut self.inbound_substream, + Some(InboundSubstreamState::Poisoned), + ) { + // inbound idle state + Some(InboundSubstreamState::WaitingInput(mut substream)) => { + match substream.poll_next_unpin(cx) { + Poll::Ready(Some(Ok(message))) => { + self.last_io_activity = Instant::now(); + self.inbound_substream = + Some(InboundSubstreamState::WaitingInput(substream)); + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(message)); + } + Poll::Ready(Some(Err(error))) => { + tracing::debug!("Failed to read from inbound stream: {error}"); + // Close this side of the stream. If the + // peer is still around, they will re-establish their + // outbound stream i.e. our inbound stream. + self.inbound_substream = + Some(InboundSubstreamState::Closing(substream)); + } + // peer closed the stream + Poll::Ready(None) => { + tracing::debug!("Inbound stream closed by remote"); + self.inbound_substream = + Some(InboundSubstreamState::Closing(substream)); + } + Poll::Pending => { + self.inbound_substream = + Some(InboundSubstreamState::WaitingInput(substream)); + break; + } + } + } + Some(InboundSubstreamState::Closing(mut substream)) => { + match Sink::poll_close(Pin::new(&mut substream), cx) { + Poll::Ready(res) => { + if let Err(e) = res { + // Don't close the connection but just drop the inbound substream. + // In case the remote has more to send, they will open up a new + // substream. 
+ tracing::debug!("Inbound substream error while closing: {e}"); + } + self.inbound_substream = None; + break; + } + Poll::Pending => { + self.inbound_substream = + Some(InboundSubstreamState::Closing(substream)); + break; + } + } + } + None => { + self.inbound_substream = None; + break; + } + Some(InboundSubstreamState::Poisoned) => { + unreachable!("Error occurred during inbound stream processing") + } + } + } + Poll::Pending } } impl ConnectionHandler for Handler { - type InEvent = HandlerIn; - type OutEvent = HandlerEvent; - type Error = Void; + type FromBehaviour = HandlerIn; + type ToBehaviour = HandlerEvent; type InboundOpenInfo = (); type InboundProtocol = either::Either; type OutboundOpenInfo = (); @@ -415,7 +409,7 @@ impl ConnectionHandler for Handler { fn on_behaviour_event(&mut self, message: HandlerIn) { match self { Handler::Enabled(handler) => match message { - HandlerIn::Message(m) => handler.send_queue.push(m), + HandlerIn::Message(m) => handler.send_queue.push(m.into_protobuf()), HandlerIn::JoinedMesh => { handler.in_mesh = true; } @@ -424,51 +418,30 @@ impl ConnectionHandler for Handler { } }, Handler::Disabled(_) => { - log::debug!("Handler is disabled. Dropping message {:?}", message); + tracing::debug!(?message, "Handler is disabled. 
Dropping message"); } } } - fn connection_keep_alive(&self) -> KeepAlive { - match self { - Handler::Enabled(handler) => { - if handler.in_mesh { - return KeepAlive::Yes; - } - - if let Some( - OutboundSubstreamState::PendingSend(_, _) - | OutboundSubstreamState::PendingFlush(_), - ) = handler.outbound_substream - { - return KeepAlive::Yes; - } - - KeepAlive::Until(handler.last_io_activity + handler.idle_timeout) - } - Handler::Disabled(_) => KeepAlive::No, - } + fn connection_keep_alive(&self) -> bool { + matches!(self, Handler::Enabled(h) if h.in_mesh) } + #[tracing::instrument(level = "trace", name = "ConnectionHandler::poll", skip(self, cx))] fn poll( &mut self, cx: &mut Context<'_>, ) -> Poll< - ConnectionHandlerEvent< - Self::OutboundProtocol, - Self::OutboundOpenInfo, - Self::OutEvent, - Self::Error, - >, + ConnectionHandlerEvent, > { match self { Handler::Enabled(handler) => handler.poll(cx), Handler::Disabled(DisabledHandler::ProtocolUnsupported { peer_kind_sent }) => { if !*peer_kind_sent { *peer_kind_sent = true; - return Poll::Ready(ConnectionHandlerEvent::Custom(HandlerEvent::PeerKind( - PeerKind::NotSupported, - ))); + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + HandlerEvent::PeerKind(PeerKind::NotSupported), + )); } Poll::Pending @@ -492,7 +465,7 @@ impl ConnectionHandler for Handler { handler.inbound_substream_attempts += 1; if handler.inbound_substream_attempts == MAX_SUBSTREAM_ATTEMPTS { - log::warn!( + tracing::warn!( "The maximum number of inbound substreams attempts has been exceeded" ); *self = Handler::Disabled(DisabledHandler::MaxSubstreamAttempts); @@ -506,7 +479,7 @@ impl ConnectionHandler for Handler { handler.outbound_substream_attempts += 1; if handler.outbound_substream_attempts == MAX_SUBSTREAM_ATTEMPTS { - log::warn!( + tracing::warn!( "The maximum number of outbound substream attempts has been exceeded" ); *self = Handler::Disabled(DisabledHandler::MaxSubstreamAttempts); @@ -526,24 +499,21 @@ impl ConnectionHandler 
for Handler { handler.on_fully_negotiated_outbound(fully_negotiated_outbound) } ConnectionEvent::DialUpgradeError(DialUpgradeError { - error: ConnectionHandlerUpgrErr::Timeout | ConnectionHandlerUpgrErr::Timer, + error: StreamUpgradeError::Timeout, .. }) => { - log::debug!("Dial upgrade error: Protocol negotiation timeout"); + tracing::debug!("Dial upgrade error: Protocol negotiation timeout"); } ConnectionEvent::DialUpgradeError(DialUpgradeError { - error: ConnectionHandlerUpgrErr::Upgrade(UpgradeError::Apply(e)), + error: StreamUpgradeError::Apply(e), .. }) => void::unreachable(e), ConnectionEvent::DialUpgradeError(DialUpgradeError { - error: - ConnectionHandlerUpgrErr::Upgrade(UpgradeError::Select( - NegotiationError::Failed, - )), + error: StreamUpgradeError::NegotiationFailed, .. }) => { // The protocol is not supported - log::debug!( + tracing::debug!( "The remote peer does not support gossipsub on this connection" ); *self = Handler::Disabled(DisabledHandler::ProtocolUnsupported { @@ -551,15 +521,12 @@ impl ConnectionHandler for Handler { }); } ConnectionEvent::DialUpgradeError(DialUpgradeError { - error: - ConnectionHandlerUpgrErr::Upgrade(UpgradeError::Select( - NegotiationError::ProtocolError(e), - )), + error: StreamUpgradeError::Io(e), .. }) => { - log::debug!("Protocol negotiation failed: {e}") + tracing::debug!("Protocol negotiation failed: {e}") } - ConnectionEvent::AddressChange(_) | ConnectionEvent::ListenUpgradeError(_) => {} + _ => {} } } Handler::Disabled(_) => {} diff --git a/protocols/gossipsub/src/lib.rs b/protocols/gossipsub/src/lib.rs index fea17b67f6f..15db5eba21d 100644 --- a/protocols/gossipsub/src/lib.rs +++ b/protocols/gossipsub/src/lib.rs @@ -48,7 +48,7 @@ //! encoded) by setting the `hash_topics` configuration parameter to true. //! //! - **Sequence Numbers** - A message on the gossipsub network is identified by the source -//! [`libp2p_core::PeerId`] and a nonce (sequence number) of the message. The sequence numbers in +//! 
[`PeerId`](libp2p_identity::PeerId) and a nonce (sequence number) of the message. The sequence numbers in //! this implementation are sent as raw bytes across the wire. They are 64-bit big-endian unsigned //! integers. When messages are signed, they are monotonically increasing integers starting from a //! random value and wrapping around u64::MAX. When messages are unsigned, they are chosen at random. @@ -83,58 +83,13 @@ //! //! The [`Behaviour`] struct implements the [`libp2p_swarm::NetworkBehaviour`] trait allowing it to //! act as the routing behaviour in a [`libp2p_swarm::Swarm`]. This struct requires an instance of -//! [`libp2p_core::PeerId`] and [`Config`]. +//! [`PeerId`](libp2p_identity::PeerId) and [`Config`]. //! //! [`Behaviour`]: struct.Behaviour.html //! ## Example //! -//! An example of initialising a gossipsub compatible swarm: -//! -//! ``` -//! use libp2p_gossipsub::Event; -//! use libp2p_core::{identity::Keypair,transport::{Transport, MemoryTransport}, Multiaddr}; -//! use libp2p_gossipsub::MessageAuthenticity; -//! let local_key = Keypair::generate_ed25519(); -//! let local_peer_id = libp2p_core::PeerId::from(local_key.public()); -//! -//! // Set up an encrypted TCP Transport over the Mplex -//! // This is test transport (memory). -//! let transport = MemoryTransport::default() -//! .upgrade(libp2p_core::upgrade::Version::V1) -//! .authenticate(libp2p_noise::NoiseAuthenticated::xx(&local_key).unwrap()) -//! .multiplex(libp2p_mplex::MplexConfig::new()) -//! .boxed(); -//! -//! // Create a Gossipsub topic -//! let topic = libp2p_gossipsub::IdentTopic::new("example"); -//! -//! // Set the message authenticity - How we expect to publish messages -//! // Here we expect the publisher to sign the message with their key. -//! let message_authenticity = MessageAuthenticity::Signed(local_key); -//! -//! // Create a Swarm to manage peers and events -//! let mut swarm = { -//! // set default parameters for gossipsub -//! 
let gossipsub_config = libp2p_gossipsub::Config::default(); -//! // build a gossipsub network behaviour -//! let mut gossipsub: libp2p_gossipsub::Behaviour = -//! libp2p_gossipsub::Behaviour::new(message_authenticity, gossipsub_config).unwrap(); -//! // subscribe to the topic -//! gossipsub.subscribe(&topic); -//! // create the swarm (use an executor in a real example) -//! libp2p_swarm::Swarm::without_executor( -//! transport, -//! gossipsub, -//! local_peer_id, -//! ) -//! }; -//! -//! // Listen on a memory transport. -//! let memory: Multiaddr = libp2p_core::multiaddr::Protocol::Memory(10).into(); -//! let addr = swarm.listen_on(memory).unwrap(); -//! println!("Listening on {:?}", addr); -//! ``` +//! For an example on how to use gossipsub, see the [chat-example](https://github.com/libp2p/rust-libp2p/tree/master/examples/chat). #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] @@ -157,7 +112,7 @@ mod types; pub use self::behaviour::{Behaviour, Event, MessageAuthenticity}; pub use self::config::{Config, ConfigBuilder, ValidationMode, Version}; -pub use self::error::{PublishError, SubscriptionError, ValidationError}; +pub use self::error::{ConfigBuilderError, PublishError, SubscriptionError, ValidationError}; pub use self::metrics::Config as MetricsConfig; pub use self::peer_score::{ score_parameter_decay, score_parameter_decay_with_base, PeerScoreParams, PeerScoreThresholds, @@ -170,7 +125,10 @@ pub use self::subscription_filter::{ }; pub use self::topic::{Hasher, Topic, TopicHash}; pub use self::transform::{DataTransform, IdentityTransform}; -pub use self::types::{FastMessageId, Message, MessageAcceptance, MessageId, RawMessage, Rpc}; +pub use self::types::{Message, MessageAcceptance, MessageId, RawMessage}; + +#[deprecated(note = "Will be removed from the public API.")] +pub type Rpc = self::types::Rpc; pub type IdentTopic = Topic; pub type Sha256Topic = Topic; diff --git a/protocols/gossipsub/src/mcache.rs b/protocols/gossipsub/src/mcache.rs index 
e85a5bf9c6a..ef4a93bc936 100644 --- a/protocols/gossipsub/src/mcache.rs +++ b/protocols/gossipsub/src/mcache.rs @@ -21,7 +21,6 @@ use crate::topic::TopicHash; use crate::types::{MessageId, RawMessage}; use libp2p_identity::PeerId; -use log::{debug, trace}; use std::collections::hash_map::Entry; use std::fmt::Debug; use std::{ @@ -87,7 +86,7 @@ impl MessageCache { entry.insert((msg, HashSet::default())); self.history[0].push(cache_entry); - trace!("Put message {:?} in mcache", message_id); + tracing::trace!(message=?message_id, "Put message in mcache"); true } } @@ -191,13 +190,13 @@ impl MessageCache { // If GossipsubConfig::validate_messages is true, the implementing // application has to ensure that Gossipsub::validate_message gets called for // each received message within the cache timeout time." - debug!( - "The message with id {} got removed from the cache without being validated.", - &entry.mid + tracing::debug!( + message=%&entry.mid, + "The message got removed from the cache without being validated." ); } } - trace!("Remove message from the cache: {}", &entry.mid); + tracing::trace!(message=%&entry.mid, "Remove message from the cache"); self.iwant_counts.remove(&entry.mid); } diff --git a/protocols/gossipsub/src/peer_score.rs b/protocols/gossipsub/src/peer_score.rs index 5d3f387a478..b1ea9bfae95 100644 --- a/protocols/gossipsub/src/peer_score.rs +++ b/protocols/gossipsub/src/peer_score.rs @@ -24,12 +24,11 @@ use crate::metrics::{Metrics, Penalty}; use crate::time_cache::TimeCache; use crate::{MessageId, TopicHash}; +use instant::Instant; use libp2p_identity::PeerId; -use log::{debug, trace, warn}; use std::collections::{hash_map, HashMap, HashSet}; use std::net::IpAddr; use std::time::Duration; -use wasm_timer::Instant; mod params; use crate::ValidationError; @@ -221,11 +220,9 @@ impl PeerScore { /// Returns the score for a peer, logging metrics. This is called from the heartbeat and /// increments the metric counts for penalties. 
pub(crate) fn metric_score(&self, peer_id: &PeerId, mut metrics: Option<&mut Metrics>) -> f64 { - let peer_stats = match self.peer_stats.get(peer_id) { - Some(v) => v, - None => return 0.0, + let Some(peer_stats) = self.peer_stats.get(peer_id) else { + return 0.0; }; - let mut score = 0.0; // topic scores @@ -274,13 +271,12 @@ impl PeerScore { if let Some(metrics) = metrics.as_mut() { metrics.register_score_penalty(Penalty::MessageDeficit); } - debug!( - "[Penalty] The peer {} has a mesh message deliveries deficit of {} in topic\ - {} and will get penalized by {}", - peer_id, - deficit, - topic, - p3 * topic_params.mesh_message_deliveries_weight + tracing::debug!( + peer=%peer_id, + %topic, + %deficit, + penalty=%topic_score, + "[Penalty] The peer has a mesh deliveries deficit and will be penalized" ); } @@ -326,10 +322,11 @@ impl PeerScore { if let Some(metrics) = metrics.as_mut() { metrics.register_score_penalty(Penalty::IPColocation); } - debug!( - "[Penalty] The peer {} gets penalized because of too many peers with the ip {}. \ - The surplus is {}. 
", - peer_id, ip, surplus + tracing::debug!( + peer=%peer_id, + surplus_ip=%ip, + surplus=%surplus, + "[Penalty] The peer gets penalized because of too many peers with the same ip" ); score += p6 * self.params.ip_colocation_factor_weight; } @@ -347,9 +344,10 @@ impl PeerScore { pub(crate) fn add_penalty(&mut self, peer_id: &PeerId, count: usize) { if let Some(peer_stats) = self.peer_stats.get_mut(peer_id) { - debug!( - "[Penalty] Behavioral penalty for peer {}, count = {}.", - peer_id, count + tracing::debug!( + peer=%peer_id, + %count, + "[Penalty] Behavioral penalty for peer" ); peer_stats.behaviour_penalty += count as f64; } @@ -445,7 +443,7 @@ impl PeerScore { /// Adds a new ip to a peer, if the peer is not yet known creates a new peer_stats entry for it pub(crate) fn add_ip(&mut self, peer_id: &PeerId, ip: IpAddr) { - trace!("Add ip for peer {}, ip: {}", peer_id, ip); + tracing::trace!(peer=%peer_id, %ip, "Add ip for peer"); let peer_stats = self.peer_stats.entry(*peer_id).or_default(); // Mark the peer as connected (currently the default is connected, but we don't want to @@ -454,10 +452,7 @@ impl PeerScore { // Insert the ip peer_stats.known_ips.insert(ip); - self.peer_ips - .entry(ip) - .or_insert_with(HashSet::new) - .insert(*peer_id); + self.peer_ips.entry(ip).or_default().insert(*peer_id); } /// Removes an ip from a peer @@ -465,20 +460,20 @@ impl PeerScore { if let Some(peer_stats) = self.peer_stats.get_mut(peer_id) { peer_stats.known_ips.remove(ip); if let Some(peer_ids) = self.peer_ips.get_mut(ip) { - trace!("Remove ip for peer {}, ip: {}", peer_id, ip); + tracing::trace!(peer=%peer_id, %ip, "Remove ip for peer"); peer_ids.remove(peer_id); } else { - trace!( - "No entry in peer_ips for ip {} which should get removed for peer {}", - ip, - peer_id + tracing::trace!( + peer=%peer_id, + %ip, + "No entry in peer_ips for ip which should get removed for peer" ); } } else { - trace!( - "No peer_stats for peer {} which should remove the ip {}", - peer_id, - ip 
+ tracing::trace!( + peer=%peer_id, + %ip, + "No peer_stats for peer which should remove the ip" ); } } @@ -570,9 +565,7 @@ impl PeerScore { topic_hash: &TopicHash, ) { // adds an empty record with the message id - self.deliveries - .entry(msg_id.clone()) - .or_insert_with(DeliveryRecord::default); + self.deliveries.entry(msg_id.clone()).or_default(); if let Some(callback) = self.message_delivery_time_callback { if self @@ -595,14 +588,16 @@ impl PeerScore { ) { self.mark_first_message_delivery(from, topic_hash); - let record = self - .deliveries - .entry(msg_id.clone()) - .or_insert_with(DeliveryRecord::default); + let record = self.deliveries.entry(msg_id.clone()).or_default(); // this should be the first delivery trace if record.status != DeliveryStatus::Unknown { - warn!("Unexpected delivery trace: Message from {} was first seen {}s ago and has a delivery status {:?}", from, record.first_seen.elapsed().as_secs(), record.status); + tracing::warn!( + peer=%from, + status=?record.status, + first_seen=?record.first_seen.elapsed().as_secs(), + "Unexpected delivery trace" + ); return; } @@ -619,9 +614,9 @@ impl PeerScore { /// Similar to `reject_message` except does not require the message id or reason for an invalid message. pub(crate) fn reject_invalid_message(&mut self, from: &PeerId, topic_hash: &TopicHash) { - debug!( - "[Penalty] Message from {} rejected because of ValidationError or SelfOrigin", - from + tracing::debug!( + peer=%from, + "[Penalty] Message from peer rejected because of ValidationError or SelfOrigin" ); self.mark_invalid_message_delivery(from, topic_hash); @@ -649,10 +644,7 @@ impl PeerScore { } let peers: Vec<_> = { - let mut record = self - .deliveries - .entry(msg_id.clone()) - .or_insert_with(DeliveryRecord::default); + let record = self.deliveries.entry(msg_id.clone()).or_default(); // Multiple peers can now reject the same message as we track which peers send us the // message. If we have already updated the status, return. 
@@ -686,10 +678,7 @@ impl PeerScore { msg_id: &MessageId, topic_hash: &TopicHash, ) { - let record = self - .deliveries - .entry(msg_id.clone()) - .or_insert_with(DeliveryRecord::default); + let record = self.deliveries.entry(msg_id.clone()).or_default(); if record.peers.get(from).is_some() { // we have already seen this duplicate! @@ -780,6 +769,11 @@ impl PeerScore { } } + /// Returns a scoring parameters for a topic if existent. + pub(crate) fn get_topic_params(&self, topic_hash: &TopicHash) -> Option<&TopicScoreParams> { + self.params.topics.get(topic_hash) + } + /// Increments the "invalid message deliveries" counter for all scored topics the message /// is published in. fn mark_invalid_message_delivery(&mut self, peer_id: &PeerId, topic_hash: &TopicHash) { @@ -787,10 +781,11 @@ impl PeerScore { if let Some(topic_stats) = peer_stats.stats_or_default_mut(topic_hash.clone(), &self.params) { - debug!( - "[Penalty] Peer {} delivered an invalid message in topic {} and gets penalized \ + tracing::debug!( + peer=%peer_id, + topic=%topic_hash, + "[Penalty] Peer delivered an invalid message in topic and gets penalized \ for it", - peer_id, topic_hash ); topic_stats.invalid_message_deliveries += 1f64; } diff --git a/protocols/gossipsub/src/protocol.rs b/protocols/gossipsub/src/protocol.rs index 48c1e4b2d15..e9600a4d8d8 100644 --- a/protocols/gossipsub/src/protocol.rs +++ b/protocols/gossipsub/src/protocol.rs @@ -34,10 +34,8 @@ use futures::prelude::*; use libp2p_core::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}; use libp2p_identity::{PeerId, PublicKey}; use libp2p_swarm::StreamProtocol; -use log::{debug, warn}; use quick_protobuf::Writer; use std::pin::Pin; -use unsigned_varint::codec; use void::Void; pub(crate) const SIGNING_PREFIX: &[u8] = b"libp2p-pubsub:"; @@ -109,12 +107,10 @@ where type Future = Pin> + Send>>; fn upgrade_inbound(self, socket: TSocket, protocol_id: Self::Info) -> Self::Future { - let mut length_codec = codec::UviBytes::default(); - 
length_codec.set_max_len(self.max_transmit_size); Box::pin(future::ok(( Framed::new( socket, - GossipsubCodec::new(length_codec, self.validation_mode), + GossipsubCodec::new(self.max_transmit_size, self.validation_mode), ), protocol_id.kind, ))) @@ -130,12 +126,10 @@ where type Future = Pin> + Send>>; fn upgrade_outbound(self, socket: TSocket, protocol_id: Self::Info) -> Self::Future { - let mut length_codec = codec::UviBytes::default(); - length_codec.set_max_len(self.max_transmit_size); Box::pin(future::ok(( Framed::new( socket, - GossipsubCodec::new(length_codec, self.validation_mode), + GossipsubCodec::new(self.max_transmit_size, self.validation_mode), ), protocol_id.kind, ))) @@ -152,8 +146,8 @@ pub struct GossipsubCodec { } impl GossipsubCodec { - pub fn new(length_codec: codec::UviBytes, validation_mode: ValidationMode) -> GossipsubCodec { - let codec = quick_protobuf_codec::Codec::new(length_codec.max_len()); + pub fn new(max_length: usize, validation_mode: ValidationMode) -> GossipsubCodec { + let codec = quick_protobuf_codec::Codec::new(max_length); GossipsubCodec { validation_mode, codec, @@ -166,28 +160,19 @@ impl GossipsubCodec { fn verify_signature(message: &proto::Message) -> bool { use quick_protobuf::MessageWrite; - let from = match message.from.as_ref() { - Some(v) => v, - None => { - debug!("Signature verification failed: No source id given"); - return false; - } + let Some(from) = message.from.as_ref() else { + tracing::debug!("Signature verification failed: No source id given"); + return false; }; - let source = match PeerId::from_bytes(from) { - Ok(v) => v, - Err(_) => { - debug!("Signature verification failed: Invalid Peer Id"); - return false; - } + let Ok(source) = PeerId::from_bytes(from) else { + tracing::debug!("Signature verification failed: Invalid Peer Id"); + return false; }; - let signature = match message.signature.as_ref() { - Some(v) => v, - None => { - debug!("Signature verification failed: No signature provided"); - return 
false; - } + let Some(signature) = message.signature.as_ref() else { + tracing::debug!("Signature verification failed: No signature provided"); + return false; }; // If there is a key value in the protobuf, use that key otherwise the key must be @@ -197,7 +182,7 @@ impl GossipsubCodec { _ => match PublicKey::try_decode_protobuf(&source.to_bytes()[2..]) { Ok(v) => v, Err(_) => { - warn!("Signature verification failed: No valid public key supplied"); + tracing::warn!("Signature verification failed: No valid public key supplied"); return false; } }, @@ -205,7 +190,9 @@ impl GossipsubCodec { // The key must match the peer_id if source != public_key.to_peer_id() { - warn!("Signature verification failed: Public key doesn't match source peer id"); + tracing::warn!( + "Signature verification failed: Public key doesn't match source peer id" + ); return false; } @@ -225,10 +212,10 @@ impl GossipsubCodec { } impl Encoder for GossipsubCodec { - type Item = proto::RPC; + type Item<'a> = proto::RPC; type Error = quick_protobuf_codec::Error; - fn encode(&mut self, item: Self::Item, dst: &mut BytesMut) -> Result<(), Self::Error> { + fn encode(&mut self, item: Self::Item<'_>, dst: &mut BytesMut) -> Result<(), Self::Error> { self.codec.encode(item, dst) } } @@ -238,11 +225,9 @@ impl Decoder for GossipsubCodec { type Error = quick_protobuf_codec::Error; fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { - let rpc = match self.codec.decode(src)? { - Some(p) => p, - None => return Ok(None), + let Some(rpc) = self.codec.decode(src)? else { + return Ok(None); }; - // Store valid messages. let mut messages = Vec::with_capacity(rpc.publish.len()); // Store any invalid messages. 
@@ -276,13 +261,17 @@ impl Decoder for GossipsubCodec { } ValidationMode::Anonymous => { if message.signature.is_some() { - warn!("Signature field was non-empty and anonymous validation mode is set"); + tracing::warn!( + "Signature field was non-empty and anonymous validation mode is set" + ); invalid_kind = Some(ValidationError::SignaturePresent); } else if message.seqno.is_some() { - warn!("Sequence number was non-empty and anonymous validation mode is set"); + tracing::warn!( + "Sequence number was non-empty and anonymous validation mode is set" + ); invalid_kind = Some(ValidationError::SequenceNumberPresent); } else if message.from.is_some() { - warn!("Message dropped. Message source was non-empty and anonymous validation mode is set"); + tracing::warn!("Message dropped. Message source was non-empty and anonymous validation mode is set"); invalid_kind = Some(ValidationError::MessageSourcePresent); } } @@ -308,7 +297,7 @@ impl Decoder for GossipsubCodec { // verify message signatures if required if verify_signature && !GossipsubCodec::verify_signature(&message) { - warn!("Invalid signature for received message"); + tracing::warn!("Invalid signature for received message"); // Build the invalid message (ignoring further validation of sequence number // and source) @@ -332,10 +321,10 @@ impl Decoder for GossipsubCodec { if seq_no.is_empty() { None } else if seq_no.len() != 8 { - debug!( - "Invalid sequence number length for received message. 
SeqNo: {:?} Size: {}", - seq_no, - seq_no.len() + tracing::debug!( + sequence_number=?seq_no, + sequence_length=%seq_no.len(), + "Invalid sequence number length for received message" ); let message = RawMessage { source: None, // don't bother inform the application @@ -355,7 +344,7 @@ impl Decoder for GossipsubCodec { } } else { // sequence number was not present - debug!("Sequence number not present but expected"); + tracing::debug!("Sequence number not present but expected"); let message = RawMessage { source: None, // don't bother inform the application data: message.data.unwrap_or_default(), @@ -381,7 +370,7 @@ impl Decoder for GossipsubCodec { Ok(peer_id) => Some(peer_id), // valid peer id Err(_) => { // invalid peer id, add to invalid messages - debug!("Message source has an invalid PeerId"); + tracing::debug!("Message source has an invalid PeerId"); let message = RawMessage { source: None, // don't bother inform the application data: message.data.unwrap_or_default(), @@ -515,7 +504,7 @@ mod tests { use crate::config::Config; use crate::{Behaviour, ConfigBuilder}; use crate::{IdentTopic as Topic, Version}; - use libp2p_core::identity::Keypair; + use libp2p_identity::Keypair; use quickcheck::*; #[derive(Clone, Debug)] @@ -588,12 +577,12 @@ mod tests { let message = message.0; let rpc = Rpc { - messages: vec![message], + messages: vec![message.clone()], subscriptions: vec![], control_msgs: vec![], }; - let mut codec = GossipsubCodec::new(codec::UviBytes::default(), ValidationMode::Strict); + let mut codec = GossipsubCodec::new(u32::MAX as usize, ValidationMode::Strict); let mut buf = BytesMut::new(); codec.encode(rpc.into_protobuf(), &mut buf).unwrap(); let decoded_rpc = codec.decode(&mut buf).unwrap().unwrap(); @@ -602,7 +591,7 @@ mod tests { HandlerEvent::Message { mut rpc, .. 
} => { rpc.messages[0].validated = true; - assert_eq!(rpc, rpc); + assert_eq!(vec![message], rpc.messages); } _ => panic!("Must decode a message"), } diff --git a/protocols/gossipsub/src/subscription_filter.rs b/protocols/gossipsub/src/subscription_filter.rs index 8ec633d0717..09c323d7904 100644 --- a/protocols/gossipsub/src/subscription_filter.rs +++ b/protocols/gossipsub/src/subscription_filter.rs @@ -20,7 +20,6 @@ use crate::types::Subscription; use crate::TopicHash; -use log::debug; use std::collections::{BTreeSet, HashMap, HashSet}; pub trait TopicSubscriptionFilter { @@ -66,7 +65,7 @@ pub trait TopicSubscriptionFilter { if self.allow_incoming_subscription(s) { true } else { - debug!("Filtered incoming subscription {:?}", s); + tracing::debug!(subscription=?s, "Filtered incoming subscription"); false } }); @@ -222,7 +221,7 @@ mod test { let t1 = TopicHash::from_raw("t1"); let t2 = TopicHash::from_raw("t2"); - let old = BTreeSet::from_iter(vec![t1.clone()].into_iter()); + let old = BTreeSet::from_iter(vec![t1.clone()]); let subscriptions = vec![ Subscription { action: Unsubscribe, diff --git a/protocols/gossipsub/src/time_cache.rs b/protocols/gossipsub/src/time_cache.rs index 46de2642fc4..89fd4afee09 100644 --- a/protocols/gossipsub/src/time_cache.rs +++ b/protocols/gossipsub/src/time_cache.rs @@ -21,13 +21,13 @@ //! This implements a time-based LRU cache for checking gossipsub message duplicates. 
use fnv::FnvHashMap; +use instant::Instant; use std::collections::hash_map::{ self, Entry::{Occupied, Vacant}, }; use std::collections::VecDeque; use std::time::Duration; -use wasm_timer::Instant; struct ExpiringElement { /// The element that expires @@ -93,10 +93,13 @@ impl<'a, K: 'a, V: 'a> Entry<'a, K, V> where K: Eq + std::hash::Hash + Clone, { - pub(crate) fn or_insert_with V>(self, default: F) -> &'a mut V { + pub(crate) fn or_default(self) -> &'a mut V + where + V: Default, + { match self { Entry::Occupied(entry) => entry.into_mut(), - Entry::Vacant(entry) => entry.insert(default()), + Entry::Vacant(entry) => entry.insert(V::default()), } } } @@ -150,10 +153,6 @@ where pub(crate) fn contains_key(&self, key: &Key) -> bool { self.map.contains_key(key) } - - pub(crate) fn get(&self, key: &Key) -> Option<&Value> { - self.map.get(key).map(|e| &e.element) - } } pub(crate) struct DuplicateCache(TimeCache); diff --git a/protocols/gossipsub/src/types.rs b/protocols/gossipsub/src/types.rs index 9c9cd3f97f1..d1b92ff0ba8 100644 --- a/protocols/gossipsub/src/types.rs +++ b/protocols/gossipsub/src/types.rs @@ -43,49 +43,33 @@ pub enum MessageAcceptance { Ignore, } -/// Macro for declaring message id types -macro_rules! 
declare_message_id_type { - ($name: ident, $name_string: expr) => { - #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] - #[derive(Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] - pub struct $name(pub Vec); - - impl $name { - pub fn new(value: &[u8]) -> Self { - Self(value.to_vec()) - } - } +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[derive(Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] +pub struct MessageId(pub Vec); - impl>> From for $name { - fn from(value: T) -> Self { - Self(value.into()) - } - } - - impl std::fmt::Display for $name { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}", hex_fmt::HexFmt(&self.0)) - } - } +impl MessageId { + pub fn new(value: &[u8]) -> Self { + Self(value.to_vec()) + } +} - impl std::fmt::Debug for $name { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}({})", $name_string, hex_fmt::HexFmt(&self.0)) - } - } - }; +impl>> From for MessageId { + fn from(value: T) -> Self { + Self(value.into()) + } } -// A type for gossipsub message ids. -declare_message_id_type!(MessageId, "MessageId"); +impl std::fmt::Display for MessageId { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", hex_fmt::HexFmt(&self.0)) + } +} -// A type for gossipsub fast messsage ids, not to confuse with "real" message ids. -// -// A fast-message-id is an optional message_id that can be used to filter duplicates quickly. On -// high intensive networks with lots of messages, where the message_id is based on the result of -// decompressed traffic, it is beneficial to specify a `fast-message-id` that can identify and -// filter duplicates quickly without performing the overhead of decompression. 
-declare_message_id_type!(FastMessageId, "FastMessageId"); +impl std::fmt::Debug for MessageId { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "MessageId({})", hex_fmt::HexFmt(&self.0)) + } +} #[derive(Debug, Clone, PartialEq, Eq)] pub(crate) struct PeerConnections { @@ -148,6 +132,19 @@ impl RawMessage { } } +impl From for proto::Message { + fn from(raw: RawMessage) -> Self { + proto::Message { + from: raw.source.map(|m| m.to_bytes()), + data: Some(raw.data), + seqno: raw.sequence_number.map(|s| s.to_be_bytes().to_vec()), + topic: TopicHash::into_string(raw.topic), + signature: raw.signature, + key: raw.key, + } + } +} + /// The message sent to the user after a [`RawMessage`] has been transformed by a /// [`crate::DataTransform`]. #[derive(Clone, PartialEq, Eq, Hash)] @@ -236,6 +233,130 @@ pub enum ControlAction { }, } +/// A Gossipsub RPC message sent. +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub enum RpcOut { + /// Publish a Gossipsub message on network. + Publish(RawMessage), + /// Forward a Gossipsub message to the network. + Forward(RawMessage), + /// Subscribe a topic. + Subscribe(TopicHash), + /// Unsubscribe a topic. + Unsubscribe(TopicHash), + /// List of Gossipsub control messages. + Control(ControlAction), +} + +impl RpcOut { + /// Converts the GossipsubRPC into its protobuf format. + // A convenience function to avoid explicitly specifying types. + pub fn into_protobuf(self) -> proto::RPC { + self.into() + } +} + +impl From for proto::RPC { + /// Converts the RPC into protobuf format. 
+ fn from(rpc: RpcOut) -> Self { + match rpc { + RpcOut::Publish(message) => proto::RPC { + subscriptions: Vec::new(), + publish: vec![message.into()], + control: None, + }, + RpcOut::Forward(message) => proto::RPC { + publish: vec![message.into()], + subscriptions: Vec::new(), + control: None, + }, + RpcOut::Subscribe(topic) => proto::RPC { + publish: Vec::new(), + subscriptions: vec![proto::SubOpts { + subscribe: Some(true), + topic_id: Some(topic.into_string()), + }], + control: None, + }, + RpcOut::Unsubscribe(topic) => proto::RPC { + publish: Vec::new(), + subscriptions: vec![proto::SubOpts { + subscribe: Some(false), + topic_id: Some(topic.into_string()), + }], + control: None, + }, + RpcOut::Control(ControlAction::IHave { + topic_hash, + message_ids, + }) => proto::RPC { + publish: Vec::new(), + subscriptions: Vec::new(), + control: Some(proto::ControlMessage { + ihave: vec![proto::ControlIHave { + topic_id: Some(topic_hash.into_string()), + message_ids: message_ids.into_iter().map(|msg_id| msg_id.0).collect(), + }], + iwant: vec![], + graft: vec![], + prune: vec![], + }), + }, + RpcOut::Control(ControlAction::IWant { message_ids }) => proto::RPC { + publish: Vec::new(), + subscriptions: Vec::new(), + control: Some(proto::ControlMessage { + ihave: vec![], + iwant: vec![proto::ControlIWant { + message_ids: message_ids.into_iter().map(|msg_id| msg_id.0).collect(), + }], + graft: vec![], + prune: vec![], + }), + }, + RpcOut::Control(ControlAction::Graft { topic_hash }) => proto::RPC { + publish: Vec::new(), + subscriptions: vec![], + control: Some(proto::ControlMessage { + ihave: vec![], + iwant: vec![], + graft: vec![proto::ControlGraft { + topic_id: Some(topic_hash.into_string()), + }], + prune: vec![], + }), + }, + RpcOut::Control(ControlAction::Prune { + topic_hash, + peers, + backoff, + }) => { + proto::RPC { + publish: Vec::new(), + subscriptions: vec![], + control: Some(proto::ControlMessage { + ihave: vec![], + iwant: vec![], + graft: vec![], + prune: 
vec![proto::ControlPrune { + topic_id: Some(topic_hash.into_string()), + peers: peers + .into_iter() + .map(|info| proto::PeerInfo { + peer_id: info.peer_id.map(|id| id.to_bytes()), + // TODO, see https://github.com/libp2p/specs/pull/217 + signed_peer_record: None, + }) + .collect(), + backoff, + }], + }), + } + } + } + } +} + /// An RPC received/sent. #[derive(Clone, PartialEq, Eq, Hash)] pub struct Rpc { @@ -330,7 +451,7 @@ impl From for proto::RPC { .into_iter() .map(|info| proto::PeerInfo { peer_id: info.peer_id.map(|id| id.to_bytes()), - /// TODO, see https://github.com/libp2p/specs/pull/217 + // TODO, see https://github.com/libp2p/specs/pull/217 signed_peer_record: None, }) .collect(), diff --git a/protocols/gossipsub/tests/smoke.rs b/protocols/gossipsub/tests/smoke.rs index e4e4c90d768..c8876428b4e 100644 --- a/protocols/gossipsub/tests/smoke.rs +++ b/protocols/gossipsub/tests/smoke.rs @@ -25,11 +25,10 @@ use libp2p_gossipsub as gossipsub; use libp2p_gossipsub::{MessageAuthenticity, ValidationMode}; use libp2p_swarm::Swarm; use libp2p_swarm_test::SwarmExt as _; -use log::debug; use quickcheck::{QuickCheck, TestResult}; use rand::{seq::SliceRandom, SeedableRng}; use std::{task::Poll, time::Duration}; - +use tracing_subscriber::EnvFilter; struct Graph { nodes: SelectAll>, } @@ -122,21 +121,23 @@ async fn build_node() -> Swarm { .unwrap(); gossipsub::Behaviour::new(MessageAuthenticity::Author(peer_id), config).unwrap() }); - swarm.listen().await; + swarm.listen().with_memory_addr_external().await; swarm } #[test] fn multi_hop_propagation() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); fn prop(num_nodes: u8, seed: u64) -> TestResult { if !(2..=50).contains(&num_nodes) { return TestResult::discard(); } - debug!("number nodes: {:?}, seed: {:?}", num_nodes, seed); + tracing::debug!(number_of_nodes=%num_nodes, seed=%seed); async_std::task::block_on(async move { let mut graph = 
Graph::new_connected(num_nodes as usize, seed).await; diff --git a/protocols/identify/CHANGELOG.md b/protocols/identify/CHANGELOG.md index 69f26e447b8..22c74b28cae 100644 --- a/protocols/identify/CHANGELOG.md +++ b/protocols/identify/CHANGELOG.md @@ -1,4 +1,34 @@ -## 0.43.0 - unreleased +## 0.44.1 + +- Ensure `Multiaddr` handled and returned by `Behaviour` are `/p2p` terminated. + See [PR 4596](https://github.com/libp2p/rust-libp2p/pull/4596). + +## 0.44.0 + +- Add `Info` to the `libp2p-identify::Event::Pushed` to report pushed info. + See [PR 4527](https://github.com/libp2p/rust-libp2p/pull/4527) +- Remove deprecated `initial_delay`. + Identify requests are always sent instantly after the connection has been established. + See [PR 4735](https://github.com/libp2p/rust-libp2p/pull/4735) +- Don't repeatedly report the same observed address as a `NewExternalAddrCandidate`. + Instead, only report each observed address once per connection. + This allows users to probabilistically deem an address as external if it gets reported as a candidate repeatedly. + See [PR 4721](https://github.com/libp2p/rust-libp2p/pull/4721). + +## 0.43.1 + +- Handle partial push messages. + Previously, push messages with partial information were ignored. + See [PR 4495]. + +[PR 4495]: https://github.com/libp2p/rust-libp2p/pull/4495 + +## 0.43.0 + +- Observed addresses (aka. external address candidates) of the local node, reported by a remote node via `libp2p-identify`, are no longer automatically considered confirmed external addresses, in other words they are no longer trusted by default. + Instead users need to confirm the reported observed address either manually, or by using `libp2p-autonat`. + In trusted environments users can simply extract observed addresses from a `libp2p-identify::Event::Received { info: libp2p_identify::Info { observed_addr }}` and confirm them via `Swarm::add_external_address`. + See [PR 3954] and [PR 4052]. - Remove deprecated `Identify` prefixed symbols. 
See [PR 3698]. - Raise MSRV to 1.65. @@ -7,9 +37,19 @@ - Reduce the initial delay before running the identify protocol to 0 and make the option deprecated. See [PR 3545]. +- Fix aborting the answering of an identify request in rare situations. + See [PR 3876]. + +- Actively push changes in listen protocols to remote. + See [PR 3980]. + +[PR 3545]: https://github.com/libp2p/rust-libp2p/pull/3545 [PR 3698]: https://github.com/libp2p/rust-libp2p/pull/3698 [PR 3715]: https://github.com/libp2p/rust-libp2p/pull/3715 -[PR 3545]: https://github.com/libp2p/rust-libp2p/pull/3545 +[PR 3876]: https://github.com/libp2p/rust-libp2p/pull/3876 +[PR 3954]: https://github.com/libp2p/rust-libp2p/pull/3954 +[PR 3980]: https://github.com/libp2p/rust-libp2p/pull/3980 +[PR 4052]: https://github.com/libp2p/rust-libp2p/pull/4052 ## 0.42.2 diff --git a/protocols/identify/Cargo.toml b/protocols/identify/Cargo.toml index 8f94ebc40b6..cd5a26d79ac 100644 --- a/protocols/identify/Cargo.toml +++ b/protocols/identify/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-identify" edition = "2021" rust-version = { workspace = true } description = "Nodes identifcation protocol for libp2p" -version = "0.43.0" +version = "0.44.1" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -11,29 +11,27 @@ keywords = ["peer-to-peer", "libp2p", "networking"] categories = ["network-programming", "asynchronous"] [dependencies] -asynchronous-codec = "0.6" -futures = "0.3.28" +asynchronous-codec = { workspace = true } +futures = "0.3.30" futures-timer = "3.0.2" +futures-bounded = { workspace = true } libp2p-core = { workspace = true } libp2p-swarm = { workspace = true } libp2p-identity = { workspace = true } -log = "0.4.1" -lru = "0.10.0" +lru = "0.12.1" quick-protobuf-codec = { workspace = true } quick-protobuf = "0.8" -smallvec = "1.6.1" +smallvec = "1.11.2" thiserror = "1.0" +tracing = "0.1.37" void = "1.0" -either = "1.8.0" +either = "1.9.0" [dev-dependencies] 
async-std = { version = "1.6.2", features = ["attributes"] } -env_logger = "0.10" -libp2p-mplex = { workspace = true } -libp2p-yamux = { workspace = true } -libp2p-noise = { workspace = true } -libp2p-swarm = { workspace = true, features = ["async-std"] } -libp2p-tcp = { workspace = true, features = ["async-io"] } +libp2p-swarm-test = { path = "../../swarm-test" } +libp2p-swarm = { workspace = true, features = ["macros"] } +tracing-subscriber = { version = "0.3", features = ["env-filter"] } # Passing arguments to the docsrs builder in order to properly document cfg's. # More information: https://docs.rs/about/builds#cross-compiling @@ -41,3 +39,6 @@ libp2p-tcp = { workspace = true, features = ["async-io"] } all-features = true rustdoc-args = ["--cfg", "docsrs"] rustc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true diff --git a/protocols/identify/src/behaviour.rs b/protocols/identify/src/behaviour.rs index 2f410fec0df..e4da898f44c 100644 --- a/protocols/identify/src/behaviour.rs +++ b/protocols/identify/src/behaviour.rs @@ -19,18 +19,18 @@ // DEALINGS IN THE SOFTWARE. 
use crate::handler::{self, Handler, InEvent}; -use crate::protocol::{Info, Protocol, UpgradeError}; +use crate::protocol::{Info, UpgradeError}; use libp2p_core::{multiaddr, ConnectedPoint, Endpoint, Multiaddr}; use libp2p_identity::PeerId; use libp2p_identity::PublicKey; use libp2p_swarm::behaviour::{ConnectionClosed, ConnectionEstablished, DialFailure, FromSwarm}; use libp2p_swarm::{ - AddressScore, ConnectionDenied, ConnectionHandlerUpgrErr, DialError, ExternalAddresses, - ListenAddresses, NetworkBehaviour, NotifyHandler, PollParameters, StreamProtocol, - THandlerInEvent, ToSwarm, + ConnectionDenied, DialError, ExternalAddresses, ListenAddresses, NetworkBehaviour, + NotifyHandler, StreamUpgradeError, THandlerInEvent, ToSwarm, }; use libp2p_swarm::{ConnectionId, THandler, THandlerOutEvent}; use lru::LruCache; +use std::collections::hash_map::Entry; use std::num::NonZeroUsize; use std::{ collections::{HashMap, HashSet, VecDeque}, @@ -44,16 +44,15 @@ use std::{ /// about them, and answers identify queries from other nodes. /// /// All external addresses of the local node supposedly observed by remotes -/// are reported via [`ToSwarm::ReportObservedAddr`] with a -/// [score](AddressScore) of `1`. +/// are reported via [`ToSwarm::NewExternalAddrCandidate`]. pub struct Behaviour { config: Config, /// For each peer we're connected to, the observed address to send back to it. connected: HashMap>, - /// Pending requests to be fulfilled, either `Handler` requests for `Behaviour` info - /// to address identification requests, or push requests to peers - /// with current information about the local peer. - requests: Vec, + + /// The address a remote observed for us. + our_observed_addresses: HashMap, + /// Pending events to be emitted when polled. events: VecDeque>, /// The addresses of all peers that we have discovered. 
@@ -63,15 +62,6 @@ pub struct Behaviour { external_addresses: ExternalAddresses, } -/// A `Behaviour` request to be fulfilled, either `Handler` requests for `Behaviour` info -/// to address identification requests, or push requests to peers -/// with current information about the local peer. -#[derive(Debug, PartialEq, Eq)] -struct Request { - peer_id: PeerId, - protocol: Protocol, -} - /// Configuration for the [`identify::Behaviour`](Behaviour). #[non_exhaustive] #[derive(Debug, Clone)] @@ -86,14 +76,6 @@ pub struct Config { /// /// Defaults to `rust-libp2p/`. pub agent_version: String, - /// The initial delay before the first identification request - /// is sent to a remote on a newly established connection. - /// - /// Defaults to 0ms. - #[deprecated(note = "The `initial_delay` is no longer necessary and will be - completely removed since a remote should be able to instantly - answer to an identify request")] - pub initial_delay: Duration, /// The interval at which identification requests are sent to /// the remote on established connections after the first request, /// i.e. the delay between identification requests. @@ -121,13 +103,11 @@ pub struct Config { impl Config { /// Creates a new configuration for the identify [`Behaviour`] that /// advertises the given protocol version and public key. - #[allow(deprecated)] pub fn new(protocol_version: String, local_public_key: PublicKey) -> Self { Self { protocol_version, agent_version: format!("rust-libp2p/{}", env!("CARGO_PKG_VERSION")), local_public_key, - initial_delay: Duration::from_millis(0), interval: Duration::from_secs(5 * 60), push_listen_addr_updates: false, cache_size: 100, @@ -140,17 +120,6 @@ impl Config { self } - /// Configures the initial delay before the first identification - /// request is sent on a newly established connection to a peer. 
- #[deprecated(note = "The `initial_delay` is no longer necessary and will be - completely removed since a remote should be able to instantly - answer to an identify request thus also this setter will be removed")] - #[allow(deprecated)] - pub fn with_initial_delay(mut self, d: Duration) -> Self { - self.initial_delay = d; - self - } - /// Configures the interval at which identification requests are /// sent to peers after the initial request. pub fn with_interval(mut self, d: Duration) -> Self { @@ -167,9 +136,6 @@ impl Config { } /// Configures the size of the LRU cache, caching addresses of discovered peers. - /// - /// The [`Swarm`](libp2p_swarm::Swarm) may extend the set of addresses of an outgoing connection attempt via - /// [`Behaviour::addresses_of_peer`]. pub fn with_cache_size(mut self, cache_size: usize) -> Self { self.cache_size = cache_size; self @@ -187,7 +153,7 @@ impl Behaviour { Self { config, connected: HashMap::new(), - requests: Vec::new(), + our_observed_addresses: Default::default(), events: VecDeque::new(), discovered_peers, listen_addresses: Default::default(), @@ -202,17 +168,15 @@ impl Behaviour { { for p in peers { if !self.connected.contains_key(&p) { - log::debug!("Not pushing to {p} because we are not connected"); + tracing::debug!(peer=%p, "Not pushing to peer because we are not connected"); continue; } - let request = Request { + self.events.push_back(ToSwarm::NotifyHandler { peer_id: p, - protocol: Protocol::Push, - }; - if !self.requests.contains(&request) { - self.requests.push(request); - } + handler: NotifyHandler::Any, + event: InEvent::Push, + }); } } @@ -242,13 +206,20 @@ impl Behaviour { } } } + + fn all_addresses(&self) -> HashSet { + self.listen_addresses + .iter() + .chain(self.external_addresses.iter()) + .cloned() + .collect() + } } impl NetworkBehaviour for Behaviour { type ConnectionHandler = Handler; - type OutEvent = Event; + type ToSwarm = Event; - #[allow(deprecated)] fn handle_established_inbound_connection( 
&mut self, _: ConnectionId, @@ -257,17 +228,16 @@ impl NetworkBehaviour for Behaviour { remote_addr: &Multiaddr, ) -> Result, ConnectionDenied> { Ok(Handler::new( - self.config.initial_delay, self.config.interval, peer, self.config.local_public_key.clone(), self.config.protocol_version.clone(), self.config.agent_version.clone(), remote_addr.clone(), + self.all_addresses(), )) } - #[allow(deprecated)] fn handle_established_outbound_connection( &mut self, _: ConnectionId, @@ -276,20 +246,20 @@ impl NetworkBehaviour for Behaviour { _: Endpoint, ) -> Result, ConnectionDenied> { Ok(Handler::new( - self.config.initial_delay, self.config.interval, peer, self.config.local_public_key.clone(), self.config.protocol_version.clone(), self.config.agent_version.clone(), addr.clone(), // TODO: This is weird? That is the public address we dialed, shouldn't need to tell the other party? + self.all_addresses(), )) } fn on_connection_handler_event( &mut self, peer_id: PeerId, - connection_id: ConnectionId, + id: ConnectionId, event: THandlerOutEvent, ) { match event { @@ -305,24 +275,36 @@ impl NetworkBehaviour for Behaviour { let observed = info.observed_addr.clone(); self.events .push_back(ToSwarm::GenerateEvent(Event::Received { peer_id, info })); - self.events.push_back(ToSwarm::ReportObservedAddr { - address: observed, - score: AddressScore::Finite(1), - }); + + match self.our_observed_addresses.entry(id) { + Entry::Vacant(not_yet_observed) => { + not_yet_observed.insert(observed.clone()); + self.events + .push_back(ToSwarm::NewExternalAddrCandidate(observed)); + } + Entry::Occupied(already_observed) if already_observed.get() == &observed => { + // No-op, we already observed this address. 
+ } + Entry::Occupied(mut already_observed) => { + tracing::info!( + old_address=%already_observed.get(), + new_address=%observed, + "Our observed address on connection {id} changed", + ); + + *already_observed.get_mut() = observed.clone(); + self.events + .push_back(ToSwarm::NewExternalAddrCandidate(observed)); + } + } } - handler::Event::Identification(peer) => { + handler::Event::Identification => { self.events - .push_back(ToSwarm::GenerateEvent(Event::Sent { peer_id: peer })); + .push_back(ToSwarm::GenerateEvent(Event::Sent { peer_id })); } - handler::Event::IdentificationPushed => { + handler::Event::IdentificationPushed(info) => { self.events - .push_back(ToSwarm::GenerateEvent(Event::Pushed { peer_id })); - } - handler::Event::Identify => { - self.requests.push(Request { - peer_id, - protocol: Protocol::Identify(connection_id), - }); + .push_back(ToSwarm::GenerateEvent(Event::Pushed { peer_id, info })); } handler::Event::IdentificationError(error) => { self.events @@ -331,53 +313,13 @@ impl NetworkBehaviour for Behaviour { } } - fn poll( - &mut self, - _cx: &mut Context<'_>, - params: &mut impl PollParameters, - ) -> Poll>> { + #[tracing::instrument(level = "trace", name = "NetworkBehaviour::poll", skip(self))] + fn poll(&mut self, _: &mut Context<'_>) -> Poll>> { if let Some(event) = self.events.pop_front() { return Poll::Ready(event); } - // Check for pending requests. 
- match self.requests.pop() { - Some(Request { - peer_id, - protocol: Protocol::Push, - }) => Poll::Ready(ToSwarm::NotifyHandler { - peer_id, - handler: NotifyHandler::Any, - event: InEvent { - listen_addrs: self - .listen_addresses - .iter() - .chain(self.external_addresses.iter()) - .cloned() - .collect(), - supported_protocols: supported_protocols(params), - protocol: Protocol::Push, - }, - }), - Some(Request { - peer_id, - protocol: Protocol::Identify(connection_id), - }) => Poll::Ready(ToSwarm::NotifyHandler { - peer_id, - handler: NotifyHandler::One(connection_id), - event: InEvent { - listen_addrs: self - .listen_addresses - .iter() - .chain(self.external_addresses.iter()) - .cloned() - .collect(), - supported_protocols: supported_protocols(params), - protocol: Protocol::Identify(connection_id), - }, - }), - None => Poll::Pending, - } + Poll::Pending } fn handle_pending_outbound_connection( @@ -395,9 +337,36 @@ impl NetworkBehaviour for Behaviour { Ok(self.discovered_peers.get(&peer)) } - fn on_swarm_event(&mut self, event: FromSwarm) { - self.listen_addresses.on_swarm_event(&event); - self.external_addresses.on_swarm_event(&event); + fn on_swarm_event(&mut self, event: FromSwarm) { + let listen_addr_changed = self.listen_addresses.on_swarm_event(&event); + let external_addr_changed = self.external_addresses.on_swarm_event(&event); + + if listen_addr_changed || external_addr_changed { + // notify all connected handlers about our changed addresses + let change_events = self + .connected + .iter() + .flat_map(|(peer, map)| map.keys().map(|id| (*peer, id))) + .map(|(peer_id, connection_id)| ToSwarm::NotifyHandler { + peer_id, + handler: NotifyHandler::One(*connection_id), + event: InEvent::AddressesChanged(self.all_addresses()), + }) + .collect::>(); + + self.events.extend(change_events) + } + + if listen_addr_changed && self.config.push_listen_addr_updates { + // trigger an identify push for all connected peers + let push_events = 
self.connected.keys().map(|peer| ToSwarm::NotifyHandler { + peer_id: *peer, + handler: NotifyHandler::Any, + event: InEvent::Push, + }); + + self.events.extend(push_events); + } match event { FromSwarm::ConnectionEstablished(connection_established) => { @@ -411,30 +380,13 @@ impl NetworkBehaviour for Behaviour { }) => { if remaining_established == 0 { self.connected.remove(&peer_id); - self.requests.retain(|request| { - request - != &Request { - peer_id, - protocol: Protocol::Push, - } - }); } else if let Some(addrs) = self.connected.get_mut(&peer_id) { addrs.remove(&connection_id); } + + self.our_observed_addresses.remove(&connection_id); } FromSwarm::DialFailure(DialFailure { peer_id, error, .. }) => { - if let Some(peer_id) = peer_id { - if !self.connected.contains_key(&peer_id) { - self.requests.retain(|request| { - request - != &Request { - peer_id, - protocol: Protocol::Push, - } - }); - } - } - if let Some(entry) = peer_id.and_then(|id| self.discovered_peers.get_mut(&id)) { if let DialError::Transport(errors) = error { for (addr, _error) in errors { @@ -443,26 +395,7 @@ impl NetworkBehaviour for Behaviour { } } } - FromSwarm::NewListenAddr(_) | FromSwarm::ExpiredListenAddr(_) => { - if self.config.push_listen_addr_updates { - for p in self.connected.keys() { - let request = Request { - peer_id: *p, - protocol: Protocol::Push, - }; - if !self.requests.contains(&request) { - self.requests.push(request); - } - } - } - } - FromSwarm::AddressChange(_) - | FromSwarm::ListenFailure(_) - | FromSwarm::NewListener(_) - | FromSwarm::ListenerError(_) - | FromSwarm::ListenerClosed(_) - | FromSwarm::NewExternalAddr(_) - | FromSwarm::ExpiredExternalAddr(_) => {} + _ => {} } } } @@ -489,33 +422,25 @@ pub enum Event { Pushed { /// The peer that the information has been sent to. peer_id: PeerId, + /// The full Info struct we pushed to the remote peer. Clients must + /// do some diff'ing to know what has changed since the last push. 
+ info: Info, }, /// Error while attempting to identify the remote. Error { /// The peer with whom the error originated. peer_id: PeerId, /// The error that occurred. - error: ConnectionHandlerUpgrErr, + error: StreamUpgradeError, }, } -fn supported_protocols(params: &impl PollParameters) -> Vec { - // The protocol names can be bytes, but the identify protocol except UTF-8 strings. - // There's not much we can do to solve this conflict except strip non-UTF-8 characters. - params - .supported_protocols() - .filter_map(|p| { - StreamProtocol::try_from_owned(String::from_utf8_lossy(&p).to_string()).ok() - }) - .collect() -} - /// If there is a given peer_id in the multiaddr, make sure it is the same as /// the given peer_id. If there is no peer_id for the peer in the mutiaddr, this returns true. fn multiaddr_matches_peer_id(addr: &Multiaddr, peer_id: &PeerId) -> bool { let last_component = addr.iter().last(); if let Some(multiaddr::Protocol::P2p(multi_addr_peer_id)) = last_component { - return multi_addr_peer_id == *peer_id.as_ref(); + return multi_addr_peer_id == *peer_id; } true } @@ -541,6 +466,7 @@ impl PeerCache { Some(cache) => cache, }; + let addresses = addresses.filter_map(|a| a.with_p2p(peer).ok()); cache.put(peer, HashSet::from_iter(addresses)); } @@ -561,279 +487,6 @@ impl PeerCache { #[cfg(test)] mod tests { use super::*; - use futures::pin_mut; - use futures::prelude::*; - use libp2p_core::{muxing::StreamMuxerBox, transport, upgrade, Transport}; - use libp2p_identity as identity; - use libp2p_identity::PeerId; - use libp2p_mplex::MplexConfig; - use libp2p_noise as noise; - use libp2p_swarm::{Swarm, SwarmBuilder, SwarmEvent}; - use libp2p_tcp as tcp; - use std::time::Duration; - - fn transport() -> (PublicKey, transport::Boxed<(PeerId, StreamMuxerBox)>) { - let id_keys = identity::Keypair::generate_ed25519(); - let pubkey = id_keys.public(); - let transport = tcp::async_io::Transport::new(tcp::Config::default().nodelay(true)) - 
.upgrade(upgrade::Version::V1) - .authenticate(noise::Config::new(&id_keys).unwrap()) - .multiplex(MplexConfig::new()) - .boxed(); - (pubkey, transport) - } - - #[test] - fn periodic_identify() { - let (mut swarm1, pubkey1) = { - let (pubkey, transport) = transport(); - let protocol = Behaviour::new( - Config::new("a".to_string(), pubkey.clone()).with_agent_version("b".to_string()), - ); - let swarm = - SwarmBuilder::with_async_std_executor(transport, protocol, pubkey.to_peer_id()) - .build(); - (swarm, pubkey) - }; - - let (mut swarm2, pubkey2) = { - let (pubkey, transport) = transport(); - let protocol = Behaviour::new( - Config::new("c".to_string(), pubkey.clone()).with_agent_version("d".to_string()), - ); - let swarm = - SwarmBuilder::with_async_std_executor(transport, protocol, pubkey.to_peer_id()) - .build(); - (swarm, pubkey) - }; - - swarm1 - .listen_on("/ip4/127.0.0.1/tcp/0".parse().unwrap()) - .unwrap(); - - let listen_addr = async_std::task::block_on(async { - loop { - let swarm1_fut = swarm1.select_next_some(); - pin_mut!(swarm1_fut); - if let SwarmEvent::NewListenAddr { address, .. } = swarm1_fut.await { - return address; - } - } - }); - swarm2.dial(listen_addr).unwrap(); - - // nb. Either swarm may receive the `Identified` event first, upon which - // it will permit the connection to be closed, as defined by - // `Handler::connection_keep_alive`. Hence the test succeeds if - // either `Identified` event arrives correctly. - async_std::task::block_on(async move { - loop { - let swarm1_fut = swarm1.select_next_some(); - pin_mut!(swarm1_fut); - let swarm2_fut = swarm2.select_next_some(); - pin_mut!(swarm2_fut); - - match future::select(swarm1_fut, swarm2_fut) - .await - .factor_second() - .0 - { - future::Either::Left(SwarmEvent::Behaviour(Event::Received { - info, .. 
- })) => { - assert_eq!(info.public_key, pubkey2); - assert_eq!(info.protocol_version, "c"); - assert_eq!(info.agent_version, "d"); - assert!(!info.protocols.is_empty()); - assert!(info.listen_addrs.is_empty()); - return; - } - future::Either::Right(SwarmEvent::Behaviour(Event::Received { - info, .. - })) => { - assert_eq!(info.public_key, pubkey1); - assert_eq!(info.protocol_version, "a"); - assert_eq!(info.agent_version, "b"); - assert!(!info.protocols.is_empty()); - assert_eq!(info.listen_addrs.len(), 1); - return; - } - _ => {} - } - } - }) - } - - #[test] - fn identify_push() { - let _ = env_logger::try_init(); - - let (mut swarm1, pubkey1) = { - let (pubkey, transport) = transport(); - let protocol = Behaviour::new(Config::new("a".to_string(), pubkey.clone())); - let swarm = - SwarmBuilder::with_async_std_executor(transport, protocol, pubkey.to_peer_id()) - .build(); - (swarm, pubkey) - }; - - let (mut swarm2, pubkey2) = { - let (pubkey, transport) = transport(); - let protocol = Behaviour::new( - Config::new("a".to_string(), pubkey.clone()).with_agent_version("b".to_string()), - ); - let swarm = - SwarmBuilder::with_async_std_executor(transport, protocol, pubkey.to_peer_id()) - .build(); - (swarm, pubkey) - }; - - Swarm::listen_on(&mut swarm1, "/ip4/127.0.0.1/tcp/0".parse().unwrap()).unwrap(); - - let listen_addr = async_std::task::block_on(async { - loop { - let swarm1_fut = swarm1.select_next_some(); - pin_mut!(swarm1_fut); - if let SwarmEvent::NewListenAddr { address, .. } = swarm1_fut.await { - return address; - } - } - }); - - swarm2.dial(listen_addr).unwrap(); - - async_std::task::block_on(async move { - loop { - let swarm1_fut = swarm1.select_next_some(); - let swarm2_fut = swarm2.select_next_some(); - - { - pin_mut!(swarm1_fut); - pin_mut!(swarm2_fut); - match future::select(swarm1_fut, swarm2_fut) - .await - .factor_second() - .0 - { - future::Either::Left(SwarmEvent::Behaviour(Event::Received { - info, - .. 
- })) => { - assert_eq!(info.public_key, pubkey2); - assert_eq!(info.protocol_version, "a"); - assert_eq!(info.agent_version, "b"); - assert!(!info.protocols.is_empty()); - assert!(info.listen_addrs.is_empty()); - return; - } - future::Either::Right(SwarmEvent::ConnectionEstablished { .. }) => { - // Once a connection is established, we can initiate an - // active push below. - } - _ => continue, - } - } - - swarm2 - .behaviour_mut() - .push(std::iter::once(pubkey1.to_peer_id())); - } - }) - } - - #[test] - fn discover_peer_after_disconnect() { - let _ = env_logger::try_init(); - - let mut swarm1 = { - let (pubkey, transport) = transport(); - #[allow(deprecated)] - let protocol = Behaviour::new( - Config::new("a".to_string(), pubkey.clone()) - // `swarm1` will set `KeepAlive::No` once it identified `swarm2` and thus - // closes the connection. At this point in time `swarm2` might not yet have - // identified `swarm1`. To give `swarm2` enough time, set an initial delay on - // `swarm1`. - .with_initial_delay(Duration::from_secs(10)), - ); - - SwarmBuilder::with_async_std_executor(transport, protocol, pubkey.to_peer_id()).build() - }; - - let mut swarm2 = { - let (pubkey, transport) = transport(); - let protocol = Behaviour::new( - Config::new("a".to_string(), pubkey.clone()).with_agent_version("b".to_string()), - ); - - SwarmBuilder::with_async_std_executor(transport, protocol, pubkey.to_peer_id()).build() - }; - - let swarm1_peer_id = *swarm1.local_peer_id(); - - let listener = swarm1 - .listen_on("/ip4/127.0.0.1/tcp/0".parse().unwrap()) - .unwrap(); - - let listen_addr = async_std::task::block_on(async { - loop { - match swarm1.select_next_some().await { - SwarmEvent::NewListenAddr { - address, - listener_id, - } if listener_id == listener => return address, - _ => {} - } - } - }); - - async_std::task::spawn(async move { - loop { - swarm1.next().await; - } - }); - - swarm2.dial(listen_addr).unwrap(); - - // Wait until we identified. 
- async_std::task::block_on(async { - loop { - if let SwarmEvent::Behaviour(Event::Received { .. }) = - swarm2.select_next_some().await - { - break; - } - } - }); - - swarm2.disconnect_peer_id(swarm1_peer_id).unwrap(); - - // Wait for connection to close. - async_std::task::block_on(async { - loop { - if let SwarmEvent::ConnectionClosed { peer_id, .. } = - swarm2.select_next_some().await - { - break peer_id; - } - } - }); - - // We should still be able to dial now! - swarm2.dial(swarm1_peer_id).unwrap(); - - let connected_peer = async_std::task::block_on(async { - loop { - if let SwarmEvent::ConnectionEstablished { peer_id, .. } = - swarm2.select_next_some().await - { - break peer_id; - } - } - }); - - assert_eq!(connected_peer, swarm1_peer_id); - } #[test] fn check_multiaddr_matches_peer_id() { @@ -846,8 +499,8 @@ mod tests { let addr_without_peer_id: Multiaddr = addr.clone(); let mut addr_with_other_peer_id = addr.clone(); - addr.push(multiaddr::Protocol::P2p(peer_id.into())); - addr_with_other_peer_id.push(multiaddr::Protocol::P2p(other_peer_id.into())); + addr.push(multiaddr::Protocol::P2p(peer_id)); + addr_with_other_peer_id.push(multiaddr::Protocol::P2p(other_peer_id)); assert!(multiaddr_matches_peer_id(&addr, &peer_id)); assert!(!multiaddr_matches_peer_id( diff --git a/protocols/identify/src/handler.rs b/protocols/identify/src/handler.rs index f95cef424b9..f9b77e0b63a 100644 --- a/protocols/identify/src/handler.rs +++ b/protocols/identify/src/handler.rs @@ -18,29 +18,31 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::protocol::{ - self, Identify, InboundPush, Info, OutboundPush, Protocol, Push, UpgradeError, -}; +use crate::protocol::{Info, PushInfo, UpgradeError}; +use crate::{protocol, PROTOCOL_NAME, PUSH_PROTOCOL_NAME}; use either::Either; -use futures::future::BoxFuture; use futures::prelude::*; -use futures::stream::FuturesUnordered; +use futures_bounded::Timeout; use futures_timer::Delay; -use libp2p_core::upgrade::SelectUpgrade; +use libp2p_core::upgrade::{ReadyUpgrade, SelectUpgrade}; use libp2p_core::Multiaddr; use libp2p_identity::PeerId; use libp2p_identity::PublicKey; use libp2p_swarm::handler::{ ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound, + ProtocolSupport, }; use libp2p_swarm::{ - ConnectionHandler, ConnectionHandlerEvent, ConnectionHandlerUpgrErr, KeepAlive, - NegotiatedSubstream, StreamProtocol, SubstreamProtocol, + ConnectionHandler, ConnectionHandlerEvent, StreamProtocol, StreamUpgradeError, + SubstreamProtocol, SupportedProtocols, }; -use log::warn; use smallvec::SmallVec; -use std::collections::VecDeque; -use std::{io, pin::Pin, task::Context, task::Poll, time::Duration}; +use std::collections::HashSet; +use std::{task::Context, task::Poll, time::Duration}; +use tracing::Level; + +const STREAM_TIMEOUT: Duration = Duration::from_secs(60); +const MAX_CONCURRENT_STREAMS_PER_CONNECTION: usize = 10; /// Protocol handler for sending and receiving identification requests. /// @@ -49,23 +51,22 @@ use std::{io, pin::Pin, task::Context, task::Poll, time::Duration}; /// permitting the underlying connection to be closed. pub struct Handler { remote_peer_id: PeerId, - inbound_identify_push: Option>>, /// Pending events to yield. events: SmallVec< - [ConnectionHandlerEvent>, (), Event, io::Error>; 4], + [ConnectionHandlerEvent< + Either, ReadyUpgrade>, + (), + Event, + >; 4], >, - /// Streams awaiting `BehaviourInfo` to then send identify requests. 
- reply_streams: VecDeque, - - /// Pending identification replies, awaiting being sent. - pending_replies: FuturesUnordered>>, + active_streams: futures_bounded::FuturesSet>, /// Future that fires when we need to identify the node again. trigger_next_identify: Delay, - /// Whether the handler should keep the connection alive. - keep_alive: KeepAlive, + /// Whether we have exchanged at least one periodic identify. + exchanged_one_periodic_identify: bool, /// The interval of `trigger_next_identify`, i.e. the recurrent delay. interval: Duration, @@ -83,19 +84,20 @@ pub struct Handler { /// Address observed by or for the remote. observed_addr: Multiaddr, + + /// Identify information about the remote peer. + remote_info: Option, + + local_supported_protocols: SupportedProtocols, + remote_supported_protocols: HashSet, + external_addresses: HashSet, } /// An event from `Behaviour` with the information requested by the `Handler`. #[derive(Debug)] -pub struct InEvent { - /// The addresses that the peer is listening on. - pub listen_addrs: Vec, - - /// The list of protocols supported by the peer, e.g. `/ipfs/ping/1.0.0`. - pub supported_protocols: Vec, - - /// The protocol w.r.t. the information requested. - pub protocol: Protocol, +pub enum InEvent { + AddressesChanged(HashSet), + Push, } /// Event produced by the `Handler`. @@ -105,39 +107,42 @@ pub enum Event { /// We obtained identification information from the remote. Identified(Info), /// We replied to an identification request from the remote. - Identification(PeerId), + Identification, /// We actively pushed our identification information to the remote. - IdentificationPushed, - /// We received a request for identification. - Identify, + IdentificationPushed(Info), /// Failed to identify the remote, or to reply to an identification request. - IdentificationError(ConnectionHandlerUpgrErr), + IdentificationError(StreamUpgradeError), } impl Handler { /// Creates a new `Handler`. 
pub fn new( - initial_delay: Duration, interval: Duration, remote_peer_id: PeerId, public_key: PublicKey, protocol_version: String, agent_version: String, observed_addr: Multiaddr, + external_addresses: HashSet, ) -> Self { Self { remote_peer_id, - inbound_identify_push: Default::default(), events: SmallVec::new(), - reply_streams: VecDeque::new(), - pending_replies: FuturesUnordered::new(), - trigger_next_identify: Delay::new(initial_delay), - keep_alive: KeepAlive::Yes, + active_streams: futures_bounded::FuturesSet::new( + STREAM_TIMEOUT, + MAX_CONCURRENT_STREAMS_PER_CONNECTION, + ), + trigger_next_identify: Delay::new(Duration::ZERO), + exchanged_one_periodic_identify: false, interval, public_key, protocol_version, agent_version, observed_addr, + local_supported_protocols: SupportedProtocols::default(), + remote_supported_protocols: HashSet::default(), + remote_info: Default::default(), + external_addresses, } } @@ -151,24 +156,29 @@ impl Handler { >, ) { match output { - future::Either::Left(substream) => { - self.events - .push(ConnectionHandlerEvent::Custom(Event::Identify)); - if !self.reply_streams.is_empty() { - warn!( - "New inbound identify request from {} while a previous one \ - is still pending. Queueing the new one.", - self.remote_peer_id, - ); + future::Either::Left(stream) => { + let info = self.build_info(); + + if self + .active_streams + .try_push( + protocol::send_identify(stream, info).map_ok(|_| Success::SentIdentify), + ) + .is_err() + { + tracing::warn!("Dropping inbound stream because we are at capacity"); + } else { + self.exchanged_one_periodic_identify = true; } - self.reply_streams.push_back(substream); } - future::Either::Right(fut) => { - if self.inbound_identify_push.replace(fut).is_some() { - warn!( - "New inbound identify push stream from {} while still \ - upgrading previous one. 
Replacing previous with new.", - self.remote_peer_id, + future::Either::Right(stream) => { + if self + .active_streams + .try_push(protocol::recv_push(stream).map_ok(Success::ReceivedIdentifyPush)) + .is_err() + { + tracing::warn!( + "Dropping inbound identify push stream because we are at capacity" ); } } @@ -185,144 +195,188 @@ impl Handler { >, ) { match output { - future::Either::Left(remote_info) => { - self.events - .push(ConnectionHandlerEvent::Custom(Event::Identified( - remote_info, - ))); - self.keep_alive = KeepAlive::No; + future::Either::Left(stream) => { + if self + .active_streams + .try_push(protocol::recv_identify(stream).map_ok(Success::ReceivedIdentify)) + .is_err() + { + tracing::warn!("Dropping outbound identify stream because we are at capacity"); + } + } + future::Either::Right(stream) => { + let info = self.build_info(); + + if self + .active_streams + .try_push( + protocol::send_identify(stream, info).map_ok(Success::SentIdentifyPush), + ) + .is_err() + { + tracing::warn!( + "Dropping outbound identify push stream because we are at capacity" + ); + } } - future::Either::Right(()) => self - .events - .push(ConnectionHandlerEvent::Custom(Event::IdentificationPushed)), } } - fn on_dial_upgrade_error( - &mut self, - DialUpgradeError { error: err, .. 
}: DialUpgradeError< - ::OutboundOpenInfo, - ::OutboundProtocol, - >, - ) { - use libp2p_core::upgrade::UpgradeError; - - let err = err.map_upgrade_err(|e| match e { - UpgradeError::Select(e) => UpgradeError::Select(e), - UpgradeError::Apply(Either::Left(ioe)) => UpgradeError::Apply(ioe), - UpgradeError::Apply(Either::Right(ioe)) => UpgradeError::Apply(ioe), - }); - self.events - .push(ConnectionHandlerEvent::Custom(Event::IdentificationError( - err, - ))); - self.keep_alive = KeepAlive::No; - self.trigger_next_identify.reset(self.interval); + fn build_info(&mut self) -> Info { + Info { + public_key: self.public_key.clone(), + protocol_version: self.protocol_version.clone(), + agent_version: self.agent_version.clone(), + listen_addrs: Vec::from_iter(self.external_addresses.iter().cloned()), + protocols: Vec::from_iter(self.local_supported_protocols.iter().cloned()), + observed_addr: self.observed_addr.clone(), + } + } + + fn handle_incoming_info(&mut self, info: &Info) { + self.remote_info.replace(info.clone()); + + self.update_supported_protocols_for_remote(info); + } + + fn update_supported_protocols_for_remote(&mut self, remote_info: &Info) { + let new_remote_protocols = HashSet::from_iter(remote_info.protocols.clone()); + + let remote_added_protocols = new_remote_protocols + .difference(&self.remote_supported_protocols) + .cloned() + .collect::>(); + let remote_removed_protocols = self + .remote_supported_protocols + .difference(&new_remote_protocols) + .cloned() + .collect::>(); + + if !remote_added_protocols.is_empty() { + self.events + .push(ConnectionHandlerEvent::ReportRemoteProtocols( + ProtocolSupport::Added(remote_added_protocols), + )); + } + + if !remote_removed_protocols.is_empty() { + self.events + .push(ConnectionHandlerEvent::ReportRemoteProtocols( + ProtocolSupport::Removed(remote_removed_protocols), + )); + } + + self.remote_supported_protocols = new_remote_protocols; + } + + fn local_protocols_to_string(&mut self) -> String { + 
self.local_supported_protocols + .iter() + .map(|p| p.to_string()) + .collect::>() + .join(", ") } } impl ConnectionHandler for Handler { - type InEvent = InEvent; - type OutEvent = Event; - type Error = io::Error; - type InboundProtocol = SelectUpgrade>; - type OutboundProtocol = Either>; + type FromBehaviour = InEvent; + type ToBehaviour = Event; + type InboundProtocol = + SelectUpgrade, ReadyUpgrade>; + type OutboundProtocol = Either, ReadyUpgrade>; type OutboundOpenInfo = (); type InboundOpenInfo = (); fn listen_protocol(&self) -> SubstreamProtocol { - SubstreamProtocol::new(SelectUpgrade::new(Identify, Push::inbound()), ()) + SubstreamProtocol::new( + SelectUpgrade::new( + ReadyUpgrade::new(PROTOCOL_NAME), + ReadyUpgrade::new(PUSH_PROTOCOL_NAME), + ), + (), + ) } - fn on_behaviour_event( - &mut self, - InEvent { - listen_addrs, - supported_protocols, - protocol, - }: Self::InEvent, - ) { - let info = Info { - public_key: self.public_key.clone(), - protocol_version: self.protocol_version.clone(), - agent_version: self.agent_version.clone(), - listen_addrs, - protocols: supported_protocols, - observed_addr: self.observed_addr.clone(), - }; - - match protocol { - Protocol::Push => { + fn on_behaviour_event(&mut self, event: Self::FromBehaviour) { + match event { + InEvent::AddressesChanged(addresses) => { + self.external_addresses = addresses; + } + InEvent::Push => { self.events .push(ConnectionHandlerEvent::OutboundSubstreamRequest { - protocol: SubstreamProtocol::new(Either::Right(Push::outbound(info)), ()), + protocol: SubstreamProtocol::new( + Either::Right(ReadyUpgrade::new(PUSH_PROTOCOL_NAME)), + (), + ), }); } - Protocol::Identify(_) => { - let substream = self - .reply_streams - .pop_front() - .expect("A BehaviourInfo reply should have a matching substream."); - let peer = self.remote_peer_id; - let fut = Box::pin(async move { - protocol::send(substream, info).await?; - Ok(peer) - }); - self.pending_replies.push(fut); - } } } - fn 
connection_keep_alive(&self) -> KeepAlive { - self.keep_alive - } - + #[tracing::instrument(level = "trace", name = "ConnectionHandler::poll", skip(self, cx))] fn poll( &mut self, cx: &mut Context<'_>, - ) -> Poll< - ConnectionHandlerEvent, - > { - if !self.events.is_empty() { - return Poll::Ready(self.events.remove(0)); + ) -> Poll> { + if let Some(event) = self.events.pop() { + return Poll::Ready(event); } // Poll the future that fires when we need to identify the node again. - match Future::poll(Pin::new(&mut self.trigger_next_identify), cx) { - Poll::Pending => {} - Poll::Ready(()) => { - self.trigger_next_identify.reset(self.interval); - let ev = ConnectionHandlerEvent::OutboundSubstreamRequest { - protocol: SubstreamProtocol::new(Either::Left(Identify), ()), - }; - return Poll::Ready(ev); - } + if let Poll::Ready(()) = self.trigger_next_identify.poll_unpin(cx) { + self.trigger_next_identify.reset(self.interval); + let event = ConnectionHandlerEvent::OutboundSubstreamRequest { + protocol: SubstreamProtocol::new( + Either::Left(ReadyUpgrade::new(PROTOCOL_NAME)), + (), + ), + }; + return Poll::Ready(event); } - if let Some(Poll::Ready(res)) = self - .inbound_identify_push - .as_mut() - .map(|f| f.poll_unpin(cx)) - { - self.inbound_identify_push.take(); + match self.active_streams.poll_unpin(cx) { + Poll::Ready(Ok(Ok(Success::ReceivedIdentify(remote_info)))) => { + self.handle_incoming_info(&remote_info); - if let Ok(info) = res { - return Poll::Ready(ConnectionHandlerEvent::Custom(Event::Identified(info))); + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(Event::Identified( + remote_info, + ))); } - } + Poll::Ready(Ok(Ok(Success::SentIdentifyPush(info)))) => { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + Event::IdentificationPushed(info), + )); + } + Poll::Ready(Ok(Ok(Success::SentIdentify))) => { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + Event::Identification, + )); + } + 
Poll::Ready(Ok(Ok(Success::ReceivedIdentifyPush(remote_push_info)))) => { + if let Some(mut info) = self.remote_info.clone() { + info.merge(remote_push_info); + self.handle_incoming_info(&info); - // Check for pending replies to send. - match self.pending_replies.poll_next_unpin(cx) { - Poll::Ready(Some(Ok(peer_id))) => Poll::Ready(ConnectionHandlerEvent::Custom( - Event::Identification(peer_id), - )), - Poll::Ready(Some(Err(err))) => Poll::Ready(ConnectionHandlerEvent::Custom( - Event::IdentificationError(ConnectionHandlerUpgrErr::Upgrade( - libp2p_core::upgrade::UpgradeError::Apply(err), - )), - )), - Poll::Ready(None) | Poll::Pending => Poll::Pending, + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + Event::Identified(info), + )); + }; + } + Poll::Ready(Ok(Err(e))) => { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + Event::IdentificationError(StreamUpgradeError::Apply(e)), + )); + } + Poll::Ready(Err(Timeout { .. })) => { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + Event::IdentificationError(StreamUpgradeError::Timeout), + )); + } + Poll::Pending => {} } + + Poll::Pending } fn on_connection_event( @@ -341,10 +395,48 @@ impl ConnectionHandler for Handler { ConnectionEvent::FullyNegotiatedOutbound(fully_negotiated_outbound) => { self.on_fully_negotiated_outbound(fully_negotiated_outbound) } - ConnectionEvent::DialUpgradeError(dial_upgrade_error) => { - self.on_dial_upgrade_error(dial_upgrade_error) + ConnectionEvent::DialUpgradeError(DialUpgradeError { error, .. 
}) => { + self.events.push(ConnectionHandlerEvent::NotifyBehaviour( + Event::IdentificationError( + error.map_upgrade_err(|e| void::unreachable(e.into_inner())), + ), + )); + self.trigger_next_identify.reset(self.interval); } - ConnectionEvent::AddressChange(_) | ConnectionEvent::ListenUpgradeError(_) => {} + ConnectionEvent::LocalProtocolsChange(change) => { + let before = tracing::enabled!(Level::DEBUG) + .then(|| self.local_protocols_to_string()) + .unwrap_or_default(); + let protocols_changed = self.local_supported_protocols.on_protocols_change(change); + let after = tracing::enabled!(Level::DEBUG) + .then(|| self.local_protocols_to_string()) + .unwrap_or_default(); + + if protocols_changed && self.exchanged_one_periodic_identify { + tracing::debug!( + peer=%self.remote_peer_id, + %before, + %after, + "Supported listen protocols changed, pushing to peer" + ); + + self.events + .push(ConnectionHandlerEvent::OutboundSubstreamRequest { + protocol: SubstreamProtocol::new( + Either::Right(ReadyUpgrade::new(PUSH_PROTOCOL_NAME)), + (), + ), + }); + } + } + _ => {} } } } + +enum Success { + SentIdentify, + ReceivedIdentify(Info), + SentIdentifyPush(Info), + ReceivedIdentifyPush(PushInfo), +} diff --git a/protocols/identify/src/protocol.rs b/protocols/identify/src/protocol.rs index a32c3725039..c6b22b00c0a 100644 --- a/protocols/identify/src/protocol.rs +++ b/protocols/identify/src/protocol.rs @@ -20,20 +20,14 @@ use crate::proto; use asynchronous_codec::{FramedRead, FramedWrite}; -use futures::{future::BoxFuture, prelude::*}; -use libp2p_core::{ - multiaddr, - upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}, - Multiaddr, -}; +use futures::prelude::*; +use libp2p_core::{multiaddr, Multiaddr}; use libp2p_identity as identity; use libp2p_identity::PublicKey; -use libp2p_swarm::{ConnectionId, StreamProtocol}; -use log::{debug, trace}; +use libp2p_swarm::StreamProtocol; use std::convert::TryFrom; -use std::{io, iter, pin::Pin}; +use std::io; use thiserror::Error; 
-use void::Void; const MAX_MESSAGE_SIZE_BYTES: usize = 4096; @@ -41,36 +35,7 @@ pub const PROTOCOL_NAME: StreamProtocol = StreamProtocol::new("/ipfs/id/1.0.0"); pub const PUSH_PROTOCOL_NAME: StreamProtocol = StreamProtocol::new("/ipfs/id/push/1.0.0"); -/// The type of the Substream protocol. -#[derive(Debug, PartialEq, Eq)] -pub enum Protocol { - Identify(ConnectionId), - Push, -} - -/// Substream upgrade protocol for `/ipfs/id/1.0.0`. -#[derive(Debug, Clone)] -pub struct Identify; - -/// Substream upgrade protocol for `/ipfs/id/push/1.0.0`. -#[derive(Debug, Clone)] -pub struct Push(T); -pub struct InboundPush(); -pub struct OutboundPush(Info); - -impl Push { - pub fn inbound() -> Self { - Push(InboundPush()) - } -} - -impl Push { - pub fn outbound(info: Info) -> Self { - Push(OutboundPush(info)) - } -} - -/// Information of a peer sent in protocol messages. +/// Identify information of a peer sent in protocol messages. #[derive(Debug, Clone)] pub struct Info { /// The public key of the local peer. 
@@ -89,95 +54,58 @@ pub struct Info { pub observed_addr: Multiaddr, } -impl UpgradeInfo for Identify { - type Info = StreamProtocol; - type InfoIter = iter::Once; - - fn protocol_info(&self) -> Self::InfoIter { - iter::once(PROTOCOL_NAME) - } -} - -impl InboundUpgrade for Identify { - type Output = C; - type Error = UpgradeError; - type Future = future::Ready>; - - fn upgrade_inbound(self, socket: C, _: Self::Info) -> Self::Future { - future::ok(socket) - } -} - -impl OutboundUpgrade for Identify -where - C: AsyncRead + AsyncWrite + Unpin + Send + 'static, -{ - type Output = Info; - type Error = UpgradeError; - type Future = Pin> + Send>>; - - fn upgrade_outbound(self, socket: C, _: Self::Info) -> Self::Future { - recv(socket).boxed() - } -} - -impl UpgradeInfo for Push { - type Info = StreamProtocol; - type InfoIter = iter::Once; - - fn protocol_info(&self) -> Self::InfoIter { - iter::once(PUSH_PROTOCOL_NAME) - } -} - -impl InboundUpgrade for Push -where - C: AsyncRead + AsyncWrite + Unpin + Send + 'static, -{ - type Output = BoxFuture<'static, Result>; - type Error = Void; - type Future = future::Ready>; - - fn upgrade_inbound(self, socket: C, _: Self::Info) -> Self::Future { - // Lazily upgrade stream, thus allowing upgrade to happen within identify's handler. 
- future::ok(recv(socket).boxed()) +impl Info { + pub fn merge(&mut self, info: PushInfo) { + if let Some(public_key) = info.public_key { + self.public_key = public_key; + } + if let Some(protocol_version) = info.protocol_version { + self.protocol_version = protocol_version; + } + if let Some(agent_version) = info.agent_version { + self.agent_version = agent_version; + } + if !info.listen_addrs.is_empty() { + self.listen_addrs = info.listen_addrs; + } + if !info.protocols.is_empty() { + self.protocols = info.protocols; + } + if let Some(observed_addr) = info.observed_addr { + self.observed_addr = observed_addr; + } } } -impl OutboundUpgrade for Push -where - C: AsyncWrite + Unpin + Send + 'static, -{ - type Output = (); - type Error = UpgradeError; - type Future = Pin> + Send>>; - - fn upgrade_outbound(self, socket: C, _: Self::Info) -> Self::Future { - send(socket, self.0 .0).boxed() - } +/// Identify push information of a peer sent in protocol messages. +/// Note that missing fields should be ignored, as peers may choose to send partial updates containing only the fields whose values have changed. 
+#[derive(Debug, Clone)] +pub struct PushInfo { + pub public_key: Option, + pub protocol_version: Option, + pub agent_version: Option, + pub listen_addrs: Vec, + pub protocols: Vec, + pub observed_addr: Option, } -pub(crate) async fn send(io: T, info: Info) -> Result<(), UpgradeError> +pub(crate) async fn send_identify(io: T, info: Info) -> Result where T: AsyncWrite + Unpin, { - trace!("Sending: {:?}", info); + tracing::trace!("Sending: {:?}", info); - let listen_addrs = info - .listen_addrs - .into_iter() - .map(|addr| addr.to_vec()) - .collect(); + let listen_addrs = info.listen_addrs.iter().map(|addr| addr.to_vec()).collect(); let pubkey_bytes = info.public_key.encode_protobuf(); let message = proto::Identify { - agentVersion: Some(info.agent_version), - protocolVersion: Some(info.protocol_version), + agentVersion: Some(info.agent_version.clone()), + protocolVersion: Some(info.protocol_version.clone()), publicKey: Some(pubkey_bytes), listenAddrs: listen_addrs, observedAddr: Some(info.observed_addr.to_vec()), - protocols: info.protocols.into_iter().map(|p| p.to_string()).collect(), + protocols: info.protocols.iter().map(|p| p.to_string()).collect(), }; let mut framed_io = FramedWrite::new( @@ -188,10 +116,32 @@ where framed_io.send(message).await?; framed_io.close().await?; - Ok(()) + Ok(info) } -async fn recv(socket: T) -> Result +pub(crate) async fn recv_push(socket: T) -> Result +where + T: AsyncRead + AsyncWrite + Unpin, +{ + let info = recv(socket).await?.try_into()?; + + tracing::trace!(?info, "Received"); + + Ok(info) +} + +pub(crate) async fn recv_identify(socket: T) -> Result +where + T: AsyncRead + AsyncWrite + Unpin, +{ + let info = recv(socket).await?.try_into()?; + + tracing::trace!(?info, "Received"); + + Ok(info) +} + +async fn recv(socket: T) -> Result where T: AsyncRead + AsyncWrite + Unpin, { @@ -206,61 +156,93 @@ where ) .next() .await - .ok_or(UpgradeError::StreamClosed)?? 
- .try_into()?; - - trace!("Received: {:?}", info); + .ok_or(UpgradeError::StreamClosed)??; Ok(info) } -impl TryFrom for Info { - type Error = UpgradeError; +fn parse_listen_addrs(listen_addrs: Vec>) -> Vec { + listen_addrs + .into_iter() + .filter_map(|bytes| match Multiaddr::try_from(bytes) { + Ok(a) => Some(a), + Err(e) => { + tracing::debug!("Unable to parse multiaddr: {e:?}"); + None + } + }) + .collect() +} - fn try_from(msg: proto::Identify) -> Result { - fn parse_multiaddr(bytes: Vec) -> Result { - Multiaddr::try_from(bytes) +fn parse_protocols(protocols: Vec) -> Vec { + protocols + .into_iter() + .filter_map(|p| match StreamProtocol::try_from_owned(p) { + Ok(p) => Some(p), + Err(e) => { + tracing::debug!("Received invalid protocol from peer: {e}"); + None + } + }) + .collect() +} + +fn parse_public_key(public_key: Option>) -> Option { + public_key.and_then(|key| match PublicKey::try_decode_protobuf(&key) { + Ok(k) => Some(k), + Err(e) => { + tracing::debug!("Unable to decode public key: {e:?}"); + None } + }) +} - let listen_addrs = { - let mut addrs = Vec::new(); - for addr in msg.listenAddrs.into_iter() { - match parse_multiaddr(addr) { - Ok(a) => addrs.push(a), - Err(e) => { - debug!("Unable to parse multiaddr: {e:?}"); - } - } - } - addrs - }; +fn parse_observed_addr(observed_addr: Option>) -> Option { + observed_addr.and_then(|bytes| match Multiaddr::try_from(bytes) { + Ok(a) => Some(a), + Err(e) => { + tracing::debug!("Unable to parse observed multiaddr: {e:?}"); + None + } + }) +} - let public_key = PublicKey::try_decode_protobuf(&msg.publicKey.unwrap_or_default())?; +impl TryFrom for Info { + type Error = UpgradeError; - let observed_addr = match parse_multiaddr(msg.observedAddr.unwrap_or_default()) { - Ok(a) => a, - Err(e) => { - debug!("Unable to parse multiaddr: {e:?}"); - Multiaddr::empty() + fn try_from(msg: proto::Identify) -> Result { + let public_key = { + match parse_public_key(msg.publicKey) { + Some(key) => key, + // This will always 
produce a DecodingError if the public key is missing. + None => PublicKey::try_decode_protobuf(Default::default())?, } }; + let info = Info { public_key, protocol_version: msg.protocolVersion.unwrap_or_default(), agent_version: msg.agentVersion.unwrap_or_default(), - listen_addrs, - protocols: msg - .protocols - .into_iter() - .filter_map(|p| match StreamProtocol::try_from_owned(p) { - Ok(p) => Some(p), - Err(e) => { - debug!("Received invalid protocol from peer: {e}"); - None - } - }) - .collect(), - observed_addr, + listen_addrs: parse_listen_addrs(msg.listenAddrs), + protocols: parse_protocols(msg.protocols), + observed_addr: parse_observed_addr(msg.observedAddr).unwrap_or(Multiaddr::empty()), + }; + + Ok(info) + } +} + +impl TryFrom for PushInfo { + type Error = UpgradeError; + + fn try_from(msg: proto::Identify) -> Result { + let info = PushInfo { + public_key: parse_public_key(msg.publicKey), + protocol_version: msg.protocolVersion, + agent_version: msg.agentVersion, + listen_addrs: parse_listen_addrs(msg.listenAddrs), + protocols: parse_protocols(msg.protocols), + observed_addr: parse_observed_addr(msg.observedAddr), }; Ok(info) @@ -284,97 +266,7 @@ pub enum UpgradeError { #[cfg(test)] mod tests { use super::*; - use futures::channel::oneshot; - use libp2p_core::{ - upgrade::{self, apply_inbound, apply_outbound}, - Transport, - }; use libp2p_identity as identity; - use libp2p_tcp as tcp; - - #[test] - fn correct_transfer() { - // We open a server and a client, send info from the server to the client, and check that - // they were successfully received. 
- let send_pubkey = identity::Keypair::generate_ed25519().public(); - let recv_pubkey = send_pubkey.clone(); - - let (tx, rx) = oneshot::channel(); - - let bg_task = async_std::task::spawn(async move { - let mut transport = tcp::async_io::Transport::default().boxed(); - - transport - .listen_on("/ip4/127.0.0.1/tcp/0".parse().unwrap()) - .unwrap(); - - let addr = transport - .next() - .await - .expect("some event") - .into_new_address() - .expect("listen address"); - tx.send(addr).unwrap(); - - let socket = transport - .next() - .await - .expect("some event") - .into_incoming() - .unwrap() - .0 - .await - .unwrap(); - - let sender = apply_inbound(socket, Identify).await.unwrap(); - - send( - sender, - Info { - public_key: send_pubkey, - protocol_version: "proto_version".to_owned(), - agent_version: "agent_version".to_owned(), - listen_addrs: vec![ - "/ip4/80.81.82.83/tcp/500".parse().unwrap(), - "/ip6/::1/udp/1000".parse().unwrap(), - ], - protocols: vec![ - StreamProtocol::new("/proto1"), - StreamProtocol::new("/proto2"), - ], - observed_addr: "/ip4/100.101.102.103/tcp/5000".parse().unwrap(), - }, - ) - .await - .unwrap(); - }); - - async_std::task::block_on(async move { - let mut transport = tcp::async_io::Transport::default(); - - let socket = transport.dial(rx.await.unwrap()).unwrap().await.unwrap(); - let info = apply_outbound(socket, Identify, upgrade::Version::V1) - .await - .unwrap(); - assert_eq!( - info.observed_addr, - "/ip4/100.101.102.103/tcp/5000".parse().unwrap() - ); - assert_eq!(info.public_key, recv_pubkey); - assert_eq!(info.protocol_version, "proto_version"); - assert_eq!(info.agent_version, "agent_version"); - assert_eq!( - info.listen_addrs, - &[ - "/ip4/80.81.82.83/tcp/500".parse().unwrap(), - "/ip6/::1/udp/1000".parse().unwrap() - ] - ); - assert_eq!(info.protocols, &["/proto1", "/proto2"]); - - bg_task.await; - }); - } #[test] fn skip_invalid_multiaddr() { @@ -400,7 +292,7 @@ mod tests { ), }; - let info = Info::try_from(payload).expect("not 
to fail"); + let info = PushInfo::try_from(payload).expect("not to fail"); assert_eq!(info.listen_addrs, vec![valid_multiaddr]) } diff --git a/protocols/identify/src/structs.rs b/protocols/identify/src/structs.rs deleted file mode 100644 index 3be9b6f94ad..00000000000 --- a/protocols/identify/src/structs.rs +++ /dev/null @@ -1,67 +0,0 @@ -// Automatically generated rust module for 'structs.proto' file - -#![allow(non_snake_case)] -#![allow(non_upper_case_globals)] -#![allow(non_camel_case_types)] -#![allow(unused_imports)] -#![allow(unknown_lints)] -#![allow(clippy::all)] -#![cfg_attr(rustfmt, rustfmt_skip)] - - -use quick_protobuf::{MessageInfo, MessageRead, MessageWrite, BytesReader, Writer, WriterBackend, Result}; -use quick_protobuf::sizeofs::*; -use super::*; - -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Debug, Default, PartialEq, Clone)] -pub struct Identify { - pub protocolVersion: Option, - pub agentVersion: Option, - pub publicKey: Option>, - pub listenAddrs: Vec>, - pub observedAddr: Option>, - pub protocols: Vec, -} - -impl<'a> MessageRead<'a> for Identify { - fn from_reader(r: &mut BytesReader, bytes: &'a [u8]) -> Result { - let mut msg = Self::default(); - while !r.is_eof() { - match r.next_tag(bytes) { - Ok(42) => msg.protocolVersion = Some(r.read_string(bytes)?.to_owned()), - Ok(50) => msg.agentVersion = Some(r.read_string(bytes)?.to_owned()), - Ok(10) => msg.publicKey = Some(r.read_bytes(bytes)?.to_owned()), - Ok(18) => msg.listenAddrs.push(r.read_bytes(bytes)?.to_owned()), - Ok(34) => msg.observedAddr = Some(r.read_bytes(bytes)?.to_owned()), - Ok(26) => msg.protocols.push(r.read_string(bytes)?.to_owned()), - Ok(t) => { r.read_unknown(bytes, t)?; } - Err(e) => return Err(e), - } - } - Ok(msg) - } -} - -impl MessageWrite for Identify { - fn get_size(&self) -> usize { - 0 - + self.protocolVersion.as_ref().map_or(0, |m| 1 + sizeof_len((m).len())) - + self.agentVersion.as_ref().map_or(0, |m| 1 + sizeof_len((m).len())) - + 
self.publicKey.as_ref().map_or(0, |m| 1 + sizeof_len((m).len())) - + self.listenAddrs.iter().map(|s| 1 + sizeof_len((s).len())).sum::() - + self.observedAddr.as_ref().map_or(0, |m| 1 + sizeof_len((m).len())) - + self.protocols.iter().map(|s| 1 + sizeof_len((s).len())).sum::() - } - - fn write_message(&self, w: &mut Writer) -> Result<()> { - if let Some(ref s) = self.protocolVersion { w.write_with_tag(42, |w| w.write_string(&**s))?; } - if let Some(ref s) = self.agentVersion { w.write_with_tag(50, |w| w.write_string(&**s))?; } - if let Some(ref s) = self.publicKey { w.write_with_tag(10, |w| w.write_bytes(&**s))?; } - for s in &self.listenAddrs { w.write_with_tag(18, |w| w.write_bytes(&**s))?; } - if let Some(ref s) = self.observedAddr { w.write_with_tag(34, |w| w.write_bytes(&**s))?; } - for s in &self.protocols { w.write_with_tag(26, |w| w.write_string(&**s))?; } - Ok(()) - } -} - diff --git a/protocols/identify/tests/smoke.rs b/protocols/identify/tests/smoke.rs new file mode 100644 index 00000000000..5cccc09d863 --- /dev/null +++ b/protocols/identify/tests/smoke.rs @@ -0,0 +1,302 @@ +use futures::StreamExt; +use libp2p_core::multiaddr::Protocol; +use libp2p_identify as identify; +use libp2p_swarm::{Swarm, SwarmEvent}; +use libp2p_swarm_test::SwarmExt; +use std::collections::HashSet; +use std::iter; +use std::time::{Duration, Instant}; +use tracing_subscriber::EnvFilter; + +#[async_std::test] +async fn periodic_identify() { + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); + + let mut swarm1 = Swarm::new_ephemeral(|identity| { + identify::Behaviour::new( + identify::Config::new("a".to_string(), identity.public()) + .with_agent_version("b".to_string()), + ) + }); + let swarm1_peer_id = *swarm1.local_peer_id(); + + let mut swarm2 = Swarm::new_ephemeral(|identity| { + identify::Behaviour::new( + identify::Config::new("c".to_string(), identity.public()) + .with_agent_version("d".to_string()), + ) + }); + let 
swarm2_peer_id = *swarm2.local_peer_id(); + + let (swarm1_memory_listen, swarm1_tcp_listen_addr) = + swarm1.listen().with_memory_addr_external().await; + let (swarm2_memory_listen, swarm2_tcp_listen_addr) = swarm2.listen().await; + swarm2.connect(&mut swarm1).await; + + use identify::Event::Received; + use identify::Event::Sent; + + match libp2p_swarm_test::drive(&mut swarm1, &mut swarm2).await { + ( + [Received { info: s1_info, .. }, Sent { .. }], + [Received { info: s2_info, .. }, Sent { .. }], + ) + | ( + [Sent { .. }, Received { info: s1_info, .. }], + [Sent { .. }, Received { info: s2_info, .. }], + ) + | ( + [Received { info: s1_info, .. }, Sent { .. }], + [Sent { .. }, Received { info: s2_info, .. }], + ) + | ( + [Sent { .. }, Received { info: s1_info, .. }], + [Received { info: s2_info, .. }, Sent { .. }], + ) => { + assert_eq!(s1_info.public_key.to_peer_id(), swarm2_peer_id); + assert_eq!(s1_info.protocol_version, "c"); + assert_eq!(s1_info.agent_version, "d"); + assert!(!s1_info.protocols.is_empty()); + assert_eq!( + s1_info.observed_addr, + swarm1_memory_listen + .clone() + .with(Protocol::P2p(swarm1_peer_id)) + ); + assert!(s1_info.listen_addrs.contains(&swarm2_tcp_listen_addr)); + assert!(s1_info.listen_addrs.contains(&swarm2_memory_listen)); + + assert_eq!(s2_info.public_key.to_peer_id(), swarm1_peer_id); + assert_eq!(s2_info.protocol_version, "a"); + assert_eq!(s2_info.agent_version, "b"); + assert!(!s2_info.protocols.is_empty()); + + // Cannot assert observed address of dialer because memory transport uses ephemeral, outgoing ports. 
+ // assert_eq!( + // s2_info.observed_addr, + // swarm2_memory_listen.with(Protocol::P2p(swarm2_peer_id.into())) + // ); + assert!(s2_info.listen_addrs.contains(&swarm1_tcp_listen_addr)); + assert!(s2_info.listen_addrs.contains(&swarm1_memory_listen)); + } + other => panic!("Unexpected events: {other:?}"), + } +} +#[async_std::test] +async fn only_emits_address_candidate_once_per_connection() { + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); + + let mut swarm1 = Swarm::new_ephemeral(|identity| { + identify::Behaviour::new( + identify::Config::new("a".to_string(), identity.public()) + .with_agent_version("b".to_string()) + .with_interval(Duration::from_secs(1)), + ) + }); + let mut swarm2 = Swarm::new_ephemeral(|identity| { + identify::Behaviour::new( + identify::Config::new("c".to_string(), identity.public()) + .with_agent_version("d".to_string()), + ) + }); + + swarm2.listen().with_memory_addr_external().await; + swarm1.connect(&mut swarm2).await; + + async_std::task::spawn(swarm2.loop_on_next()); + + let swarm_events = futures::stream::poll_fn(|cx| swarm1.poll_next_unpin(cx)) + .take(5) + .collect::>() + .await; + + let infos = swarm_events + .iter() + .filter_map(|e| match e { + SwarmEvent::Behaviour(identify::Event::Received { info, .. 
}) => Some(info.clone()), + _ => None, + }) + .collect::>(); + + assert!( + infos.len() > 1, + "should exchange identify payload more than once" + ); + + let varying_observed_addresses = infos + .iter() + .map(|i| i.observed_addr.clone()) + .collect::>(); + assert_eq!( + varying_observed_addresses.len(), + 1, + "Observed address should not vary on persistent connection" + ); + + let external_address_candidates = swarm_events + .iter() + .filter_map(|e| match e { + SwarmEvent::NewExternalAddrCandidate { address } => Some(address.clone()), + _ => None, + }) + .collect::>(); + + assert_eq!( + external_address_candidates.len(), + 1, + "To only have one external address candidate" + ); + assert_eq!( + &external_address_candidates[0], + varying_observed_addresses.iter().next().unwrap() + ); +} + +#[async_std::test] +async fn identify_push() { + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); + + let mut swarm1 = Swarm::new_ephemeral(|identity| { + identify::Behaviour::new(identify::Config::new("a".to_string(), identity.public())) + }); + let mut swarm2 = Swarm::new_ephemeral(|identity| { + identify::Behaviour::new( + identify::Config::new("a".to_string(), identity.public()) + .with_agent_version("b".to_string()), + ) + }); + + swarm1.listen().with_memory_addr_external().await; + swarm2.connect(&mut swarm1).await; + + // First, let the periodic identify do its thing. + let ([e1, e2], [e3, e4]) = libp2p_swarm_test::drive(&mut swarm1, &mut swarm2).await; + + { + use identify::Event::{Received, Sent}; + + // These can be received in any order, hence assert them here. + assert!(matches!(e1, Received { .. } | Sent { .. })); + assert!(matches!(e2, Received { .. } | Sent { .. })); + assert!(matches!(e3, Received { .. } | Sent { .. })); + assert!(matches!(e4, Received { .. } | Sent { .. })); + } + + // Second, actively push. 
+ swarm2 + .behaviour_mut() + .push(iter::once(*swarm1.local_peer_id())); + + let swarm1_received_info = match libp2p_swarm_test::drive(&mut swarm1, &mut swarm2).await { + ([identify::Event::Received { info, .. }], [identify::Event::Pushed { .. }]) => info, + other => panic!("Unexpected events: {other:?}"), + }; + + assert_eq!( + swarm1_received_info.public_key.to_peer_id(), + *swarm2.local_peer_id() + ); + assert_eq!(swarm1_received_info.protocol_version, "a"); + assert_eq!(swarm1_received_info.agent_version, "b"); + assert!(!swarm1_received_info.protocols.is_empty()); + assert!(swarm1_received_info.listen_addrs.is_empty()); +} + +#[async_std::test] +async fn discover_peer_after_disconnect() { + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); + + let mut swarm1 = Swarm::new_ephemeral(|identity| { + identify::Behaviour::new(identify::Config::new("a".to_string(), identity.public())) + }); + let mut swarm2 = Swarm::new_ephemeral(|identity| { + identify::Behaviour::new( + identify::Config::new("a".to_string(), identity.public()) + .with_agent_version("b".to_string()), + ) + }); + + swarm1.listen().with_memory_addr_external().await; + swarm2.connect(&mut swarm1).await; + + let swarm1_peer_id = *swarm1.local_peer_id(); + async_std::task::spawn(swarm1.loop_on_next()); + + // Wait until we identified. + swarm2 + .wait(|event| { + matches!( + event, + SwarmEvent::Behaviour(identify::Event::Received { .. }) + ) + .then_some(()) + }) + .await; + + swarm2.disconnect_peer_id(swarm1_peer_id).unwrap(); + + // Wait for connection to close. + swarm2 + .wait(|event| matches!(event, SwarmEvent::ConnectionClosed { .. }).then_some(())) + .await; + + // We should still be able to dial now! + swarm2.dial(swarm1_peer_id).unwrap(); + + let connected_peer = swarm2 + .wait(|event| match event { + SwarmEvent::ConnectionEstablished { peer_id, .. 
} => Some(peer_id), + _ => None, + }) + .await; + + assert_eq!(connected_peer, swarm1_peer_id); +} + +#[async_std::test] +async fn configured_interval_starts_after_first_identify() { + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); + + let identify_interval = Duration::from_secs(5); + + let mut swarm1 = Swarm::new_ephemeral(|identity| { + identify::Behaviour::new( + identify::Config::new("a".to_string(), identity.public()) + .with_interval(identify_interval), + ) + }); + let mut swarm2 = Swarm::new_ephemeral(|identity| { + identify::Behaviour::new( + identify::Config::new("a".to_string(), identity.public()) + .with_agent_version("b".to_string()), + ) + }); + + swarm1.listen().with_memory_addr_external().await; + swarm2.connect(&mut swarm1).await; + + async_std::task::spawn(swarm2.loop_on_next()); + + let start = Instant::now(); + + // Wait until we identified. + swarm1 + .wait(|event| { + matches!(event, SwarmEvent::Behaviour(identify::Event::Sent { .. })).then_some(()) + }) + .await; + + let time_to_first_identify = Instant::now().duration_since(start); + + assert!(time_to_first_identify < identify_interval) +} diff --git a/protocols/kad/CHANGELOG.md b/protocols/kad/CHANGELOG.md index 525be910308..b27a6943659 100644 --- a/protocols/kad/CHANGELOG.md +++ b/protocols/kad/CHANGELOG.md @@ -1,9 +1,115 @@ -## 0.44.0 - unreleased +## 0.45.3 + +- The progress of the close query iterator shall be decided by ANY of the new peers. + See [PR 4932](https://github.com/libp2p/rust-libp2p/pull/4932). + +## 0.45.2 + +- Ensure `Multiaddr` handled and returned by `Behaviour` are `/p2p` terminated. + See [PR 4596](https://github.com/libp2p/rust-libp2p/pull/4596). + +## 0.45.1 + +- Fix a bug where calling `Behaviour::remove_address` with an address not in the peer's bucket would remove the peer from the routing table if the bucket has only one address left. 
+ See [PR 4816](https://github.com/libp2p/rust-libp2p/pull/4816) +- Add `std::fmt::Display` implementation on `QueryId`. + See [PR 4814](https://github.com/libp2p/rust-libp2p/pull/4814). + +## 0.45.0 + +- Remove deprecated `kad::Config::set_connection_idle_timeout` in favor of `SwarmBuilder::idle_connection_timeout`. + See [PR 4659](https://github.com/libp2p/rust-libp2p/pull/4659). +- Emit `ModeChanged` event whenever we automatically reconfigure the mode. + See [PR 4503](https://github.com/libp2p/rust-libp2p/pull/4503). +- Make previously "deprecated" `record` module private. + See [PR 4035](https://github.com/libp2p/rust-libp2p/pull/4035). +- Expose hashed bytes of KBucketKey. + See [PR 4698](https://github.com/libp2p/rust-libp2p/pull/4698). +- Remove previously deprecated type-aliases. + Users should follow the convention of importing the `libp2p::kad` module and referring to symbols as `kad::Behaviour` etc. + See [PR 4733](https://github.com/libp2p/rust-libp2p/pull/4733). + +## 0.44.6 + +- Rename `Kademlia` symbols to follow naming convention. + See [PR 4547]. +- Fix a bug where we didn't detect a remote peer moving into client-state. + See [PR 4639](https://github.com/libp2p/rust-libp2p/pull/4639). +- Re-export `NodeStatus`. + See [PR 4645]. +- Deprecate `kad::Config::set_connection_idle_timeout` in favor of `SwarmBuilder::idle_connection_timeout`. + See [PR 4675]. + +[PR 4547]: https://github.com/libp2p/rust-libp2p/pull/4547 +[PR 4645]: https://github.com/libp2p/rust-libp2p/pull/4645 +[PR 4675]: https://github.com/libp2p/rust-libp2p/pull/4675 + + + +## 0.44.5 +- Migrate to `quick-protobuf-codec` crate for codec logic. + See [PR 4501]. + +[PR 4501]: https://github.com/libp2p/rust-libp2p/pull/4501 + +## 0.44.4 + +- Implement common traits on `RoutingUpdate`. + See [PR 4270]. +- Reduce noise of "remote supports our protocol" log. + See [PR 4278]. 
+ +[PR 4270]: https://github.com/libp2p/rust-libp2p/pull/4270 +[PR 4278]: https://github.com/libp2p/rust-libp2p/pull/4278 + +## 0.44.3 + +- Prevent simultaneous dials to peers. + See [PR 4224]. + +[PR 4224]: https://github.com/libp2p/rust-libp2p/pull/4224 + +- Rename missed `KademliaEvent::OutboundQueryCompleted` to `KademliaEvent::OutboundQueryProgressed` in documentation. + See [PR 4257]. + +[PR 4257]: https://github.com/libp2p/rust-libp2p/pull/4257 + +## 0.44.2 + +- Allow to explicitly set `Mode::{Client,Server}`. + See [PR 4132] + +[PR 4132]: https://github.com/libp2p/rust-libp2p/pull/4132 + +## 0.44.1 + +- Expose `KBucketDistance`. + See [PR 4109]. + +[PR 4109]: https://github.com/libp2p/rust-libp2p/pull/4109 + +## 0.44.0 - Raise MSRV to 1.65. See [PR 3715]. +- Remove deprecated public modules `handler`, `protocol` and `kbucket`. + See [PR 3896]. + +- Automatically configure client/server mode based on external addresses. + If we have or learn about an external address of our node, e.g. through `Swarm::add_external_address` or automated through `libp2p-autonat`, we operate in server-mode and thus allow inbound requests. + By default, a node is in client-mode and only allows outbound requests. + If you want to maintain the status quo, i.e. always operate in server mode, make sure to add at least one external address through `Swarm::add_external_address`. + See also [Kademlia specification](https://github.com/libp2p/specs/tree/master/kad-dht#client-and-server-mode) for an introduction to Kademlia client/server mode. + See [PR 3877]. 
+ [PR 3715]: https://github.com/libp2p/rust-libp2p/pull/3715 +[PR 3877]: https://github.com/libp2p/rust-libp2p/pull/3877 +[PR 3896]: https://github.com/libp2p/rust-libp2p/pull/3896 ## 0.43.3 diff --git a/protocols/kad/Cargo.toml b/protocols/kad/Cargo.toml index 561d6e4c424..7acb73f074f 100644 --- a/protocols/kad/Cargo.toml +++ b/protocols/kad/Cargo.toml @@ -1,9 +1,9 @@ [package] name = "libp2p-kad" edition = "2021" -rust-version = "1.65.0" +rust-version = { workspace = true } description = "Kademlia protocol for libp2p" -version = "0.44.0" +version = "0.45.3" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -11,34 +11,39 @@ keywords = ["peer-to-peer", "libp2p", "networking"] categories = ["network-programming", "asynchronous"] [dependencies] -arrayvec = "0.7.2" +arrayvec = "0.7.4" bytes = "1" -either = "1.5" +either = "1.9" fnv = "1.0" -asynchronous-codec = "0.6" -futures = "0.3.28" -log = "0.4" +asynchronous-codec = { workspace = true } +futures = "0.3.30" libp2p-core = { workspace = true } libp2p-swarm = { workspace = true } +futures-bounded = { workspace = true } quick-protobuf = "0.8" -libp2p-identity = { workspace = true } +quick-protobuf-codec = { workspace = true } +libp2p-identity = { workspace = true, features = ["rand"] } rand = "0.8" -sha2 = "0.10.0" -smallvec = "1.6.1" +sha2 = "0.10.8" +smallvec = "1.11.2" uint = "0.9" -unsigned-varint = { version = "0.7", features = ["asynchronous_codec"] } void = "1.0" futures-timer = "3.0.2" -instant = "0.1.11" +instant = "0.1.12" serde = { version = "1.0", optional = true, features = ["derive"] } thiserror = "1" +tracing = "0.1.37" [dev-dependencies] -env_logger = "0.10.0" +async-std = { version = "1.12.0", features = ["attributes"] } futures-timer = "3.0" +libp2p-identify = { path = "../identify" } libp2p-noise = { workspace = true } +libp2p-swarm = { path = "../../swarm", features = ["macros"] } +libp2p-swarm-test = { path = "../../swarm-test" } 
libp2p-yamux = { workspace = true } quickcheck = { workspace = true } +tracing-subscriber = { version = "0.3", features = ["env-filter"] } [features] serde = ["dep:serde", "bytes/serde"] @@ -49,3 +54,6 @@ serde = ["dep:serde", "bytes/serde"] all-features = true rustdoc-args = ["--cfg", "docsrs"] rustc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true diff --git a/protocols/kad/src/addresses.rs b/protocols/kad/src/addresses.rs index 3c2af4173fd..0b3dc71e649 100644 --- a/protocols/kad/src/addresses.rs +++ b/protocols/kad/src/addresses.rs @@ -23,6 +23,7 @@ use smallvec::SmallVec; use std::fmt; /// A non-empty list of (unique) addresses of a peer in the routing table. +/// Every address must be a fully-qualified /p2p address. #[derive(Clone)] pub struct Addresses { addrs: SmallVec<[Multiaddr; 6]>, @@ -67,7 +68,7 @@ impl Addresses { /// otherwise unreachable. #[allow(clippy::result_unit_err)] pub fn remove(&mut self, addr: &Multiaddr) -> Result<(), ()> { - if self.addrs.len() == 1 { + if self.addrs.len() == 1 && self.addrs[0] == *addr { return Err(()); } @@ -113,3 +114,76 @@ impl fmt::Debug for Addresses { f.debug_list().entries(self.addrs.iter()).finish() } } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn given_one_address_when_removing_different_one_returns_ok() { + let mut addresses = make_addresses([tcp_addr(1234)]); + + let result = addresses.remove(&tcp_addr(4321)); + + assert!(result.is_ok()); + assert_eq!( + addresses.into_vec(), + vec![tcp_addr(1234)], + "`Addresses` to not change because we tried to remove a non-present address" + ); + } + + #[test] + fn given_one_address_when_removing_correct_one_returns_err() { + let mut addresses = make_addresses([tcp_addr(1234)]); + + let result = addresses.remove(&tcp_addr(1234)); + + assert!(result.is_err()); + assert_eq!( + addresses.into_vec(), + vec![tcp_addr(1234)], + "`Addresses` to not be empty because it would have been the last address to be removed" + ); + } + + #[test] + fn 
given_many_addresses_when_removing_different_one_does_not_remove_and_returns_ok() { + let mut addresses = make_addresses([tcp_addr(1234), tcp_addr(4321)]); + + let result = addresses.remove(&tcp_addr(5678)); + + assert!(result.is_ok()); + assert_eq!( + addresses.into_vec(), + vec![tcp_addr(1234), tcp_addr(4321)], + "`Addresses` to not change because we tried to remove a non-present address" + ); + } + + #[test] + fn given_many_addresses_when_removing_correct_one_removes_and_returns_ok() { + let mut addresses = make_addresses([tcp_addr(1234), tcp_addr(4321)]); + + let result = addresses.remove(&tcp_addr(1234)); + + assert!(result.is_ok()); + assert_eq!( + addresses.into_vec(), + vec![tcp_addr(4321)], + "`Addresses to no longer contain address was present and then removed`" + ); + } + + /// Helper function to easily initialize Addresses struct with multiple addresses. + fn make_addresses(addresses: impl IntoIterator) -> Addresses { + Addresses { + addrs: SmallVec::from_iter(addresses), + } + } + + /// Helper function to create a tcp Multiaddr with a specific port + fn tcp_addr(port: u16) -> Multiaddr { + format!("/ip4/127.0.0.1/tcp/{port}").parse().unwrap() + } +} diff --git a/protocols/kad/src/behaviour.rs b/protocols/kad/src/behaviour.rs index ed161d7a6ce..cde4fbb8536 100644 --- a/protocols/kad/src/behaviour.rs +++ b/protocols/kad/src/behaviour.rs @@ -23,15 +23,12 @@ mod test; use crate::addresses::Addresses; -use crate::handler_priv::{ - KademliaHandler, KademliaHandlerConfig, KademliaHandlerEvent, KademliaHandlerIn, - KademliaRequestId, -}; +use crate::handler::{Handler, HandlerEvent, HandlerIn, RequestId}; use crate::jobs::*; -use crate::kbucket_priv::{self, Distance, KBucketsTable, NodeStatus}; -use crate::protocol::{KadConnectionType, KadPeer, KademliaProtocolConfig}; +use crate::kbucket::{self, Distance, KBucketsTable, NodeStatus}; +use crate::protocol::{ConnectionType, KadPeer, ProtocolConfig}; use crate::query::{Query, QueryConfig, QueryId, QueryPool, 
QueryPoolState}; -use crate::record_priv::{ +use crate::record::{ self, store::{self, RecordStore}, ProviderRecord, Record, @@ -46,36 +43,36 @@ use libp2p_swarm::behaviour::{ }; use libp2p_swarm::{ dial_opts::{self, DialOpts}, - ConnectionDenied, ConnectionId, DialError, ExternalAddresses, ListenAddresses, - NetworkBehaviour, NotifyHandler, PollParameters, StreamProtocol, THandler, THandlerInEvent, + ConnectionDenied, ConnectionHandler, ConnectionId, DialError, ExternalAddresses, + ListenAddresses, NetworkBehaviour, NotifyHandler, StreamProtocol, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }; -use log::{debug, info, warn}; use smallvec::SmallVec; -use std::collections::{BTreeMap, HashSet, VecDeque}; +use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; use std::fmt; use std::num::NonZeroUsize; -use std::task::{Context, Poll}; +use std::task::{Context, Poll, Waker}; use std::time::Duration; use std::vec; use thiserror::Error; +use tracing::Level; pub use crate::query::QueryStats; -/// `Kademlia` is a `NetworkBehaviour` that implements the libp2p +/// `Behaviour` is a `NetworkBehaviour` that implements the libp2p /// Kademlia protocol. -pub struct Kademlia { +pub struct Behaviour { /// The Kademlia routing table. - kbuckets: KBucketsTable, Addresses>, + kbuckets: KBucketsTable, Addresses>, /// The k-bucket insertion strategy. - kbucket_inserts: KademliaBucketInserts, + kbucket_inserts: BucketInserts, /// Configuration of the wire protocol. - protocol_config: KademliaProtocolConfig, + protocol_config: ProtocolConfig, /// Configuration of [`RecordStore`] filtering. - record_filtering: KademliaStoreInserts, + record_filtering: StoreInserts, /// The currently active (i.e. in-progress) queries. queries: QueryPool, @@ -99,21 +96,24 @@ pub struct Kademlia { /// The TTL of provider records. provider_record_ttl: Option, - /// How long to keep connections alive when they're idle. 
- connection_idle_timeout: Duration, - /// Queued events to return when the behaviour is being polled. - queued_events: VecDeque>>, + queued_events: VecDeque>, listen_addresses: ListenAddresses, external_addresses: ExternalAddresses, - /// See [`KademliaConfig::caching`]. - caching: KademliaCaching, + connections: HashMap, + + /// See [`Config::caching`]. + caching: Caching, local_peer_id: PeerId, + mode: Mode, + auto_mode: bool, + no_events_waker: Option, + /// The record storage. store: TStore, } @@ -122,7 +122,7 @@ pub struct Kademlia { /// and their addresses into the k-buckets of the Kademlia /// routing table. #[derive(Copy, Clone, Debug, PartialEq, Eq)] -pub enum KademliaBucketInserts { +pub enum BucketInserts { /// Whenever a connection to a peer is established as a /// result of a dialing attempt and that peer is not yet /// in the routing table, it is inserted as long as there @@ -132,10 +132,10 @@ pub enum KademliaBucketInserts { /// disconnected peer is evicted from the bucket. OnConnected, /// New peers and addresses are only added to the routing table via - /// explicit calls to [`Kademlia::add_address`]. + /// explicit calls to [`Behaviour::add_address`]. /// /// > **Note**: Even though peers can only get into the - /// > routing table as a result of [`Kademlia::add_address`], + /// > routing table as a result of [`Behaviour::add_address`], /// > routing table entries are still updated as peers /// > connect and disconnect (i.e. the order of the entries /// > as well as the network addresses). @@ -148,65 +148,63 @@ pub enum KademliaBucketInserts { /// This can be used for e.g. signature verification or validating /// the accompanying [`Key`]. /// -/// [`Key`]: crate::record_priv::Key +/// [`Key`]: crate::record::Key #[derive(Copy, Clone, Debug, PartialEq, Eq)] -pub enum KademliaStoreInserts { +pub enum StoreInserts { /// Whenever a (provider) record is received, /// the record is forwarded immediately to the [`RecordStore`]. 
Unfiltered, /// Whenever a (provider) record is received, an event is emitted. - /// Provider records generate a [`InboundRequest::AddProvider`] under [`KademliaEvent::InboundRequest`], - /// normal records generate a [`InboundRequest::PutRecord`] under [`KademliaEvent::InboundRequest`]. + /// Provider records generate a [`InboundRequest::AddProvider`] under [`Event::InboundRequest`], + /// normal records generate a [`InboundRequest::PutRecord`] under [`Event::InboundRequest`]. /// /// When deemed valid, a (provider) record needs to be explicitly stored in /// the [`RecordStore`] via [`RecordStore::put`] or [`RecordStore::add_provider`], /// whichever is applicable. A mutable reference to the [`RecordStore`] can - /// be retrieved via [`Kademlia::store_mut`]. + /// be retrieved via [`Behaviour::store_mut`]. FilterBoth, } /// The configuration for the `Kademlia` behaviour. /// -/// The configuration is consumed by [`Kademlia::new`]. +/// The configuration is consumed by [`Behaviour::new`]. 
#[derive(Debug, Clone)] -pub struct KademliaConfig { +pub struct Config { kbucket_pending_timeout: Duration, query_config: QueryConfig, - protocol_config: KademliaProtocolConfig, + protocol_config: ProtocolConfig, record_ttl: Option, record_replication_interval: Option, record_publication_interval: Option, - record_filtering: KademliaStoreInserts, + record_filtering: StoreInserts, provider_record_ttl: Option, provider_publication_interval: Option, - connection_idle_timeout: Duration, - kbucket_inserts: KademliaBucketInserts, - caching: KademliaCaching, + kbucket_inserts: BucketInserts, + caching: Caching, } -impl Default for KademliaConfig { +impl Default for Config { fn default() -> Self { - KademliaConfig { + Config { kbucket_pending_timeout: Duration::from_secs(60), query_config: QueryConfig::default(), protocol_config: Default::default(), record_ttl: Some(Duration::from_secs(36 * 60 * 60)), record_replication_interval: Some(Duration::from_secs(60 * 60)), record_publication_interval: Some(Duration::from_secs(24 * 60 * 60)), - record_filtering: KademliaStoreInserts::Unfiltered, + record_filtering: StoreInserts::Unfiltered, provider_publication_interval: Some(Duration::from_secs(12 * 60 * 60)), provider_record_ttl: Some(Duration::from_secs(24 * 60 * 60)), - connection_idle_timeout: Duration::from_secs(10), - kbucket_inserts: KademliaBucketInserts::OnConnected, - caching: KademliaCaching::Enabled { max_peers: 1 }, + kbucket_inserts: BucketInserts::OnConnected, + caching: Caching::Enabled { max_peers: 1 }, } } } /// The configuration for Kademlia "write-back" caching after successful -/// lookups via [`Kademlia::get_record`]. +/// lookups via [`Behaviour::get_record`]. #[derive(Debug, Clone)] -pub enum KademliaCaching { +pub enum Caching { /// Caching is disabled and the peers closest to records being looked up /// that do not return a record are not tracked, i.e. /// [`GetRecordOk::FinishedWithNoAdditionalRecord`] is always empty. 
@@ -214,11 +212,11 @@ pub enum KademliaCaching { /// Up to `max_peers` peers not returning a record that are closest to the key /// being looked up are tracked and returned in [`GetRecordOk::FinishedWithNoAdditionalRecord`]. /// The write-back operation must be performed explicitly, if - /// desired and after choosing a record from the results, via [`Kademlia::put_record_to`]. + /// desired and after choosing a record from the results, via [`Behaviour::put_record_to`]. Enabled { max_peers: u16 }, } -impl KademliaConfig { +impl Config { /// Sets custom protocol names. /// /// Kademlia nodes only communicate with other nodes using the same protocol @@ -263,7 +261,7 @@ impl KademliaConfig { /// This only controls the level of parallelism of an iterative query, not /// the level of parallelism of a query to a fixed set of peers. /// - /// When used with [`KademliaConfig::disjoint_query_paths`] it equals + /// When used with [`Config::disjoint_query_paths`] it equals /// the amount of disjoint paths used. pub fn set_parallelism(&mut self, parallelism: NonZeroUsize) -> &mut Self { self.query_config.parallelism = parallelism; @@ -299,9 +297,9 @@ impl KademliaConfig { /// Sets whether or not records should be filtered before being stored. /// - /// See [`KademliaStoreInserts`] for the different values. - /// Defaults to [`KademliaStoreInserts::Unfiltered`]. - pub fn set_record_filtering(&mut self, filtering: KademliaStoreInserts) -> &mut Self { + /// See [`StoreInserts`] for the different values. + /// Defaults to [`StoreInserts::Unfiltered`]. + pub fn set_record_filtering(&mut self, filtering: StoreInserts) -> &mut Self { self.record_filtering = filtering; self } @@ -368,12 +366,6 @@ impl KademliaConfig { self } - /// Sets the amount of time to keep connections alive when they're idle. 
- pub fn set_connection_idle_timeout(&mut self, duration: Duration) -> &mut Self { - self.connection_idle_timeout = duration; - self - } - /// Modifies the maximum allowed size of individual Kademlia packets. /// /// It might be necessary to increase this value if trying to put large @@ -384,24 +376,24 @@ impl KademliaConfig { } /// Sets the k-bucket insertion strategy for the Kademlia routing table. - pub fn set_kbucket_inserts(&mut self, inserts: KademliaBucketInserts) -> &mut Self { + pub fn set_kbucket_inserts(&mut self, inserts: BucketInserts) -> &mut Self { self.kbucket_inserts = inserts; self } - /// Sets the [`KademliaCaching`] strategy to use for successful lookups. + /// Sets the [`Caching`] strategy to use for successful lookups. /// - /// The default is [`KademliaCaching::Enabled`] with a `max_peers` of 1. + /// The default is [`Caching::Enabled`] with a `max_peers` of 1. /// Hence, with default settings and a lookup quorum of 1, a successful lookup /// will result in the record being cached at the closest node to the key that /// did not return the record, i.e. the standard Kademlia behaviour. - pub fn set_caching(&mut self, c: KademliaCaching) -> &mut Self { + pub fn set_caching(&mut self, c: Caching) -> &mut Self { self.caching = c; self } } -impl Kademlia +impl Behaviour where TStore: RecordStore + Send + 'static, { @@ -416,8 +408,8 @@ where } /// Creates a new `Kademlia` network behaviour with the given configuration. 
- pub fn with_config(id: PeerId, store: TStore, config: KademliaConfig) -> Self { - let local_key = kbucket_priv::Key::from(id); + pub fn with_config(id: PeerId, store: TStore, config: Config) -> Self { + let local_key = kbucket::Key::from(id); let put_record_job = config .record_replication_interval @@ -435,7 +427,7 @@ where .provider_publication_interval .map(AddProviderJob::new); - Kademlia { + Behaviour { store, caching: config.caching, kbuckets: KBucketsTable::new(local_key, config.kbucket_pending_timeout), @@ -450,9 +442,12 @@ where put_record_job, record_ttl: config.record_ttl, provider_record_ttl: config.provider_record_ttl, - connection_idle_timeout: config.connection_idle_timeout, external_addresses: Default::default(), local_peer_id: id, + connections: Default::default(), + mode: Mode::Client, + auto_mode: true, + no_events_waker: None, } } @@ -516,14 +511,18 @@ where /// in the DHT. /// /// If the routing table has been updated as a result of this operation, - /// a [`KademliaEvent::RoutingUpdated`] event is emitted. + /// a [`Event::RoutingUpdated`] event is emitted. 
pub fn add_address(&mut self, peer: &PeerId, address: Multiaddr) -> RoutingUpdate { - let key = kbucket_priv::Key::from(*peer); + // ensuring address is a fully-qualified /p2p multiaddr + let Ok(address) = address.with_p2p(*peer) else { + return RoutingUpdate::Failed; + }; + let key = kbucket::Key::from(*peer); match self.kbuckets.entry(&key) { - kbucket_priv::Entry::Present(mut entry, _) => { + kbucket::Entry::Present(mut entry, _) => { if entry.value().insert(address) { - self.queued_events.push_back(ToSwarm::GenerateEvent( - KademliaEvent::RoutingUpdated { + self.queued_events + .push_back(ToSwarm::GenerateEvent(Event::RoutingUpdated { peer: *peer, is_new_peer: false, addresses: entry.value().clone(), @@ -533,16 +532,15 @@ where .bucket(&key) .map(|b| b.range()) .expect("Not kbucket::Entry::SelfEntry."), - }, - )) + })) } RoutingUpdate::Success } - kbucket_priv::Entry::Pending(mut entry, _) => { + kbucket::Entry::Pending(mut entry, _) => { entry.value().insert(address); RoutingUpdate::Pending } - kbucket_priv::Entry::Absent(entry) => { + kbucket::Entry::Absent(entry) => { let addresses = Addresses::new(address); let status = if self.connected_peers.contains(peer) { NodeStatus::Connected @@ -550,9 +548,9 @@ where NodeStatus::Disconnected }; match entry.insert(addresses.clone(), status) { - kbucket_priv::InsertResult::Inserted => { + kbucket::InsertResult::Inserted => { self.queued_events.push_back(ToSwarm::GenerateEvent( - KademliaEvent::RoutingUpdated { + Event::RoutingUpdated { peer: *peer, is_new_peer: true, addresses, @@ -566,19 +564,21 @@ where )); RoutingUpdate::Success } - kbucket_priv::InsertResult::Full => { - debug!("Bucket full. Peer not added to routing table: {}", peer); + kbucket::InsertResult::Full => { + tracing::debug!(%peer, "Bucket full. 
Peer not added to routing table"); RoutingUpdate::Failed } - kbucket_priv::InsertResult::Pending { disconnected } => { + kbucket::InsertResult::Pending { disconnected } => { self.queued_events.push_back(ToSwarm::Dial { - opts: DialOpts::peer_id(disconnected.into_preimage()).build(), + opts: DialOpts::peer_id(disconnected.into_preimage()) + .condition(dial_opts::PeerCondition::NotDialing) + .build(), }); RoutingUpdate::Pending } } } - kbucket_priv::Entry::SelfEntry => RoutingUpdate::Failed, + kbucket::Entry::SelfEntry => RoutingUpdate::Failed, } } @@ -596,24 +596,25 @@ where &mut self, peer: &PeerId, address: &Multiaddr, - ) -> Option, Addresses>> { - let key = kbucket_priv::Key::from(*peer); + ) -> Option, Addresses>> { + let address = &address.to_owned().with_p2p(*peer).ok()?; + let key = kbucket::Key::from(*peer); match self.kbuckets.entry(&key) { - kbucket_priv::Entry::Present(mut entry, _) => { + kbucket::Entry::Present(mut entry, _) => { if entry.value().remove(address).is_err() { Some(entry.remove()) // it is the last address, thus remove the peer. } else { None } } - kbucket_priv::Entry::Pending(mut entry, _) => { + kbucket::Entry::Pending(mut entry, _) => { if entry.value().remove(address).is_err() { Some(entry.remove()) // it is the last address, thus remove the peer. } else { None } } - kbucket_priv::Entry::Absent(..) | kbucket_priv::Entry::SelfEntry => None, + kbucket::Entry::Absent(..) | kbucket::Entry::SelfEntry => None, } } @@ -624,20 +625,19 @@ where pub fn remove_peer( &mut self, peer: &PeerId, - ) -> Option, Addresses>> { - let key = kbucket_priv::Key::from(*peer); + ) -> Option, Addresses>> { + let key = kbucket::Key::from(*peer); match self.kbuckets.entry(&key) { - kbucket_priv::Entry::Present(entry, _) => Some(entry.remove()), - kbucket_priv::Entry::Pending(entry, _) => Some(entry.remove()), - kbucket_priv::Entry::Absent(..) 
| kbucket_priv::Entry::SelfEntry => None, + kbucket::Entry::Present(entry, _) => Some(entry.remove()), + kbucket::Entry::Pending(entry, _) => Some(entry.remove()), + kbucket::Entry::Absent(..) | kbucket::Entry::SelfEntry => None, } } /// Returns an iterator over all non-empty buckets in the routing table. pub fn kbuckets( &mut self, - ) -> impl Iterator, Addresses>> - { + ) -> impl Iterator, Addresses>> { self.kbuckets.iter().filter(|b| !b.is_empty()) } @@ -647,9 +647,9 @@ where pub fn kbucket( &mut self, key: K, - ) -> Option, Addresses>> + ) -> Option, Addresses>> where - K: Into> + Clone, + K: Into> + Clone, { self.kbuckets.bucket(&key.into()) } @@ -657,19 +657,18 @@ where /// Initiates an iterative query for the closest peers to the given key. /// /// The result of the query is delivered in a - /// [`KademliaEvent::OutboundQueryCompleted{QueryResult::GetClosestPeers}`]. + /// [`Event::OutboundQueryProgressed{QueryResult::GetClosestPeers}`]. pub fn get_closest_peers(&mut self, key: K) -> QueryId where - K: Into> + Into> + Clone, + K: Into> + Into> + Clone, { - let target: kbucket_priv::Key = key.clone().into(); + let target: kbucket::Key = key.clone().into(); let key: Vec = key.into(); let info = QueryInfo::GetClosestPeers { key, step: ProgressStep::first(), }; - let peer_keys: Vec> = - self.kbuckets.closest_keys(&target).collect(); + let peer_keys: Vec> = self.kbuckets.closest_keys(&target).collect(); let inner = QueryInner::new(info); self.queries.add_iter_closest(target, peer_keys, inner) } @@ -677,16 +676,16 @@ where /// Returns closest peers to the given key; takes peers from local routing table only. pub fn get_closest_local_peers<'a, K: Clone>( &'a mut self, - key: &'a kbucket_priv::Key, - ) -> impl Iterator> + 'a { + key: &'a kbucket::Key, + ) -> impl Iterator> + 'a { self.kbuckets.closest_keys(key) } /// Performs a lookup for a record in the DHT. 
/// /// The result of this operation is delivered in a - /// [`KademliaEvent::OutboundQueryCompleted{QueryResult::GetRecord}`]. - pub fn get_record(&mut self, key: record_priv::Key) -> QueryId { + /// [`Event::OutboundQueryProgressed{QueryResult::GetRecord}`]. + pub fn get_record(&mut self, key: record::Key) -> QueryId { let record = if let Some(record) = self.store.get(&key) { if record.is_expired(Instant::now()) { self.store.remove(&key); @@ -703,7 +702,7 @@ where let step = ProgressStep::first(); - let target = kbucket_priv::Key::new(key.clone()); + let target = kbucket::Key::new(key.clone()); let info = if record.is_some() { QueryInfo::GetRecord { key, @@ -727,14 +726,13 @@ where let stats = QueryStats::empty(); if let Some(record) = record { - self.queued_events.push_back(ToSwarm::GenerateEvent( - KademliaEvent::OutboundQueryProgressed { + self.queued_events + .push_back(ToSwarm::GenerateEvent(Event::OutboundQueryProgressed { id, result: QueryResult::GetRecord(Ok(GetRecordOk::FoundRecord(record))), step, stats, - }, - )); + })); } id @@ -746,12 +744,12 @@ where /// Returns `Ok` if a record has been stored locally, providing the /// `QueryId` of the initial query that replicates the record in the DHT. /// The result of the query is eventually reported as a - /// [`KademliaEvent::OutboundQueryCompleted{QueryResult::PutRecord}`]. + /// [`Event::OutboundQueryProgressed{QueryResult::PutRecord}`]. /// /// The record is always stored locally with the given expiration. If the record's /// expiration is `None`, the common case, it does not expire in local storage /// but is still replicated with the configured record TTL. To remove the record - /// locally and stop it from being re-published in the DHT, see [`Kademlia::remove_record`]. + /// locally and stop it from being re-published in the DHT, see [`Behaviour::remove_record`]. 
/// /// After the initial publication of the record, it is subject to (re-)replication /// and (re-)publication as per the configured intervals. Periodic (re-)publication @@ -769,7 +767,7 @@ where .expires .or_else(|| self.record_ttl.map(|ttl| Instant::now() + ttl)); let quorum = quorum.eval(self.queries.config().replication_factor); - let target = kbucket_priv::Key::new(record.key.clone()); + let target = kbucket::Key::new(record.key.clone()); let peers = self.kbuckets.closest_keys(&target); let context = PutRecordContext::Publish; let info = QueryInfo::PutRecord { @@ -837,7 +835,7 @@ where /// This is a _local_ operation. However, it also has the effect that /// the record will no longer be periodically re-published, allowing the /// record to eventually expire throughout the DHT. - pub fn remove_record(&mut self, key: &record_priv::Key) { + pub fn remove_record(&mut self, key: &record::Key) { if let Some(r) = self.store.get(key) { if r.publisher.as_ref() == Some(self.kbuckets.local_key().preimage()) { self.store.remove(key) @@ -862,13 +860,13 @@ where /// /// Returns `Ok` if bootstrapping has been initiated with a self-lookup, providing the /// `QueryId` for the entire bootstrapping process. The progress of bootstrapping is - /// reported via [`KademliaEvent::OutboundQueryCompleted{QueryResult::Bootstrap}`] events, + /// reported via [`Event::OutboundQueryProgressed{QueryResult::Bootstrap}`] events, /// with one such event per bootstrapping query. /// /// Returns `Err` if bootstrapping is impossible due an empty routing table. /// /// > **Note**: Bootstrapping requires at least one node of the DHT to be known. - /// > See [`Kademlia::add_address`]. + /// > See [`Behaviour::add_address`]. 
pub fn bootstrap(&mut self) -> Result { let local_key = self.kbuckets.local_key().clone(); let info = QueryInfo::Bootstrap { @@ -897,17 +895,17 @@ where /// The publication of the provider records is periodically repeated as per the /// configured interval, to renew the expiry and account for changes to the DHT /// topology. A provider record may be removed from local storage and - /// thus no longer re-published by calling [`Kademlia::stop_providing`]. + /// thus no longer re-published by calling [`Behaviour::stop_providing`]. /// /// In contrast to the standard Kademlia push-based model for content distribution - /// implemented by [`Kademlia::put_record`], the provider API implements a + /// implemented by [`Behaviour::put_record`], the provider API implements a /// pull-based model that may be used in addition or as an alternative. /// The means by which the actual value is obtained from a provider is out of scope /// of the libp2p Kademlia provider API. /// /// The results of the (repeated) provider announcements sent by this node are - /// reported via [`KademliaEvent::OutboundQueryCompleted{QueryResult::StartProviding}`]. - pub fn start_providing(&mut self, key: record_priv::Key) -> Result { + /// reported via [`Event::OutboundQueryProgressed{QueryResult::StartProviding}`]. + pub fn start_providing(&mut self, key: record::Key) -> Result { // Note: We store our own provider records locally without local addresses // to avoid redundant storage and outdated addresses. Instead these are // acquired on demand when returning a `ProviderRecord` for the local node. @@ -918,7 +916,7 @@ where local_addrs, ); self.store.add_provider(record)?; - let target = kbucket_priv::Key::new(key.clone()); + let target = kbucket::Key::new(key.clone()); let peers = self.kbuckets.closest_keys(&target); let context = AddProviderContext::Publish; let info = QueryInfo::AddProvider { @@ -935,7 +933,7 @@ where /// /// This is a local operation. 
The local node will still be considered as a /// provider for the key by other nodes until these provider records expire. - pub fn stop_providing(&mut self, key: &record_priv::Key) { + pub fn stop_providing(&mut self, key: &record::Key) { self.store .remove_provider(key, self.kbuckets.local_key().preimage()); } @@ -943,8 +941,8 @@ where /// Performs a lookup for providers of a value to the given key. /// /// The result of this operation is delivered in a - /// reported via [`KademliaEvent::OutboundQueryCompleted{QueryResult::GetProviders}`]. - pub fn get_providers(&mut self, key: record_priv::Key) -> QueryId { + /// reported via [`Event::OutboundQueryProgressed{QueryResult::GetProviders}`]. + pub fn get_providers(&mut self, key: record::Key) -> QueryId { let providers: HashSet<_> = self .store .providers(&key) @@ -965,7 +963,7 @@ where }, }; - let target = kbucket_priv::Key::new(key.clone()); + let target = kbucket::Key::new(key.clone()); let peers = self.kbuckets.closest_keys(&target); let inner = QueryInner::new(info); let id = self.queries.add_iter_closest(target.clone(), peers, inner); @@ -974,8 +972,8 @@ where let stats = QueryStats::empty(); if !providers.is_empty() { - self.queued_events.push_back(ToSwarm::GenerateEvent( - KademliaEvent::OutboundQueryProgressed { + self.queued_events + .push_back(ToSwarm::GenerateEvent(Event::OutboundQueryProgressed { id, result: QueryResult::GetProviders(Ok(GetProvidersOk::FoundProviders { key, @@ -983,12 +981,108 @@ where })), step, stats, - }, - )); + })); } id } + /// Set the [`Mode`] in which we should operate. + /// + /// By default, we are in [`Mode::Client`] and will swap into [`Mode::Server`] as soon as we have a confirmed, external address via [`FromSwarm::ExternalAddrConfirmed`]. + /// + /// Setting a mode via this function disables this automatic behaviour and unconditionally operates in the specified mode. + /// To reactivate the automatic configuration, pass [`None`] instead. 
+ pub fn set_mode(&mut self, mode: Option) { + match mode { + Some(mode) => { + self.mode = mode; + self.auto_mode = false; + self.reconfigure_mode(); + } + None => { + self.auto_mode = true; + self.determine_mode_from_external_addresses(); + } + } + + if let Some(waker) = self.no_events_waker.take() { + waker.wake(); + } + } + + fn reconfigure_mode(&mut self) { + if self.connections.is_empty() { + return; + } + + let num_connections = self.connections.len(); + + tracing::debug!( + "Re-configuring {} established connection{}", + num_connections, + if num_connections > 1 { "s" } else { "" } + ); + + self.queued_events + .extend( + self.connections + .iter() + .map(|(conn_id, peer_id)| ToSwarm::NotifyHandler { + peer_id: *peer_id, + handler: NotifyHandler::One(*conn_id), + event: HandlerIn::ReconfigureMode { + new_mode: self.mode, + }, + }), + ); + } + + fn determine_mode_from_external_addresses(&mut self) { + let old_mode = self.mode; + + self.mode = match (self.external_addresses.as_slice(), self.mode) { + ([], Mode::Server) => { + tracing::debug!("Switching to client-mode because we no longer have any confirmed external addresses"); + + Mode::Client + } + ([], Mode::Client) => { + // Previously client-mode, now also client-mode because no external addresses. + + Mode::Client + } + (confirmed_external_addresses, Mode::Client) => { + if tracing::enabled!(Level::DEBUG) { + let confirmed_external_addresses = + to_comma_separated_list(confirmed_external_addresses); + + tracing::debug!("Switching to server-mode assuming that one of [{confirmed_external_addresses}] is externally reachable"); + } + + Mode::Server + } + (confirmed_external_addresses, Mode::Server) => { + debug_assert!( + !confirmed_external_addresses.is_empty(), + "Previous match arm handled empty list" + ); + + // Previously, server-mode, now also server-mode because > 1 external address. Don't log anything to avoid spam. 
+ + Mode::Server + } + }; + + self.reconfigure_mode(); + + if old_mode != self.mode { + self.queued_events + .push_back(ToSwarm::GenerateEvent(Event::ModeChanged { + new_mode: self.mode, + })); + } + } + /// Processes discovered peers from a successful request in an iterative `Query`. fn discovered<'a, I>(&'a mut self, query_id: &QueryId, source: &PeerId, peers: I) where @@ -997,13 +1091,13 @@ where let local_id = self.kbuckets.local_key().preimage(); let others_iter = peers.filter(|p| &p.node_id != local_id); if let Some(query) = self.queries.get_mut(query_id) { - log::trace!("Request to {:?} in query {:?} succeeded.", source, query_id); + tracing::trace!(peer=%source, query=?query_id, "Request to peer in query succeeded"); for peer in others_iter.clone() { - log::trace!( - "Peer {:?} reported by {:?} in query {:?}.", - peer, - source, - query_id + tracing::trace!( + ?peer, + %source, + query=?query_id, + "Peer reported by source in query" ); let addrs = peer.multiaddrs.iter().cloned().collect(); query.inner.addresses.insert(peer.node_id, addrs); @@ -1017,7 +1111,7 @@ where /// result. fn find_closest( &mut self, - target: &kbucket_priv::Key, + target: &kbucket::Key, source: &PeerId, ) -> Vec { if target == self.kbuckets.local_key() { @@ -1033,7 +1127,7 @@ where } /// Collects all peers who are known to be providers of the value for a given `Multihash`. 
- fn provider_peers(&mut self, key: &record_priv::Key, source: &PeerId) -> Vec { + fn provider_peers(&mut self, key: &record::Key, source: &PeerId) -> Vec { let kbuckets = &mut self.kbuckets; let connected = &mut self.connected_peers; let listen_addresses = &self.listen_addresses; @@ -1047,9 +1141,9 @@ where let node_id = p.provider; let multiaddrs = p.addresses; let connection_ty = if connected.contains(&node_id) { - KadConnectionType::Connected + ConnectionType::Connected } else { - KadConnectionType::NotConnected + ConnectionType::NotConnected }; if multiaddrs.is_empty() { // The provider is either the local node and we fill in @@ -1067,7 +1161,7 @@ where .collect::>(), ) } else { - let key = kbucket_priv::Key::from(node_id); + let key = kbucket::Key::from(node_id); kbuckets .entry(&key) .view() @@ -1090,13 +1184,13 @@ where } /// Starts an iterative `ADD_PROVIDER` query for the given key. - fn start_add_provider(&mut self, key: record_priv::Key, context: AddProviderContext) { + fn start_add_provider(&mut self, key: record::Key, context: AddProviderContext) { let info = QueryInfo::AddProvider { context, key: key.clone(), phase: AddProviderPhase::GetClosestPeers, }; - let target = kbucket_priv::Key::new(key); + let target = kbucket::Key::new(key); let peers = self.kbuckets.closest_keys(&target); let inner = QueryInner::new(info); self.queries.add_iter_closest(target.clone(), peers, inner); @@ -1105,7 +1199,7 @@ where /// Starts an iterative `PUT_VALUE` query for the given record. 
fn start_put_record(&mut self, record: Record, quorum: Quorum, context: PutRecordContext) { let quorum = quorum.eval(self.queries.config().replication_factor); - let target = kbucket_priv::Key::new(record.key.clone()); + let target = kbucket::Key::new(record.key.clone()); let peers = self.kbuckets.closest_keys(&target); let info = QueryInfo::PutRecord { record, @@ -1124,16 +1218,16 @@ where address: Option, new_status: NodeStatus, ) { - let key = kbucket_priv::Key::from(peer); + let key = kbucket::Key::from(peer); match self.kbuckets.entry(&key) { - kbucket_priv::Entry::Present(mut entry, old_status) => { + kbucket::Entry::Present(mut entry, old_status) => { if old_status != new_status { entry.update(new_status) } if let Some(address) = address { if entry.value().insert(address) { self.queued_events.push_back(ToSwarm::GenerateEvent( - KademliaEvent::RoutingUpdated { + Event::RoutingUpdated { peer, is_new_peer: false, addresses: entry.value().clone(), @@ -1149,7 +1243,7 @@ where } } - kbucket_priv::Entry::Pending(mut entry, old_status) => { + kbucket::Entry::Pending(mut entry, old_status) => { if let Some(address) = address { entry.value().insert(address); } @@ -1158,27 +1252,28 @@ where } } - kbucket_priv::Entry::Absent(entry) => { + kbucket::Entry::Absent(entry) => { // Only connected nodes with a known address are newly inserted. 
if new_status != NodeStatus::Connected { return; } match (address, self.kbucket_inserts) { (None, _) => { - self.queued_events.push_back(ToSwarm::GenerateEvent( - KademliaEvent::UnroutablePeer { peer }, - )); + self.queued_events + .push_back(ToSwarm::GenerateEvent(Event::UnroutablePeer { peer })); } - (Some(a), KademliaBucketInserts::Manual) => { - self.queued_events.push_back(ToSwarm::GenerateEvent( - KademliaEvent::RoutablePeer { peer, address: a }, - )); + (Some(a), BucketInserts::Manual) => { + self.queued_events + .push_back(ToSwarm::GenerateEvent(Event::RoutablePeer { + peer, + address: a, + })); } - (Some(a), KademliaBucketInserts::OnConnected) => { + (Some(a), BucketInserts::OnConnected) => { let addresses = Addresses::new(a); match entry.insert(addresses.clone(), new_status) { - kbucket_priv::InsertResult::Inserted => { - let event = KademliaEvent::RoutingUpdated { + kbucket::InsertResult::Inserted => { + let event = Event::RoutingUpdated { peer, is_new_peer: true, addresses, @@ -1191,28 +1286,32 @@ where }; self.queued_events.push_back(ToSwarm::GenerateEvent(event)); } - kbucket_priv::InsertResult::Full => { - debug!("Bucket full. Peer not added to routing table: {}", peer); + kbucket::InsertResult::Full => { + tracing::debug!( + %peer, + "Bucket full. Peer not added to routing table" + ); let address = addresses.first().clone(); self.queued_events.push_back(ToSwarm::GenerateEvent( - KademliaEvent::RoutablePeer { peer, address }, + Event::RoutablePeer { peer, address }, )); } - kbucket_priv::InsertResult::Pending { disconnected } => { + kbucket::InsertResult::Pending { disconnected } => { let address = addresses.first().clone(); self.queued_events.push_back(ToSwarm::GenerateEvent( - KademliaEvent::PendingRoutablePeer { peer, address }, + Event::PendingRoutablePeer { peer, address }, )); // `disconnected` might already be in the process of re-connecting. 
// In other words `disconnected` might have already re-connected but // is not yet confirmed to support the Kademlia protocol via - // [`KademliaHandlerEvent::ProtocolConfirmed`]. + // [`HandlerEvent::ProtocolConfirmed`]. // // Only try dialing peer if not currently connected. if !self.connected_peers.contains(disconnected.preimage()) { self.queued_events.push_back(ToSwarm::Dial { opts: DialOpts::peer_id(disconnected.into_preimage()) + .condition(dial_opts::PeerCondition::NotDialing) .build(), }) } @@ -1226,9 +1325,9 @@ where } /// Handles a finished (i.e. successful) query. - fn query_finished(&mut self, q: Query) -> Option { + fn query_finished(&mut self, q: Query) -> Option { let query_id = q.id(); - log::trace!("Query {:?} finished.", query_id); + tracing::trace!(query=?query_id, "Query finished"); let result = q.into_result(); match result.inner.info { QueryInfo::Bootstrap { @@ -1261,13 +1360,13 @@ where // Pr(bucket-253) = 1 - (7/8)^16 ~= 0.88 // Pr(bucket-252) = 1 - (15/16)^16 ~= 0.64 // ... - let mut target = kbucket_priv::Key::from(PeerId::random()); + let mut target = kbucket::Key::from(PeerId::random()); for _ in 0..16 { let d = local_key.distance(&target); if b.contains(&d) { break; } - target = kbucket_priv::Key::from(PeerId::random()); + target = kbucket::Key::from(PeerId::random()); } target }) @@ -1291,7 +1390,7 @@ where step.last = true; }; - Some(KademliaEvent::OutboundQueryProgressed { + Some(Event::OutboundQueryProgressed { id: query_id, stats: result.stats, result: QueryResult::Bootstrap(Ok(BootstrapOk { @@ -1305,7 +1404,7 @@ where QueryInfo::GetClosestPeers { key, mut step } => { step.last = true; - Some(KademliaEvent::OutboundQueryProgressed { + Some(Event::OutboundQueryProgressed { id: query_id, stats: result.stats, result: QueryResult::GetClosestPeers(Ok(GetClosestPeersOk { @@ -1319,7 +1418,7 @@ where QueryInfo::GetProviders { mut step, .. 
} => { step.last = true; - Some(KademliaEvent::OutboundQueryProgressed { + Some(Event::OutboundQueryProgressed { id: query_id, stats: result.stats, result: QueryResult::GetProviders(Ok( @@ -1360,13 +1459,13 @@ where .. }, } => match context { - AddProviderContext::Publish => Some(KademliaEvent::OutboundQueryProgressed { + AddProviderContext::Publish => Some(Event::OutboundQueryProgressed { id: query_id, stats: get_closest_peers_stats.merge(result.stats), result: QueryResult::StartProviding(Ok(AddProviderOk { key })), step: ProgressStep::first_and_last(), }), - AddProviderContext::Republish => Some(KademliaEvent::OutboundQueryProgressed { + AddProviderContext::Republish => Some(Event::OutboundQueryProgressed { id: query_id, stats: get_closest_peers_stats.merge(result.stats), result: QueryResult::RepublishProvider(Ok(AddProviderOk { key })), @@ -1390,7 +1489,7 @@ where closest_peers: result.peers.collect(), }) }; - Some(KademliaEvent::OutboundQueryProgressed { + Some(Event::OutboundQueryProgressed { id: query_id, stats: result.stats, result: QueryResult::GetRecord(results), @@ -1428,7 +1527,7 @@ where get_closest_peers_stats, }, } => { - let mk_result = |key: record_priv::Key| { + let mk_result = |key: record::Key| { if success.len() >= quorum.get() { Ok(PutRecordOk { key }) } else { @@ -1441,21 +1540,21 @@ where }; match context { PutRecordContext::Publish | PutRecordContext::Custom => { - Some(KademliaEvent::OutboundQueryProgressed { + Some(Event::OutboundQueryProgressed { id: query_id, stats: get_closest_peers_stats.merge(result.stats), result: QueryResult::PutRecord(mk_result(record.key)), step: ProgressStep::first_and_last(), }) } - PutRecordContext::Republish => Some(KademliaEvent::OutboundQueryProgressed { + PutRecordContext::Republish => Some(Event::OutboundQueryProgressed { id: query_id, stats: get_closest_peers_stats.merge(result.stats), result: QueryResult::RepublishRecord(mk_result(record.key)), step: ProgressStep::first_and_last(), }), 
PutRecordContext::Replicate => { - debug!("Record replicated: {:?}", record.key); + tracing::debug!(record=?record.key, "Record replicated"); None } } @@ -1464,9 +1563,9 @@ where } /// Handles a query that timed out. - fn query_timeout(&mut self, query: Query) -> Option { + fn query_timeout(&mut self, query: Query) -> Option { let query_id = query.id(); - log::trace!("Query {:?} timed out.", query_id); + tracing::trace!(query=?query_id, "Query timed out"); let result = query.into_result(); match result.inner.info { QueryInfo::Bootstrap { @@ -1493,7 +1592,7 @@ where step.last = true; } - Some(KademliaEvent::OutboundQueryProgressed { + Some(Event::OutboundQueryProgressed { id: query_id, stats: result.stats, result: QueryResult::Bootstrap(Err(BootstrapError::Timeout { @@ -1505,13 +1604,13 @@ where } QueryInfo::AddProvider { context, key, .. } => Some(match context { - AddProviderContext::Publish => KademliaEvent::OutboundQueryProgressed { + AddProviderContext::Publish => Event::OutboundQueryProgressed { id: query_id, stats: result.stats, result: QueryResult::StartProviding(Err(AddProviderError::Timeout { key })), step: ProgressStep::first_and_last(), }, - AddProviderContext::Republish => KademliaEvent::OutboundQueryProgressed { + AddProviderContext::Republish => Event::OutboundQueryProgressed { id: query_id, stats: result.stats, result: QueryResult::RepublishProvider(Err(AddProviderError::Timeout { key })), @@ -1522,7 +1621,7 @@ where QueryInfo::GetClosestPeers { key, mut step } => { step.last = true; - Some(KademliaEvent::OutboundQueryProgressed { + Some(Event::OutboundQueryProgressed { id: query_id, stats: result.stats, result: QueryResult::GetClosestPeers(Err(GetClosestPeersError::Timeout { @@ -1549,14 +1648,14 @@ where }); match context { PutRecordContext::Publish | PutRecordContext::Custom => { - Some(KademliaEvent::OutboundQueryProgressed { + Some(Event::OutboundQueryProgressed { id: query_id, stats: result.stats, result: QueryResult::PutRecord(err), step: 
ProgressStep::first_and_last(), }) } - PutRecordContext::Republish => Some(KademliaEvent::OutboundQueryProgressed { + PutRecordContext::Republish => Some(Event::OutboundQueryProgressed { id: query_id, stats: result.stats, result: QueryResult::RepublishRecord(err), @@ -1564,11 +1663,14 @@ where }), PutRecordContext::Replicate => match phase { PutRecordPhase::GetClosestPeers => { - warn!("Locating closest peers for replication failed: {:?}", err); + tracing::warn!( + "Locating closest peers for replication failed: {:?}", + err + ); None } PutRecordPhase::PutRecord { .. } => { - debug!("Replicating record failed: {:?}", err); + tracing::debug!("Replicating record failed: {:?}", err); None } }, @@ -1578,7 +1680,7 @@ where QueryInfo::GetRecord { key, mut step, .. } => { step.last = true; - Some(KademliaEvent::OutboundQueryProgressed { + Some(Event::OutboundQueryProgressed { id: query_id, stats: result.stats, result: QueryResult::GetRecord(Err(GetRecordError::Timeout { key })), @@ -1589,7 +1691,7 @@ where QueryInfo::GetProviders { key, mut step, .. } => { step.last = true; - Some(KademliaEvent::OutboundQueryProgressed { + Some(Event::OutboundQueryProgressed { id: query_id, stats: result.stats, result: QueryResult::GetProviders(Err(GetProvidersError::Timeout { @@ -1607,7 +1709,7 @@ where &mut self, source: PeerId, connection: ConnectionId, - request_id: KademliaRequestId, + request_id: RequestId, mut record: Record, ) { if record.publisher.as_ref() == Some(self.kbuckets.local_key().preimage()) { @@ -1617,7 +1719,7 @@ where self.queued_events.push_back(ToSwarm::NotifyHandler { peer_id: source, handler: NotifyHandler::One(connection), - event: KademliaHandlerIn::PutRecordRes { + event: HandlerIn::PutRecordRes { key: record.key, value: record.value, request_id, @@ -1632,7 +1734,7 @@ where // number of nodes between the local node and the closest node to the key // (beyond the replication factor). This ensures avoiding over-caching // outside of the k closest nodes to a key. 
- let target = kbucket_priv::Key::new(record.key.clone()); + let target = kbucket::Key::new(record.key.clone()); let num_between = self.kbuckets.count_nodes_between(&target); let k = self.queries.config().replication_factor.get(); let num_beyond_k = (usize::max(k, num_between) - k) as u32; @@ -1666,15 +1768,15 @@ where // requirement to send back the value in the response, although this // is a waste of resources. match self.record_filtering { - KademliaStoreInserts::Unfiltered => match self.store.put(record.clone()) { + StoreInserts::Unfiltered => match self.store.put(record.clone()) { Ok(()) => { - debug!( - "Record stored: {:?}; {} bytes", - record.key, + tracing::debug!( + record=?record.key, + "Record stored: {} bytes", record.value.len() ); self.queued_events.push_back(ToSwarm::GenerateEvent( - KademliaEvent::InboundRequest { + Event::InboundRequest { request: InboundRequest::PutRecord { source, connection, @@ -1684,41 +1786,40 @@ where )); } Err(e) => { - info!("Record not stored: {:?}", e); + tracing::info!("Record not stored: {:?}", e); self.queued_events.push_back(ToSwarm::NotifyHandler { peer_id: source, handler: NotifyHandler::One(connection), - event: KademliaHandlerIn::Reset(request_id), + event: HandlerIn::Reset(request_id), }); return; } }, - KademliaStoreInserts::FilterBoth => { - self.queued_events.push_back(ToSwarm::GenerateEvent( - KademliaEvent::InboundRequest { + StoreInserts::FilterBoth => { + self.queued_events + .push_back(ToSwarm::GenerateEvent(Event::InboundRequest { request: InboundRequest::PutRecord { source, connection, record: Some(record.clone()), }, - }, - )); + })); } } } - // The remote receives a [`KademliaHandlerIn::PutRecordRes`] even in the + // The remote receives a [`HandlerIn::PutRecordRes`] even in the // case where the record is discarded due to being expired. 
Given that - // the remote sent the local node a [`KademliaHandlerEvent::PutRecord`] + // the remote sent the local node a [`HandlerEvent::PutRecord`] // request, the remote perceives the local node as one node among the k // closest nodes to the target. In addition returning - // [`KademliaHandlerIn::PutRecordRes`] does not reveal any internal + // [`HandlerIn::PutRecordRes`] does not reveal any internal // information to a possibly malicious remote node. self.queued_events.push_back(ToSwarm::NotifyHandler { peer_id: source, handler: NotifyHandler::One(connection), - event: KademliaHandlerIn::PutRecordRes { + event: HandlerIn::PutRecordRes { key: record.key, value: record.value, request_id, @@ -1727,7 +1828,7 @@ where } /// Processes a provider record received from a peer. - fn provider_received(&mut self, key: record_priv::Key, provider: KadPeer) { + fn provider_received(&mut self, key: record::Key, provider: KadPeer) { if &provider.node_id != self.kbuckets.local_key().preimage() { let record = ProviderRecord { key, @@ -1736,33 +1837,31 @@ where addresses: provider.multiaddrs, }; match self.record_filtering { - KademliaStoreInserts::Unfiltered => { + StoreInserts::Unfiltered => { if let Err(e) = self.store.add_provider(record) { - info!("Provider record not stored: {:?}", e); + tracing::info!("Provider record not stored: {:?}", e); return; } - self.queued_events.push_back(ToSwarm::GenerateEvent( - KademliaEvent::InboundRequest { + self.queued_events + .push_back(ToSwarm::GenerateEvent(Event::InboundRequest { request: InboundRequest::AddProvider { record: None }, - }, - )); + })); } - KademliaStoreInserts::FilterBoth => { - self.queued_events.push_back(ToSwarm::GenerateEvent( - KademliaEvent::InboundRequest { + StoreInserts::FilterBoth => { + self.queued_events + .push_back(ToSwarm::GenerateEvent(Event::InboundRequest { request: InboundRequest::AddProvider { record: Some(record), }, - }, - )); + })); } } } } fn address_failed(&mut self, peer_id: PeerId, address: 
&Multiaddr) { - let key = kbucket_priv::Key::from(peer_id); + let key = kbucket::Key::from(peer_id); if let Some(addrs) = self.kbuckets.entry(&key).value() { // TODO: Ideally, the address should only be removed if the error can @@ -1771,9 +1870,10 @@ where // of the error is not possible (and also not truly desirable or ergonomic). // The error passed in should rather be a dedicated enum. if addrs.remove(address).is_ok() { - debug!( - "Address '{}' removed from peer '{}' due to error.", - address, peer_id + tracing::debug!( + peer=%peer_id, + %address, + "Address removed from peer due to error." ); } else { // Despite apparently having no reachable address (any longer), @@ -1784,11 +1884,12 @@ where // and is unreachable in the context of another peer pending insertion // into the same bucket. This is handled transparently by the // `KBucketsTable` and takes effect through `KBucketsTable::take_applied_pending` - // within `Kademlia::poll`. - debug!( - "Last remaining address '{}' of peer '{}' is unreachable.", - address, peer_id, - ) + // within `Behaviour::poll`. + tracing::debug!( + peer=%peer_id, + %address, + "Last remaining address of peer is unreachable." + ); } } @@ -1812,29 +1913,8 @@ where self.address_failed(peer_id, addr); } - // When a connection is established, we don't know yet whether the - // remote supports the configured protocol name. Only once a connection - // handler reports [`KademliaHandlerEvent::ProtocolConfirmed`] do we - // update the local routing table. - // Peer's first connection. if other_established == 0 { - // Queue events for sending pending RPCs to the connected peer. - // There can be only one pending RPC for a particular peer and query per definition. 
- for (peer_id, event) in self.queries.iter_mut().filter_map(|q| { - q.inner - .pending_rpcs - .iter() - .position(|(p, _)| p == &peer_id) - .map(|p| q.inner.pending_rpcs.remove(p)) - }) { - self.queued_events.push_back(ToSwarm::NotifyHandler { - peer_id, - event, - handler: NotifyHandler::Any, - }); - } - self.connected_peers.insert(peer_id); } } @@ -1851,24 +1931,29 @@ where let (old, new) = (old.get_remote_address(), new.get_remote_address()); // Update routing table. - if let Some(addrs) = self.kbuckets.entry(&kbucket_priv::Key::from(peer)).value() { + if let Some(addrs) = self.kbuckets.entry(&kbucket::Key::from(peer)).value() { if addrs.replace(old, new) { - debug!( - "Address '{}' replaced with '{}' for peer '{}'.", - old, new, peer + tracing::debug!( + %peer, + old_address=%old, + new_address=%new, + "Old address replaced with new address for peer." ); } else { - debug!( - "Address '{}' not replaced with '{}' for peer '{}' as old address wasn't \ - present.", - old, new, peer + tracing::debug!( + %peer, + old_address=%old, + new_address=%new, + "Old address not replaced with new address for peer as old address wasn't present.", ); } } else { - debug!( - "Address '{}' not replaced with '{}' for peer '{}' as peer is not present in the \ - routing table.", - old, new, peer + tracing::debug!( + %peer, + old_address=%old, + new_address=%new, + "Old address not replaced with new address for peer as peer is not present in the \ + routing table." ); } @@ -1898,17 +1983,10 @@ where } fn on_dial_failure(&mut self, DialFailure { peer_id, error, .. }: DialFailure) { - let peer_id = match peer_id { - Some(id) => id, - // Not interested in dial failures to unknown peers. - None => return, - }; + let Some(peer_id) = peer_id else { return }; match error { - #[allow(deprecated)] - DialError::Banned - | DialError::LocalPeerId { .. } - | DialError::InvalidPeerId { .. } + DialError::LocalPeerId { .. } | DialError::WrongPeerId { .. } | DialError::Aborted | DialError::Denied { .. 
} @@ -1925,7 +2003,9 @@ where } } DialError::DialPeerConditionFalse( - dial_opts::PeerCondition::Disconnected | dial_opts::PeerCondition::NotDialing, + dial_opts::PeerCondition::Disconnected + | dial_opts::PeerCondition::NotDialing + | dial_opts::PeerCondition::DisconnectedAndNotDialing, ) => { // We might (still) be connected, or about to be connected, thus do not report the // failure to the queries. @@ -1933,8 +2013,6 @@ where DialError::DialPeerConditionFalse(dial_opts::PeerCondition::Always) => { unreachable!("DialPeerCondition::Always can not trigger DialPeerConditionFalse."); } - #[allow(deprecated)] - DialError::ConnectionLimit(_) => {} } } @@ -1943,9 +2021,12 @@ where ConnectionClosed { peer_id, remaining_established, + connection_id, .. - }: ConnectionClosed<::ConnectionHandler>, + }: ConnectionClosed, ) { + self.connections.remove(&connection_id); + if remaining_established == 0 { for query in self.queries.iter_mut() { query.on_failure(&peer_id); @@ -1954,6 +2035,27 @@ where self.connected_peers.remove(&peer_id); } } + + /// Preloads a new [`Handler`] with requests that are waiting to be sent to the newly connected peer. + fn preload_new_handler( + &mut self, + handler: &mut Handler, + connection_id: ConnectionId, + peer: PeerId, + ) { + self.connections.insert(connection_id, peer); + // Queue events for sending pending RPCs to the connected peer. + // There can be only one pending RPC for a particular peer and query per definition. + for (_peer_id, event) in self.queries.iter_mut().filter_map(|q| { + q.inner + .pending_rpcs + .iter() + .position(|(p, _)| p == &peer) + .map(|p| q.inner.pending_rpcs.remove(p)) + }) { + handler.on_behaviour_event(event) + } + } } /// Exponentially decrease the given duration (base 2). 
@@ -1961,53 +2063,57 @@ fn exp_decrease(ttl: Duration, exp: u32) -> Duration { Duration::from_secs(ttl.as_secs().checked_shr(exp).unwrap_or(0)) } -impl NetworkBehaviour for Kademlia +impl NetworkBehaviour for Behaviour where TStore: RecordStore + Send + 'static, { - type ConnectionHandler = KademliaHandler; - type OutEvent = KademliaEvent; + type ConnectionHandler = Handler; + type ToSwarm = Event; fn handle_established_inbound_connection( &mut self, - _connection_id: ConnectionId, + connection_id: ConnectionId, peer: PeerId, local_addr: &Multiaddr, remote_addr: &Multiaddr, ) -> Result, ConnectionDenied> { - Ok(KademliaHandler::new( - KademliaHandlerConfig { - protocol_config: self.protocol_config.clone(), - allow_listening: true, - idle_timeout: self.connection_idle_timeout, - }, - ConnectedPoint::Listener { - local_addr: local_addr.clone(), - send_back_addr: remote_addr.clone(), - }, + let connected_point = ConnectedPoint::Listener { + local_addr: local_addr.clone(), + send_back_addr: remote_addr.clone(), + }; + + let mut handler = Handler::new( + self.protocol_config.clone(), + connected_point, peer, - )) + self.mode, + ); + self.preload_new_handler(&mut handler, connection_id, peer); + + Ok(handler) } fn handle_established_outbound_connection( &mut self, - _connection_id: ConnectionId, + connection_id: ConnectionId, peer: PeerId, addr: &Multiaddr, role_override: Endpoint, ) -> Result, ConnectionDenied> { - Ok(KademliaHandler::new( - KademliaHandlerConfig { - protocol_config: self.protocol_config.clone(), - allow_listening: true, - idle_timeout: self.connection_idle_timeout, - }, - ConnectedPoint::Dialer { - address: addr.clone(), - role_override, - }, + let connected_point = ConnectedPoint::Dialer { + address: addr.clone(), + role_override, + }; + + let mut handler = Handler::new( + self.protocol_config.clone(), + connected_point, peer, - )) + self.mode, + ); + self.preload_new_handler(&mut handler, connection_id, peer); + + Ok(handler) } fn 
handle_pending_outbound_connection( @@ -2024,9 +2130,9 @@ where // We should order addresses from decreasing likelyhood of connectivity, so start with // the addresses of that peer in the k-buckets. - let key = kbucket_priv::Key::from(peer_id); + let key = kbucket::Key::from(peer_id); let mut peer_addrs = - if let kbucket_priv::Entry::Present(mut entry, _) = self.kbuckets.entry(&key) { + if let kbucket::Entry::Present(mut entry, _) = self.kbuckets.entry(&key) { let addrs = entry.value().iter().cloned().collect::>(); debug_assert!(!addrs.is_empty(), "Empty peer addresses in routing table."); addrs @@ -2051,7 +2157,7 @@ where event: THandlerOutEvent, ) { match event { - KademliaHandlerEvent::ProtocolConfirmed { endpoint } => { + HandlerEvent::ProtocolConfirmed { endpoint } => { debug_assert!(self.connected_peers.contains(&source)); // The remote's address can only be put into the routing table, // and thus shared with other nodes, if the local node is the dialer, @@ -2061,54 +2167,61 @@ where ConnectedPoint::Dialer { address, .. } => Some(address), ConnectedPoint::Listener { .. } => None, }; + self.connection_updated(source, address, NodeStatus::Connected); } - KademliaHandlerEvent::FindNodeReq { key, request_id } => { - let closer_peers = self.find_closest(&kbucket_priv::Key::new(key), &source); + HandlerEvent::ProtocolNotSupported { endpoint } => { + let address = match endpoint { + ConnectedPoint::Dialer { address, .. } => Some(address), + ConnectedPoint::Listener { .. 
} => None, + }; + self.connection_updated(source, address, NodeStatus::Disconnected); + } + + HandlerEvent::FindNodeReq { key, request_id } => { + let closer_peers = self.find_closest(&kbucket::Key::new(key), &source); - self.queued_events.push_back(ToSwarm::GenerateEvent( - KademliaEvent::InboundRequest { + self.queued_events + .push_back(ToSwarm::GenerateEvent(Event::InboundRequest { request: InboundRequest::FindNode { num_closer_peers: closer_peers.len(), }, - }, - )); + })); self.queued_events.push_back(ToSwarm::NotifyHandler { peer_id: source, handler: NotifyHandler::One(connection), - event: KademliaHandlerIn::FindNodeRes { + event: HandlerIn::FindNodeRes { closer_peers, request_id, }, }); } - KademliaHandlerEvent::FindNodeRes { + HandlerEvent::FindNodeRes { closer_peers, - user_data, + query_id, } => { - self.discovered(&user_data, &source, closer_peers.iter()); + self.discovered(&query_id, &source, closer_peers.iter()); } - KademliaHandlerEvent::GetProvidersReq { key, request_id } => { + HandlerEvent::GetProvidersReq { key, request_id } => { let provider_peers = self.provider_peers(&key, &source); - let closer_peers = self.find_closest(&kbucket_priv::Key::new(key), &source); + let closer_peers = self.find_closest(&kbucket::Key::new(key), &source); - self.queued_events.push_back(ToSwarm::GenerateEvent( - KademliaEvent::InboundRequest { + self.queued_events + .push_back(ToSwarm::GenerateEvent(Event::InboundRequest { request: InboundRequest::GetProvider { num_closer_peers: closer_peers.len(), num_provider_peers: provider_peers.len(), }, - }, - )); + })); self.queued_events.push_back(ToSwarm::NotifyHandler { peer_id: source, handler: NotifyHandler::One(connection), - event: KademliaHandlerIn::GetProvidersRes { + event: HandlerIn::GetProvidersRes { closer_peers, provider_peers, request_id, @@ -2116,14 +2229,14 @@ where }); } - KademliaHandlerEvent::GetProvidersRes { + HandlerEvent::GetProvidersRes { closer_peers, provider_peers, - user_data, + query_id, } => { 
let peers = closer_peers.iter().chain(provider_peers.iter()); - self.discovered(&user_data, &source, peers); - if let Some(query) = self.queries.get_mut(&user_data) { + self.discovered(&query_id, &source, peers); + if let Some(query) = self.queries.get_mut(&query_id) { let stats = query.stats().clone(); if let QueryInfo::GetProviders { ref key, @@ -2136,8 +2249,8 @@ where let providers = provider_peers.iter().map(|p| p.node_id).collect(); self.queued_events.push_back(ToSwarm::GenerateEvent( - KademliaEvent::OutboundQueryProgressed { - id: user_data, + Event::OutboundQueryProgressed { + id: query_id, result: QueryResult::GetProviders(Ok( GetProvidersOk::FoundProviders { key: key.clone(), @@ -2152,22 +2265,21 @@ where } } } - - KademliaHandlerEvent::QueryError { user_data, error } => { - log::debug!( - "Request to {:?} in query {:?} failed with {:?}", - source, - user_data, + HandlerEvent::QueryError { query_id, error } => { + tracing::debug!( + peer=%source, + query=?query_id, + "Request to peer in query failed with {:?}", error ); // If the query to which the error relates is still active, // signal the failure w.r.t. `source`. - if let Some(query) = self.queries.get_mut(&user_data) { + if let Some(query) = self.queries.get_mut(&query_id) { query.on_failure(&source) } } - KademliaHandlerEvent::AddProvider { key, provider } => { + HandlerEvent::AddProvider { key, provider } => { // Only accept a provider record from a legitimate peer. if provider.node_id != source { return; @@ -2176,7 +2288,7 @@ where self.provider_received(key, provider); } - KademliaHandlerEvent::GetRecord { key, request_id } => { + HandlerEvent::GetRecord { key, request_id } => { // Lookup the record locally. 
let record = match self.store.get(&key) { Some(record) => { @@ -2190,21 +2302,20 @@ where None => None, }; - let closer_peers = self.find_closest(&kbucket_priv::Key::new(key), &source); + let closer_peers = self.find_closest(&kbucket::Key::new(key), &source); - self.queued_events.push_back(ToSwarm::GenerateEvent( - KademliaEvent::InboundRequest { + self.queued_events + .push_back(ToSwarm::GenerateEvent(Event::InboundRequest { request: InboundRequest::GetRecord { num_closer_peers: closer_peers.len(), present_locally: record.is_some(), }, - }, - )); + })); self.queued_events.push_back(ToSwarm::NotifyHandler { peer_id: source, handler: NotifyHandler::One(connection), - event: KademliaHandlerIn::GetRecordRes { + event: HandlerIn::GetRecordRes { record, closer_peers, request_id, @@ -2212,12 +2323,12 @@ where }); } - KademliaHandlerEvent::GetRecordRes { + HandlerEvent::GetRecordRes { record, closer_peers, - user_data, + query_id, } => { - if let Some(query) = self.queries.get_mut(&user_data) { + if let Some(query) = self.queries.get_mut(&query_id) { let stats = query.stats().clone(); if let QueryInfo::GetRecord { key, @@ -2234,8 +2345,8 @@ where }; self.queued_events.push_back(ToSwarm::GenerateEvent( - KademliaEvent::OutboundQueryProgressed { - id: user_data, + Event::OutboundQueryProgressed { + id: query_id, result: QueryResult::GetRecord(Ok(GetRecordOk::FoundRecord( record, ))), @@ -2246,10 +2357,10 @@ where *step = step.next(); } else { - log::trace!("Record with key {:?} not found at {}", key, source); - if let KademliaCaching::Enabled { max_peers } = self.caching { - let source_key = kbucket_priv::Key::from(source); - let target_key = kbucket_priv::Key::from(key.clone()); + tracing::trace!(record=?key, %source, "Record not found at source"); + if let Caching::Enabled { max_peers } = self.caching { + let source_key = kbucket::Key::from(source); + let target_key = kbucket::Key::from(key.clone()); let distance = source_key.distance(&target_key); 
cache_candidates.insert(distance, source); if cache_candidates.len() > max_peers as usize { @@ -2264,15 +2375,15 @@ where } } - self.discovered(&user_data, &source, closer_peers.iter()); + self.discovered(&query_id, &source, closer_peers.iter()); } - KademliaHandlerEvent::PutRecord { record, request_id } => { + HandlerEvent::PutRecord { record, request_id } => { self.record_received(source, connection, request_id, record); } - KademliaHandlerEvent::PutRecordRes { user_data, .. } => { - if let Some(query) = self.queries.get_mut(&user_data) { + HandlerEvent::PutRecordRes { query_id, .. } => { + if let Some(query) = self.queries.get_mut(&query_id) { query.on_success(&source, vec![]); if let QueryInfo::PutRecord { phase: PutRecordPhase::PutRecord { success, .. }, @@ -2287,13 +2398,13 @@ where let peers = success.clone(); let finished = query.try_finish(peers.iter()); if !finished { - debug!( - "PutRecord query ({:?}) reached quorum ({}/{}) with response \ - from peer {} but could not yet finish.", - user_data, + tracing::debug!( + peer=%source, + query=?query_id, + "PutRecord query reached quorum ({}/{}) with response \ + from peer but could not yet finish.", peers.len(), quorum, - source, ); } } @@ -2303,11 +2414,11 @@ where }; } + #[tracing::instrument(level = "trace", name = "NetworkBehaviour::poll", skip(self, cx))] fn poll( &mut self, cx: &mut Context<'_>, - _: &mut impl PollParameters, - ) -> Poll>> { + ) -> Poll>> { let now = Instant::now(); // Calculate the available capacity for queries triggered by background jobs. @@ -2354,8 +2465,8 @@ where // Drain applied pending entries from the routing table. 
if let Some(entry) = self.kbuckets.take_applied_pending() { - let kbucket_priv::Node { key, value } = entry.inserted; - let event = KademliaEvent::RoutingUpdated { + let kbucket::Node { key, value } = entry.inserted; + let event = Event::RoutingUpdated { bucket_range: self .kbuckets .bucket(&key) @@ -2406,7 +2517,9 @@ where } else if &peer_id != self.kbuckets.local_key().preimage() { query.inner.pending_rpcs.push((peer_id, event)); self.queued_events.push_back(ToSwarm::Dial { - opts: DialOpts::peer_id(peer_id).build(), + opts: DialOpts::peer_id(peer_id) + .condition(dial_opts::PeerCondition::NotDialing) + .build(), }); } } @@ -2418,14 +2531,20 @@ where // If no new events have been queued either, signal `NotReady` to // be polled again later. if self.queued_events.is_empty() { + self.no_events_waker = Some(cx.waker().clone()); + return Poll::Pending; } } } - fn on_swarm_event(&mut self, event: FromSwarm) { + fn on_swarm_event(&mut self, event: FromSwarm) { self.listen_addresses.on_swarm_event(&event); - self.external_addresses.on_swarm_event(&event); + let external_addresses_changed = self.external_addresses.on_swarm_event(&event); + + if self.auto_mode && external_addresses_changed { + self.determine_mode_from_external_addresses(); + } match event { FromSwarm::ConnectionEstablished(connection_established) => { @@ -2436,14 +2555,7 @@ where } FromSwarm::DialFailure(dial_failure) => self.on_dial_failure(dial_failure), FromSwarm::AddressChange(address_change) => self.on_address_change(address_change), - FromSwarm::ExpiredListenAddr(_) - | FromSwarm::NewExternalAddr(_) - | FromSwarm::NewListenAddr(_) - | FromSwarm::ListenFailure(_) - | FromSwarm::NewListener(_) - | FromSwarm::ListenerClosed(_) - | FromSwarm::ListenerError(_) - | FromSwarm::ExpiredExternalAddr(_) => {} + _ => {} } } } @@ -2489,7 +2601,7 @@ pub struct PeerRecord { /// See [`NetworkBehaviour::poll`]. 
#[derive(Debug, Clone)] #[allow(clippy::large_enum_variant)] -pub enum KademliaEvent { +pub enum Event { /// An inbound request has been received and handled. // // Note on the difference between 'request' and 'query': A request is a @@ -2519,7 +2631,7 @@ pub enum KademliaEvent { is_new_peer: bool, /// The full list of known addresses of `peer`. addresses: Addresses, - /// Returns the minimum inclusive and maximum inclusive [`Distance`] for + /// Returns the minimum inclusive and maximum inclusive distance for /// the bucket of the peer. bucket_range: (Distance, Distance), /// The ID of the peer that was evicted from the routing table to make @@ -2530,19 +2642,19 @@ pub enum KademliaEvent { /// A peer has connected for whom no listen address is known. /// /// If the peer is to be added to the routing table, a known - /// listen address for the peer must be provided via [`Kademlia::add_address`]. + /// listen address for the peer must be provided via [`Behaviour::add_address`]. UnroutablePeer { peer: PeerId }, /// A connection to a peer has been established for whom a listen address /// is known but the peer has not been added to the routing table either - /// because [`KademliaBucketInserts::Manual`] is configured or because + /// because [`BucketInserts::Manual`] is configured or because /// the corresponding bucket is full. /// /// If the peer is to be included in the routing table, it must - /// must be explicitly added via [`Kademlia::add_address`], possibly after + /// must be explicitly added via [`Behaviour::add_address`], possibly after /// removing another peer. /// - /// See [`Kademlia::kbucket`] for insight into the contents of + /// See [`Behaviour::kbucket`] for insight into the contents of /// the k-bucket of `peer`. RoutablePeer { peer: PeerId, address: Multiaddr }, @@ -2552,12 +2664,18 @@ pub enum KademliaEvent { /// may not make it into the routing table. 
/// /// If the peer is to be unconditionally included in the routing table, - /// it should be explicitly added via [`Kademlia::add_address`] after + /// it should be explicitly added via [`Behaviour::add_address`] after /// removing another peer. /// - /// See [`Kademlia::kbucket`] for insight into the contents of + /// See [`Behaviour::kbucket`] for insight into the contents of /// the k-bucket of `peer`. PendingRoutablePeer { peer: PeerId, address: Multiaddr }, + + /// This peer's mode has been updated automatically. + /// + /// This happens in response to an external + /// address being added or removed. + ModeChanged { new_mode: Mode }, } /// Information about progress events. @@ -2602,21 +2720,21 @@ pub enum InboundRequest { num_closer_peers: usize, num_provider_peers: usize, }, - /// A peer sent a [`KademliaHandlerIn::AddProvider`] request. - /// If filtering [`KademliaStoreInserts::FilterBoth`] is enabled, the [`ProviderRecord`] is + /// A peer sent an add provider request. + /// If filtering [`StoreInserts::FilterBoth`] is enabled, the [`ProviderRecord`] is /// included. /// - /// See [`KademliaStoreInserts`] and [`KademliaConfig::set_record_filtering`] for details.. + /// See [`StoreInserts`] and [`Config::set_record_filtering`] for details.. AddProvider { record: Option }, /// Request to retrieve a record. GetRecord { num_closer_peers: usize, present_locally: bool, }, - /// A peer sent a [`KademliaHandlerIn::PutRecord`] request. - /// If filtering [`KademliaStoreInserts::FilterBoth`] is enabled, the [`Record`] is included. + /// A peer sent a put record request. + /// If filtering [`StoreInserts::FilterBoth`] is enabled, the [`Record`] is included. /// - /// See [`KademliaStoreInserts`] and [`KademliaConfig::set_record_filtering`]. + /// See [`StoreInserts`] and [`Config::set_record_filtering`]. PutRecord { source: PeerId, connection: ConnectionId, @@ -2627,35 +2745,35 @@ pub enum InboundRequest { /// The results of Kademlia queries. 
#[derive(Debug, Clone)] pub enum QueryResult { - /// The result of [`Kademlia::bootstrap`]. + /// The result of [`Behaviour::bootstrap`]. Bootstrap(BootstrapResult), - /// The result of [`Kademlia::get_closest_peers`]. + /// The result of [`Behaviour::get_closest_peers`]. GetClosestPeers(GetClosestPeersResult), - /// The result of [`Kademlia::get_providers`]. + /// The result of [`Behaviour::get_providers`]. GetProviders(GetProvidersResult), - /// The result of [`Kademlia::start_providing`]. + /// The result of [`Behaviour::start_providing`]. StartProviding(AddProviderResult), /// The result of a (automatic) republishing of a provider record. RepublishProvider(AddProviderResult), - /// The result of [`Kademlia::get_record`]. + /// The result of [`Behaviour::get_record`]. GetRecord(GetRecordResult), - /// The result of [`Kademlia::put_record`]. + /// The result of [`Behaviour::put_record`]. PutRecord(PutRecordResult), /// The result of a (automatic) republishing of a (value-)record. RepublishRecord(PutRecordResult), } -/// The result of [`Kademlia::get_record`]. +/// The result of [`Behaviour::get_record`]. pub type GetRecordResult = Result; -/// The successful result of [`Kademlia::get_record`]. +/// The successful result of [`Behaviour::get_record`]. #[derive(Debug, Clone)] pub enum GetRecordOk { FoundRecord(PeerRecord), @@ -2664,36 +2782,36 @@ pub enum GetRecordOk { /// _to the record key_ (not the local node) that were queried but /// did not return the record, sorted by distance to the record key /// from closest to farthest. How many of these are tracked is configured - /// by [`KademliaConfig::set_caching`]. If the lookup used a quorum of - /// 1, these peers will be sent the record as a means of caching. - /// If the lookup used a quorum > 1, you may wish to use these - /// candidates with [`Kademlia::put_record_to`] after selecting - /// one of the returned records. - cache_candidates: BTreeMap, + /// by [`Config::set_caching`]. 
+ /// + /// Writing back the cache at these peers is a manual operation. + /// ie. you may wish to use these candidates with [`Behaviour::put_record_to`] + /// after selecting one of the returned records. + cache_candidates: BTreeMap, }, } -/// The error result of [`Kademlia::get_record`]. +/// The error result of [`Behaviour::get_record`]. #[derive(Debug, Clone, Error)] pub enum GetRecordError { #[error("the record was not found")] NotFound { - key: record_priv::Key, + key: record::Key, closest_peers: Vec, }, #[error("the quorum failed; needed {quorum} peers")] QuorumFailed { - key: record_priv::Key, + key: record::Key, records: Vec, quorum: NonZeroUsize, }, #[error("the request timed out")] - Timeout { key: record_priv::Key }, + Timeout { key: record::Key }, } impl GetRecordError { /// Gets the key of the record for which the operation failed. - pub fn key(&self) -> &record_priv::Key { + pub fn key(&self) -> &record::Key { match self { GetRecordError::QuorumFailed { key, .. } => key, GetRecordError::Timeout { key, .. } => key, @@ -2703,7 +2821,7 @@ impl GetRecordError { /// Extracts the key of the record for which the operation failed, /// consuming the error. - pub fn into_key(self) -> record_priv::Key { + pub fn into_key(self) -> record::Key { match self { GetRecordError::QuorumFailed { key, .. } => key, GetRecordError::Timeout { key, .. } => key, @@ -2712,28 +2830,28 @@ impl GetRecordError { } } -/// The result of [`Kademlia::put_record`]. +/// The result of [`Behaviour::put_record`]. pub type PutRecordResult = Result; -/// The successful result of [`Kademlia::put_record`]. +/// The successful result of [`Behaviour::put_record`]. #[derive(Debug, Clone)] pub struct PutRecordOk { - pub key: record_priv::Key, + pub key: record::Key, } -/// The error result of [`Kademlia::put_record`]. +/// The error result of [`Behaviour::put_record`]. 
#[derive(Debug, Clone, Error)] pub enum PutRecordError { #[error("the quorum failed; needed {quorum} peers")] QuorumFailed { - key: record_priv::Key, + key: record::Key, /// [`PeerId`]s of the peers the record was successfully stored on. success: Vec, quorum: NonZeroUsize, }, #[error("the request timed out")] Timeout { - key: record_priv::Key, + key: record::Key, /// [`PeerId`]s of the peers the record was successfully stored on. success: Vec, quorum: NonZeroUsize, @@ -2742,7 +2860,7 @@ pub enum PutRecordError { impl PutRecordError { /// Gets the key of the record for which the operation failed. - pub fn key(&self) -> &record_priv::Key { + pub fn key(&self) -> &record::Key { match self { PutRecordError::QuorumFailed { key, .. } => key, PutRecordError::Timeout { key, .. } => key, @@ -2751,7 +2869,7 @@ impl PutRecordError { /// Extracts the key of the record for which the operation failed, /// consuming the error. - pub fn into_key(self) -> record_priv::Key { + pub fn into_key(self) -> record::Key { match self { PutRecordError::QuorumFailed { key, .. } => key, PutRecordError::Timeout { key, .. } => key, @@ -2759,17 +2877,17 @@ impl PutRecordError { } } -/// The result of [`Kademlia::bootstrap`]. +/// The result of [`Behaviour::bootstrap`]. pub type BootstrapResult = Result; -/// The successful result of [`Kademlia::bootstrap`]. +/// The successful result of [`Behaviour::bootstrap`]. #[derive(Debug, Clone)] pub struct BootstrapOk { pub peer: PeerId, pub num_remaining: u32, } -/// The error result of [`Kademlia::bootstrap`]. +/// The error result of [`Behaviour::bootstrap`]. #[derive(Debug, Clone, Error)] pub enum BootstrapError { #[error("the request timed out")] @@ -2779,17 +2897,17 @@ pub enum BootstrapError { }, } -/// The result of [`Kademlia::get_closest_peers`]. +/// The result of [`Behaviour::get_closest_peers`]. pub type GetClosestPeersResult = Result; -/// The successful result of [`Kademlia::get_closest_peers`]. 
+/// The successful result of [`Behaviour::get_closest_peers`]. #[derive(Debug, Clone)] pub struct GetClosestPeersOk { pub key: Vec, pub peers: Vec, } -/// The error result of [`Kademlia::get_closest_peers`]. +/// The error result of [`Behaviour::get_closest_peers`]. #[derive(Debug, Clone, Error)] pub enum GetClosestPeersError { #[error("the request timed out")] @@ -2813,14 +2931,14 @@ impl GetClosestPeersError { } } -/// The result of [`Kademlia::get_providers`]. +/// The result of [`Behaviour::get_providers`]. pub type GetProvidersResult = Result; -/// The successful result of [`Kademlia::get_providers`]. +/// The successful result of [`Behaviour::get_providers`]. #[derive(Debug, Clone)] pub enum GetProvidersOk { FoundProviders { - key: record_priv::Key, + key: record::Key, /// The new set of providers discovered. providers: HashSet, }, @@ -2829,19 +2947,19 @@ pub enum GetProvidersOk { }, } -/// The error result of [`Kademlia::get_providers`]. +/// The error result of [`Behaviour::get_providers`]. #[derive(Debug, Clone, Error)] pub enum GetProvidersError { #[error("the request timed out")] Timeout { - key: record_priv::Key, + key: record::Key, closest_peers: Vec, }, } impl GetProvidersError { /// Gets the key for which the operation failed. - pub fn key(&self) -> &record_priv::Key { + pub fn key(&self) -> &record::Key { match self { GetProvidersError::Timeout { key, .. } => key, } @@ -2849,7 +2967,7 @@ impl GetProvidersError { /// Extracts the key for which the operation failed, /// consuming the error. - pub fn into_key(self) -> record_priv::Key { + pub fn into_key(self) -> record::Key { match self { GetProvidersError::Timeout { key, .. } => key, } @@ -2862,40 +2980,40 @@ pub type AddProviderResult = Result; /// The successful result of publishing a provider record. #[derive(Debug, Clone)] pub struct AddProviderOk { - pub key: record_priv::Key, + pub key: record::Key, } /// The possible errors when publishing a provider record. 
#[derive(Debug, Clone, Error)] pub enum AddProviderError { #[error("the request timed out")] - Timeout { key: record_priv::Key }, + Timeout { key: record::Key }, } impl AddProviderError { /// Gets the key for which the operation failed. - pub fn key(&self) -> &record_priv::Key { + pub fn key(&self) -> &record::Key { match self { AddProviderError::Timeout { key, .. } => key, } } /// Extracts the key for which the operation failed, - pub fn into_key(self) -> record_priv::Key { + pub fn into_key(self) -> record::Key { match self { AddProviderError::Timeout { key, .. } => key, } } } -impl From, Addresses>> for KadPeer { - fn from(e: kbucket_priv::EntryView, Addresses>) -> KadPeer { +impl From, Addresses>> for KadPeer { + fn from(e: kbucket::EntryView, Addresses>) -> KadPeer { KadPeer { node_id: e.node.key.into_preimage(), multiaddrs: e.node.value.into_vec(), connection_ty: match e.status { - NodeStatus::Connected => KadConnectionType::Connected, - NodeStatus::Disconnected => KadConnectionType::NotConnected, + NodeStatus::Connected => ConnectionType::Connected, + NodeStatus::Disconnected => ConnectionType::NotConnected, }, } } @@ -2913,7 +3031,7 @@ struct QueryInner { /// /// A request is pending if the targeted peer is not currently connected /// and these requests are sent as soon as a connection to the peer is established. - pending_rpcs: SmallVec<[(PeerId, KademliaHandlerIn); K_VALUE.get()]>, + pending_rpcs: SmallVec<[(PeerId, HandlerIn); K_VALUE.get()]>, } impl QueryInner { @@ -2929,33 +3047,33 @@ impl QueryInner { /// The context of a [`QueryInfo::AddProvider`] query. #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub enum AddProviderContext { - /// The context is a [`Kademlia::start_providing`] operation. + /// The context is a [`Behaviour::start_providing`] operation. Publish, /// The context is periodic republishing of provider announcements - /// initiated earlier via [`Kademlia::start_providing`]. + /// initiated earlier via [`Behaviour::start_providing`]. 
Republish, } /// The context of a [`QueryInfo::PutRecord`] query. #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub enum PutRecordContext { - /// The context is a [`Kademlia::put_record`] operation. + /// The context is a [`Behaviour::put_record`] operation. Publish, /// The context is periodic republishing of records stored - /// earlier via [`Kademlia::put_record`]. + /// earlier via [`Behaviour::put_record`]. Republish, /// The context is periodic replication (i.e. without extending /// the record TTL) of stored records received earlier from another peer. Replicate, /// The context is a custom store operation targeting specific - /// peers initiated by [`Kademlia::put_record_to`]. + /// peers initiated by [`Behaviour::put_record_to`]. Custom, } /// Information about a running query. #[derive(Debug, Clone)] pub enum QueryInfo { - /// A query initiated by [`Kademlia::bootstrap`]. + /// A query initiated by [`Behaviour::bootstrap`]. Bootstrap { /// The targeted peer ID. peer: PeerId, @@ -2965,11 +3083,11 @@ pub enum QueryInfo { /// This is `None` if the initial self-lookup has not /// yet completed and `Some` with an exhausted iterator /// if bootstrapping is complete. - remaining: Option>>, + remaining: Option>>, step: ProgressStep, }, - /// A (repeated) query initiated by [`Kademlia::get_closest_peers`]. + /// A (repeated) query initiated by [`Behaviour::get_closest_peers`]. GetClosestPeers { /// The key being queried (the preimage). key: Vec, @@ -2977,27 +3095,27 @@ pub enum QueryInfo { step: ProgressStep, }, - /// A (repeated) query initiated by [`Kademlia::get_providers`]. + /// A (repeated) query initiated by [`Behaviour::get_providers`]. GetProviders { /// The key for which to search for providers. - key: record_priv::Key, + key: record::Key, /// The number of providers found so far. providers_found: usize, /// Current index of events. step: ProgressStep, }, - /// A (repeated) query initiated by [`Kademlia::start_providing`]. 
+ /// A (repeated) query initiated by [`Behaviour::start_providing`]. AddProvider { /// The record key. - key: record_priv::Key, + key: record::Key, /// The current phase of the query. phase: AddProviderPhase, /// The execution context of the query. context: AddProviderContext, }, - /// A (repeated) query initiated by [`Kademlia::put_record`]. + /// A (repeated) query initiated by [`Behaviour::put_record`]. PutRecord { record: Record, /// The expected quorum of responses w.r.t. the replication factor. @@ -3008,67 +3126,68 @@ pub enum QueryInfo { context: PutRecordContext, }, - /// A (repeated) query initiated by [`Kademlia::get_record`]. + /// A (repeated) query initiated by [`Behaviour::get_record`]. GetRecord { /// The key to look for. - key: record_priv::Key, + key: record::Key, /// Current index of events. step: ProgressStep, /// Did we find at least one record? found_a_record: bool, /// The peers closest to the `key` that were queried but did not return a record, /// i.e. the peers that are candidates for caching the record. - cache_candidates: BTreeMap, + cache_candidates: BTreeMap, }, } impl QueryInfo { /// Creates an event for a handler to issue an outgoing request in the /// context of a query. - fn to_request(&self, query_id: QueryId) -> KademliaHandlerIn { + fn to_request(&self, query_id: QueryId) -> HandlerIn { match &self { - QueryInfo::Bootstrap { peer, .. } => KademliaHandlerIn::FindNodeReq { + QueryInfo::Bootstrap { peer, .. } => HandlerIn::FindNodeReq { key: peer.to_bytes(), - user_data: query_id, + query_id, }, - QueryInfo::GetClosestPeers { key, .. } => KademliaHandlerIn::FindNodeReq { + QueryInfo::GetClosestPeers { key, .. } => HandlerIn::FindNodeReq { key: key.clone(), - user_data: query_id, + query_id, }, - QueryInfo::GetProviders { key, .. } => KademliaHandlerIn::GetProvidersReq { + QueryInfo::GetProviders { key, .. 
} => HandlerIn::GetProvidersReq { key: key.clone(), - user_data: query_id, + query_id, }, QueryInfo::AddProvider { key, phase, .. } => match phase { - AddProviderPhase::GetClosestPeers => KademliaHandlerIn::FindNodeReq { + AddProviderPhase::GetClosestPeers => HandlerIn::FindNodeReq { key: key.to_vec(), - user_data: query_id, + query_id, }, AddProviderPhase::AddProvider { provider_id, external_addresses, .. - } => KademliaHandlerIn::AddProvider { + } => HandlerIn::AddProvider { key: key.clone(), provider: crate::protocol::KadPeer { node_id: *provider_id, multiaddrs: external_addresses.clone(), - connection_ty: crate::protocol::KadConnectionType::Connected, + connection_ty: crate::protocol::ConnectionType::Connected, }, + query_id, }, }, - QueryInfo::GetRecord { key, .. } => KademliaHandlerIn::GetRecord { + QueryInfo::GetRecord { key, .. } => HandlerIn::GetRecord { key: key.clone(), - user_data: query_id, + query_id, }, QueryInfo::PutRecord { record, phase, .. } => match phase { - PutRecordPhase::GetClosestPeers => KademliaHandlerIn::FindNodeReq { + PutRecordPhase::GetClosestPeers => HandlerIn::FindNodeReq { key: record.key.to_vec(), - user_data: query_id, + query_id, }, - PutRecordPhase::PutRecord { .. } => KademliaHandlerIn::PutRecord { + PutRecordPhase::PutRecord { .. } => HandlerIn::PutRecord { record: record.clone(), - user_data: query_id, + query_id, }, }, } @@ -3174,7 +3293,8 @@ impl fmt::Display for NoKnownPeers { impl std::error::Error for NoKnownPeers {} -/// The possible outcomes of [`Kademlia::add_address`]. +/// The possible outcomes of [`Behaviour::add_address`]. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum RoutingUpdate { /// The given peer and address has been added to the routing /// table. @@ -3182,7 +3302,7 @@ pub enum RoutingUpdate { /// The peer and address is pending insertion into /// the routing table, if a disconnected peer fails /// to respond. 
If the given peer and address ends up - /// in the routing table, [`KademliaEvent::RoutingUpdated`] + /// in the routing table, [`Event::RoutingUpdated`] /// is eventually emitted. Pending, /// The routing table update failed, either because the @@ -3192,3 +3312,29 @@ pub enum RoutingUpdate { /// peer ID). Failed, } + +#[derive(PartialEq, Copy, Clone, Debug)] +pub enum Mode { + Client, + Server, +} + +impl fmt::Display for Mode { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Mode::Client => write!(f, "client"), + Mode::Server => write!(f, "server"), + } + } +} + +fn to_comma_separated_list(confirmed_external_addresses: &[T]) -> String +where + T: ToString, +{ + confirmed_external_addresses + .iter() + .map(|addr| addr.to_string()) + .collect::>() + .join(", ") +} diff --git a/protocols/kad/src/behaviour/test.rs b/protocols/kad/src/behaviour/test.rs index 3f03842aff5..522eebcba92 100644 --- a/protocols/kad/src/behaviour/test.rs +++ b/protocols/kad/src/behaviour/test.rs @@ -22,8 +22,8 @@ use super::*; -use crate::kbucket_priv::Distance; -use crate::record_priv::{store::MemoryStore, Key}; +use crate::kbucket::Distance; +use crate::record::{store::MemoryStore, Key}; use crate::{K_VALUE, SHA_256_MH}; use futures::{executor::block_on, future::poll_fn, prelude::*}; use futures_timer::Delay; @@ -37,7 +37,8 @@ use libp2p_core::{ use libp2p_identity as identity; use libp2p_identity::PeerId; use libp2p_noise as noise; -use libp2p_swarm::{ConnectionId, Swarm, SwarmBuilder, SwarmEvent}; +use libp2p_swarm::behaviour::ConnectionEstablished; +use libp2p_swarm::{self as swarm, ConnectionId, Swarm, SwarmEvent}; use libp2p_yamux as yamux; use quickcheck::*; use rand::{random, rngs::StdRng, thread_rng, Rng, SeedableRng}; @@ -48,13 +49,13 @@ use std::{ u64, }; -type TestSwarm = Swarm>; +type TestSwarm = Swarm>; fn build_node() -> (Multiaddr, TestSwarm) { build_node_with_config(Default::default()) } -fn build_node_with_config(cfg: KademliaConfig) -> 
(Multiaddr, TestSwarm) { +fn build_node_with_config(cfg: Config) -> (Multiaddr, TestSwarm) { let local_key = identity::Keypair::generate_ed25519(); let local_public_key = local_key.public(); let transport = MemoryTransport::default() @@ -65,12 +66,19 @@ fn build_node_with_config(cfg: KademliaConfig) -> (Multiaddr, TestSwarm) { let local_id = local_public_key.to_peer_id(); let store = MemoryStore::new(local_id); - let behaviour = Kademlia::with_config(local_id, store, cfg); - - let mut swarm = SwarmBuilder::without_executor(transport, behaviour, local_id).build(); + let behaviour = Behaviour::with_config(local_id, store, cfg); + + let mut swarm = Swarm::new( + transport, + behaviour, + local_id, + swarm::Config::with_async_std_executor() + .with_idle_connection_timeout(Duration::from_secs(5)), + ); let address: Multiaddr = Protocol::Memory(random::()).into(); swarm.listen_on(address.clone()).unwrap(); + swarm.add_external_address(address.clone()); (address, swarm) } @@ -81,7 +89,7 @@ fn build_nodes(num: usize) -> Vec<(Multiaddr, TestSwarm)> { } /// Builds swarms, each listening on a port. Does *not* connect the nodes together. 
-fn build_nodes_with_config(num: usize, cfg: KademliaConfig) -> Vec<(Multiaddr, TestSwarm)> { +fn build_nodes_with_config(num: usize, cfg: Config) -> Vec<(Multiaddr, TestSwarm)> { (0..num) .map(|_| build_node_with_config(cfg.clone())) .collect() @@ -94,7 +102,7 @@ fn build_connected_nodes(total: usize, step: usize) -> Vec<(Multiaddr, TestSwarm fn build_connected_nodes_with_config( total: usize, step: usize, - cfg: KademliaConfig, + cfg: Config, ) -> Vec<(Multiaddr, TestSwarm)> { let mut swarms = build_nodes_with_config(total, cfg); let swarm_ids: Vec<_> = swarms @@ -120,7 +128,7 @@ fn build_connected_nodes_with_config( fn build_fully_connected_nodes_with_config( total: usize, - cfg: KademliaConfig, + cfg: Config, ) -> Vec<(Multiaddr, TestSwarm)> { let mut swarms = build_nodes_with_config(total, cfg); let swarm_addr_and_peer_id: Vec<_> = swarms @@ -137,7 +145,7 @@ fn build_fully_connected_nodes_with_config( swarms } -fn random_multihash() -> Multihash { +fn random_multihash() -> Multihash<64> { Multihash::wrap(SHA_256_MH, &thread_rng().gen::<[u8; 32]>()).unwrap() } @@ -165,7 +173,7 @@ fn bootstrap() { // or smaller than K_VALUE. let num_group = rng.gen_range(1..(num_total % K_VALUE.get()) + 2); - let mut cfg = KademliaConfig::default(); + let mut cfg = Config::default(); if rng.gen() { cfg.disjoint_query_paths(true); } @@ -189,7 +197,7 @@ fn bootstrap() { loop { match swarm.poll_next_unpin(ctx) { Poll::Ready(Some(SwarmEvent::Behaviour( - KademliaEvent::OutboundQueryProgressed { + Event::OutboundQueryProgressed { id, result: QueryResult::Bootstrap(Ok(ok)), .. @@ -234,10 +242,10 @@ fn bootstrap() { #[test] fn query_iter() { - fn distances(key: &kbucket_priv::Key, peers: Vec) -> Vec { + fn distances(key: &kbucket::Key, peers: Vec) -> Vec { peers .into_iter() - .map(kbucket_priv::Key::from) + .map(kbucket::Key::from) .map(|k| k.distance(key)) .collect() } @@ -253,7 +261,7 @@ fn query_iter() { // Ask the first peer in the list to search a random peer. 
The search should // propagate forwards through the list of peers. let search_target = PeerId::random(); - let search_target_key = kbucket_priv::Key::from(search_target); + let search_target_key = kbucket::Key::from(search_target); let qid = swarms[0].behaviour_mut().get_closest_peers(search_target); match swarms[0].behaviour_mut().query(&qid) { @@ -279,7 +287,7 @@ fn query_iter() { loop { match swarm.poll_next_unpin(ctx) { Poll::Ready(Some(SwarmEvent::Behaviour( - KademliaEvent::OutboundQueryProgressed { + Event::OutboundQueryProgressed { id, result: QueryResult::GetClosestPeers(Ok(ok)), .. @@ -290,7 +298,7 @@ fn query_iter() { assert_eq!(swarm_ids[i], expected_swarm_id); assert_eq!(swarm.behaviour_mut().queries.size(), 0); assert!(expected_peer_ids.iter().all(|p| ok.peers.contains(p))); - let key = kbucket_priv::Key::new(ok.key); + let key = kbucket::Key::new(ok.key); assert_eq!(expected_distances, distances(&key, ok.peers)); return Poll::Ready(()); } @@ -313,7 +321,9 @@ fn query_iter() { #[test] fn unresponsive_not_returned_direct() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) + .try_init(); // Build one node. It contains fake addresses to non-existing nodes. We ask it to find a // random peer. We make sure that no fake address is returned. @@ -337,12 +347,10 @@ fn unresponsive_not_returned_direct() { for swarm in &mut swarms { loop { match swarm.poll_next_unpin(ctx) { - Poll::Ready(Some(SwarmEvent::Behaviour( - KademliaEvent::OutboundQueryProgressed { - result: QueryResult::GetClosestPeers(Ok(ok)), - .. - }, - ))) => { + Poll::Ready(Some(SwarmEvent::Behaviour(Event::OutboundQueryProgressed { + result: QueryResult::GetClosestPeers(Ok(ok)), + .. 
+ }))) => { assert_eq!(&ok.key[..], search_target.to_bytes().as_slice()); assert_eq!(ok.peers.len(), 0); return Poll::Ready(()); @@ -397,12 +405,10 @@ fn unresponsive_not_returned_indirect() { for swarm in &mut swarms { loop { match swarm.poll_next_unpin(ctx) { - Poll::Ready(Some(SwarmEvent::Behaviour( - KademliaEvent::OutboundQueryProgressed { - result: QueryResult::GetClosestPeers(Ok(ok)), - .. - }, - ))) => { + Poll::Ready(Some(SwarmEvent::Behaviour(Event::OutboundQueryProgressed { + result: QueryResult::GetClosestPeers(Ok(ok)), + .. + }))) => { assert_eq!(&ok.key[..], search_target.to_bytes().as_slice()); assert_eq!(ok.peers.len(), 1); assert_eq!(ok.peers[0], first_peer_id); @@ -445,20 +451,18 @@ fn get_record_not_found() { .map(|(_addr, swarm)| swarm) .collect::>(); - let target_key = record_priv::Key::from(random_multihash()); + let target_key = record::Key::from(random_multihash()); let qid = swarms[0].behaviour_mut().get_record(target_key.clone()); block_on(poll_fn(move |ctx| { for swarm in &mut swarms { loop { match swarm.poll_next_unpin(ctx) { - Poll::Ready(Some(SwarmEvent::Behaviour( - KademliaEvent::OutboundQueryProgressed { - id, - result: QueryResult::GetRecord(Err(e)), - .. - }, - ))) => { + Poll::Ready(Some(SwarmEvent::Behaviour(Event::OutboundQueryProgressed { + id, + result: QueryResult::GetRecord(Err(e)), + .. + }))) => { assert_eq!(id, qid); if let GetRecordError::NotFound { key, closest_peers } = e { assert_eq!(key, target_key); @@ -494,14 +498,14 @@ fn put_record() { // At least 4 nodes, 1 under test + 3 bootnodes. 
let num_total = usize::max(4, replication_factor.get() * 2); - let mut config = KademliaConfig::default(); + let mut config = Config::default(); config.set_replication_factor(replication_factor); if rng.gen() { config.disjoint_query_paths(true); } if filter_records { - config.set_record_filtering(KademliaStoreInserts::FilterBoth); + config.set_record_filtering(StoreInserts::FilterBoth); } let mut swarms = { @@ -573,7 +577,7 @@ fn put_record() { loop { match swarm.poll_next_unpin(ctx) { Poll::Ready(Some(SwarmEvent::Behaviour( - KademliaEvent::OutboundQueryProgressed { + Event::OutboundQueryProgressed { id, result: QueryResult::PutRecord(res), stats, @@ -581,7 +585,7 @@ fn put_record() { }, ))) | Poll::Ready(Some(SwarmEvent::Behaviour( - KademliaEvent::OutboundQueryProgressed { + Event::OutboundQueryProgressed { id, result: QueryResult::RepublishRecord(res), stats, @@ -604,16 +608,14 @@ fn put_record() { } } } - Poll::Ready(Some(SwarmEvent::Behaviour( - KademliaEvent::InboundRequest { - request: InboundRequest::PutRecord { record, .. }, - }, - ))) => { + Poll::Ready(Some(SwarmEvent::Behaviour(Event::InboundRequest { + request: InboundRequest::PutRecord { record, .. 
}, + }))) => { if !drop_records { if let Some(record) = record { assert_eq!( swarm.behaviour().record_filtering, - KademliaStoreInserts::FilterBoth + StoreInserts::FilterBoth ); // Accept the record swarm @@ -624,7 +626,7 @@ fn put_record() { } else { assert_eq!( swarm.behaviour().record_filtering, - KademliaStoreInserts::Unfiltered + StoreInserts::Unfiltered ); } } @@ -653,7 +655,7 @@ fn put_record() { assert_eq!(r.expires, expected.expires); assert_eq!(r.publisher, Some(*swarms[0].local_peer_id())); - let key = kbucket_priv::Key::new(r.key.clone()); + let key = kbucket::Key::new(r.key.clone()); let mut expected = swarms .iter() .skip(1) @@ -661,9 +663,9 @@ fn put_record() { .cloned() .collect::>(); expected.sort_by(|id1, id2| { - kbucket_priv::Key::from(*id1) + kbucket::Key::from(*id1) .distance(&key) - .cmp(&kbucket_priv::Key::from(*id2).distance(&key)) + .cmp(&kbucket::Key::from(*id2).distance(&key)) }); let expected = expected @@ -683,7 +685,7 @@ fn put_record() { }) .collect::>(); - if swarms[0].behaviour().record_filtering != KademliaStoreInserts::Unfiltered + if swarms[0].behaviour().record_filtering != StoreInserts::Unfiltered && drop_records { assert_eq!(actual.len(), 0); @@ -764,14 +766,12 @@ fn get_record() { for swarm in &mut swarms { loop { match swarm.poll_next_unpin(ctx) { - Poll::Ready(Some(SwarmEvent::Behaviour( - KademliaEvent::OutboundQueryProgressed { - id, - result: QueryResult::GetRecord(Ok(r)), - step: ProgressStep { count, last }, - .. - }, - ))) => { + Poll::Ready(Some(SwarmEvent::Behaviour(Event::OutboundQueryProgressed { + id, + result: QueryResult::GetRecord(Ok(r)), + step: ProgressStep { count, last }, + .. 
+ }))) => { assert_eq!(id, qid); if usize::from(count) == 1 { assert!(!last); @@ -828,14 +828,12 @@ fn get_record_many() { swarm.behaviour_mut().query_mut(&qid).unwrap().finish(); } match swarm.poll_next_unpin(ctx) { - Poll::Ready(Some(SwarmEvent::Behaviour( - KademliaEvent::OutboundQueryProgressed { - id, - result: QueryResult::GetRecord(Ok(r)), - step: ProgressStep { count: _, last }, - .. - }, - ))) => { + Poll::Ready(Some(SwarmEvent::Behaviour(Event::OutboundQueryProgressed { + id, + result: QueryResult::GetRecord(Ok(r)), + step: ProgressStep { count: _, last }, + .. + }))) => { assert_eq!(id, qid); if let GetRecordOk::FoundRecord(r) = r { assert_eq!(r.record, record); @@ -862,14 +860,14 @@ fn get_record_many() { /// network where X is equal to the configured replication factor. #[test] fn add_provider() { - fn prop(keys: Vec, seed: Seed) { + fn prop(keys: Vec, seed: Seed) { let mut rng = StdRng::from_seed(seed.0); let replication_factor = NonZeroUsize::new(rng.gen_range(1..(K_VALUE.get() / 2) + 1)).unwrap(); // At least 4 nodes, 1 under test + 3 bootnodes. let num_total = usize::max(4, replication_factor.get() * 2); - let mut config = KademliaConfig::default(); + let mut config = Config::default(); config.set_replication_factor(replication_factor); if rng.gen() { config.disjoint_query_paths(true); @@ -923,14 +921,14 @@ fn add_provider() { loop { match swarm.poll_next_unpin(ctx) { Poll::Ready(Some(SwarmEvent::Behaviour( - KademliaEvent::OutboundQueryProgressed { + Event::OutboundQueryProgressed { id, result: QueryResult::StartProviding(res), .. }, ))) | Poll::Ready(Some(SwarmEvent::Behaviour( - KademliaEvent::OutboundQueryProgressed { + Event::OutboundQueryProgressed { id, result: QueryResult::RepublishProvider(res), .. 
@@ -992,11 +990,11 @@ fn add_provider() { .map(Swarm::local_peer_id) .cloned() .collect::>(); - let kbucket_key = kbucket_priv::Key::new(key); + let kbucket_key = kbucket::Key::new(key); expected.sort_by(|id1, id2| { - kbucket_priv::Key::from(*id1) + kbucket::Key::from(*id1) .distance(&kbucket_key) - .cmp(&kbucket_priv::Key::from(*id2).distance(&kbucket_key)) + .cmp(&kbucket::Key::from(*id2).distance(&kbucket_key)) }); let expected = expected @@ -1061,10 +1059,11 @@ fn exceed_jobs_max_queries() { loop { if let Poll::Ready(Some(e)) = swarm.poll_next_unpin(ctx) { match e { - SwarmEvent::Behaviour(KademliaEvent::OutboundQueryProgressed { + SwarmEvent::Behaviour(Event::OutboundQueryProgressed { result: QueryResult::GetClosestPeers(Ok(r)), .. }) => break assert!(r.peers.is_empty()), + SwarmEvent::Behaviour(Event::ModeChanged { .. }) => {} SwarmEvent::Behaviour(e) => panic!("Unexpected event: {e:?}"), _ => {} } @@ -1084,14 +1083,14 @@ fn exp_decr_expiration_overflow() { } // Right shifting a u64 by >63 results in a panic. - prop_no_panic(KademliaConfig::default().record_ttl.unwrap(), 64); + prop_no_panic(Config::default().record_ttl.unwrap(), 64); quickcheck(prop_no_panic as fn(_, _)) } #[test] fn disjoint_query_does_not_finish_before_all_paths_did() { - let mut config = KademliaConfig::default(); + let mut config = Config::default(); config.disjoint_query_paths(true); // I.e. setting the amount disjoint paths to be explored to 2. 
config.set_parallelism(NonZeroUsize::new(2).unwrap()); @@ -1101,7 +1100,7 @@ fn disjoint_query_does_not_finish_before_all_paths_did() { let mut bob = build_node(); let key = Key::from( - Multihash::wrap(SHA_256_MH, &thread_rng().gen::<[u8; 32]>()) + Multihash::<64>::wrap(SHA_256_MH, &thread_rng().gen::<[u8; 32]>()) .expect("32 array to fit into 64 byte multihash"), ); let record_bob = Record::new(key.clone(), b"bob".to_vec()); @@ -1139,13 +1138,11 @@ fn disjoint_query_does_not_finish_before_all_paths_did() { for (i, swarm) in [&mut alice, &mut trudy].iter_mut().enumerate() { loop { match swarm.poll_next_unpin(ctx) { - Poll::Ready(Some(SwarmEvent::Behaviour( - KademliaEvent::OutboundQueryProgressed { - result: QueryResult::GetRecord(result), - step, - .. - }, - ))) => { + Poll::Ready(Some(SwarmEvent::Behaviour(Event::OutboundQueryProgressed { + result: QueryResult::GetRecord(result), + step, + .. + }))) => { if i != 0 { panic!("Expected `QueryResult` from Alice.") } @@ -1196,13 +1193,11 @@ fn disjoint_query_does_not_finish_before_all_paths_did() { for (i, swarm) in [&mut alice, &mut bob].iter_mut().enumerate() { loop { match swarm.poll_next_unpin(ctx) { - Poll::Ready(Some(SwarmEvent::Behaviour( - KademliaEvent::OutboundQueryProgressed { - result: QueryResult::GetRecord(result), - step, - .. - }, - ))) => { + Poll::Ready(Some(SwarmEvent::Behaviour(Event::OutboundQueryProgressed { + result: QueryResult::GetRecord(result), + step, + .. + }))) => { if i != 0 { panic!("Expected `QueryResult` from Alice.") } @@ -1240,11 +1235,11 @@ fn disjoint_query_does_not_finish_before_all_paths_did() { } /// Tests that peers are not automatically inserted into -/// the routing table with `KademliaBucketInserts::Manual`. +/// the routing table with `BucketInserts::Manual`. 
#[test] fn manual_bucket_inserts() { - let mut cfg = KademliaConfig::default(); - cfg.set_kbucket_inserts(KademliaBucketInserts::Manual); + let mut cfg = Config::default(); + cfg.set_kbucket_inserts(BucketInserts::Manual); // 1 -> 2 -> [3 -> ...] let mut swarms = build_connected_nodes_with_config(3, 1, cfg); // The peers and their addresses for which we expect `RoutablePeer` events. @@ -1253,7 +1248,7 @@ fn manual_bucket_inserts() { .skip(2) .map(|(a, s)| { let pid = *Swarm::local_peer_id(s); - let addr = a.clone().with(Protocol::P2p(pid.into())); + let addr = a.clone().with(Protocol::P2p(pid)); (addr, pid) }) .collect::>(); @@ -1270,7 +1265,7 @@ fn manual_bucket_inserts() { for (_, swarm) in swarms.iter_mut() { loop { match swarm.poll_next_unpin(ctx) { - Poll::Ready(Some(SwarmEvent::Behaviour(KademliaEvent::RoutablePeer { + Poll::Ready(Some(SwarmEvent::Behaviour(Event::RoutablePeer { peer, address, }))) => { @@ -1302,7 +1297,7 @@ fn network_behaviour_on_address_change() { let old_address: Multiaddr = Protocol::Memory(1).into(); let new_address: Multiaddr = Protocol::Memory(2).into(); - let mut kademlia = Kademlia::new(local_peer_id, MemoryStore::new(local_peer_id)); + let mut kademlia = Behaviour::new(local_peer_id, MemoryStore::new(local_peer_id)); let endpoint = ConnectedPoint::Dialer { address: old_address.clone(), @@ -1336,7 +1331,7 @@ fn network_behaviour_on_address_change() { kademlia.on_connection_handler_event( remote_peer_id, connection_id, - KademliaHandlerEvent::ProtocolConfirmed { endpoint }, + HandlerEvent::ProtocolConfirmed { endpoint }, ); assert_eq!( @@ -1379,7 +1374,7 @@ fn network_behaviour_on_address_change() { #[test] fn get_providers_single() { - fn prop(key: record_priv::Key) { + fn prop(key: record::Key) { let (_, mut single_swarm) = build_node(); single_swarm .behaviour_mut() @@ -1388,10 +1383,11 @@ fn get_providers_single() { block_on(async { match single_swarm.next().await.unwrap() { - 
SwarmEvent::Behaviour(KademliaEvent::OutboundQueryProgressed { + SwarmEvent::Behaviour(Event::OutboundQueryProgressed { result: QueryResult::StartProviding(Ok(_)), .. }) => {} + SwarmEvent::Behaviour(Event::ModeChanged { .. }) => {} SwarmEvent::Behaviour(e) => panic!("Unexpected event: {e:?}"), _ => {} } @@ -1402,7 +1398,7 @@ fn get_providers_single() { block_on(async { loop { match single_swarm.next().await.unwrap() { - SwarmEvent::Behaviour(KademliaEvent::OutboundQueryProgressed { + SwarmEvent::Behaviour(Event::OutboundQueryProgressed { id, result: QueryResult::GetProviders(Ok(ok)), step: index, @@ -1432,7 +1428,7 @@ fn get_providers_single() { } fn get_providers_limit() { - fn prop(key: record_priv::Key) { + fn prop(key: record::Key) { let mut swarms = build_nodes(3); // Let first peer know of second peer and second peer know of third peer. @@ -1468,7 +1464,7 @@ fn get_providers_limit() { loop { match swarm.poll_next_unpin(ctx) { Poll::Ready(Some(SwarmEvent::Behaviour( - KademliaEvent::OutboundQueryProgressed { + Event::OutboundQueryProgressed { id, result: QueryResult::GetProviders(Ok(ok)), step: index, diff --git a/protocols/kad/src/handler_priv.rs b/protocols/kad/src/handler.rs similarity index 53% rename from protocols/kad/src/handler_priv.rs rename to protocols/kad/src/handler.rs index b99eb2956ea..5e7c2e21b8b 100644 --- a/protocols/kad/src/handler_priv.rs +++ b/protocols/kad/src/handler.rs @@ -18,32 +18,29 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
+use crate::behaviour::Mode; use crate::protocol::{ - KadInStreamSink, KadOutStreamSink, KadPeer, KadRequestMsg, KadResponseMsg, - KademliaProtocolConfig, + KadInStreamSink, KadOutStreamSink, KadPeer, KadRequestMsg, KadResponseMsg, ProtocolConfig, }; -use crate::record_priv::{self, Record}; +use crate::record::{self, Record}; +use crate::QueryId; use either::Either; +use futures::channel::oneshot; use futures::prelude::*; use futures::stream::SelectAll; -use instant::Instant; use libp2p_core::{upgrade, ConnectedPoint}; use libp2p_identity::PeerId; -use libp2p_swarm::handler::{ - ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound, -}; +use libp2p_swarm::handler::{ConnectionEvent, FullyNegotiatedInbound, FullyNegotiatedOutbound}; use libp2p_swarm::{ - ConnectionHandler, ConnectionHandlerEvent, ConnectionHandlerUpgrErr, KeepAlive, - NegotiatedSubstream, SubstreamProtocol, + ConnectionHandler, ConnectionHandlerEvent, Stream, StreamUpgradeError, SubstreamProtocol, + SupportedProtocols, }; -use log::trace; use std::collections::VecDeque; use std::task::Waker; -use std::{ - error, fmt, io, marker::PhantomData, pin::Pin, task::Context, task::Poll, time::Duration, -}; +use std::time::Duration; +use std::{error, fmt, io, marker::PhantomData, pin::Pin, task::Context, task::Poll}; -const MAX_NUM_SUBSTREAMS: usize = 32; +const MAX_NUM_STREAMS: usize = 32; /// Protocol handler that manages substreams for the Kademlia protocol /// on a single connection with a peer. @@ -52,28 +49,30 @@ const MAX_NUM_SUBSTREAMS: usize = 32; /// make. /// /// It also handles requests made by the remote. -pub struct KademliaHandler { - /// Configuration for the Kademlia protocol. - config: KademliaHandlerConfig, +pub struct Handler { + /// Configuration of the wire protocol. + protocol_config: ProtocolConfig, + + /// In client mode, we don't accept inbound substreams. + mode: Mode, /// Next unique ID of a connection. 
next_connec_unique_id: UniqueConnecId, - /// List of active outbound substreams with the state they are in. - outbound_substreams: SelectAll>, + /// List of active outbound streams. + outbound_substreams: + futures_bounded::FuturesTupleSet>, QueryId>, - /// Number of outbound streams being upgraded right now. - num_requested_outbound_streams: usize, + /// Contains one [`oneshot::Sender`] per outbound stream that we have requested. + pending_streams: + VecDeque, StreamUpgradeError>>>, /// List of outbound substreams that are waiting to become active next. /// Contains the request we want to send, and the user data if we expect an answer. - pending_messages: VecDeque<(KadRequestMsg, Option)>, + pending_messages: VecDeque<(KadRequestMsg, QueryId)>, /// List of active inbound substreams with the state they are in. - inbound_substreams: SelectAll>, - - /// Until when to keep the connection alive. - keep_alive: KeepAlive, + inbound_substreams: SelectAll, /// The connected endpoint of the connection that the handler /// is associated with. @@ -83,95 +82,50 @@ pub struct KademliaHandler { remote_peer_id: PeerId, /// The current state of protocol confirmation. - protocol_status: ProtocolStatus, + protocol_status: Option, + + remote_supported_protocols: SupportedProtocols, } /// The states of protocol confirmation that a connection /// handler transitions through. -enum ProtocolStatus { - /// It is as yet unknown whether the remote supports the - /// configured protocol name. - Unconfirmed, - /// The configured protocol name has been confirmed by the remote - /// but has not yet been reported to the `Kademlia` behaviour. - Confirmed, - /// The configured protocol has been confirmed by the remote - /// and the confirmation reported to the `Kademlia` behaviour. - Reported, -} - -/// Configuration of a [`KademliaHandler`]. -#[derive(Debug, Clone)] -pub struct KademliaHandlerConfig { - /// Configuration of the wire protocol. 
- pub protocol_config: KademliaProtocolConfig, - - /// If false, we deny incoming requests. - pub allow_listening: bool, - - /// Time after which we close an idle connection. - pub idle_timeout: Duration, -} - -/// State of an active outbound substream. -enum OutboundSubstreamState { - /// Waiting to send a message to the remote. - PendingSend( - KadOutStreamSink, - KadRequestMsg, - Option, - ), - /// Waiting to flush the substream so that the data arrives to the remote. - PendingFlush(KadOutStreamSink, Option), - /// Waiting for an answer back from the remote. - // TODO: add timeout - WaitingAnswer(KadOutStreamSink, TUserData), - /// An error happened on the substream and we should report the error to the user. - ReportError(KademliaHandlerQueryErr, TUserData), - /// The substream is being closed. - Closing(KadOutStreamSink), - /// The substream is complete and will not perform any more work. - Done, - Poisoned, +#[derive(Debug, Copy, Clone, PartialEq)] +struct ProtocolStatus { + /// Whether the remote node supports one of our kademlia protocols. + supported: bool, + /// Whether we reported the state to the behaviour. + reported: bool, } /// State of an active inbound substream. -enum InboundSubstreamState { +enum InboundSubstreamState { /// Waiting for a request from the remote. WaitingMessage { /// Whether it is the first message to be awaited on this stream. first: bool, connection_id: UniqueConnecId, - substream: KadInStreamSink, + substream: KadInStreamSink, }, - /// Waiting for the behaviour to send a [`KademliaHandlerIn`] event containing the response. - WaitingBehaviour( - UniqueConnecId, - KadInStreamSink, - Option, - ), + /// Waiting for the behaviour to send a [`HandlerIn`] event containing the response. + WaitingBehaviour(UniqueConnecId, KadInStreamSink, Option), /// Waiting to send an answer back to the remote. 
- PendingSend( - UniqueConnecId, - KadInStreamSink, - KadResponseMsg, - ), + PendingSend(UniqueConnecId, KadInStreamSink, KadResponseMsg), /// Waiting to flush an answer back to the remote. - PendingFlush(UniqueConnecId, KadInStreamSink), + PendingFlush(UniqueConnecId, KadInStreamSink), /// The substream is being closed. - Closing(KadInStreamSink), + Closing(KadInStreamSink), /// The substream was cancelled in favor of a new one. Cancelled, Poisoned { - phantom: PhantomData, + phantom: PhantomData, }, } -impl InboundSubstreamState { +impl InboundSubstreamState { fn try_answer_with( &mut self, - id: KademliaRequestId, + id: RequestId, msg: KadResponseMsg, ) -> Result<(), KadResponseMsg> { match std::mem::replace( @@ -223,15 +177,13 @@ impl InboundSubstreamState { /// Event produced by the Kademlia handler. #[derive(Debug)] -pub enum KademliaHandlerEvent { +pub enum HandlerEvent { /// The configured protocol name has been confirmed by the peer through - /// a successfully negotiated substream. - /// - /// This event is only emitted once by a handler upon the first - /// successfully negotiated inbound or outbound substream and - /// indicates that the connected peer participates in the Kademlia - /// overlay network identified by the configured protocol name. + /// a successfully negotiated substream or by learning the supported protocols of the remote. ProtocolConfirmed { endpoint: ConnectedPoint }, + /// The configured protocol name(s) are not or no longer supported by the peer on the provided + /// connection and it should be removed from the routing table. + ProtocolNotSupported { endpoint: ConnectedPoint }, /// Request for the list of nodes whose IDs are the closest to `key`. The number of nodes /// returned is not specified, but should be around 20. @@ -239,48 +191,48 @@ pub enum KademliaHandlerEvent { /// The key for which to locate the closest nodes. key: Vec, /// Identifier of the request. Needs to be passed back when answering. 
- request_id: KademliaRequestId, + request_id: RequestId, }, - /// Response to an `KademliaHandlerIn::FindNodeReq`. + /// Response to an `HandlerIn::FindNodeReq`. FindNodeRes { /// Results of the request. closer_peers: Vec, /// The user data passed to the `FindNodeReq`. - user_data: TUserData, + query_id: QueryId, }, /// Same as `FindNodeReq`, but should also return the entries of the local providers list for /// this key. GetProvidersReq { /// The key for which providers are requested. - key: record_priv::Key, + key: record::Key, /// Identifier of the request. Needs to be passed back when answering. - request_id: KademliaRequestId, + request_id: RequestId, }, - /// Response to an `KademliaHandlerIn::GetProvidersReq`. + /// Response to an `HandlerIn::GetProvidersReq`. GetProvidersRes { /// Nodes closest to the key. closer_peers: Vec, /// Known providers for this key. provider_peers: Vec, /// The user data passed to the `GetProvidersReq`. - user_data: TUserData, + query_id: QueryId, }, /// An error happened when performing a query. QueryError { /// The error that happened. - error: KademliaHandlerQueryErr, + error: HandlerQueryErr, /// The user data passed to the query. - user_data: TUserData, + query_id: QueryId, }, /// The peer announced itself as a provider of a key. AddProvider { /// The key for which the peer is a provider of the associated value. - key: record_priv::Key, + key: record::Key, /// The peer that is the provider of the value for `key`. provider: KadPeer, }, @@ -288,88 +240,76 @@ pub enum KademliaHandlerEvent { /// Request to get a value from the dht records GetRecord { /// Key for which we should look in the dht - key: record_priv::Key, + key: record::Key, /// Identifier of the request. Needs to be passed back when answering. - request_id: KademliaRequestId, + request_id: RequestId, }, - /// Response to a `KademliaHandlerIn::GetRecord`. + /// Response to a `HandlerIn::GetRecord`. 
GetRecordRes { /// The result is present if the key has been found record: Option, /// Nodes closest to the key. closer_peers: Vec, /// The user data passed to the `GetValue`. - user_data: TUserData, + query_id: QueryId, }, /// Request to put a value in the dht records PutRecord { record: Record, /// Identifier of the request. Needs to be passed back when answering. - request_id: KademliaRequestId, + request_id: RequestId, }, /// Response to a request to store a record. PutRecordRes { /// The key of the stored record. - key: record_priv::Key, + key: record::Key, /// The value of the stored record. value: Vec, /// The user data passed to the `PutValue`. - user_data: TUserData, + query_id: QueryId, }, } /// Error that can happen when requesting an RPC query. #[derive(Debug)] -pub enum KademliaHandlerQueryErr { - /// Error while trying to perform the query. - Upgrade(ConnectionHandlerUpgrErr), +pub enum HandlerQueryErr { /// Received an answer that doesn't correspond to the request. UnexpectedMessage, /// I/O error in the substream. 
Io(io::Error), } -impl fmt::Display for KademliaHandlerQueryErr { +impl fmt::Display for HandlerQueryErr { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { - KademliaHandlerQueryErr::Upgrade(err) => { - write!(f, "Error while performing Kademlia query: {err}") - } - KademliaHandlerQueryErr::UnexpectedMessage => { + HandlerQueryErr::UnexpectedMessage => { write!( f, "Remote answered our Kademlia RPC query with the wrong message type" ) } - KademliaHandlerQueryErr::Io(err) => { + HandlerQueryErr::Io(err) => { write!(f, "I/O error during a Kademlia RPC query: {err}") } } } } -impl error::Error for KademliaHandlerQueryErr { +impl error::Error for HandlerQueryErr { fn source(&self) -> Option<&(dyn error::Error + 'static)> { match self { - KademliaHandlerQueryErr::Upgrade(err) => Some(err), - KademliaHandlerQueryErr::UnexpectedMessage => None, - KademliaHandlerQueryErr::Io(err) => Some(err), + HandlerQueryErr::UnexpectedMessage => None, + HandlerQueryErr::Io(err) => Some(err), } } } -impl From> for KademliaHandlerQueryErr { - fn from(err: ConnectionHandlerUpgrErr) -> Self { - KademliaHandlerQueryErr::Upgrade(err) - } -} - /// Event to send to the handler. #[derive(Debug)] -pub enum KademliaHandlerIn { +pub enum HandlerIn { /// Resets the (sub)stream associated with the given request ID, /// thus signaling an error to the remote. /// @@ -377,15 +317,18 @@ pub enum KademliaHandlerIn { /// can be used as an alternative to letting requests simply time /// out on the remote peer, thus potentially avoiding some delay /// for the query on the remote. - Reset(KademliaRequestId), + Reset(RequestId), + + /// Change the connection to the specified mode. + ReconfigureMode { new_mode: Mode }, /// Request for the list of nodes whose IDs are the closest to `key`. The number of nodes /// returned is not specified, but should be around 20. FindNodeReq { /// Identifier of the node. key: Vec, - /// Custom user data. 
Passed back in the out event when the results arrive. - user_data: TUserData, + /// ID of the query that generated this request. + query_id: QueryId, }, /// Response to a `FindNodeReq`. @@ -395,16 +338,16 @@ pub enum KademliaHandlerIn { /// Identifier of the request that was made by the remote. /// /// It is a logic error to use an id of the handler of a different node. - request_id: KademliaRequestId, + request_id: RequestId, }, /// Same as `FindNodeReq`, but should also return the entries of the local providers list for /// this key. GetProvidersReq { /// Identifier being searched. - key: record_priv::Key, - /// Custom user data. Passed back in the out event when the results arrive. - user_data: TUserData, + key: record::Key, + /// ID of the query that generated this request. + query_id: QueryId, }, /// Response to a `GetProvidersReq`. @@ -416,7 +359,7 @@ pub enum KademliaHandlerIn { /// Identifier of the request that was made by the remote. /// /// It is a logic error to use an id of the handler of a different node. - request_id: KademliaRequestId, + request_id: RequestId, }, /// Indicates that this provider is known for this key. @@ -425,17 +368,19 @@ pub enum KademliaHandlerIn { /// succeeded. AddProvider { /// Key for which we should add providers. - key: record_priv::Key, + key: record::Key, /// Known provider for this key. provider: KadPeer, + /// ID of the query that generated this request. + query_id: QueryId, }, /// Request to retrieve a record from the DHT. GetRecord { /// The key of the record. - key: record_priv::Key, - /// Custom data. Passed back in the out event when the results arrive. - user_data: TUserData, + key: record::Key, + /// ID of the query that generated this request. + query_id: QueryId, }, /// Response to a `GetRecord` request. @@ -445,31 +390,31 @@ pub enum KademliaHandlerIn { /// Nodes that are closer to the key we were searching for. closer_peers: Vec, /// Identifier of the request that was made by the remote. 
- request_id: KademliaRequestId, + request_id: RequestId, }, /// Put a value into the dht records. PutRecord { record: Record, - /// Custom data. Passed back in the out event when the results arrive. - user_data: TUserData, + /// ID of the query that generated this request. + query_id: QueryId, }, /// Response to a `PutRecord`. PutRecordRes { /// Key of the value that was put. - key: record_priv::Key, + key: record::Key, /// Value that was put. value: Vec, /// Identifier of the request that was made by the remote. - request_id: KademliaRequestId, + request_id: RequestId, }, } /// Unique identifier for a request. Must be passed back in order to answer a request from /// the remote. #[derive(Debug, PartialEq, Eq, Copy, Clone)] -pub struct KademliaRequestId { +pub struct RequestId { /// Unique identifier for an incoming connection. connec_unique_id: UniqueConnecId, } @@ -478,55 +423,70 @@ pub struct KademliaRequestId { #[derive(Debug, Copy, Clone, PartialEq, Eq)] struct UniqueConnecId(u64); -impl KademliaHandler -where - TUserData: Clone + fmt::Debug + Send + 'static + Unpin, -{ - /// Create a [`KademliaHandler`] using the given configuration. +impl Handler { pub fn new( - config: KademliaHandlerConfig, + protocol_config: ProtocolConfig, endpoint: ConnectedPoint, remote_peer_id: PeerId, + mode: Mode, ) -> Self { - let keep_alive = KeepAlive::Until(Instant::now() + config.idle_timeout); + match &endpoint { + ConnectedPoint::Dialer { .. } => { + tracing::debug!( + peer=%remote_peer_id, + mode=%mode, + "New outbound connection" + ); + } + ConnectedPoint::Listener { .. 
} => { + tracing::debug!( + peer=%remote_peer_id, + mode=%mode, + "New inbound connection" + ); + } + } - KademliaHandler { - config, + Handler { + protocol_config, + mode, endpoint, remote_peer_id, next_connec_unique_id: UniqueConnecId(0), inbound_substreams: Default::default(), - outbound_substreams: Default::default(), - num_requested_outbound_streams: 0, + outbound_substreams: futures_bounded::FuturesTupleSet::new( + Duration::from_secs(10), + MAX_NUM_STREAMS, + ), + pending_streams: Default::default(), pending_messages: Default::default(), - keep_alive, - protocol_status: ProtocolStatus::Unconfirmed, + protocol_status: None, + remote_supported_protocols: Default::default(), } } fn on_fully_negotiated_outbound( &mut self, - FullyNegotiatedOutbound { protocol, info: () }: FullyNegotiatedOutbound< + FullyNegotiatedOutbound { + protocol: stream, + info: (), + }: FullyNegotiatedOutbound< ::OutboundProtocol, ::OutboundOpenInfo, >, ) { - if let Some((msg, user_data)) = self.pending_messages.pop_front() { - self.outbound_substreams - .push(OutboundSubstreamState::PendingSend( - protocol, msg, user_data, - )); - } else { - debug_assert!(false, "Requested outbound stream without message") + if let Some(sender) = self.pending_streams.pop_front() { + let _ = sender.send(Ok(stream)); } - self.num_requested_outbound_streams -= 1; - - if let ProtocolStatus::Unconfirmed = self.protocol_status { + if self.protocol_status.is_none() { // Upon the first successfully negotiated substream, we know that the // remote is configured with the same protocol name and we want // the behaviour to add this peer to the routing table, if possible. 
- self.protocol_status = ProtocolStatus::Confirmed; + self.protocol_status = Some(ProtocolStatus { + supported: true, + reported: false, + }); } } @@ -544,14 +504,17 @@ where future::Either::Right(p) => void::unreachable(p), }; - if let ProtocolStatus::Unconfirmed = self.protocol_status { + if self.protocol_status.is_none() { // Upon the first successfully negotiated substream, we know that the // remote is configured with the same protocol name and we want // the behaviour to add this peer to the routing table, if possible. - self.protocol_status = ProtocolStatus::Confirmed; + self.protocol_status = Some(ProtocolStatus { + supported: true, + reported: false, + }); } - if self.inbound_substreams.len() == MAX_NUM_SUBSTREAMS { + if self.inbound_substreams.len() == MAX_NUM_STREAMS { if let Some(s) = self.inbound_substreams.iter_mut().find(|s| { matches!( s, @@ -560,22 +523,21 @@ where ) }) { *s = InboundSubstreamState::Cancelled; - log::debug!( - "New inbound substream to {:?} exceeds inbound substream limit. \ - Removed older substream waiting to be reused.", - self.remote_peer_id, + tracing::debug!( + peer=?self.remote_peer_id, + "New inbound substream to peer exceeds inbound substream limit. \ + Removed older substream waiting to be reused." ) } else { - log::warn!( - "New inbound substream to {:?} exceeds inbound substream limit. \ - No older substream waiting to be reused. Dropping new substream.", - self.remote_peer_id, + tracing::warn!( + peer=?self.remote_peer_id, + "New inbound substream to peer exceeds inbound substream limit. \ + No older substream waiting to be reused. Dropping new substream." ); return; } } - debug_assert!(self.config.allow_listening); let connec_unique_id = self.next_connec_unique_id; self.next_connec_unique_id.0 += 1; self.inbound_substreams @@ -586,51 +548,67 @@ where }); } - fn on_dial_upgrade_error( - &mut self, - DialUpgradeError { - info: (), error, .. 
- }: DialUpgradeError< - ::OutboundOpenInfo, - ::OutboundProtocol, - >, - ) { - // TODO: cache the fact that the remote doesn't support kademlia at all, so that we don't - // continue trying + /// Takes the given [`KadRequestMsg`] and composes it into an outbound request-response protocol handshake using a [`oneshot::channel`]. + fn queue_new_stream(&mut self, id: QueryId, msg: KadRequestMsg) { + let (sender, receiver) = oneshot::channel(); + + self.pending_streams.push_back(sender); + let result = self.outbound_substreams.try_push( + async move { + let mut stream = receiver + .await + .map_err(|_| io::Error::from(io::ErrorKind::BrokenPipe))? + .map_err(|e| match e { + StreamUpgradeError::Timeout => io::ErrorKind::TimedOut.into(), + StreamUpgradeError::Apply(e) => e, + StreamUpgradeError::NegotiationFailed => io::Error::new( + io::ErrorKind::ConnectionRefused, + "protocol not supported", + ), + StreamUpgradeError::Io(e) => e, + })?; + + let has_answer = !matches!(msg, KadRequestMsg::AddProvider { .. }); + + stream.send(msg).await?; + stream.close().await?; + + if !has_answer { + return Ok(None); + } - if let Some((_, Some(user_data))) = self.pending_messages.pop_front() { - self.outbound_substreams - .push(OutboundSubstreamState::ReportError(error.into(), user_data)); - } + let msg = stream.next().await.ok_or(io::ErrorKind::UnexpectedEof)??; + + Ok(Some(msg)) + }, + id, + ); - self.num_requested_outbound_streams -= 1; + debug_assert!( + result.is_ok(), + "Expected to not create more streams than allowed" + ); } } -impl ConnectionHandler for KademliaHandler -where - TUserData: Clone + fmt::Debug + Send + 'static + Unpin, -{ - type InEvent = KademliaHandlerIn; - type OutEvent = KademliaHandlerEvent; - type Error = io::Error; // TODO: better error type? 
- type InboundProtocol = Either; - type OutboundProtocol = KademliaProtocolConfig; +impl ConnectionHandler for Handler { + type FromBehaviour = HandlerIn; + type ToBehaviour = HandlerEvent; + type InboundProtocol = Either; + type OutboundProtocol = ProtocolConfig; type OutboundOpenInfo = (); type InboundOpenInfo = (); fn listen_protocol(&self) -> SubstreamProtocol { - if self.config.allow_listening { - SubstreamProtocol::new(self.config.protocol_config.clone(), ()) - .map_upgrade(Either::Left) - } else { - SubstreamProtocol::new(Either::Right(upgrade::DeniedUpgrade), ()) + match self.mode { + Mode::Server => SubstreamProtocol::new(Either::Left(self.protocol_config.clone()), ()), + Mode::Client => SubstreamProtocol::new(Either::Right(upgrade::DeniedUpgrade), ()), } } - fn on_behaviour_event(&mut self, message: KademliaHandlerIn) { + fn on_behaviour_event(&mut self, message: HandlerIn) { match message { - KademliaHandlerIn::Reset(request_id) => { + HandlerIn::Reset(request_id) => { if let Some(state) = self .inbound_substreams .iter_mut() @@ -644,19 +622,19 @@ where state.close(); } } - KademliaHandlerIn::FindNodeReq { key, user_data } => { + HandlerIn::FindNodeReq { key, query_id } => { let msg = KadRequestMsg::FindNode { key }; - self.pending_messages.push_back((msg, Some(user_data))); + self.pending_messages.push_back((msg, query_id)); } - KademliaHandlerIn::FindNodeRes { + HandlerIn::FindNodeRes { closer_peers, request_id, } => self.answer_pending_request(request_id, KadResponseMsg::FindNode { closer_peers }), - KademliaHandlerIn::GetProvidersReq { key, user_data } => { + HandlerIn::GetProvidersReq { key, query_id } => { let msg = KadRequestMsg::GetProviders { key }; - self.pending_messages.push_back((msg, Some(user_data))); + self.pending_messages.push_back((msg, query_id)); } - KademliaHandlerIn::GetProvidersRes { + HandlerIn::GetProvidersRes { closer_peers, provider_peers, request_id, @@ -667,19 +645,23 @@ where provider_peers, }, ), - 
KademliaHandlerIn::AddProvider { key, provider } => { + HandlerIn::AddProvider { + key, + provider, + query_id, + } => { let msg = KadRequestMsg::AddProvider { key, provider }; - self.pending_messages.push_back((msg, None)); + self.pending_messages.push_back((msg, query_id)); } - KademliaHandlerIn::GetRecord { key, user_data } => { + HandlerIn::GetRecord { key, query_id } => { let msg = KadRequestMsg::GetValue { key }; - self.pending_messages.push_back((msg, Some(user_data))); + self.pending_messages.push_back((msg, query_id)); } - KademliaHandlerIn::PutRecord { record, user_data } => { + HandlerIn::PutRecord { record, query_id } => { let msg = KadRequestMsg::PutValue { record }; - self.pending_messages.push_back((msg, Some(user_data))); + self.pending_messages.push_back((msg, query_id)); } - KademliaHandlerIn::GetRecordRes { + HandlerIn::GetRecordRes { record, closer_peers, request_id, @@ -692,70 +674,107 @@ where }, ); } - KademliaHandlerIn::PutRecordRes { + HandlerIn::PutRecordRes { key, request_id, value, } => { self.answer_pending_request(request_id, KadResponseMsg::PutValue { key, value }); } - } - } + HandlerIn::ReconfigureMode { new_mode } => { + let peer = self.remote_peer_id; + + match &self.endpoint { + ConnectedPoint::Dialer { .. } => { + tracing::debug!( + %peer, + mode=%new_mode, + "Changed mode on outbound connection" + ) + } + ConnectedPoint::Listener { local_addr, .. 
} => { + tracing::debug!( + %peer, + mode=%new_mode, + local_address=%local_addr, + "Changed mode on inbound connection assuming that one of our external addresses routes to the local address") + } + } - fn connection_keep_alive(&self) -> KeepAlive { - self.keep_alive + self.mode = new_mode; + } + } } + #[tracing::instrument(level = "trace", name = "ConnectionHandler::poll", skip(self, cx))] fn poll( &mut self, cx: &mut Context<'_>, ) -> Poll< - ConnectionHandlerEvent< - Self::OutboundProtocol, - Self::OutboundOpenInfo, - Self::OutEvent, - Self::Error, - >, + ConnectionHandlerEvent, > { - if let ProtocolStatus::Confirmed = self.protocol_status { - self.protocol_status = ProtocolStatus::Reported; - return Poll::Ready(ConnectionHandlerEvent::Custom( - KademliaHandlerEvent::ProtocolConfirmed { - endpoint: self.endpoint.clone(), - }, - )); - } + loop { + match &mut self.protocol_status { + Some(status) if !status.reported => { + status.reported = true; + let event = if status.supported { + HandlerEvent::ProtocolConfirmed { + endpoint: self.endpoint.clone(), + } + } else { + HandlerEvent::ProtocolNotSupported { + endpoint: self.endpoint.clone(), + } + }; - if let Poll::Ready(Some(event)) = self.outbound_substreams.poll_next_unpin(cx) { - return Poll::Ready(event); - } + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(event)); + } + _ => {} + } - if let Poll::Ready(Some(event)) = self.inbound_substreams.poll_next_unpin(cx) { - return Poll::Ready(event); - } + match self.outbound_substreams.poll_unpin(cx) { + Poll::Ready((Ok(Ok(Some(response))), query_id)) => { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + process_kad_response(response, query_id), + )) + } + Poll::Ready((Ok(Ok(None)), _)) => { + continue; + } + Poll::Ready((Ok(Err(e)), query_id)) => { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + HandlerEvent::QueryError { + error: HandlerQueryErr::Io(e), + query_id, + }, + )) + } + Poll::Ready((Err(_timeout), query_id)) => { 
+ return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + HandlerEvent::QueryError { + error: HandlerQueryErr::Io(io::ErrorKind::TimedOut.into()), + query_id, + }, + )) + } + Poll::Pending => {} + } - let num_in_progress_outbound_substreams = - self.outbound_substreams.len() + self.num_requested_outbound_streams; - if num_in_progress_outbound_substreams < MAX_NUM_SUBSTREAMS - && self.num_requested_outbound_streams < self.pending_messages.len() - { - self.num_requested_outbound_streams += 1; - return Poll::Ready(ConnectionHandlerEvent::OutboundSubstreamRequest { - protocol: SubstreamProtocol::new(self.config.protocol_config.clone(), ()), - }); - } + if let Poll::Ready(Some(event)) = self.inbound_substreams.poll_next_unpin(cx) { + return Poll::Ready(event); + } - let no_streams = self.outbound_substreams.is_empty() && self.inbound_substreams.is_empty(); - self.keep_alive = match (no_streams, self.keep_alive) { - // No open streams. Preserve the existing idle timeout. - (true, k @ KeepAlive::Until(_)) => k, - // No open streams. Set idle timeout. - (true, _) => KeepAlive::Until(Instant::now() + self.config.idle_timeout), - // Keep alive for open streams. 
- (false, _) => KeepAlive::Yes, - }; + if self.outbound_substreams.len() < MAX_NUM_STREAMS { + if let Some((msg, id)) = self.pending_messages.pop_front() { + self.queue_new_stream(id, msg); + return Poll::Ready(ConnectionHandlerEvent::OutboundSubstreamRequest { + protocol: SubstreamProtocol::new(self.protocol_config.clone(), ()), + }); + } + } - Poll::Pending + return Poll::Pending; + } } fn on_connection_event( @@ -774,188 +793,81 @@ where ConnectionEvent::FullyNegotiatedInbound(fully_negotiated_inbound) => { self.on_fully_negotiated_inbound(fully_negotiated_inbound) } - ConnectionEvent::DialUpgradeError(dial_upgrade_error) => { - self.on_dial_upgrade_error(dial_upgrade_error) + ConnectionEvent::DialUpgradeError(ev) => { + if let Some(sender) = self.pending_streams.pop_front() { + let _ = sender.send(Err(ev.error)); + } } - ConnectionEvent::AddressChange(_) | ConnectionEvent::ListenUpgradeError(_) => {} + ConnectionEvent::RemoteProtocolsChange(change) => { + let dirty = self.remote_supported_protocols.on_protocols_change(change); + + if dirty { + let remote_supports_our_kademlia_protocols = self + .remote_supported_protocols + .iter() + .any(|p| self.protocol_config.protocol_names().contains(p)); + + self.protocol_status = Some(compute_new_protocol_status( + remote_supports_our_kademlia_protocols, + self.protocol_status, + )) + } + } + _ => {} } } } -impl KademliaHandler -where - TUserData: 'static + Clone + Send + Unpin + fmt::Debug, -{ - fn answer_pending_request(&mut self, request_id: KademliaRequestId, mut msg: KadResponseMsg) { - for state in self.inbound_substreams.iter_mut() { - match state.try_answer_with(request_id, msg) { - Ok(()) => return, - Err(m) => { - msg = m; - } +fn compute_new_protocol_status( + now_supported: bool, + current_status: Option, +) -> ProtocolStatus { + let current_status = match current_status { + None => { + return ProtocolStatus { + supported: now_supported, + reported: false, } } + Some(current) => current, + }; - 
debug_assert!(false, "Cannot find inbound substream for {request_id:?}") + if now_supported == current_status.supported { + return ProtocolStatus { + supported: now_supported, + reported: true, + }; } -} -impl Default for KademliaHandlerConfig { - fn default() -> Self { - KademliaHandlerConfig { - protocol_config: Default::default(), - allow_listening: true, - idle_timeout: Duration::from_secs(10), - } + if now_supported { + tracing::debug!("Remote now supports our kademlia protocol"); + } else { + tracing::debug!("Remote no longer supports our kademlia protocol"); } -} - -impl Stream for OutboundSubstreamState -where - TUserData: Unpin, -{ - type Item = ConnectionHandlerEvent< - KademliaProtocolConfig, - (), - KademliaHandlerEvent, - io::Error, - >; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let this = self.get_mut(); - - loop { - match std::mem::replace(this, OutboundSubstreamState::Poisoned) { - OutboundSubstreamState::PendingSend(mut substream, msg, user_data) => { - match substream.poll_ready_unpin(cx) { - Poll::Ready(Ok(())) => match substream.start_send_unpin(msg) { - Ok(()) => { - *this = OutboundSubstreamState::PendingFlush(substream, user_data); - } - Err(error) => { - *this = OutboundSubstreamState::Done; - let event = user_data.map(|user_data| { - ConnectionHandlerEvent::Custom( - KademliaHandlerEvent::QueryError { - error: KademliaHandlerQueryErr::Io(error), - user_data, - }, - ) - }); - - return Poll::Ready(event); - } - }, - Poll::Pending => { - *this = OutboundSubstreamState::PendingSend(substream, msg, user_data); - return Poll::Pending; - } - Poll::Ready(Err(error)) => { - *this = OutboundSubstreamState::Done; - let event = user_data.map(|user_data| { - ConnectionHandlerEvent::Custom(KademliaHandlerEvent::QueryError { - error: KademliaHandlerQueryErr::Io(error), - user_data, - }) - }); - - return Poll::Ready(event); - } - } - } - OutboundSubstreamState::PendingFlush(mut substream, user_data) => { - match 
substream.poll_flush_unpin(cx) { - Poll::Ready(Ok(())) => { - if let Some(user_data) = user_data { - *this = OutboundSubstreamState::WaitingAnswer(substream, user_data); - } else { - *this = OutboundSubstreamState::Closing(substream); - } - } - Poll::Pending => { - *this = OutboundSubstreamState::PendingFlush(substream, user_data); - return Poll::Pending; - } - Poll::Ready(Err(error)) => { - *this = OutboundSubstreamState::Done; - let event = user_data.map(|user_data| { - ConnectionHandlerEvent::Custom(KademliaHandlerEvent::QueryError { - error: KademliaHandlerQueryErr::Io(error), - user_data, - }) - }); - - return Poll::Ready(event); - } - } - } - OutboundSubstreamState::WaitingAnswer(mut substream, user_data) => { - match substream.poll_next_unpin(cx) { - Poll::Ready(Some(Ok(msg))) => { - *this = OutboundSubstreamState::Closing(substream); - let event = process_kad_response(msg, user_data); - return Poll::Ready(Some(ConnectionHandlerEvent::Custom(event))); - } - Poll::Pending => { - *this = OutboundSubstreamState::WaitingAnswer(substream, user_data); - return Poll::Pending; - } - Poll::Ready(Some(Err(error))) => { - *this = OutboundSubstreamState::Done; - let event = KademliaHandlerEvent::QueryError { - error: KademliaHandlerQueryErr::Io(error), - user_data, - }; - - return Poll::Ready(Some(ConnectionHandlerEvent::Custom(event))); - } - Poll::Ready(None) => { - *this = OutboundSubstreamState::Done; - let event = KademliaHandlerEvent::QueryError { - error: KademliaHandlerQueryErr::Io( - io::ErrorKind::UnexpectedEof.into(), - ), - user_data, - }; - - return Poll::Ready(Some(ConnectionHandlerEvent::Custom(event))); - } - } - } - OutboundSubstreamState::ReportError(error, user_data) => { - *this = OutboundSubstreamState::Done; - let event = KademliaHandlerEvent::QueryError { error, user_data }; + ProtocolStatus { + supported: now_supported, + reported: false, + } +} - return Poll::Ready(Some(ConnectionHandlerEvent::Custom(event))); - } - 
OutboundSubstreamState::Closing(mut stream) => match stream.poll_close_unpin(cx) { - Poll::Ready(Ok(())) | Poll::Ready(Err(_)) => return Poll::Ready(None), - Poll::Pending => { - *this = OutboundSubstreamState::Closing(stream); - return Poll::Pending; - } - }, - OutboundSubstreamState::Done => { - *this = OutboundSubstreamState::Done; - return Poll::Ready(None); +impl Handler { + fn answer_pending_request(&mut self, request_id: RequestId, mut msg: KadResponseMsg) { + for state in self.inbound_substreams.iter_mut() { + match state.try_answer_with(request_id, msg) { + Ok(()) => return, + Err(m) => { + msg = m; } - OutboundSubstreamState::Poisoned => unreachable!(), } } + + debug_assert!(false, "Cannot find inbound substream for {request_id:?}") } } -impl Stream for InboundSubstreamState -where - TUserData: Unpin, -{ - type Item = ConnectionHandlerEvent< - KademliaProtocolConfig, - (), - KademliaHandlerEvent, - io::Error, - >; +impl futures::Stream for InboundSubstreamState { + type Item = ConnectionHandlerEvent; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let this = self.get_mut(); @@ -973,17 +885,17 @@ where mut substream, } => match substream.poll_next_unpin(cx) { Poll::Ready(Some(Ok(KadRequestMsg::Ping))) => { - log::warn!("Kademlia PING messages are unsupported"); + tracing::warn!("Kademlia PING messages are unsupported"); *this = InboundSubstreamState::Closing(substream); } Poll::Ready(Some(Ok(KadRequestMsg::FindNode { key }))) => { *this = InboundSubstreamState::WaitingBehaviour(connection_id, substream, None); - return Poll::Ready(Some(ConnectionHandlerEvent::Custom( - KademliaHandlerEvent::FindNodeReq { + return Poll::Ready(Some(ConnectionHandlerEvent::NotifyBehaviour( + HandlerEvent::FindNodeReq { key, - request_id: KademliaRequestId { + request_id: RequestId { connec_unique_id: connection_id, }, }, @@ -992,10 +904,10 @@ where Poll::Ready(Some(Ok(KadRequestMsg::GetProviders { key }))) => { *this = 
InboundSubstreamState::WaitingBehaviour(connection_id, substream, None); - return Poll::Ready(Some(ConnectionHandlerEvent::Custom( - KademliaHandlerEvent::GetProvidersReq { + return Poll::Ready(Some(ConnectionHandlerEvent::NotifyBehaviour( + HandlerEvent::GetProvidersReq { key, - request_id: KademliaRequestId { + request_id: RequestId { connec_unique_id: connection_id, }, }, @@ -1007,17 +919,17 @@ where connection_id, substream, }; - return Poll::Ready(Some(ConnectionHandlerEvent::Custom( - KademliaHandlerEvent::AddProvider { key, provider }, + return Poll::Ready(Some(ConnectionHandlerEvent::NotifyBehaviour( + HandlerEvent::AddProvider { key, provider }, ))); } Poll::Ready(Some(Ok(KadRequestMsg::GetValue { key }))) => { *this = InboundSubstreamState::WaitingBehaviour(connection_id, substream, None); - return Poll::Ready(Some(ConnectionHandlerEvent::Custom( - KademliaHandlerEvent::GetRecord { + return Poll::Ready(Some(ConnectionHandlerEvent::NotifyBehaviour( + HandlerEvent::GetRecord { key, - request_id: KademliaRequestId { + request_id: RequestId { connec_unique_id: connection_id, }, }, @@ -1026,10 +938,10 @@ where Poll::Ready(Some(Ok(KadRequestMsg::PutValue { record }))) => { *this = InboundSubstreamState::WaitingBehaviour(connection_id, substream, None); - return Poll::Ready(Some(ConnectionHandlerEvent::Custom( - KademliaHandlerEvent::PutRecord { + return Poll::Ready(Some(ConnectionHandlerEvent::NotifyBehaviour( + HandlerEvent::PutRecord { record, - request_id: KademliaRequestId { + request_id: RequestId { connec_unique_id: connection_id, }, }, @@ -1047,7 +959,7 @@ where return Poll::Ready(None); } Poll::Ready(Some(Err(e))) => { - trace!("Inbound substream error: {:?}", e); + tracing::trace!("Inbound substream error: {:?}", e); return Poll::Ready(None); } }, @@ -1106,43 +1018,85 @@ where } /// Process a Kademlia message that's supposed to be a response to one of our requests. 
-fn process_kad_response( - event: KadResponseMsg, - user_data: TUserData, -) -> KademliaHandlerEvent { +fn process_kad_response(event: KadResponseMsg, query_id: QueryId) -> HandlerEvent { // TODO: must check that the response corresponds to the request match event { KadResponseMsg::Pong => { // We never send out pings. - KademliaHandlerEvent::QueryError { - error: KademliaHandlerQueryErr::UnexpectedMessage, - user_data, + HandlerEvent::QueryError { + error: HandlerQueryErr::UnexpectedMessage, + query_id, } } - KadResponseMsg::FindNode { closer_peers } => KademliaHandlerEvent::FindNodeRes { + KadResponseMsg::FindNode { closer_peers } => HandlerEvent::FindNodeRes { closer_peers, - user_data, + query_id, }, KadResponseMsg::GetProviders { closer_peers, provider_peers, - } => KademliaHandlerEvent::GetProvidersRes { + } => HandlerEvent::GetProvidersRes { closer_peers, provider_peers, - user_data, + query_id, }, KadResponseMsg::GetValue { record, closer_peers, - } => KademliaHandlerEvent::GetRecordRes { + } => HandlerEvent::GetRecordRes { record, closer_peers, - user_data, + query_id, }, - KadResponseMsg::PutValue { key, value, .. } => KademliaHandlerEvent::PutRecordRes { + KadResponseMsg::PutValue { key, value, .. 
} => HandlerEvent::PutRecordRes { key, value, - user_data, + query_id, }, } } + +#[cfg(test)] +mod tests { + use super::*; + use quickcheck::{Arbitrary, Gen}; + use tracing_subscriber::EnvFilter; + + impl Arbitrary for ProtocolStatus { + fn arbitrary(g: &mut Gen) -> Self { + Self { + supported: bool::arbitrary(g), + reported: bool::arbitrary(g), + } + } + } + + #[test] + fn compute_next_protocol_status_test() { + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); + + fn prop(now_supported: bool, current: Option) { + let new = compute_new_protocol_status(now_supported, current); + + match current { + None => { + assert!(!new.reported); + assert_eq!(new.supported, now_supported); + } + Some(current) => { + if current.supported == now_supported { + assert!(new.reported); + } else { + assert!(!new.reported); + } + + assert_eq!(new.supported, now_supported); + } + } + } + + quickcheck::quickcheck(prop as fn(_, _)) + } +} diff --git a/protocols/kad/src/jobs.rs b/protocols/kad/src/jobs.rs index cfc4f92941b..f1631ed6ad1 100644 --- a/protocols/kad/src/jobs.rs +++ b/protocols/kad/src/jobs.rs @@ -61,7 +61,7 @@ //! > to the size of all stored records. As a job runs, the records are moved //! > out of the job to the consumer, where they can be dropped after being sent. -use crate::record_priv::{self, store::RecordStore, ProviderRecord, Record}; +use crate::record::{self, store::RecordStore, ProviderRecord, Record}; use futures::prelude::*; use futures_timer::Delay; use instant::Instant; @@ -74,10 +74,10 @@ use std::vec; /// The maximum number of queries towards which background jobs /// are allowed to start new queries on an invocation of -/// `Kademlia::poll`. +/// `Behaviour::poll`. pub(crate) const JOBS_MAX_QUERIES: usize = 100; /// The maximum number of new queries started by a background job -/// per invocation of `Kademlia::poll`. +/// per invocation of `Behaviour::poll`. 
pub(crate) const JOBS_MAX_NEW_QUERIES: usize = 10; /// A background job run periodically. #[derive(Debug)] @@ -87,6 +87,7 @@ struct PeriodicJob { } impl PeriodicJob { + #[cfg(test)] fn is_running(&self) -> bool { match self.state { PeriodicJobState::Running(..) => true, @@ -96,6 +97,7 @@ impl PeriodicJob { /// Cuts short the remaining delay, if the job is currently waiting /// for the delay to expire. + #[cfg(test)] fn asap(&mut self) { if let PeriodicJobState::Waiting(delay, deadline) = &mut self.state { let new_deadline = Instant::now().checked_sub(Duration::from_secs(1)).unwrap(); @@ -132,7 +134,7 @@ pub(crate) struct PutRecordJob { next_publish: Option, publish_interval: Option, record_ttl: Option, - skipped: HashSet, + skipped: HashSet, inner: PeriodicJob>, } @@ -164,11 +166,12 @@ impl PutRecordJob { /// Adds the key of a record that is ignored on the current or /// next run of the job. - pub(crate) fn skip(&mut self, key: record_priv::Key) { + pub(crate) fn skip(&mut self, key: record::Key) { self.skipped.insert(key); } /// Checks whether the job is currently running. + #[cfg(test)] pub(crate) fn is_running(&self) -> bool { self.inner.is_running() } @@ -177,6 +180,7 @@ impl PutRecordJob { /// for the delay to expire. /// /// The job is guaranteed to run on the next invocation of `poll`. + #[cfg(test)] pub(crate) fn asap(&mut self, publish: bool) { if publish { self.next_publish = Some(Instant::now().checked_sub(Duration::from_secs(1)).unwrap()) @@ -273,6 +277,7 @@ impl AddProviderJob { } /// Checks whether the job is currently running. + #[cfg(test)] pub(crate) fn is_running(&self) -> bool { self.inner.is_running() } @@ -281,6 +286,7 @@ impl AddProviderJob { /// for the delay to expire. /// /// The job is guaranteed to run on the next invocation of `poll`. 
+ #[cfg(test)] pub(crate) fn asap(&mut self) { self.inner.asap() } @@ -330,7 +336,7 @@ impl AddProviderJob { #[cfg(test)] mod tests { use super::*; - use crate::record_priv::store::MemoryStore; + use crate::record::store::MemoryStore; use futures::{executor::block_on, future::poll_fn}; use quickcheck::*; use rand::Rng; diff --git a/protocols/kad/src/kbucket_priv.rs b/protocols/kad/src/kbucket.rs similarity index 96% rename from protocols/kad/src/kbucket_priv.rs rename to protocols/kad/src/kbucket.rs index 5288be44786..b42806fcf3c 100644 --- a/protocols/kad/src/kbucket_priv.rs +++ b/protocols/kad/src/kbucket.rs @@ -72,6 +72,7 @@ mod entry; #[allow(clippy::assign_op_pattern)] mod key; +pub use bucket::NodeStatus; pub use entry::*; use arrayvec::{self, ArrayVec}; @@ -84,7 +85,7 @@ const NUM_BUCKETS: usize = 256; /// A `KBucketsTable` represents a Kademlia routing table. #[derive(Debug, Clone)] -pub struct KBucketsTable { +pub(crate) struct KBucketsTable { /// The key identifying the local peer that owns the routing table. local_key: TKey, /// The buckets comprising the routing table. @@ -154,7 +155,7 @@ where /// The given `pending_timeout` specifies the duration after creation of /// a [`PendingEntry`] after which it becomes eligible for insertion into /// a full bucket, replacing the least-recently (dis)connected node. - pub fn new(local_key: TKey, pending_timeout: Duration) -> Self { + pub(crate) fn new(local_key: TKey, pending_timeout: Duration) -> Self { KBucketsTable { local_key, buckets: (0..NUM_BUCKETS) @@ -165,13 +166,13 @@ where } /// Returns the local key. - pub fn local_key(&self) -> &TKey { + pub(crate) fn local_key(&self) -> &TKey { &self.local_key } /// Returns an `Entry` for the given key, representing the state of the entry /// in the routing table. 
- pub fn entry<'a>(&'a mut self, key: &'a TKey) -> Entry<'a, TKey, TVal> { + pub(crate) fn entry<'a>(&'a mut self, key: &'a TKey) -> Entry<'a, TKey, TVal> { let index = BucketIndex::new(&self.local_key.as_ref().distance(key)); if let Some(i) = index { let bucket = &mut self.buckets[i.get()]; @@ -188,7 +189,7 @@ where /// /// The buckets are ordered by proximity to the `local_key`, i.e. the first /// bucket is the closest bucket (containing at most one key). - pub fn iter(&mut self) -> impl Iterator> + '_ { + pub(crate) fn iter(&mut self) -> impl Iterator> + '_ { let applied_pending = &mut self.applied_pending; self.buckets.iter_mut().enumerate().map(move |(i, b)| { if let Some(applied) = b.apply_pending() { @@ -204,7 +205,7 @@ where /// Returns the bucket for the distance to the given key. /// /// Returns `None` if the given key refers to the local key. - pub fn bucket(&mut self, key: &K) -> Option> + pub(crate) fn bucket(&mut self, key: &K) -> Option> where K: AsRef, { @@ -232,13 +233,16 @@ where /// buckets are updated accordingly. The fact that a pending entry was applied is /// recorded in the `KBucketsTable` in the form of `AppliedPending` results, which must be /// consumed by calling this function. - pub fn take_applied_pending(&mut self) -> Option> { + pub(crate) fn take_applied_pending(&mut self) -> Option> { self.applied_pending.pop_front() } /// Returns an iterator over the keys closest to `target`, ordered by /// increasing distance. - pub fn closest_keys<'a, T>(&'a mut self, target: &'a T) -> impl Iterator + 'a + pub(crate) fn closest_keys<'a, T>( + &'a mut self, + target: &'a T, + ) -> impl Iterator + 'a where T: AsRef, { @@ -256,7 +260,7 @@ where /// Returns an iterator over the nodes closest to the `target` key, ordered by /// increasing distance. 
- pub fn closest<'a, T>( + pub(crate) fn closest<'a, T>( &'a mut self, target: &'a T, ) -> impl Iterator> + 'a @@ -286,7 +290,7 @@ where /// /// The number of nodes between the local node and the target are /// calculated by backtracking from the target towards the local key. - pub fn count_nodes_between(&mut self, target: &T) -> usize + pub(crate) fn count_nodes_between(&mut self, target: &T) -> usize where T: AsRef, { @@ -460,7 +464,7 @@ where } } -/// A reference to a bucket in a [`KBucketsTable`]. +/// A reference to a bucket. pub struct KBucketRef<'a, TKey, TVal> { index: BucketIndex, bucket: &'a mut KBucket, @@ -471,7 +475,7 @@ where TKey: Clone + AsRef, TVal: Clone, { - /// Returns the minimum inclusive and maximum inclusive [`Distance`] for + /// Returns the minimum inclusive and maximum inclusive distance for /// this bucket. pub fn range(&self) -> (Distance, Distance) { self.index.range() diff --git a/protocols/kad/src/kbucket_priv/bucket.rs b/protocols/kad/src/kbucket/bucket.rs similarity index 97% rename from protocols/kad/src/kbucket_priv/bucket.rs rename to protocols/kad/src/kbucket/bucket.rs index 78bcd95261b..d70161919e1 100644 --- a/protocols/kad/src/kbucket_priv/bucket.rs +++ b/protocols/kad/src/kbucket/bucket.rs @@ -26,8 +26,7 @@ //! > of the `KBucketsTable` and in particular the public `Entry` API. use super::*; -pub use crate::K_VALUE; - +pub(crate) use crate::K_VALUE; /// A `PendingNode` is a `Node` that is pending insertion into a `KBucket`. #[derive(Debug, Clone)] pub(crate) struct PendingNode { @@ -55,10 +54,6 @@ pub enum NodeStatus { } impl PendingNode { - pub(crate) fn key(&self) -> &TKey { - &self.node.key - } - pub(crate) fn status(&self) -> NodeStatus { self.status } @@ -71,6 +66,7 @@ impl PendingNode { Instant::now() >= self.replace } + #[cfg(test)] pub(crate) fn set_ready_at(&mut self, t: Instant) { self.replace = t; } @@ -130,7 +126,7 @@ pub(crate) struct KBucket { /// The result of inserting an entry into a bucket. 
#[must_use] #[derive(Debug, Clone, PartialEq, Eq)] -pub enum InsertResult { +pub(crate) enum InsertResult { /// The entry has been successfully inserted. Inserted, /// The entry is pending insertion because the relevant bucket is currently full. @@ -152,12 +148,12 @@ pub enum InsertResult { /// The result of applying a pending node to a bucket, possibly /// replacing an existing node. #[derive(Debug, Clone, PartialEq, Eq)] -pub struct AppliedPending { +pub(crate) struct AppliedPending { /// The key of the inserted pending node. - pub inserted: Node, + pub(crate) inserted: Node, /// The node that has been evicted from the bucket to make room for the /// pending node, if any. - pub evicted: Option>, + pub(crate) evicted: Option>, } impl KBucket @@ -192,11 +188,6 @@ where .filter(|p| p.node.key.as_ref() == key.as_ref()) } - /// Returns a reference to a node in the bucket. - pub(crate) fn get(&self, key: &TKey) -> Option<&Node> { - self.position(key).map(|p| &self.nodes[p.0]) - } - /// Returns an iterator over the nodes in the bucket, together with their status. pub(crate) fn iter(&self) -> impl Iterator, NodeStatus)> { self.nodes @@ -399,22 +390,19 @@ where } } - /// Checks whether the given position refers to a connected node. - pub(crate) fn is_connected(&self, pos: Position) -> bool { - self.status(pos) == NodeStatus::Connected - } - /// Gets the number of entries currently in the bucket. pub(crate) fn num_entries(&self) -> usize { self.nodes.len() } /// Gets the number of entries in the bucket that are considered connected. + #[cfg(test)] pub(crate) fn num_connected(&self) -> usize { self.first_connected_pos.map_or(0, |i| self.nodes.len() - i) } /// Gets the number of entries in the bucket that are considered disconnected. 
+ #[cfg(test)] pub(crate) fn num_disconnected(&self) -> usize { self.nodes.len() - self.num_connected() } diff --git a/protocols/kad/src/kbucket_priv/entry.rs b/protocols/kad/src/kbucket/entry.rs similarity index 81% rename from protocols/kad/src/kbucket_priv/entry.rs rename to protocols/kad/src/kbucket/entry.rs index 7ccf2017e99..c38aac4a483 100644 --- a/protocols/kad/src/kbucket_priv/entry.rs +++ b/protocols/kad/src/kbucket/entry.rs @@ -21,7 +21,7 @@ //! The `Entry` API for quering and modifying the entries of a `KBucketsTable` //! representing the nodes participating in the Kademlia DHT. -pub use super::bucket::{AppliedPending, InsertResult, Node, NodeStatus, K_VALUE}; +pub(crate) use super::bucket::{AppliedPending, InsertResult, Node, NodeStatus, K_VALUE}; pub use super::key::*; use super::*; @@ -74,7 +74,7 @@ impl, TVal> AsRef for EntryView { /// A reference into a single entry of a `KBucketsTable`. #[derive(Debug)] -pub enum Entry<'a, TPeerId, TVal> { +pub(crate) enum Entry<'a, TPeerId, TVal> { /// The entry is present in a bucket. Present(PresentEntry<'a, TPeerId, TVal>, NodeStatus), /// The entry is pending insertion in a bucket. @@ -115,7 +115,7 @@ where /// /// Returns `None` if the entry is neither present in a bucket nor /// pending insertion into a bucket. - pub fn view(&'a mut self) -> Option> { + pub(crate) fn view(&'a mut self) -> Option> { match self { Entry::Present(entry, status) => Some(EntryRefView { node: NodeRefView { @@ -135,25 +135,11 @@ where } } - /// Returns the key of the entry. - /// - /// Returns `None` if the `Key` used to construct this `Entry` is not a valid - /// key for an entry in a bucket, which is the case for the `local_key` of - /// the `KBucketsTable` referring to the local node. 
- pub fn key(&self) -> Option<&TKey> { - match self { - Entry::Present(entry, _) => Some(entry.key()), - Entry::Pending(entry, _) => Some(entry.key()), - Entry::Absent(entry) => Some(entry.key()), - Entry::SelfEntry => None, - } - } - /// Returns the value associated with the entry. /// /// Returns `None` if the entry is absent from any bucket or refers to the /// local node. - pub fn value(&mut self) -> Option<&mut TVal> { + pub(crate) fn value(&mut self) -> Option<&mut TVal> { match self { Entry::Present(entry, _) => Some(entry.value()), Entry::Pending(entry, _) => Some(entry.value()), @@ -165,8 +151,7 @@ where /// An entry present in a bucket. #[derive(Debug)] -pub struct PresentEntry<'a, TKey, TVal>(EntryRef<'a, TKey, TVal>); - +pub(crate) struct PresentEntry<'a, TKey, TVal>(EntryRef<'a, TKey, TVal>); impl<'a, TKey, TVal> PresentEntry<'a, TKey, TVal> where TKey: Clone + AsRef, @@ -176,13 +161,8 @@ where PresentEntry(EntryRef { bucket, key }) } - /// Returns the key of the entry. - pub fn key(&self) -> &TKey { - self.0.key - } - /// Returns the value associated with the key. - pub fn value(&mut self) -> &mut TVal { + pub(crate) fn value(&mut self) -> &mut TVal { &mut self .0 .bucket @@ -192,12 +172,12 @@ where } /// Sets the status of the entry to the provided [`NodeStatus`]. - pub fn update(&mut self, status: NodeStatus) { + pub(crate) fn update(&mut self, status: NodeStatus) { self.0.bucket.update(self.0.key, status); } /// Removes the entry from the bucket. - pub fn remove(self) -> EntryView { + pub(crate) fn remove(self) -> EntryView { let (node, status, _pos) = self .0 .bucket @@ -209,8 +189,7 @@ where /// An entry waiting for a slot to be available in a bucket. 
#[derive(Debug)] -pub struct PendingEntry<'a, TKey, TVal>(EntryRef<'a, TKey, TVal>); - +pub(crate) struct PendingEntry<'a, TKey, TVal>(EntryRef<'a, TKey, TVal>); impl<'a, TKey, TVal> PendingEntry<'a, TKey, TVal> where TKey: Clone + AsRef, @@ -220,13 +199,8 @@ where PendingEntry(EntryRef { bucket, key }) } - /// Returns the key of the entry. - pub fn key(&self) -> &TKey { - self.0.key - } - /// Returns the value associated with the key. - pub fn value(&mut self) -> &mut TVal { + pub(crate) fn value(&mut self) -> &mut TVal { self.0 .bucket .pending_mut() @@ -235,13 +209,13 @@ where } /// Updates the status of the pending entry. - pub fn update(self, status: NodeStatus) -> PendingEntry<'a, TKey, TVal> { + pub(crate) fn update(self, status: NodeStatus) -> PendingEntry<'a, TKey, TVal> { self.0.bucket.update_pending(status); PendingEntry::new(self.0.bucket, self.0.key) } /// Removes the pending entry from the bucket. - pub fn remove(self) -> EntryView { + pub(crate) fn remove(self) -> EntryView { let pending = self.0.bucket.remove_pending().expect( "We can only build a PendingEntry if the entry is pending insertion into the bucket; QED", @@ -254,8 +228,7 @@ where /// An entry that is not present in any bucket. #[derive(Debug)] -pub struct AbsentEntry<'a, TKey, TVal>(EntryRef<'a, TKey, TVal>); - +pub(crate) struct AbsentEntry<'a, TKey, TVal>(EntryRef<'a, TKey, TVal>); impl<'a, TKey, TVal> AbsentEntry<'a, TKey, TVal> where TKey: Clone + AsRef, @@ -265,13 +238,8 @@ where AbsentEntry(EntryRef { bucket, key }) } - /// Returns the key of the entry. - pub fn key(&self) -> &TKey { - self.0.key - } - /// Attempts to insert the entry into a bucket. 
- pub fn insert(self, value: TVal, status: NodeStatus) -> InsertResult { + pub(crate) fn insert(self, value: TVal, status: NodeStatus) -> InsertResult { self.0.bucket.insert( Node { key: self.0.key.clone(), diff --git a/protocols/kad/src/kbucket_priv/key.rs b/protocols/kad/src/kbucket/key.rs similarity index 94% rename from protocols/kad/src/kbucket_priv/key.rs rename to protocols/kad/src/kbucket/key.rs index 1c48184078a..bc5d6a53750 100644 --- a/protocols/kad/src/kbucket_priv/key.rs +++ b/protocols/kad/src/kbucket/key.rs @@ -18,7 +18,7 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::record_priv; +use crate::record; use libp2p_core::multihash::Multihash; use libp2p_identity::PeerId; use sha2::digest::generic_array::{typenum::U32, GenericArray}; @@ -77,6 +77,11 @@ impl Key { self.bytes.distance(other) } + /// Exposing the hashed bytes. + pub fn hashed_bytes(&self) -> &[u8] { + &self.bytes.0 + } + /// Returns the uniquely determined key with the given distance to `self`. 
/// /// This implements the following equivalence: @@ -93,8 +98,8 @@ impl From> for KeyBytes { } } -impl From for Key { - fn from(m: Multihash) -> Self { +impl From> for Key> { + fn from(m: Multihash) -> Self { let bytes = KeyBytes(Sha256::digest(m.to_bytes())); Key { preimage: m, bytes } } @@ -113,8 +118,8 @@ impl From> for Key> { } } -impl From for Key { - fn from(k: record_priv::Key) -> Self { +impl From for Key { + fn from(k: record::Key) -> Self { Key::new(k) } } @@ -205,8 +210,8 @@ mod tests { } } - impl Arbitrary for Key { - fn arbitrary(g: &mut Gen) -> Key { + impl Arbitrary for Key> { + fn arbitrary(g: &mut Gen) -> Key> { let hash: [u8; 32] = core::array::from_fn(|_| u8::arbitrary(g)); Key::from(Multihash::wrap(SHA_256_MH, &hash).unwrap()) } diff --git a/protocols/kad/src/lib.rs b/protocols/kad/src/lib.rs index c3a705900d8..519b67f9d7a 100644 --- a/protocols/kad/src/lib.rs +++ b/protocols/kad/src/lib.rs @@ -26,54 +26,23 @@ //! [Identify](https://github.com/libp2p/specs/tree/master/identify) protocol might be seen as a core protocol. Rust-libp2p //! tries to stay as generic as possible, and does not make this assumption. //! This means that the Identify protocol must be manually hooked up to Kademlia through calls -//! to [`Kademlia::add_address`]. +//! to [`Behaviour::add_address`]. //! If you choose not to use the Identify protocol, and do not provide an alternative peer //! discovery mechanism, a Kademlia node will not discover nodes beyond the network's //! [boot nodes](https://docs.libp2p.io/concepts/glossary/#boot-node). Without the Identify protocol, //! existing nodes in the kademlia network cannot obtain the listen addresses //! of nodes querying them, and thus will not be able to add them to their routing table. 
-// TODO: we allow dead_code for now because this library contains a lot of unused code that will -// be useful later for record store -#![allow(dead_code)] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -mod handler_priv; -#[deprecated( - note = "The `handler` module will be made private in the future and should not be depended on." -)] -pub mod handler { - pub use super::handler_priv::*; -} - -mod kbucket_priv; -#[deprecated( - note = "The `kbucket` module will be made private in the future and should not be depended on." -)] -pub mod kbucket { - pub use super::kbucket_priv::*; -} - -mod protocol_priv; -#[deprecated( - note = "The `protocol` module will be made private in the future and should not be depended on." -)] -pub mod protocol { - pub use super::protocol_priv::*; -} - -mod record_priv; -#[deprecated( - note = "The `record` module will be made private in the future and should not be depended on." -)] -pub mod record { - pub use super::record_priv::*; -} - mod addresses; mod behaviour; +mod handler; mod jobs; +mod kbucket; +mod protocol; mod query; +mod record; mod proto { #![allow(unreachable_pub)] @@ -89,19 +58,21 @@ pub use behaviour::{ AddProviderContext, AddProviderError, AddProviderOk, AddProviderPhase, AddProviderResult, BootstrapError, BootstrapOk, BootstrapResult, GetClosestPeersError, GetClosestPeersOk, GetClosestPeersResult, GetProvidersError, GetProvidersOk, GetProvidersResult, GetRecordError, - GetRecordOk, GetRecordResult, InboundRequest, NoKnownPeers, PeerRecord, PutRecordContext, + GetRecordOk, GetRecordResult, InboundRequest, Mode, NoKnownPeers, PeerRecord, PutRecordContext, PutRecordError, PutRecordOk, PutRecordPhase, PutRecordResult, QueryInfo, QueryMut, QueryRef, QueryResult, QueryStats, RoutingUpdate, }; pub use behaviour::{ - Kademlia, KademliaBucketInserts, KademliaCaching, KademliaConfig, KademliaEvent, - KademliaStoreInserts, ProgressStep, Quorum, + Behaviour, BucketInserts, Caching, Config, Event, ProgressStep, Quorum, 
StoreInserts, }; -pub use kbucket_priv::{EntryView, KBucketRef, Key as KBucketKey}; -pub use protocol::KadConnectionType; +pub use kbucket::{ + Distance as KBucketDistance, EntryView, KBucketRef, Key as KBucketKey, NodeStatus, +}; +pub use protocol::ConnectionType; pub use query::QueryId; -pub use record_priv::{store, Key as RecordKey, ProviderRecord, Record}; +pub use record::{store, Key as RecordKey, ProviderRecord, Record}; +use libp2p_swarm::StreamProtocol; use std::num::NonZeroUsize; /// The `k` parameter of the Kademlia specification. @@ -130,6 +101,8 @@ pub const K_VALUE: NonZeroUsize = unsafe { NonZeroUsize::new_unchecked(20) }; /// The current value is `3`. pub const ALPHA_VALUE: NonZeroUsize = unsafe { NonZeroUsize::new_unchecked(3) }; +pub const PROTOCOL_NAME: StreamProtocol = protocol::DEFAULT_PROTO_NAME; + /// Constant shared across tests for the [`Multihash`](libp2p_core::multihash::Multihash) type. #[cfg(test)] const SHA_256_MH: u64 = 0x12; diff --git a/protocols/kad/src/protocol_priv.rs b/protocols/kad/src/protocol.rs similarity index 77% rename from protocols/kad/src/protocol_priv.rs rename to protocols/kad/src/protocol.rs index 1801ce935fd..7fe2d1130b1 100644 --- a/protocols/kad/src/protocol_priv.rs +++ b/protocols/kad/src/protocol.rs @@ -20,37 +20,34 @@ //! The Kademlia connection protocol upgrade and associated message types. //! -//! The connection protocol upgrade is provided by [`KademliaProtocolConfig`], with the +//! The connection protocol upgrade is provided by [`ProtocolConfig`], with the //! request and response types [`KadRequestMsg`] and [`KadResponseMsg`], respectively. //! The upgrade's output is a `Sink + Stream` of messages. The `Stream` component is used //! to poll the underlying transport for incoming messages, and the `Sink` component //! is used to send messages to remote peers. 
use crate::proto; -use crate::record_priv::{self, Record}; -use asynchronous_codec::Framed; +use crate::record::{self, Record}; +use asynchronous_codec::{Decoder, Encoder, Framed}; use bytes::BytesMut; -use codec::UviBytes; use futures::prelude::*; use instant::Instant; use libp2p_core::upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}; use libp2p_core::Multiaddr; use libp2p_identity::PeerId; use libp2p_swarm::StreamProtocol; -use quick_protobuf::{BytesReader, Writer}; +use std::marker::PhantomData; use std::{convert::TryFrom, time::Duration}; use std::{io, iter}; -use unsigned_varint::codec; +use tracing::debug; /// The protocol name used for negotiating with multistream-select. -pub const DEFAULT_PROTO_NAME: StreamProtocol = StreamProtocol::new("/ipfs/kad/1.0.0"); - +pub(crate) const DEFAULT_PROTO_NAME: StreamProtocol = StreamProtocol::new("/ipfs/kad/1.0.0"); /// The default maximum size for a varint length-delimited packet. -pub const DEFAULT_MAX_PACKET_SIZE: usize = 16 * 1024; - +pub(crate) const DEFAULT_MAX_PACKET_SIZE: usize = 16 * 1024; /// Status of our connection to a node reported by the Kademlia protocol. #[derive(Copy, Clone, PartialEq, Eq, Debug, Hash)] -pub enum KadConnectionType { +pub enum ConnectionType { /// Sender hasn't tried to connect to peer. NotConnected = 0, /// Sender is currently connected to peer. 
@@ -61,26 +58,26 @@ pub enum KadConnectionType { CannotConnect = 3, } -impl From for KadConnectionType { - fn from(raw: proto::ConnectionType) -> KadConnectionType { +impl From for ConnectionType { + fn from(raw: proto::ConnectionType) -> ConnectionType { use proto::ConnectionType::*; match raw { - NOT_CONNECTED => KadConnectionType::NotConnected, - CONNECTED => KadConnectionType::Connected, - CAN_CONNECT => KadConnectionType::CanConnect, - CANNOT_CONNECT => KadConnectionType::CannotConnect, + NOT_CONNECTED => ConnectionType::NotConnected, + CONNECTED => ConnectionType::Connected, + CAN_CONNECT => ConnectionType::CanConnect, + CANNOT_CONNECT => ConnectionType::CannotConnect, } } } -impl From for proto::ConnectionType { - fn from(val: KadConnectionType) -> Self { +impl From for proto::ConnectionType { + fn from(val: ConnectionType) -> Self { use proto::ConnectionType::*; match val { - KadConnectionType::NotConnected => NOT_CONNECTED, - KadConnectionType::Connected => CONNECTED, - KadConnectionType::CanConnect => CAN_CONNECT, - KadConnectionType::CannotConnect => CANNOT_CONNECT, + ConnectionType::NotConnected => NOT_CONNECTED, + ConnectionType::Connected => CONNECTED, + ConnectionType::CanConnect => CAN_CONNECT, + ConnectionType::CannotConnect => CANNOT_CONNECT, } } } @@ -93,7 +90,7 @@ pub struct KadPeer { /// The multiaddresses that the sender think can be used in order to reach the peer. pub multiaddrs: Vec, /// How the sender is connected to that remote. - pub connection_ty: KadConnectionType, + pub connection_ty: ConnectionType, } // Builds a `KadPeer` from a corresponding protobuf message. 
@@ -107,11 +104,12 @@ impl TryFrom for KadPeer { let mut addrs = Vec::with_capacity(peer.addrs.len()); for addr in peer.addrs.into_iter() { - match Multiaddr::try_from(addr) { - Ok(a) => addrs.push(a), - Err(e) => { - log::debug!("Unable to parse multiaddr: {e}"); + match Multiaddr::try_from(addr).map(|addr| addr.with_p2p(node_id)) { + Ok(Ok(a)) => addrs.push(a), + Ok(Err(a)) => { + debug!("Unable to parse multiaddr: {a} is not compatible with {node_id}") } + Err(e) => debug!("Unable to parse multiaddr: {e}"), }; } @@ -139,13 +137,13 @@ impl From for proto::Peer { // only one request, then we can change the output of the `InboundUpgrade` and // `OutboundUpgrade` to be just a single message #[derive(Debug, Clone)] -pub struct KademliaProtocolConfig { +pub struct ProtocolConfig { protocol_names: Vec, /// Maximum allowed size of a packet. max_packet_size: usize, } -impl KademliaProtocolConfig { +impl ProtocolConfig { /// Returns the configured protocol name. pub fn protocol_names(&self) -> &[StreamProtocol] { &self.protocol_names @@ -163,16 +161,16 @@ impl KademliaProtocolConfig { } } -impl Default for KademliaProtocolConfig { +impl Default for ProtocolConfig { fn default() -> Self { - KademliaProtocolConfig { + ProtocolConfig { protocol_names: iter::once(DEFAULT_PROTO_NAME).collect(), max_packet_size: DEFAULT_MAX_PACKET_SIZE, } } } -impl UpgradeInfo for KademliaProtocolConfig { +impl UpgradeInfo for ProtocolConfig { type Info = StreamProtocol; type InfoIter = std::vec::IntoIter; @@ -181,7 +179,43 @@ impl UpgradeInfo for KademliaProtocolConfig { } } -impl InboundUpgrade for KademliaProtocolConfig +/// Codec for Kademlia inbound and outbound message framing. 
+pub struct Codec { + codec: quick_protobuf_codec::Codec, + __phantom: PhantomData<(A, B)>, +} +impl Codec { + fn new(max_packet_size: usize) -> Self { + Codec { + codec: quick_protobuf_codec::Codec::new(max_packet_size), + __phantom: PhantomData, + } + } +} + +impl, B> Encoder for Codec { + type Error = io::Error; + type Item<'a> = A; + + fn encode(&mut self, item: Self::Item<'_>, dst: &mut BytesMut) -> Result<(), Self::Error> { + Ok(self.codec.encode(item.into(), dst)?) + } +} +impl> Decoder for Codec { + type Error = io::Error; + type Item = B; + + fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { + self.codec.decode(src)?.map(B::try_from).transpose() + } +} + +/// Sink of responses and stream of requests. +pub(crate) type KadInStreamSink = Framed>; +/// Sink of requests and stream of responses. +pub(crate) type KadOutStreamSink = Framed>; + +impl InboundUpgrade for ProtocolConfig where C: AsyncRead + AsyncWrite + Unpin, { @@ -190,36 +224,13 @@ where type Error = io::Error; fn upgrade_inbound(self, incoming: C, _: Self::Info) -> Self::Future { - use quick_protobuf::{MessageRead, MessageWrite}; - - let mut codec = UviBytes::default(); - codec.set_max_len(self.max_packet_size); - - future::ok( - Framed::new(incoming, codec) - .err_into() - .with::<_, _, fn(_) -> _, _>(|response| { - let proto_struct = resp_msg_to_proto(response); - let mut buf = Vec::with_capacity(proto_struct.get_size()); - let mut writer = Writer::new(&mut buf); - proto_struct - .write_message(&mut writer) - .expect("Encoding to succeed"); - future::ready(Ok(io::Cursor::new(buf))) - }) - .and_then::<_, fn(_) -> _>(|bytes| { - let mut reader = BytesReader::from_bytes(&bytes); - let request = match proto::Message::from_reader(&mut reader, &bytes) { - Ok(r) => r, - Err(err) => return future::ready(Err(err.into())), - }; - future::ready(proto_to_req_msg(request)) - }), - ) + let codec = Codec::new(self.max_packet_size); + + future::ok(Framed::new(incoming, codec)) } } -impl 
OutboundUpgrade for KademliaProtocolConfig +impl OutboundUpgrade for ProtocolConfig where C: AsyncRead + AsyncWrite + Unpin, { @@ -228,53 +239,12 @@ where type Error = io::Error; fn upgrade_outbound(self, incoming: C, _: Self::Info) -> Self::Future { - use quick_protobuf::{MessageRead, MessageWrite}; - - let mut codec = UviBytes::default(); - codec.set_max_len(self.max_packet_size); - - future::ok( - Framed::new(incoming, codec) - .err_into() - .with::<_, _, fn(_) -> _, _>(|request| { - let proto_struct = req_msg_to_proto(request); - let mut buf = Vec::with_capacity(proto_struct.get_size()); - let mut writer = Writer::new(&mut buf); - proto_struct - .write_message(&mut writer) - .expect("Encoding to succeed"); - future::ready(Ok(io::Cursor::new(buf))) - }) - .and_then::<_, fn(_) -> _>(|bytes| { - let mut reader = BytesReader::from_bytes(&bytes); - let response = match proto::Message::from_reader(&mut reader, &bytes) { - Ok(r) => r, - Err(err) => return future::ready(Err(err.into())), - }; - future::ready(proto_to_resp_msg(response)) - }), - ) + let codec = Codec::new(self.max_packet_size); + + future::ok(Framed::new(incoming, codec)) } } -/// Sink of responses and stream of requests. -pub type KadInStreamSink = KadStreamSink; - -/// Sink of requests and stream of responses. -pub type KadOutStreamSink = KadStreamSink; - -pub type KadStreamSink = stream::AndThen< - sink::With< - stream::ErrInto>>>, io::Error>, - io::Cursor>, - A, - future::Ready>, io::Error>>, - fn(A) -> future::Ready>, io::Error>>, - >, - future::Ready>, - fn(BytesMut) -> future::Ready>, ->; - /// Request that we can send to a peer or that we received from a peer. #[derive(Debug, Clone, PartialEq, Eq)] pub enum KadRequestMsg { @@ -292,13 +262,13 @@ pub enum KadRequestMsg { /// this key. GetProviders { /// Identifier being searched. - key: record_priv::Key, + key: record::Key, }, /// Indicates that this list of providers is known for this key. AddProvider { /// Key for which we should add providers. 
- key: record_priv::Key, + key: record::Key, /// Known provider for this key. provider: KadPeer, }, @@ -306,7 +276,7 @@ pub enum KadRequestMsg { /// Request to get a value from the dht records. GetValue { /// The key we are searching for. - key: record_priv::Key, + key: record::Key, }, /// Request to put a value into the dht records. @@ -344,12 +314,37 @@ pub enum KadResponseMsg { /// Response to a `PutValue`. PutValue { /// The key of the record. - key: record_priv::Key, + key: record::Key, /// Value of the record. value: Vec, }, } +impl From for proto::Message { + fn from(kad_msg: KadRequestMsg) -> Self { + req_msg_to_proto(kad_msg) + } +} +impl From for proto::Message { + fn from(kad_msg: KadResponseMsg) -> Self { + resp_msg_to_proto(kad_msg) + } +} +impl TryFrom for KadRequestMsg { + type Error = io::Error; + + fn try_from(message: proto::Message) -> Result { + proto_to_req_msg(message) + } +} +impl TryFrom for KadResponseMsg { + type Error = io::Error; + + fn try_from(message: proto::Message) -> Result { + proto_to_resp_msg(message) + } +} + /// Converts a `KadRequestMsg` into the corresponding protobuf message for sending. 
fn req_msg_to_proto(kad_msg: KadRequestMsg) -> proto::Message { match kad_msg { @@ -448,11 +443,11 @@ fn proto_to_req_msg(message: proto::Message) -> Result Ok(KadRequestMsg::PutValue { record }) } proto::MessageType::GET_VALUE => Ok(KadRequestMsg::GetValue { - key: record_priv::Key::from(message.key), + key: record::Key::from(message.key), }), proto::MessageType::FIND_NODE => Ok(KadRequestMsg::FindNode { key: message.key }), proto::MessageType::GET_PROVIDERS => Ok(KadRequestMsg::GetProviders { - key: record_priv::Key::from(message.key), + key: record::Key::from(message.key), }), proto::MessageType::ADD_PROVIDER => { // TODO: for now we don't parse the peer properly, so it is possible that we get @@ -464,7 +459,7 @@ fn proto_to_req_msg(message: proto::Message) -> Result .find_map(|peer| KadPeer::try_from(peer).ok()); if let Some(provider) = provider { - let key = record_priv::Key::from(message.key); + let key = record::Key::from(message.key); Ok(KadRequestMsg::AddProvider { key, provider }) } else { Err(invalid_data("AddProvider message with no valid peer.")) @@ -528,7 +523,7 @@ fn proto_to_resp_msg(message: proto::Message) -> Result { - let key = record_priv::Key::from(message.key); + let key = record::Key::from(message.key); let rec = message .record .ok_or_else(|| invalid_data("received PutValue message with no record"))?; @@ -546,7 +541,7 @@ fn proto_to_resp_msg(message: proto::Message) -> Result Result { - let key = record_priv::Key::from(record.key); + let key = record::Key::from(record.key); let value = record.value; let publisher = if !record.publisher.is_empty() { @@ -603,10 +598,34 @@ where mod tests { use super::*; + #[test] + fn append_p2p() { + let peer_id = PeerId::random(); + let multiaddr = "/ip6/2001:db8::/tcp/1234".parse::().unwrap(); + + let payload = proto::Peer { + id: peer_id.to_bytes(), + addrs: vec![multiaddr.to_vec()], + connection: proto::ConnectionType::CAN_CONNECT, + }; + + let peer = KadPeer::try_from(payload).unwrap(); + + 
assert_eq!(peer.multiaddrs, vec![multiaddr.with_p2p(peer_id).unwrap()]) + } + #[test] fn skip_invalid_multiaddr() { - let valid_multiaddr: Multiaddr = "/ip6/2001:db8::/tcp/1234".parse().unwrap(); - let valid_multiaddr_bytes = valid_multiaddr.to_vec(); + let peer_id = PeerId::random(); + let multiaddr = "/ip6/2001:db8::/tcp/1234".parse::().unwrap(); + + let valid_multiaddr = multiaddr.clone().with_p2p(peer_id).unwrap(); + + let multiaddr_with_incorrect_peer_id = { + let other_peer_id = PeerId::random(); + assert_ne!(peer_id, other_peer_id); + multiaddr.with_p2p(other_peer_id).unwrap() + }; let invalid_multiaddr = { let a = vec![255; 8]; @@ -615,12 +634,16 @@ mod tests { }; let payload = proto::Peer { - id: PeerId::random().to_bytes(), - addrs: vec![valid_multiaddr_bytes, invalid_multiaddr], + id: peer_id.to_bytes(), + addrs: vec![ + valid_multiaddr.to_vec(), + multiaddr_with_incorrect_peer_id.to_vec(), + invalid_multiaddr, + ], connection: proto::ConnectionType::CAN_CONNECT, }; - let peer = KadPeer::try_from(payload).expect("not to fail"); + let peer = KadPeer::try_from(payload).unwrap(); assert_eq!(peer.multiaddrs, vec![valid_multiaddr]) } @@ -631,7 +654,7 @@ mod tests { use futures::{Future, Sink, Stream}; use libp2p_core::{PeerId, PublicKey, Transport}; use multihash::{encode, Hash}; - use protocol::{KadConnectionType, KadPeer, KademliaProtocolConfig}; + use protocol::{ConnectionType, KadPeer, ProtocolConfig}; use std::sync::mpsc; use std::thread; @@ -648,7 +671,7 @@ mod tests { closer_peers: vec![KadPeer { node_id: PeerId::random(), multiaddrs: vec!["/ip4/100.101.102.103/tcp/20105".parse().unwrap()], - connection_ty: KadConnectionType::Connected, + connection_ty: ConnectionType::Connected, }], }); test_one(KadMsg::GetProvidersReq { @@ -658,12 +681,12 @@ mod tests { closer_peers: vec![KadPeer { node_id: PeerId::random(), multiaddrs: vec!["/ip4/100.101.102.103/tcp/20105".parse().unwrap()], - connection_ty: KadConnectionType::Connected, + connection_ty: 
ConnectionType::Connected, }], provider_peers: vec![KadPeer { node_id: PeerId::random(), multiaddrs: vec!["/ip4/200.201.202.203/tcp/1999".parse().unwrap()], - connection_ty: KadConnectionType::NotConnected, + connection_ty: ConnectionType::NotConnected, }], }); test_one(KadMsg::AddProvider { @@ -671,7 +694,7 @@ mod tests { provider_peer: KadPeer { node_id: PeerId::random(), multiaddrs: vec!["/ip4/9.1.2.3/udp/23".parse().unwrap()], - connection_ty: KadConnectionType::Connected, + connection_ty: ConnectionType::Connected, }, }); // TODO: all messages @@ -681,7 +704,7 @@ mod tests { let (tx, rx) = mpsc::channel(); let bg_thread = thread::spawn(move || { - let transport = TcpTransport::default().with_upgrade(KademliaProtocolConfig); + let transport = TcpTransport::default().with_upgrade(ProtocolConfig); let (listener, addr) = transport .listen_on( "/ip4/127.0.0.1/tcp/0".parse().unwrap()) @@ -701,7 +724,7 @@ mod tests { let _ = rt.block_on(future).unwrap(); }); - let transport = TcpTransport::default().with_upgrade(KademliaProtocolConfig); + let transport = TcpTransport::default().with_upgrade(ProtocolConfig); let future = transport .dial(rx.recv().unwrap()) diff --git a/protocols/kad/src/query.rs b/protocols/kad/src/query.rs index d06b4920404..bb240d5864a 100644 --- a/protocols/kad/src/query.rs +++ b/protocols/kad/src/query.rs @@ -26,7 +26,7 @@ use peers::closest::{ use peers::fixed::FixedPeersIter; use peers::PeersIterState; -use crate::kbucket_priv::{Key, KeyBytes}; +use crate::kbucket::{Key, KeyBytes}; use crate::{ALPHA_VALUE, K_VALUE}; use either::Either; use fnv::FnvHashMap; @@ -225,24 +225,30 @@ impl QueryPool { #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] pub struct QueryId(usize); +impl std::fmt::Display for QueryId { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.0) + } +} + /// The configuration for queries in a `QueryPool`. 
#[derive(Debug, Clone)] pub(crate) struct QueryConfig { /// Timeout of a single query. /// - /// See [`crate::behaviour::KademliaConfig::set_query_timeout`] for details. + /// See [`crate::behaviour::Config::set_query_timeout`] for details. pub(crate) timeout: Duration, /// The replication factor to use. /// - /// See [`crate::behaviour::KademliaConfig::set_replication_factor`] for details. + /// See [`crate::behaviour::Config::set_replication_factor`] for details. pub(crate) replication_factor: NonZeroUsize, /// Allowed level of parallelism for iterative queries. /// - /// See [`crate::behaviour::KademliaConfig::set_parallelism`] for details. + /// See [`crate::behaviour::Config::set_parallelism`] for details. pub(crate) parallelism: NonZeroUsize, /// Whether to use disjoint paths on iterative lookups. /// - /// See [`crate::behaviour::KademliaConfig::disjoint_query_paths`] for details. + /// See [`crate::behaviour::Config::disjoint_query_paths`] for details. pub(crate) disjoint_query_paths: bool, } @@ -326,15 +332,6 @@ impl Query { } } - /// Checks whether the query is currently waiting for a result from `peer`. - pub(crate) fn is_waiting(&self, peer: &PeerId) -> bool { - match &self.peer_iter { - QueryPeerIter::Closest(iter) => iter.is_waiting(peer), - QueryPeerIter::ClosestDisjoint(iter) => iter.is_waiting(peer), - QueryPeerIter::Fixed(iter) => iter.is_waiting(peer), - } - } - /// Advances the state of the underlying peer iterator. 
fn next(&mut self, now: Instant) -> PeersIterState<'_> { let state = match &mut self.peer_iter { diff --git a/protocols/kad/src/query/peers/closest.rs b/protocols/kad/src/query/peers/closest.rs index 66ea9d9ce52..dc913f1bbca 100644 --- a/protocols/kad/src/query/peers/closest.rs +++ b/protocols/kad/src/query/peers/closest.rs @@ -20,7 +20,7 @@ use super::*; -use crate::kbucket_priv::{Distance, Key, KeyBytes}; +use crate::kbucket::{Distance, Key, KeyBytes}; use crate::{ALPHA_VALUE, K_VALUE}; use instant::Instant; use libp2p_identity::PeerId; @@ -175,10 +175,14 @@ impl ClosestPeersIter { }, } - let num_closest = self.closest_peers.len(); - let mut progress = false; - // Incorporate the reported closer peers into the iterator. + // + // The iterator makes progress if: + // 1, the iterator did not yet accumulate enough closest peers. + // OR + // 2, any of the new peers is closer to the target than any peer seen so far + // (i.e. is the first entry after being incorporated) + let mut progress = self.closest_peers.len() < self.config.num_results.get(); for peer in closer_peers { let key = peer.into(); let distance = self.target.distance(&key); @@ -187,11 +191,8 @@ impl ClosestPeersIter { state: PeerState::NotContacted, }; self.closest_peers.entry(distance).or_insert(peer); - // The iterator makes progress if the new peer is either closer to the target - // than any peer seen so far (i.e. is the first entry), or the iterator did - // not yet accumulate enough closest peers. - progress = self.closest_peers.keys().next() == Some(&distance) - || num_closest < self.config.num_results.get(); + + progress = self.closest_peers.keys().next() == Some(&distance) || progress; } // Update the iterator state. 
@@ -788,6 +789,7 @@ mod tests { QuickCheck::new().tests(10).quickcheck(prop as fn(_)) } + #[test] fn stalled_at_capacity() { fn prop(mut iter: ClosestPeersIter) { iter.state = State::Stalled; diff --git a/protocols/kad/src/query/peers/closest/disjoint.rs b/protocols/kad/src/query/peers/closest/disjoint.rs index 2ea484ed43c..68721f93d7c 100644 --- a/protocols/kad/src/query/peers/closest/disjoint.rs +++ b/protocols/kad/src/query/peers/closest/disjoint.rs @@ -19,7 +19,7 @@ // DEALINGS IN THE SOFTWARE. use super::*; -use crate::kbucket_priv::{Key, KeyBytes}; +use crate::kbucket::{Key, KeyBytes}; use instant::Instant; use libp2p_identity::PeerId; use std::{ @@ -31,7 +31,6 @@ use std::{ /// Wraps around a set of [`ClosestPeersIter`], enforcing a disjoint discovery /// path per configured parallelism according to the S/Kademlia paper. pub(crate) struct ClosestDisjointPeersIter { - config: ClosestPeersIterConfig, target: KeyBytes, /// The set of wrapped [`ClosestPeersIter`]. @@ -51,6 +50,7 @@ pub(crate) struct ClosestDisjointPeersIter { impl ClosestDisjointPeersIter { /// Creates a new iterator with a default configuration. + #[cfg(test)] pub(crate) fn new(target: KeyBytes, known_closest_peers: I) -> Self where I: IntoIterator>, @@ -88,7 +88,6 @@ impl ClosestDisjointPeersIter { let iters_len = iters.len(); ClosestDisjointPeersIter { - config, target: target.into(), iters, iter_order: (0..iters_len) @@ -190,10 +189,6 @@ impl ClosestDisjointPeersIter { updated } - pub(crate) fn is_waiting(&self, peer: &PeerId) -> bool { - self.iters.iter().any(|i| i.is_waiting(peer)) - } - pub(crate) fn next(&mut self, now: Instant) -> PeersIterState<'_> { let mut state = None; @@ -411,9 +406,8 @@ impl>> Iterator for ResultIter { .iter_mut() // Find the iterator with the next closest peer. 
.fold(Option::<&mut Peekable<_>>::None, |iter_a, iter_b| { - let iter_a = match iter_a { - Some(iter_a) => iter_a, - None => return Some(iter_b), + let Some(iter_a) = iter_a else { + return Some(iter_b); }; match (iter_a.peek(), iter_b.peek()) { diff --git a/protocols/kad/src/query/peers/fixed.rs b/protocols/kad/src/query/peers/fixed.rs index 1169feee87f..50a969380a3 100644 --- a/protocols/kad/src/query/peers/fixed.rs +++ b/protocols/kad/src/query/peers/fixed.rs @@ -115,10 +115,6 @@ impl FixedPeersIter { false } - pub(crate) fn is_waiting(&self, peer: &PeerId) -> bool { - self.peers.get(peer) == Some(&PeerState::Waiting) - } - pub(crate) fn finish(&mut self) { if let State::Waiting { .. } = self.state { self.state = State::Finished diff --git a/protocols/kad/src/record_priv.rs b/protocols/kad/src/record.rs similarity index 97% rename from protocols/kad/src/record_priv.rs rename to protocols/kad/src/record.rs index 2abe32d5894..4eb8e861c6f 100644 --- a/protocols/kad/src/record_priv.rs +++ b/protocols/kad/src/record.rs @@ -66,8 +66,8 @@ impl From> for Key { } } -impl From for Key { - fn from(m: Multihash) -> Key { +impl From> for Key { + fn from(m: Multihash) -> Key { Key::from(m.to_bytes()) } } @@ -168,7 +168,7 @@ mod tests { impl Arbitrary for Key { fn arbitrary(g: &mut Gen) -> Key { let hash: [u8; 32] = core::array::from_fn(|_| u8::arbitrary(g)); - Key::from(Multihash::wrap(SHA_256_MH, &hash).unwrap()) + Key::from(Multihash::<64>::wrap(SHA_256_MH, &hash).unwrap()) } } diff --git a/protocols/kad/src/record_priv/store.rs b/protocols/kad/src/record/store.rs similarity index 100% rename from protocols/kad/src/record_priv/store.rs rename to protocols/kad/src/record/store.rs diff --git a/protocols/kad/src/record_priv/store/memory.rs b/protocols/kad/src/record/store/memory.rs similarity index 94% rename from protocols/kad/src/record_priv/store/memory.rs rename to protocols/kad/src/record/store/memory.rs index 40ad4405873..edeae188ac6 100644 --- 
a/protocols/kad/src/record_priv/store/memory.rs +++ b/protocols/kad/src/record/store/memory.rs @@ -20,7 +20,7 @@ use super::*; -use crate::kbucket_priv; +use crate::kbucket; use libp2p_identity::PeerId; use smallvec::SmallVec; use std::borrow::Cow; @@ -30,7 +30,7 @@ use std::iter; /// In-memory implementation of a `RecordStore`. pub struct MemoryStore { /// The identity of the peer owning the store. - local_key: kbucket_priv::Key, + local_key: kbucket::Key, /// The configuration of the store. config: MemoryStoreConfig, /// The stored (regular) records. @@ -79,7 +79,7 @@ impl MemoryStore { /// Creates a new `MemoryRecordStore` with the given configuration. pub fn with_config(local_id: PeerId, config: MemoryStoreConfig) -> Self { MemoryStore { - local_key: kbucket_priv::Key::from(local_id), + local_key: kbucket::Key::from(local_id), config, records: HashMap::default(), provided: HashSet::default(), @@ -160,10 +160,10 @@ impl RecordStore for MemoryStore { } else { // It is a new provider record for that key. let local_key = self.local_key.clone(); - let key = kbucket_priv::Key::new(record.key.clone()); - let provider = kbucket_priv::Key::from(record.provider); + let key = kbucket::Key::new(record.key.clone()); + let provider = kbucket::Key::from(record.provider); if let Some(i) = providers.iter().position(|p| { - let pk = kbucket_priv::Key::from(p.provider); + let pk = kbucket::Key::from(p.provider); provider.distance(&key) < pk.distance(&key) }) { // Insert the new provider. 
@@ -221,12 +221,12 @@ mod tests { use quickcheck::*; use rand::Rng; - fn random_multihash() -> Multihash { + fn random_multihash() -> Multihash<64> { Multihash::wrap(SHA_256_MH, &rand::thread_rng().gen::<[u8; 32]>()).unwrap() } - fn distance(r: &ProviderRecord) -> kbucket_priv::Distance { - kbucket_priv::Key::new(r.key.clone()).distance(&kbucket_priv::Key::from(r.provider)) + fn distance(r: &ProviderRecord) -> kbucket::Distance { + kbucket::Key::new(r.key.clone()).distance(&kbucket::Key::from(r.provider)) } #[test] @@ -255,7 +255,7 @@ mod tests { #[test] fn providers_ordered_by_distance_to_key() { - fn prop(providers: Vec>) -> bool { + fn prop(providers: Vec>) -> bool { let mut store = MemoryStore::new(PeerId::random()); let key = Key::from(random_multihash()); diff --git a/protocols/kad/tests/client_mode.rs b/protocols/kad/tests/client_mode.rs new file mode 100644 index 00000000000..f290a36b727 --- /dev/null +++ b/protocols/kad/tests/client_mode.rs @@ -0,0 +1,186 @@ +use libp2p_identify as identify; +use libp2p_identity as identity; +use libp2p_kad::store::MemoryStore; +use libp2p_kad::{Behaviour, Config, Event, Mode}; +use libp2p_swarm::{Swarm, SwarmEvent}; +use libp2p_swarm_test::SwarmExt; +use tracing_subscriber::EnvFilter; +use Event::*; +use MyBehaviourEvent::*; + +#[async_std::test] +async fn server_gets_added_to_routing_table_by_client() { + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); + + let mut client = Swarm::new_ephemeral(MyBehaviour::new); + let mut server = Swarm::new_ephemeral(MyBehaviour::new); + + server.listen().with_memory_addr_external().await; + client.connect(&mut server).await; + + let server_peer_id = *server.local_peer_id(); + async_std::task::spawn(server.loop_on_next()); + + let peer = client + .wait(|e| match e { + SwarmEvent::Behaviour(Kad(RoutingUpdated { peer, .. 
})) => Some(peer), + _ => None, + }) + .await; + + assert_eq!(peer, server_peer_id); +} + +#[async_std::test] +async fn two_servers_add_each_other_to_routing_table() { + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); + + let mut server1 = Swarm::new_ephemeral(MyBehaviour::new); + let mut server2 = Swarm::new_ephemeral(MyBehaviour::new); + + server2.listen().with_memory_addr_external().await; + server1.connect(&mut server2).await; + + let server1_peer_id = *server1.local_peer_id(); + let server2_peer_id = *server2.local_peer_id(); + + match libp2p_swarm_test::drive(&mut server1, &mut server2).await { + ( + [Identify(_), Identify(_), Kad(RoutingUpdated { peer: peer1, .. })] + | [Identify(_), Kad(RoutingUpdated { peer: peer1, .. }), Identify(_)], + [Identify(_), Identify(_)], + ) => { + assert_eq!(peer1, server2_peer_id); + } + other => panic!("Unexpected events: {other:?}"), + } + + server1.listen().with_memory_addr_external().await; + server2.connect(&mut server1).await; + + async_std::task::spawn(server1.loop_on_next()); + + let peer = server2 + .wait(|e| match e { + SwarmEvent::Behaviour(Kad(RoutingUpdated { peer, .. })) => Some(peer), + _ => None, + }) + .await; + + assert_eq!(peer, server1_peer_id); +} + +#[async_std::test] +async fn adding_an_external_addresses_activates_server_mode_on_existing_connections() { + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); + + let mut client = Swarm::new_ephemeral(MyBehaviour::new); + let mut server = Swarm::new_ephemeral(MyBehaviour::new); + let server_peer_id = *server.local_peer_id(); + + let (memory_addr, _) = server.listen().await; + + client.dial(memory_addr.clone()).unwrap(); + + // Do the usual identify send/receive dance. 
+ match libp2p_swarm_test::drive(&mut client, &mut server).await { + ([Identify(_), Identify(_)], [Identify(_), Identify(_)]) => {} + other => panic!("Unexpected events: {other:?}"), + } + + // Server learns its external address (this could be through AutoNAT or some other mechanism). + server.add_external_address(memory_addr); + + // The server reconfigured its connection to the client to be in server mode, pushes that information to client which as a result updates its routing table and triggers a mode change to Mode::Server. + match libp2p_swarm_test::drive(&mut client, &mut server).await { + ( + [Identify(identify::Event::Received { .. }), Kad(RoutingUpdated { peer: peer1, .. })], + [Kad(ModeChanged { new_mode }), Identify(identify::Event::Pushed { .. })], + ) => { + assert_eq!(new_mode, Mode::Server); + assert_eq!(peer1, server_peer_id); + } + other => panic!("Unexpected events: {other:?}"), + } +} + +#[async_std::test] +async fn set_client_to_server_mode() { + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); + + let mut client = Swarm::new_ephemeral(MyBehaviour::new); + client.behaviour_mut().kad.set_mode(Some(Mode::Client)); + + let mut server = Swarm::new_ephemeral(MyBehaviour::new); + + server.listen().with_memory_addr_external().await; + client.connect(&mut server).await; + + let server_peer_id = *server.local_peer_id(); + + let client_event = client.wait(|e| match e { + SwarmEvent::Behaviour(Kad(RoutingUpdated { peer, .. })) => Some(peer), + _ => None, + }); + let server_event = server.wait(|e| match e { + SwarmEvent::Behaviour(Identify(identify::Event::Received { info, .. 
})) => Some(info), + _ => None, + }); + + let (peer, info) = futures::future::join(client_event, server_event).await; + + assert_eq!(peer, server_peer_id); + assert!(info + .protocols + .iter() + .all(|proto| libp2p_kad::PROTOCOL_NAME.ne(proto))); + + client.behaviour_mut().kad.set_mode(Some(Mode::Server)); + + async_std::task::spawn(client.loop_on_next()); + + let info = server + .wait(|e| match e { + SwarmEvent::Behaviour(Identify(identify::Event::Received { info, .. })) => Some(info), + _ => None, + }) + .await; + + assert!(info + .protocols + .iter() + .any(|proto| libp2p_kad::PROTOCOL_NAME.eq(proto))); +} + +#[derive(libp2p_swarm::NetworkBehaviour)] +#[behaviour(prelude = "libp2p_swarm::derive_prelude")] +struct MyBehaviour { + identify: identify::Behaviour, + kad: Behaviour, +} + +impl MyBehaviour { + fn new(k: identity::Keypair) -> Self { + let local_peer_id = k.public().to_peer_id(); + + Self { + identify: identify::Behaviour::new(identify::Config::new( + "/test/1.0.0".to_owned(), + k.public(), + )), + kad: Behaviour::with_config( + local_peer_id, + MemoryStore::new(local_peer_id), + Config::default(), + ), + } + } +} diff --git a/protocols/mdns/CHANGELOG.md b/protocols/mdns/CHANGELOG.md index 661f9779943..cfd02232b07 100644 --- a/protocols/mdns/CHANGELOG.md +++ b/protocols/mdns/CHANGELOG.md @@ -1,12 +1,26 @@ -## 0.44.0 - unreleased +## 0.45.1 + +- Ensure `Multiaddr` handled and returned by `Behaviour` are `/p2p` terminated. + See [PR 4596](https://github.com/libp2p/rust-libp2p/pull/4596). +- Fix a bug in the `Behaviour::poll` method causing missed mdns packets. + See [PR 4861](https://github.com/libp2p/rust-libp2p/pull/4861). + +## 0.45.0 + +- Don't perform IO in `Behaviour::poll`. + See [PR 4623](https://github.com/libp2p/rust-libp2p/pull/4623). + +## 0.44.0 - Change `mdns::Event` to hold `Vec` and remove `DiscoveredAddrsIter` and `ExpiredAddrsIter`. See [PR 3621]. - + - Raise MSRV to 1.65. See [PR 3715]. - Remove deprecated `Mdns` prefixed items. 
See [PR 3699]. +- Faster peer discovery with adaptive initial interval. See [PR 3975]. +[PR 3975]: https://github.com/libp2p/rust-libp2p/pull/3975 [PR 3621]: https://github.com/libp2p/rust-libp2p/pull/3621 [PR 3715]: https://github.com/libp2p/rust-libp2p/pull/3715 [PR 3699]: https://github.com/libp2p/rust-libp2p/pull/3699 diff --git a/protocols/mdns/Cargo.toml b/protocols/mdns/Cargo.toml index c7236a8c9d6..b18c87dcc71 100644 --- a/protocols/mdns/Cargo.toml +++ b/protocols/mdns/Cargo.toml @@ -2,7 +2,7 @@ name = "libp2p-mdns" edition = "2021" rust-version = { workspace = true } -version = "0.44.0" +version = "0.45.1" description = "Implementation of the libp2p mDNS discovery method" authors = ["Parity Technologies "] license = "MIT" @@ -11,34 +11,35 @@ keywords = ["peer-to-peer", "libp2p", "networking"] categories = ["network-programming", "asynchronous"] [dependencies] -async-io = { version = "1.13.0", optional = true } -data-encoding = "2.3.2" -futures = "0.3.28" -if-watch = "3.0.1" +async-std = { version = "1.12.0", optional = true } +async-io = { version = "2.2.2", optional = true } +data-encoding = "2.5.0" +futures = "0.3.30" +if-watch = "3.2.0" libp2p-core = { workspace = true } libp2p-swarm = { workspace = true } libp2p-identity = { workspace = true } -log = "0.4.14" rand = "0.8.3" -smallvec = "1.6.1" -socket2 = { version = "0.5.2", features = ["all"] } -tokio = { version = "1.28", default-features = false, features = ["net", "time"], optional = true} -trust-dns-proto = { version = "0.22.0", default-features = false, features = ["mdns", "tokio-runtime"] } +smallvec = "1.11.2" +socket2 = { version = "0.5.5", features = ["all"] } +tokio = { version = "1.35", default-features = false, features = ["net", "time"], optional = true} +tracing = "0.1.37" +hickory-proto = { version = "0.24.0", default-features = false, features = ["mdns"] } void = "1.0.2" [features] tokio = ["dep:tokio", "if-watch/tokio"] -async-io = ["dep:async-io", "if-watch/smol"] +async-io = 
["dep:async-io", "dep:async-std", "if-watch/smol"] [dev-dependencies] async-std = { version = "1.9.0", features = ["attributes"] } -env_logger = "0.10.0" libp2p-noise = { workspace = true } libp2p-swarm = { workspace = true, features = ["tokio", "async-std"] } libp2p-tcp = { workspace = true, features = ["tokio", "async-io"] } libp2p-yamux = { workspace = true } -tokio = { version = "1.28", default-features = false, features = ["macros", "rt", "rt-multi-thread", "time"] } -libp2p-swarm-test = { workspace = true } +tokio = { version = "1.35", default-features = false, features = ["macros", "rt", "rt-multi-thread", "time"] } +libp2p-swarm-test = { path = "../../swarm-test" } +tracing-subscriber = { version = "0.3", features = ["env-filter"] } [[test]] name = "use-async-std" @@ -55,3 +56,6 @@ required-features = ["tokio"] all-features = true rustdoc-args = ["--cfg", "docsrs"] rustc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true diff --git a/protocols/mdns/src/behaviour.rs b/protocols/mdns/src/behaviour.rs index 5186ce91cb7..4e3533f26ab 100644 --- a/protocols/mdns/src/behaviour.rs +++ b/protocols/mdns/src/behaviour.rs @@ -25,17 +25,20 @@ mod timer; use self::iface::InterfaceState; use crate::behaviour::{socket::AsyncSocket, timer::Builder}; use crate::Config; -use futures::Stream; +use futures::channel::mpsc; +use futures::{Stream, StreamExt}; use if_watch::IfEvent; use libp2p_core::{Endpoint, Multiaddr}; use libp2p_identity::PeerId; use libp2p_swarm::behaviour::FromSwarm; use libp2p_swarm::{ - dummy, ConnectionDenied, ConnectionId, ListenAddresses, NetworkBehaviour, PollParameters, - THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, + dummy, ConnectionDenied, ConnectionId, ListenAddresses, NetworkBehaviour, THandler, + THandlerInEvent, THandlerOutEvent, ToSwarm, }; use smallvec::SmallVec; use std::collections::hash_map::{Entry, HashMap}; +use std::future::Future; +use std::sync::{Arc, RwLock}; use std::{cmp, fmt, io, net::IpAddr, pin::Pin, task::Context, 
task::Poll, time::Instant}; /// An abstraction to allow for compatibility with various async runtimes. @@ -47,16 +50,27 @@ pub trait Provider: 'static { /// The IfWatcher type. type Watcher: Stream> + fmt::Debug + Unpin; + type TaskHandle: Abort; + /// Create a new instance of the `IfWatcher` type. fn new_watcher() -> Result; + + fn spawn(task: impl Future + Send + 'static) -> Self::TaskHandle; +} + +#[allow(unreachable_pub)] // Not re-exported. +pub trait Abort { + fn abort(self); } /// The type of a [`Behaviour`] using the `async-io` implementation. #[cfg(feature = "async-io")] pub mod async_io { use super::Provider; - use crate::behaviour::{socket::asio::AsyncUdpSocket, timer::asio::AsyncTimer}; + use crate::behaviour::{socket::asio::AsyncUdpSocket, timer::asio::AsyncTimer, Abort}; + use async_std::task::JoinHandle; use if_watch::smol::IfWatcher; + use std::future::Future; #[doc(hidden)] pub enum AsyncIo {} @@ -65,10 +79,21 @@ pub mod async_io { type Socket = AsyncUdpSocket; type Timer = AsyncTimer; type Watcher = IfWatcher; + type TaskHandle = JoinHandle<()>; fn new_watcher() -> Result { IfWatcher::new() } + + fn spawn(task: impl Future + Send + 'static) -> JoinHandle<()> { + async_std::task::spawn(task) + } + } + + impl Abort for JoinHandle<()> { + fn abort(self) { + async_std::task::spawn(self.cancel()); + } } pub type Behaviour = super::Behaviour; @@ -78,8 +103,10 @@ pub mod async_io { #[cfg(feature = "tokio")] pub mod tokio { use super::Provider; - use crate::behaviour::{socket::tokio::TokioUdpSocket, timer::tokio::TokioTimer}; + use crate::behaviour::{socket::tokio::TokioUdpSocket, timer::tokio::TokioTimer, Abort}; use if_watch::tokio::IfWatcher; + use std::future::Future; + use tokio::task::JoinHandle; #[doc(hidden)] pub enum Tokio {} @@ -88,10 +115,21 @@ pub mod tokio { type Socket = TokioUdpSocket; type Timer = TokioTimer; type Watcher = IfWatcher; + type TaskHandle = JoinHandle<()>; fn new_watcher() -> Result { IfWatcher::new() } + + fn spawn(task: 
impl Future + Send + 'static) -> Self::TaskHandle { + tokio::spawn(task) + } + } + + impl Abort for JoinHandle<()> { + fn abort(self) { + JoinHandle::abort(&self) + } } pub type Behaviour = super::Behaviour; @@ -110,8 +148,11 @@ where /// Iface watcher. if_watch: P::Watcher, - /// Mdns interface states. - iface_states: HashMap>, + /// Handles to tasks running the mDNS queries. + if_tasks: HashMap, + + query_response_receiver: mpsc::Receiver<(PeerId, Multiaddr, Instant)>, + query_response_sender: mpsc::Sender<(PeerId, Multiaddr, Instant)>, /// List of nodes that we have discovered, the address, and when their TTL expires. /// @@ -124,7 +165,11 @@ where /// `None` if `discovered_nodes` is empty. closest_expiration: Option, - listen_addresses: ListenAddresses, + /// The current set of listen addresses. + /// + /// This is shared across all interface tasks using an [`RwLock`]. + /// The [`Behaviour`] updates this upon new [`FromSwarm`] events where as [`InterfaceState`]s read from it to answer inbound mDNS queries. + listen_addresses: Arc>, local_peer_id: PeerId, } @@ -135,10 +180,14 @@ where { /// Builds a new `Mdns` behaviour. pub fn new(config: Config, local_peer_id: PeerId) -> io::Result { + let (tx, rx) = mpsc::channel(10); // Chosen arbitrarily. + Ok(Self { config, if_watch: P::new_watcher()?, - iface_states: Default::default(), + if_tasks: Default::default(), + query_response_receiver: rx, + query_response_sender: tx, discovered_nodes: Default::default(), closest_expiration: Default::default(), listen_addresses: Default::default(), @@ -147,6 +196,7 @@ where } /// Returns true if the given `PeerId` is in the list of nodes discovered through mDNS. + #[deprecated(note = "Use `discovered_nodes` iterator instead.")] pub fn has_node(&self, peer_id: &PeerId) -> bool { self.discovered_nodes().any(|p| p == peer_id) } @@ -157,6 +207,7 @@ where } /// Expires a node before the ttl. + #[deprecated(note = "Unused API. 
Will be removed in the next release.")] pub fn expire_node(&mut self, peer_id: &PeerId) { let now = Instant::now(); for (peer, _addr, expires) in &mut self.discovered_nodes { @@ -173,7 +224,7 @@ where P: Provider, { type ConnectionHandler = dummy::ConnectionHandler; - type OutEvent = Event; + type ToSwarm = Event; fn handle_established_inbound_connection( &mut self, @@ -224,35 +275,18 @@ where void::unreachable(ev) } - fn on_swarm_event(&mut self, event: FromSwarm) { - self.listen_addresses.on_swarm_event(&event); - - match event { - FromSwarm::NewListener(_) => { - log::trace!("waking interface state because listening address changed"); - for iface in self.iface_states.values_mut() { - iface.fire_timer(); - } - } - FromSwarm::ConnectionClosed(_) - | FromSwarm::ConnectionEstablished(_) - | FromSwarm::DialFailure(_) - | FromSwarm::AddressChange(_) - | FromSwarm::ListenFailure(_) - | FromSwarm::NewListenAddr(_) - | FromSwarm::ExpiredListenAddr(_) - | FromSwarm::ListenerError(_) - | FromSwarm::ListenerClosed(_) - | FromSwarm::NewExternalAddr(_) - | FromSwarm::ExpiredExternalAddr(_) => {} - } + fn on_swarm_event(&mut self, event: FromSwarm) { + self.listen_addresses + .write() + .unwrap_or_else(|e| e.into_inner()) + .on_swarm_event(&event); } + #[tracing::instrument(level = "trace", name = "NetworkBehaviour::poll", skip(self, cx))] fn poll( &mut self, cx: &mut Context<'_>, - _: &mut impl PollParameters, - ) -> Poll>> { + ) -> Poll>> { // Poll ifwatch. 
while let Poll::Ready(Some(event)) = Pin::new(&mut self.if_watch).poll_next(cx) { match event { @@ -266,43 +300,52 @@ where { continue; } - if let Entry::Vacant(e) = self.iface_states.entry(addr) { - match InterfaceState::new(addr, self.config.clone(), self.local_peer_id) { + if let Entry::Vacant(e) = self.if_tasks.entry(addr) { + match InterfaceState::::new( + addr, + self.config.clone(), + self.local_peer_id, + self.listen_addresses.clone(), + self.query_response_sender.clone(), + ) { Ok(iface_state) => { - e.insert(iface_state); + e.insert(P::spawn(iface_state)); + } + Err(err) => { + tracing::error!("failed to create `InterfaceState`: {}", err) } - Err(err) => log::error!("failed to create `InterfaceState`: {}", err), } } } Ok(IfEvent::Down(inet)) => { - if self.iface_states.contains_key(&inet.addr()) { - log::info!("dropping instance {}", inet.addr()); - self.iface_states.remove(&inet.addr()); + if let Some(handle) = self.if_tasks.remove(&inet.addr()) { + tracing::info!(instance=%inet.addr(), "dropping instance"); + + handle.abort(); } } - Err(err) => log::error!("if watch returned an error: {}", err), + Err(err) => tracing::error!("if watch returned an error: {}", err), } } // Emit discovered event. 
let mut discovered = Vec::new(); - for iface_state in self.iface_states.values_mut() { - while let Poll::Ready((peer, addr, expiration)) = - iface_state.poll(cx, &self.listen_addresses) + + while let Poll::Ready(Some((peer, addr, expiration))) = + self.query_response_receiver.poll_next_unpin(cx) + { + if let Some((_, _, cur_expires)) = self + .discovered_nodes + .iter_mut() + .find(|(p, a, _)| *p == peer && *a == addr) { - if let Some((_, _, cur_expires)) = self - .discovered_nodes - .iter_mut() - .find(|(p, a, _)| *p == peer && *a == addr) - { - *cur_expires = cmp::max(*cur_expires, expiration); - } else { - log::info!("discovered: {} {}", peer, addr); - self.discovered_nodes.push((peer, addr.clone(), expiration)); - discovered.push((peer, addr)); - } + *cur_expires = cmp::max(*cur_expires, expiration); + } else { + tracing::info!(%peer, address=%addr, "discovered peer on address"); + self.discovered_nodes.push((peer, addr.clone(), expiration)); + discovered.push((peer, addr)); } } + if !discovered.is_empty() { let event = Event::Discovered(discovered); return Poll::Ready(ToSwarm::GenerateEvent(event)); @@ -313,7 +356,7 @@ where let mut expired = Vec::new(); self.discovered_nodes.retain(|(peer, addr, expiration)| { if *expiration <= now { - log::info!("expired: {} {}", peer, addr); + tracing::info!(%peer, address=%addr, "expired peer on address"); expired.push((*peer, addr.clone())); return false; } diff --git a/protocols/mdns/src/behaviour/iface.rs b/protocols/mdns/src/behaviour/iface.rs index 0f556e1b237..9302065cde2 100644 --- a/protocols/mdns/src/behaviour/iface.rs +++ b/protocols/mdns/src/behaviour/iface.rs @@ -25,10 +25,14 @@ use self::dns::{build_query, build_query_response, build_service_discovery_respo use self::query::MdnsPacket; use crate::behaviour::{socket::AsyncSocket, timer::Builder}; use crate::Config; +use futures::channel::mpsc; +use futures::{SinkExt, StreamExt}; use libp2p_core::Multiaddr; use libp2p_identity::PeerId; use 
libp2p_swarm::ListenAddresses; use socket2::{Domain, Socket, Type}; +use std::future::Future; +use std::sync::{Arc, RwLock}; use std::{ collections::VecDeque, io, @@ -38,6 +42,30 @@ use std::{ time::{Duration, Instant}, }; +/// Initial interval for starting probe +const INITIAL_TIMEOUT_INTERVAL: Duration = Duration::from_millis(500); + +#[derive(Debug, Clone)] +enum ProbeState { + Probing(Duration), + Finished(Duration), +} + +impl Default for ProbeState { + fn default() -> Self { + ProbeState::Probing(INITIAL_TIMEOUT_INTERVAL) + } +} + +impl ProbeState { + fn interval(&self) -> &Duration { + match self { + ProbeState::Probing(query_interval) => query_interval, + ProbeState::Finished(query_interval) => query_interval, + } + } +} + /// An mDNS instance for a networking interface. To discover all peers when having multiple /// interfaces an [`InterfaceState`] is required for each interface. #[derive(Debug)] @@ -48,6 +76,11 @@ pub(crate) struct InterfaceState { recv_socket: U, /// Send socket. send_socket: U, + + listen_addresses: Arc>, + + query_response_sender: mpsc::Sender<(PeerId, Multiaddr, Instant)>, + /// Buffer used for receiving data from the main socket. /// RFC6762 discourages packets larger than the interface MTU, but allows sizes of up to 9000 /// bytes, if it can be ensured that all participating devices can handle such large packets. @@ -67,7 +100,7 @@ pub(crate) struct InterfaceState { discovered: VecDeque<(PeerId, Multiaddr, Instant)>, /// TTL ttl: Duration, - + probe_state: ProbeState, local_peer_id: PeerId, } @@ -77,8 +110,14 @@ where T: Builder + futures::Stream, { /// Builds a new [`InterfaceState`]. 
- pub(crate) fn new(addr: IpAddr, config: Config, local_peer_id: PeerId) -> io::Result { - log::info!("creating instance on iface {}", addr); + pub(crate) fn new( + addr: IpAddr, + config: Config, + local_peer_id: PeerId, + listen_addresses: Arc>, + query_response_sender: mpsc::Sender<(PeerId, Multiaddr, Instant)>, + ) -> io::Result { + tracing::info!(address=%addr, "creating instance on iface address"); let recv_socket = match addr { IpAddr::V4(addr) => { let socket = Socket::new(Domain::IPV4, Type::DGRAM, Some(socket2::Protocol::UDP))?; @@ -130,116 +169,161 @@ where addr, recv_socket, send_socket, + listen_addresses, + query_response_sender, recv_buffer: [0; 4096], send_buffer: Default::default(), discovered: Default::default(), query_interval, - timeout: T::interval_at(Instant::now(), query_interval), + timeout: T::interval_at(Instant::now(), INITIAL_TIMEOUT_INTERVAL), multicast_addr, ttl: config.ttl, + probe_state: Default::default(), local_peer_id, }) } pub(crate) fn reset_timer(&mut self) { - self.timeout = T::interval(self.query_interval); + tracing::trace!(address=%self.addr, probe_state=?self.probe_state, "reset timer"); + let interval = *self.probe_state.interval(); + self.timeout = T::interval(interval); } - pub(crate) fn fire_timer(&mut self) { - self.timeout = T::interval_at(Instant::now(), self.query_interval); + fn mdns_socket(&self) -> SocketAddr { + SocketAddr::new(self.multicast_addr, 5353) } +} + +impl Future for InterfaceState +where + U: AsyncSocket, + T: Builder + futures::Stream, +{ + type Output = (); + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let this = self.get_mut(); - pub(crate) fn poll( - &mut self, - cx: &mut Context, - listen_addresses: &ListenAddresses, - ) -> Poll<(PeerId, Multiaddr, Instant)> { loop { // 1st priority: Low latency: Create packet ASAP after timeout. 
- if Pin::new(&mut self.timeout).poll_next(cx).is_ready() { - log::trace!("sending query on iface {}", self.addr); - self.send_buffer.push_back(build_query()); + if this.timeout.poll_next_unpin(cx).is_ready() { + tracing::trace!(address=%this.addr, "sending query on iface"); + this.send_buffer.push_back(build_query()); + tracing::trace!(address=%this.addr, probe_state=?this.probe_state, "tick"); + + // Stop to probe when the initial interval reach the query interval + if let ProbeState::Probing(interval) = this.probe_state { + let interval = interval * 2; + this.probe_state = if interval >= this.query_interval { + ProbeState::Finished(this.query_interval) + } else { + ProbeState::Probing(interval) + }; + } + + this.reset_timer(); } // 2nd priority: Keep local buffers small: Send packets to remote. - if let Some(packet) = self.send_buffer.pop_front() { - match Pin::new(&mut self.send_socket).poll_write( - cx, - &packet, - SocketAddr::new(self.multicast_addr, 5353), - ) { + if let Some(packet) = this.send_buffer.pop_front() { + match this.send_socket.poll_write(cx, &packet, this.mdns_socket()) { Poll::Ready(Ok(_)) => { - log::trace!("sent packet on iface {}", self.addr); + tracing::trace!(address=%this.addr, "sent packet on iface address"); continue; } Poll::Ready(Err(err)) => { - log::error!("error sending packet on iface {} {}", self.addr, err); + tracing::error!(address=%this.addr, "error sending packet on iface address {}", err); continue; } Poll::Pending => { - self.send_buffer.push_front(packet); + this.send_buffer.push_front(packet); } } } // 3rd priority: Keep local buffers small: Return discovered addresses. 
- if let Some(discovered) = self.discovered.pop_front() { - return Poll::Ready(discovered); + if this.query_response_sender.poll_ready_unpin(cx).is_ready() { + if let Some(discovered) = this.discovered.pop_front() { + match this.query_response_sender.try_send(discovered) { + Ok(()) => {} + Err(e) if e.is_disconnected() => { + return Poll::Ready(()); + } + Err(e) => { + this.discovered.push_front(e.into_inner()); + } + } + + continue; + } } // 4th priority: Remote work: Answer incoming requests. - match Pin::new(&mut self.recv_socket) - .poll_read(cx, &mut self.recv_buffer) - .map_ok(|(len, from)| MdnsPacket::new_from_bytes(&self.recv_buffer[..len], from)) + match this + .recv_socket + .poll_read(cx, &mut this.recv_buffer) + .map_ok(|(len, from)| MdnsPacket::new_from_bytes(&this.recv_buffer[..len], from)) { Poll::Ready(Ok(Ok(Some(MdnsPacket::Query(query))))) => { - self.reset_timer(); - log::trace!( - "received query from {} on {}", - query.remote_addr(), - self.addr + tracing::trace!( + address=%this.addr, + remote_address=%query.remote_addr(), + "received query from remote address on address" ); - self.send_buffer.extend(build_query_response( + this.send_buffer.extend(build_query_response( query.query_id(), - self.local_peer_id, - listen_addresses.iter(), - self.ttl, + this.local_peer_id, + this.listen_addresses + .read() + .unwrap_or_else(|e| e.into_inner()) + .iter(), + this.ttl, )); continue; } Poll::Ready(Ok(Ok(Some(MdnsPacket::Response(response))))) => { - log::trace!( - "received response from {} on {}", - response.remote_addr(), - self.addr + tracing::trace!( + address=%this.addr, + remote_address=%response.remote_addr(), + "received response from remote address on address" ); - self.discovered - .extend(response.extract_discovered(Instant::now(), self.local_peer_id)); + this.discovered + .extend(response.extract_discovered(Instant::now(), this.local_peer_id)); + + // Stop probing when we have a valid response + if !this.discovered.is_empty() { + 
this.probe_state = ProbeState::Finished(this.query_interval); + this.reset_timer(); + } continue; } Poll::Ready(Ok(Ok(Some(MdnsPacket::ServiceDiscovery(disc))))) => { - log::trace!( - "received service discovery from {} on {}", - disc.remote_addr(), - self.addr + tracing::trace!( + address=%this.addr, + remote_address=%disc.remote_addr(), + "received service discovery from remote address on address" ); - self.send_buffer - .push_back(build_service_discovery_response(disc.query_id(), self.ttl)); + this.send_buffer + .push_back(build_service_discovery_response(disc.query_id(), this.ttl)); continue; } Poll::Ready(Err(err)) if err.kind() == std::io::ErrorKind::WouldBlock => { // No more bytes available on the socket to read + continue; } Poll::Ready(Err(err)) => { - log::error!("failed reading datagram: {}", err); + tracing::error!("failed reading datagram: {}", err); + return Poll::Ready(()); } Poll::Ready(Ok(Err(err))) => { - log::debug!("Parsing mdns packet failed: {:?}", err); + tracing::debug!("Parsing mdns packet failed: {:?}", err); + continue; } - Poll::Ready(Ok(Ok(None))) | Poll::Pending => {} + Poll::Ready(Ok(Ok(None))) => continue, + Poll::Pending => {} } return Poll::Pending; diff --git a/protocols/mdns/src/behaviour/iface/dns.rs b/protocols/mdns/src/behaviour/iface/dns.rs index 6a10497e69f..6cc5550dbe5 100644 --- a/protocols/mdns/src/behaviour/iface/dns.rs +++ b/protocols/mdns/src/behaviour/iface/dns.rs @@ -134,7 +134,7 @@ pub(crate) fn build_query_response<'a>( records.push(txt_record); } Err(e) => { - log::warn!("Excluding address {} from response: {:?}", addr, e); + tracing::warn!(address=%addr, "Excluding address from response: {:?}", e); } } @@ -395,9 +395,9 @@ impl error::Error for MdnsResponseError {} #[cfg(test)] mod tests { use super::*; + use hickory_proto::op::Message; use libp2p_identity as identity; use std::time::Duration; - use trust_dns_proto::op::Message; #[test] fn build_query_correct() { diff --git 
a/protocols/mdns/src/behaviour/iface/query.rs b/protocols/mdns/src/behaviour/iface/query.rs index 745926cf658..eeb699fca6b 100644 --- a/protocols/mdns/src/behaviour/iface/query.rs +++ b/protocols/mdns/src/behaviour/iface/query.rs @@ -20,17 +20,17 @@ use super::dns; use crate::{META_QUERY_SERVICE_FQDN, SERVICE_NAME_FQDN}; +use hickory_proto::{ + op::Message, + rr::{Name, RData}, +}; use libp2p_core::{ address_translation, multiaddr::{Multiaddr, Protocol}, }; use libp2p_identity::PeerId; use std::time::Instant; -use std::{convert::TryFrom, fmt, net::SocketAddr, str, time::Duration}; -use trust_dns_proto::{ - op::Message, - rr::{Name, RData}, -}; +use std::{fmt, net::SocketAddr, str, time::Duration}; /// A valid mDNS packet received by the service. #[derive(Debug)] @@ -47,7 +47,7 @@ impl MdnsPacket { pub(crate) fn new_from_bytes( buf: &[u8], from: SocketAddr, - ) -> Result, trust_dns_proto::error::ProtoError> { + ) -> Result, hickory_proto::error::ProtoError> { let packet = Message::from_vec(buf)?; if packet.query().is_none() { @@ -156,9 +156,8 @@ impl MdnsResponse { return None; } - let record_value = match record.data() { - Some(RData::PTR(record)) => record, - _ => return None, + let RData::PTR(record_value) = record.data()? 
else { + return None; }; MdnsPeer::new(packet, record_value, record.ttl()) @@ -181,6 +180,7 @@ impl MdnsResponse { peer.addresses().iter().filter_map(move |address| { let new_addr = address_translation(address, &observed)?; + let new_addr = new_addr.with_p2p(*peer.id()).ok()?; Some((*peer.id(), new_addr, new_expiration)) }) @@ -247,33 +247,22 @@ impl MdnsPeer { .flat_map(|txt| txt.iter()) .filter_map(|txt| { // TODO: wrong, txt can be multiple character strings - let addr = match dns::decode_character_string(txt) { - Ok(a) => a, - Err(_) => return None, - }; + let addr = dns::decode_character_string(txt).ok()?; + if !addr.starts_with(b"dnsaddr=") { return None; } - let addr = match str::from_utf8(&addr[8..]) { - Ok(a) => a, - Err(_) => return None, - }; - let mut addr = match addr.parse::() { - Ok(a) => a, - Err(_) => return None, - }; + + let mut addr = str::from_utf8(&addr[8..]).ok()?.parse::().ok()?; + match addr.pop() { Some(Protocol::P2p(peer_id)) => { - if let Ok(peer_id) = PeerId::try_from(peer_id) { - if let Some(pid) = &my_peer_id { - if peer_id != *pid { - return None; - } - } else { - my_peer_id.replace(peer_id); + if let Some(pid) = &my_peer_id { + if peer_id != *pid { + return None; } } else { - return None; + my_peer_id.replace(peer_id); } } _ => return None, @@ -329,8 +318,8 @@ mod tests { let mut addr1: Multiaddr = "/ip4/1.2.3.4/tcp/5000".parse().expect("bad multiaddress"); let mut addr2: Multiaddr = "/ip6/::1/udp/10000".parse().expect("bad multiaddress"); - addr1.push(Protocol::P2p(peer_id.into())); - addr2.push(Protocol::P2p(peer_id.into())); + addr1.push(Protocol::P2p(peer_id)); + addr2.push(Protocol::P2p(peer_id)); let packets = build_query_response( 0xf8f8, @@ -348,9 +337,8 @@ mod tests { if record.name().to_utf8() != SERVICE_NAME_FQDN { return None; } - let record_value = match record.data() { - Some(RData::PTR(record)) => record, - _ => return None, + let Some(RData::PTR(record_value)) = record.data() else { + return None; }; 
Some(record_value) }) diff --git a/protocols/mdns/tests/use-async-std.rs b/protocols/mdns/tests/use-async-std.rs index bfc3cd1201d..549f70978af 100644 --- a/protocols/mdns/tests/use-async-std.rs +++ b/protocols/mdns/tests/use-async-std.rs @@ -24,17 +24,22 @@ use libp2p_mdns::{async_io::Behaviour, Config}; use libp2p_swarm::{Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt as _; use std::time::Duration; +use tracing_subscriber::EnvFilter; #[async_std::test] async fn test_discovery_async_std_ipv4() { - env_logger::try_init().ok(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); run_discovery_test(Config::default()).await } #[async_std::test] async fn test_discovery_async_std_ipv6() { - env_logger::try_init().ok(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let config = Config { enable_ipv6: true, @@ -45,7 +50,9 @@ async fn test_discovery_async_std_ipv6() { #[async_std::test] async fn test_expired_async_std() { - env_logger::try_init().ok(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let config = Config { ttl: Duration::from_secs(1), @@ -78,7 +85,9 @@ async fn test_expired_async_std() { #[async_std::test] async fn test_no_expiration_on_close_async_std() { - env_logger::try_init().ok(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let config = Config { ttl: Duration::from_secs(120), query_interval: Duration::from_secs(10), @@ -148,7 +157,20 @@ async fn run_discovery_test(config: Config) { async fn create_swarm(config: Config) -> Swarm { let mut swarm = Swarm::new_ephemeral(|key| Behaviour::new(config, key.public().to_peer_id()).unwrap()); - swarm.listen().await; + + // Manually listen on all interfaces because mDNS only works for non-loopback addresses. 
+ let expected_listener_id = swarm + .listen_on("/ip4/0.0.0.0/tcp/0".parse().unwrap()) + .unwrap(); + + swarm + .wait(|e| match e { + SwarmEvent::NewListenAddr { listener_id, .. } => { + (listener_id == expected_listener_id).then_some(()) + } + _ => None, + }) + .await; swarm } diff --git a/protocols/mdns/tests/use-tokio.rs b/protocols/mdns/tests/use-tokio.rs index 229418437f4..cf0d9f4bed4 100644 --- a/protocols/mdns/tests/use-tokio.rs +++ b/protocols/mdns/tests/use-tokio.rs @@ -19,20 +19,25 @@ // DEALINGS IN THE SOFTWARE.use futures::StreamExt; use futures::future::Either; use libp2p_mdns::{tokio::Behaviour, Config, Event}; -use libp2p_swarm::Swarm; +use libp2p_swarm::{Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt as _; use std::time::Duration; +use tracing_subscriber::EnvFilter; #[tokio::test] async fn test_discovery_tokio_ipv4() { - env_logger::try_init().ok(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); run_discovery_test(Config::default()).await } #[tokio::test] async fn test_discovery_tokio_ipv6() { - env_logger::try_init().ok(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let config = Config { enable_ipv6: true, @@ -43,7 +48,9 @@ async fn test_discovery_tokio_ipv6() { #[tokio::test] async fn test_expired_tokio() { - env_logger::try_init().ok(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let config = Config { ttl: Duration::from_secs(1), @@ -104,7 +111,20 @@ async fn run_discovery_test(config: Config) { async fn create_swarm(config: Config) -> Swarm { let mut swarm = Swarm::new_ephemeral(|key| Behaviour::new(config, key.public().to_peer_id()).unwrap()); - swarm.listen().await; + + // Manually listen on all interfaces because mDNS only works for non-loopback addresses. 
+ let expected_listener_id = swarm + .listen_on("/ip4/0.0.0.0/tcp/0".parse().unwrap()) + .unwrap(); + + swarm + .wait(|e| match e { + SwarmEvent::NewListenAddr { listener_id, .. } => { + (listener_id == expected_listener_id).then_some(()) + } + _ => None, + }) + .await; swarm } diff --git a/protocols/perf/CHANGELOG.md b/protocols/perf/CHANGELOG.md index b3a1028e768..4e448d7f44a 100644 --- a/protocols/perf/CHANGELOG.md +++ b/protocols/perf/CHANGELOG.md @@ -1,4 +1,10 @@ -## 0.2.0 - unreleased +## 0.3.0 + +- Continuously measure on single connection (iperf-style). + See https://github.com/libp2p/test-plans/issues/261 for high level overview. + See [PR 4382](https://github.com/libp2p/rust-libp2p/pull/4382). + +## 0.2.0 - Raise MSRV to 1.65. See [PR 3715]. diff --git a/protocols/perf/Cargo.toml b/protocols/perf/Cargo.toml index cff9179817e..944cc44a5ab 100644 --- a/protocols/perf/Cargo.toml +++ b/protocols/perf/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-perf" edition = "2021" rust-version = { workspace = true } description = "libp2p perf protocol implementation" -version = "0.2.0" +version = "0.3.0" authors = ["Max Inden "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -12,26 +12,31 @@ categories = ["network-programming", "asynchronous"] [dependencies] anyhow = "1" -async-std = { version = "1.9.0", features = ["attributes"] } -clap = { version = "4.2.7", features = ["derive"] } -env_logger = "0.10.0" -futures = "0.3.28" -instant = "0.1.11" +clap = { version = "4.4.11", features = ["derive"] } +futures = "0.3.30" +futures-bounded = { workspace = true } +futures-timer = "3.0" +instant = "0.1.12" +libp2p = { workspace = true, features = ["tokio", "tcp", "quic", "tls", "yamux", "dns"] } libp2p-core = { workspace = true } -libp2p-dns = { workspace = true, features = ["async-std"] } -libp2p-identity = { workspace = true } -libp2p-noise = { workspace = true } -libp2p-quic = { workspace = true, features = ["async-std"] } -libp2p-swarm = { workspace = 
true, features = ["macros", "async-std"] } -libp2p-tcp = { workspace = true, features = ["async-io"] } +libp2p-dns = { workspace = true, features = ["tokio"] } +libp2p-identity = { workspace = true, features = ["rand"] } +libp2p-quic = { workspace = true, features = ["tokio"] } +libp2p-swarm = { workspace = true, features = ["macros", "tokio"] } +libp2p-tcp = { workspace = true, features = ["tokio"] } +libp2p-tls = { workspace = true } libp2p-yamux = { workspace = true } -log = "0.4" +serde = { version = "1.0", features = ["derive"] } +serde_json = "1.0" thiserror = "1.0" +tracing = "0.1.37" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } +tokio = { version = "1.35", default-features = false, features = ["macros", "rt", "rt-multi-thread"] } void = "1" [dev-dependencies] rand = "0.8" -libp2p-swarm-test = { workspace = true } +libp2p-swarm-test = { path = "../../swarm-test" } # Passing arguments to the docsrs builder in order to properly document cfg's. # More information: https://docs.rs/about/builds#cross-compiling @@ -39,3 +44,6 @@ libp2p-swarm-test = { workspace = true } all-features = true rustdoc-args = ["--cfg", "docsrs"] rustc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true diff --git a/protocols/perf/Dockerfile b/protocols/perf/Dockerfile index aef8eed1cad..6523e3bede1 100644 --- a/protocols/perf/Dockerfile +++ b/protocols/perf/Dockerfile @@ -9,14 +9,10 @@ RUN --mount=type=cache,target=./target \ cargo build --release --package libp2p-perf RUN --mount=type=cache,target=./target \ - mv ./target/release/perf-server /usr/local/bin/perf-server - -RUN --mount=type=cache,target=./target \ - mv ./target/release/perf-client /usr/local/bin/perf-client + mv ./target/release/perf /usr/local/bin/perf FROM debian:bullseye-slim -COPY --from=builder /usr/local/bin/perf-server /usr/local/bin/perf-server -COPY --from=builder /usr/local/bin/perf-client /usr/local/bin/perf-client +COPY --from=builder /usr/local/bin/perf /app/perf -ENTRYPOINT [ 
"perf-server"] +ENTRYPOINT [ "/app/perf" ] diff --git a/protocols/perf/src/bin/perf-client.rs b/protocols/perf/src/bin/perf-client.rs deleted file mode 100644 index ddf3708ef5c..00000000000 --- a/protocols/perf/src/bin/perf-client.rs +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright 2023 Protocol Labs. -// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. 
- -use anyhow::{bail, Result}; -use clap::Parser; -use futures::{future::Either, StreamExt}; -use libp2p_core::{muxing::StreamMuxerBox, transport::OrTransport, upgrade, Multiaddr, Transport}; -use libp2p_dns::DnsConfig; -use libp2p_identity::PeerId; -use libp2p_perf::client::RunParams; -use libp2p_swarm::{SwarmBuilder, SwarmEvent}; -use log::info; - -#[derive(Debug, Parser)] -#[clap(name = "libp2p perf client")] -struct Opts { - #[arg(long)] - server_address: Multiaddr, -} - -#[async_std::main] -async fn main() -> Result<()> { - env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("info")).init(); - - let opts = Opts::parse(); - - info!("Initiating performance tests with {}", opts.server_address); - - // Create a random PeerId - let local_key = libp2p_identity::Keypair::generate_ed25519(); - let local_peer_id = PeerId::from(local_key.public()); - - let transport = { - let tcp = - libp2p_tcp::async_io::Transport::new(libp2p_tcp::Config::default().port_reuse(true)) - .upgrade(upgrade::Version::V1Lazy) - .authenticate( - libp2p_noise::Config::new(&local_key) - .expect("Signing libp2p-noise static DH keypair failed."), - ) - .multiplex(libp2p_yamux::Config::default()); - - let quic = { - let mut config = libp2p_quic::Config::new(&local_key); - config.support_draft_29 = true; - libp2p_quic::async_std::Transport::new(config) - }; - - let dns = DnsConfig::system(OrTransport::new(quic, tcp)) - .await - .unwrap(); - - dns.map(|either_output, _| match either_output { - Either::Left((peer_id, muxer)) => (peer_id, StreamMuxerBox::new(muxer)), - Either::Right((peer_id, muxer)) => (peer_id, StreamMuxerBox::new(muxer)), - }) - .boxed() - }; - - let mut swarm = SwarmBuilder::with_async_std_executor( - transport, - libp2p_perf::client::Behaviour::default(), - local_peer_id, - ) - .substream_upgrade_protocol_override(upgrade::Version::V1Lazy) - .build(); - - swarm.dial(opts.server_address.clone()).unwrap(); - let server_peer_id = loop { - match 
swarm.next().await.unwrap() { - SwarmEvent::ConnectionEstablished { peer_id, .. } => break peer_id, - SwarmEvent::OutgoingConnectionError { peer_id, error } => { - bail!("Outgoing connection error to {:?}: {:?}", peer_id, error); - } - e => panic!("{e:?}"), - } - }; - - info!( - "Connection to {} established. Launching benchmarks.", - opts.server_address - ); - - swarm.behaviour_mut().perf( - server_peer_id, - RunParams { - to_send: 10 * 1024 * 1024, - to_receive: 10 * 1024 * 1024, - }, - )?; - - let stats = loop { - match swarm.next().await.unwrap() { - SwarmEvent::ConnectionEstablished { - peer_id, endpoint, .. - } => { - info!("Established connection to {:?} via {:?}", peer_id, endpoint); - } - SwarmEvent::OutgoingConnectionError { peer_id, error } => { - info!("Outgoing connection error to {:?}: {:?}", peer_id, error); - } - SwarmEvent::Behaviour(libp2p_perf::client::Event { id: _, result }) => break result?, - e => panic!("{e:?}"), - } - }; - - let sent_mebibytes = stats.params.to_send as f64 / 1024.0 / 1024.0; - let sent_time = (stats.timers.write_done - stats.timers.write_start).as_secs_f64(); - let sent_bandwidth_mebibit_second = (sent_mebibytes * 8.0) / sent_time; - - let received_mebibytes = stats.params.to_receive as f64 / 1024.0 / 1024.0; - let receive_time = (stats.timers.read_done - stats.timers.write_done).as_secs_f64(); - let receive_bandwidth_mebibit_second = (received_mebibytes * 8.0) / receive_time; - - info!( - "Finished run: Sent {sent_mebibytes:.2} MiB in {sent_time:.2} s with \ - {sent_bandwidth_mebibit_second:.2} MiBit/s and received \ - {received_mebibytes:.2} MiB in {receive_time:.2} s with \ - {receive_bandwidth_mebibit_second:.2} MiBit/s", - ); - - Ok(()) -} diff --git a/protocols/perf/src/bin/perf-server.rs b/protocols/perf/src/bin/perf-server.rs deleted file mode 100644 index 9219ed85723..00000000000 --- a/protocols/perf/src/bin/perf-server.rs +++ /dev/null @@ -1,128 +0,0 @@ -// Copyright 2023 Protocol Labs. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. 
- -use clap::Parser; -use futures::{future::Either, StreamExt}; -use libp2p_core::{muxing::StreamMuxerBox, transport::OrTransport, upgrade, Transport}; -use libp2p_dns::DnsConfig; -use libp2p_identity::PeerId; -use libp2p_swarm::{SwarmBuilder, SwarmEvent}; -use log::{error, info}; - -#[derive(Debug, Parser)] -#[clap(name = "libp2p perf server")] -struct Opts {} - -#[async_std::main] -async fn main() { - env_logger::init(); - - let _opts = Opts::parse(); - - // Create a random PeerId - let local_key = libp2p_identity::Keypair::generate_ed25519(); - let local_peer_id = PeerId::from(local_key.public()); - println!("Local peer id: {local_peer_id}"); - - let transport = { - let tcp = - libp2p_tcp::async_io::Transport::new(libp2p_tcp::Config::default().port_reuse(true)) - .upgrade(upgrade::Version::V1Lazy) - .authenticate( - libp2p_noise::Config::new(&local_key) - .expect("Signing libp2p-noise static DH keypair failed."), - ) - .multiplex(libp2p_yamux::Config::default()); - - let quic = { - let mut config = libp2p_quic::Config::new(&local_key); - config.support_draft_29 = true; - libp2p_quic::async_std::Transport::new(config) - }; - - let dns = DnsConfig::system(OrTransport::new(quic, tcp)) - .await - .unwrap(); - - dns.map(|either_output, _| match either_output { - Either::Left((peer_id, muxer)) => (peer_id, StreamMuxerBox::new(muxer)), - Either::Right((peer_id, muxer)) => (peer_id, StreamMuxerBox::new(muxer)), - }) - .boxed() - }; - - let mut swarm = SwarmBuilder::with_async_std_executor( - transport, - libp2p_perf::server::Behaviour::default(), - local_peer_id, - ) - .substream_upgrade_protocol_override(upgrade::Version::V1Lazy) - .build(); - - swarm - .listen_on("/ip4/0.0.0.0/tcp/4001".parse().unwrap()) - .unwrap(); - - swarm - .listen_on("/ip4/0.0.0.0/udp/4001/quic-v1".parse().unwrap()) - .unwrap(); - - loop { - match swarm.next().await.unwrap() { - SwarmEvent::NewListenAddr { address, .. 
} => { - info!("Listening on {address}"); - } - SwarmEvent::IncomingConnection { .. } => {} - e @ SwarmEvent::IncomingConnectionError { .. } => { - error!("{e:?}"); - } - SwarmEvent::ConnectionEstablished { - peer_id, endpoint, .. - } => { - info!("Established connection to {:?} via {:?}", peer_id, endpoint); - } - SwarmEvent::ConnectionClosed { .. } => {} - SwarmEvent::Behaviour(libp2p_perf::server::Event { - remote_peer_id, - stats, - }) => { - let received_mebibytes = stats.params.received as f64 / 1024.0 / 1024.0; - let receive_time = (stats.timers.read_done - stats.timers.read_start).as_secs_f64(); - let receive_bandwidth_mebibit_second = (received_mebibytes * 8.0) / receive_time; - - let sent_mebibytes = stats.params.sent as f64 / 1024.0 / 1024.0; - let sent_time = (stats.timers.write_done - stats.timers.read_done).as_secs_f64(); - let sent_bandwidth_mebibit_second = (sent_mebibytes * 8.0) / sent_time; - - info!( - "Finished run with {}: Received {:.2} MiB in {:.2} s with {:.2} MiBit/s and sent {:.2} MiB in {:.2} s with {:.2} MiBit/s", - remote_peer_id, - received_mebibytes, - receive_time, - receive_bandwidth_mebibit_second, - sent_mebibytes, - sent_time, - sent_bandwidth_mebibit_second, - ) - } - e => panic!("{e:?}"), - } - } -} diff --git a/protocols/perf/src/bin/perf.rs b/protocols/perf/src/bin/perf.rs new file mode 100644 index 00000000000..9ac8f0a6cde --- /dev/null +++ b/protocols/perf/src/bin/perf.rs @@ -0,0 +1,294 @@ +// Copyright 2023 Protocol Labs. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +use std::{net::SocketAddr, str::FromStr}; + +use anyhow::{bail, Result}; +use clap::Parser; +use futures::StreamExt; +use instant::{Duration, Instant}; +use libp2p::core::{multiaddr::Protocol, upgrade, Multiaddr}; +use libp2p::identity::PeerId; +use libp2p::swarm::{NetworkBehaviour, Swarm, SwarmEvent}; +use libp2p::SwarmBuilder; +use libp2p_perf::{client, server}; +use libp2p_perf::{Final, Intermediate, Run, RunParams, RunUpdate}; +use serde::{Deserialize, Serialize}; +use tracing_subscriber::EnvFilter; + +#[derive(Debug, Parser)] +#[clap(name = "libp2p perf client")] +struct Opts { + #[arg(long)] + server_address: Option, + #[arg(long)] + transport: Option, + #[arg(long)] + upload_bytes: Option, + #[arg(long)] + download_bytes: Option, + + /// Run in server mode. + #[clap(long)] + run_server: bool, +} + +/// Supported transports by rust-libp2p. 
+#[derive(Clone, Debug)] +pub enum Transport { + Tcp, + QuicV1, +} + +impl FromStr for Transport { + type Err = anyhow::Error; + + fn from_str(s: &str) -> std::result::Result { + Ok(match s { + "tcp" => Self::Tcp, + "quic-v1" => Self::QuicV1, + other => bail!("unknown transport {other}"), + }) + } +} + +#[tokio::main] +async fn main() -> Result<()> { + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); + + let opts = Opts::parse(); + match opts { + Opts { + server_address: Some(server_address), + transport: None, + upload_bytes: None, + download_bytes: None, + run_server: true, + } => server(server_address).await?, + Opts { + server_address: Some(server_address), + transport: Some(transport), + upload_bytes, + download_bytes, + run_server: false, + } => { + client(server_address, transport, upload_bytes, download_bytes).await?; + } + _ => panic!("invalid command line arguments: {opts:?}"), + }; + + Ok(()) +} + +async fn server(server_address: SocketAddr) -> Result<()> { + let mut swarm = swarm::().await?; + + swarm.listen_on( + Multiaddr::empty() + .with(server_address.ip().into()) + .with(Protocol::Tcp(server_address.port())), + )?; + + swarm + .listen_on( + Multiaddr::empty() + .with(server_address.ip().into()) + .with(Protocol::Udp(server_address.port())) + .with(Protocol::QuicV1), + ) + .unwrap(); + + tokio::spawn(async move { + loop { + match swarm.next().await.unwrap() { + SwarmEvent::NewListenAddr { address, .. } => { + tracing::info!(%address, "Listening on address"); + } + SwarmEvent::IncomingConnection { .. } => {} + e @ SwarmEvent::IncomingConnectionError { .. } => { + tracing::error!("{e:?}"); + } + SwarmEvent::ConnectionEstablished { + peer_id, endpoint, .. + } => { + tracing::info!(peer=%peer_id, ?endpoint, "Established new connection"); + } + SwarmEvent::ConnectionClosed { .. } => {} + SwarmEvent::Behaviour(server::Event { .. 
}) => { + tracing::info!("Finished run",) + } + e => panic!("{e:?}"), + } + } + }) + .await + .unwrap(); + + Ok(()) +} + +async fn client( + server_address: SocketAddr, + transport: Transport, + upload_bytes: Option, + download_bytes: Option, +) -> Result<()> { + let server_address = match transport { + Transport::Tcp => Multiaddr::empty() + .with(server_address.ip().into()) + .with(Protocol::Tcp(server_address.port())), + Transport::QuicV1 => Multiaddr::empty() + .with(server_address.ip().into()) + .with(Protocol::Udp(server_address.port())) + .with(Protocol::QuicV1), + }; + let params = RunParams { + to_send: upload_bytes.unwrap(), + to_receive: download_bytes.unwrap(), + }; + let mut swarm = swarm().await?; + + tokio::spawn(async move { + tracing::info!("start benchmark: custom"); + + let start = Instant::now(); + + let server_peer_id = connect(&mut swarm, server_address.clone()).await?; + + perf(&mut swarm, server_peer_id, params).await?; + + println!( + "{}", + serde_json::to_string(&BenchmarkResult { + upload_bytes: params.to_send, + download_bytes: params.to_receive, + r#type: "final".to_string(), + time_seconds: start.elapsed().as_secs_f64(), + }) + .unwrap() + ); + + anyhow::Ok(()) + }) + .await??; + + Ok(()) +} + +#[derive(Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +struct BenchmarkResult { + r#type: String, + time_seconds: f64, + upload_bytes: usize, + download_bytes: usize, +} + +async fn swarm() -> Result> { + let swarm = SwarmBuilder::with_new_identity() + .with_tokio() + .with_tcp( + libp2p_tcp::Config::default().nodelay(true), + libp2p_tls::Config::new, + libp2p_yamux::Config::default, + )? + .with_quic() + .with_dns()? + .with_behaviour(|_| B::default())? 
+ .with_swarm_config(|cfg| { + cfg.with_substream_upgrade_protocol_override(upgrade::Version::V1Lazy) + .with_idle_connection_timeout(Duration::from_secs(60 * 5)) + }) + .build(); + + Ok(swarm) +} + +async fn connect( + swarm: &mut Swarm, + server_address: Multiaddr, +) -> Result { + let start = Instant::now(); + swarm.dial(server_address.clone()).unwrap(); + + let server_peer_id = match swarm.next().await.unwrap() { + SwarmEvent::ConnectionEstablished { peer_id, .. } => peer_id, + SwarmEvent::OutgoingConnectionError { peer_id, error, .. } => { + bail!("Outgoing connection error to {:?}: {:?}", peer_id, error); + } + e => panic!("{e:?}"), + }; + + let duration = start.elapsed(); + let duration_seconds = duration.as_secs_f64(); + + tracing::info!(elapsed_time=%format!("{duration_seconds:.4} s")); + + Ok(server_peer_id) +} + +async fn perf( + swarm: &mut Swarm, + server_peer_id: PeerId, + params: RunParams, +) -> Result { + swarm.behaviour_mut().perf(server_peer_id, params)?; + + let duration = loop { + match swarm.next().await.unwrap() { + SwarmEvent::Behaviour(client::Event { + id: _, + result: Ok(RunUpdate::Intermediate(progressed)), + }) => { + tracing::info!("{progressed}"); + + let Intermediate { + duration, + sent, + received, + } = progressed; + + println!( + "{}", + serde_json::to_string(&BenchmarkResult { + r#type: "intermediate".to_string(), + time_seconds: duration.as_secs_f64(), + upload_bytes: sent, + download_bytes: received, + }) + .unwrap() + ); + } + SwarmEvent::Behaviour(client::Event { + id: _, + result: Ok(RunUpdate::Final(Final { duration })), + }) => break duration, + e => panic!("{e:?}"), + }; + }; + + let run = Run { params, duration }; + + tracing::info!("{run}"); + + Ok(run) +} diff --git a/protocols/perf/src/client.rs b/protocols/perf/src/client.rs index c320b18ea32..c4614e979db 100644 --- a/protocols/perf/src/client.rs +++ b/protocols/perf/src/client.rs @@ -21,32 +21,11 @@ mod behaviour; mod handler; -use instant::Instant; use 
std::sync::atomic::{AtomicUsize, Ordering}; pub use behaviour::{Behaviour, Event}; - -/// Parameters for a single run, i.e. one stream, sending and receiving data. -#[derive(Debug, Clone, Copy)] -pub struct RunParams { - pub to_send: usize, - pub to_receive: usize, -} - -/// Timers for a single run, i.e. one stream, sending and receiving data. -#[derive(Debug, Clone, Copy)] -pub struct RunTimers { - pub write_start: Instant, - pub write_done: Instant, - pub read_done: Instant, -} - -/// Statistics for a single run, i.e. one stream, sending and receiving data. -#[derive(Debug)] -pub struct RunStats { - pub params: RunParams, - pub timers: RunTimers, -} +use libp2p_swarm::StreamUpgradeError; +use void::Void; static NEXT_RUN_ID: AtomicUsize = AtomicUsize::new(1); @@ -60,3 +39,11 @@ impl RunId { Self(NEXT_RUN_ID.fetch_add(1, Ordering::SeqCst)) } } + +#[derive(thiserror::Error, Debug)] +pub enum RunError { + #[error(transparent)] + Upgrade(#[from] StreamUpgradeError), + #[error("Failed to execute perf run: {0}")] + Io(#[from] std::io::Error), +} diff --git a/protocols/perf/src/client/behaviour.rs b/protocols/perf/src/client/behaviour.rs index dade91c5a7f..880bcdd9c83 100644 --- a/protocols/perf/src/client/behaviour.rs +++ b/protocols/perf/src/client/behaviour.rs @@ -28,20 +28,19 @@ use std::{ use libp2p_core::Multiaddr; use libp2p_identity::PeerId; use libp2p_swarm::{ - derive_prelude::ConnectionEstablished, ConnectionClosed, ConnectionHandlerUpgrErr, - ConnectionId, FromSwarm, NetworkBehaviour, NotifyHandler, PollParameters, THandlerInEvent, - THandlerOutEvent, ToSwarm, + derive_prelude::ConnectionEstablished, ConnectionClosed, ConnectionId, FromSwarm, + NetworkBehaviour, NotifyHandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }; -use void::Void; -use crate::client::handler::Handler; +use crate::RunParams; +use crate::{client::handler::Handler, RunUpdate}; -use super::{RunId, RunParams, RunStats}; +use super::{RunError, RunId}; #[derive(Debug)] pub struct Event { 
pub id: RunId, - pub result: Result>, + pub result: Result, } #[derive(Default)] @@ -57,9 +56,9 @@ impl Behaviour { Self::default() } - pub fn perf(&mut self, server: PeerId, params: RunParams) -> Result { + pub fn perf(&mut self, server: PeerId, params: RunParams) -> Result { if !self.connected.contains(&server) { - return Err(PerfError::NotConnected); + return Err(NotConnected {}); } let id = RunId::next(); @@ -75,14 +74,17 @@ impl Behaviour { } #[derive(thiserror::Error, Debug)] -pub enum PerfError { - #[error("Not connected to peer")] - NotConnected, +pub struct NotConnected(); + +impl std::fmt::Display for NotConnected { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "not connected to peer") + } } impl NetworkBehaviour for Behaviour { type ConnectionHandler = Handler; - type OutEvent = Event; + type ToSwarm = Event; fn handle_established_outbound_connection( &mut self, @@ -104,7 +106,7 @@ impl NetworkBehaviour for Behaviour { Ok(Handler::default()) } - fn on_swarm_event(&mut self, event: FromSwarm) { + fn on_swarm_event(&mut self, event: FromSwarm) { match event { FromSwarm::ConnectionEstablished(ConnectionEstablished { peer_id, .. 
}) => { self.connected.insert(peer_id); @@ -113,23 +115,13 @@ impl NetworkBehaviour for Behaviour { peer_id, connection_id: _, endpoint: _, - handler: _, remaining_established, }) => { if remaining_established == 0 { assert!(self.connected.remove(&peer_id)); } } - FromSwarm::AddressChange(_) - | FromSwarm::DialFailure(_) - | FromSwarm::ListenFailure(_) - | FromSwarm::NewListener(_) - | FromSwarm::NewListenAddr(_) - | FromSwarm::ExpiredListenAddr(_) - | FromSwarm::ListenerError(_) - | FromSwarm::ListenerClosed(_) - | FromSwarm::NewExternalAddr(_) - | FromSwarm::ExpiredExternalAddr(_) => {} + _ => {} } } @@ -143,11 +135,8 @@ impl NetworkBehaviour for Behaviour { .push_back(ToSwarm::GenerateEvent(Event { id, result })); } - fn poll( - &mut self, - _cx: &mut Context<'_>, - _: &mut impl PollParameters, - ) -> Poll>> { + #[tracing::instrument(level = "trace", name = "NetworkBehaviour::poll", skip(self))] + fn poll(&mut self, _: &mut Context<'_>) -> Poll>> { if let Some(event) = self.queued_events.pop_front() { return Poll::Ready(event); } diff --git a/protocols/perf/src/client/handler.rs b/protocols/perf/src/client/handler.rs index e0c9f44c886..2a2c5499fc2 100644 --- a/protocols/perf/src/client/handler.rs +++ b/protocols/perf/src/client/handler.rs @@ -21,22 +21,23 @@ use std::{ collections::VecDeque, task::{Context, Poll}, - time::{Duration, Instant}, }; -use futures::{future::BoxFuture, stream::FuturesUnordered, FutureExt, StreamExt, TryFutureExt}; +use futures::{ + stream::{BoxStream, SelectAll}, + StreamExt, +}; use libp2p_core::upgrade::{DeniedUpgrade, ReadyUpgrade}; use libp2p_swarm::{ handler::{ ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound, ListenUpgradeError, }, - ConnectionHandler, ConnectionHandlerEvent, ConnectionHandlerUpgrErr, KeepAlive, StreamProtocol, - SubstreamProtocol, + ConnectionHandler, ConnectionHandlerEvent, StreamProtocol, SubstreamProtocol, }; -use void::Void; -use super::{RunId, RunParams, RunStats}; +use 
crate::client::{RunError, RunId}; +use crate::{RunParams, RunUpdate}; #[derive(Debug)] pub struct Command { @@ -47,7 +48,7 @@ pub struct Command { #[derive(Debug)] pub struct Event { pub(crate) id: RunId, - pub(crate) result: Result>, + pub(crate) result: Result, } pub struct Handler { @@ -56,16 +57,13 @@ pub struct Handler { ConnectionHandlerEvent< ::OutboundProtocol, ::OutboundOpenInfo, - ::OutEvent, - ::Error, + ::ToBehaviour, >, >, requested_streams: VecDeque, - outbound: FuturesUnordered>>, - - keep_alive: KeepAlive, + outbound: SelectAll)>>, } impl Handler { @@ -74,7 +72,6 @@ impl Handler { queued_events: Default::default(), requested_streams: Default::default(), outbound: Default::default(), - keep_alive: KeepAlive::Yes, } } } @@ -86,9 +83,8 @@ impl Default for Handler { } impl ConnectionHandler for Handler { - type InEvent = Command; - type OutEvent = Event; - type Error = Void; + type FromBehaviour = Command; + type ToBehaviour = Event; type InboundProtocol = DeniedUpgrade; type OutboundProtocol = ReadyUpgrade; type OutboundOpenInfo = (); @@ -98,7 +94,7 @@ impl ConnectionHandler for Handler { SubstreamProtocol::new(DeniedUpgrade, ()) } - fn on_behaviour_event(&mut self, command: Self::InEvent) { + fn on_behaviour_event(&mut self, command: Self::FromBehaviour) { self.requested_streams.push_back(command); self.queued_events .push_back(ConnectionHandlerEvent::OutboundSubstreamRequest { @@ -129,78 +125,48 @@ impl ConnectionHandler for Handler { .expect("opened a stream without a pending command"); self.outbound.push( crate::protocol::send_receive(params, protocol) - .map_ok(move |timers| Event { - id, - result: Ok(RunStats { params, timers }), - }) + .map(move |result| (id, result)) .boxed(), ); } - ConnectionEvent::AddressChange(_) => {} + ConnectionEvent::AddressChange(_) + | ConnectionEvent::LocalProtocolsChange(_) + | ConnectionEvent::RemoteProtocolsChange(_) => {} ConnectionEvent::DialUpgradeError(DialUpgradeError { info: (), error }) => { let Command { 
id, .. } = self .requested_streams .pop_front() .expect("requested stream without pending command"); self.queued_events - .push_back(ConnectionHandlerEvent::Custom(Event { + .push_back(ConnectionHandlerEvent::NotifyBehaviour(Event { id, - result: Err(error), + result: Err(error.into()), })); } ConnectionEvent::ListenUpgradeError(ListenUpgradeError { info: (), error }) => { - match error { - ConnectionHandlerUpgrErr::Timeout => {} - ConnectionHandlerUpgrErr::Timer => {} - ConnectionHandlerUpgrErr::Upgrade(error) => match error { - libp2p_core::UpgradeError::Select(_) => {} - libp2p_core::UpgradeError::Apply(v) => void::unreachable(v), - }, - } + void::unreachable(error) } + _ => {} } } - fn connection_keep_alive(&self) -> KeepAlive { - self.keep_alive - } - + #[tracing::instrument(level = "trace", name = "ConnectionHandler::poll", skip(self, cx))] fn poll( &mut self, cx: &mut Context<'_>, ) -> Poll< - ConnectionHandlerEvent< - Self::OutboundProtocol, - Self::OutboundOpenInfo, - Self::OutEvent, - Self::Error, - >, + ConnectionHandlerEvent, > { - // Return queued events. 
if let Some(event) = self.queued_events.pop_front() { return Poll::Ready(event); } - while let Poll::Ready(Some(result)) = self.outbound.poll_next_unpin(cx) { - match result { - Ok(event) => return Poll::Ready(ConnectionHandlerEvent::Custom(event)), - Err(e) => { - panic!("{e:?}") - } - } - } - - if self.outbound.is_empty() { - match self.keep_alive { - KeepAlive::Yes => { - self.keep_alive = KeepAlive::Until(Instant::now() + Duration::from_secs(10)); - } - KeepAlive::Until(_) => {} - KeepAlive::No => panic!("Handler never sets KeepAlive::No."), - } - } else { - self.keep_alive = KeepAlive::Yes + if let Poll::Ready(Some((id, result))) = self.outbound.poll_next_unpin(cx) { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(Event { + id, + result: result.map_err(Into::into), + })); } Poll::Pending diff --git a/protocols/perf/src/lib.rs b/protocols/perf/src/lib.rs index aeb91ff2412..f9db96aa9d9 100644 --- a/protocols/perf/src/lib.rs +++ b/protocols/perf/src/lib.rs @@ -24,6 +24,9 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +use std::fmt::Display; + +use instant::Duration; use libp2p_swarm::StreamProtocol; pub mod client; @@ -31,3 +34,126 @@ mod protocol; pub mod server; pub const PROTOCOL_NAME: StreamProtocol = StreamProtocol::new("/perf/1.0.0"); +const RUN_TIMEOUT: Duration = Duration::from_secs(5 * 60); +const MAX_PARALLEL_RUNS_PER_CONNECTION: usize = 1_000; + +#[derive(Debug, Clone, Copy)] +pub enum RunUpdate { + Intermediate(Intermediate), + Final(Final), +} + +#[derive(Debug, Clone, Copy)] +pub struct Intermediate { + pub duration: Duration, + pub sent: usize, + pub received: usize, +} + +impl Display for Intermediate { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let Intermediate { + duration, + sent, + received, + } = self; + write!( + f, + "{:4} s uploaded {} downloaded {} ({})", + duration.as_secs_f64(), + format_bytes(*sent), + format_bytes(*received), + format_bandwidth(*duration, sent + received), + )?; + + 
Ok(()) + } +} + +#[derive(Debug, Clone, Copy)] +pub struct Final { + pub duration: RunDuration, +} + +/// Parameters for a single run, i.e. one stream, sending and receiving data. +/// +/// Property names are from the perspective of the actor. E.g. `to_send` is the amount of data to +/// send, both as the client and the server. +#[derive(Debug, Clone, Copy)] +pub struct RunParams { + pub to_send: usize, + pub to_receive: usize, +} + +/// Duration for a single run, i.e. one stream, sending and receiving data. +#[derive(Debug, Clone, Copy)] +pub struct RunDuration { + pub upload: Duration, + pub download: Duration, +} + +#[derive(Debug, Clone, Copy)] +pub struct Run { + pub params: RunParams, + pub duration: RunDuration, +} + +const KILO: f64 = 1024.0; +const MEGA: f64 = KILO * 1024.0; +const GIGA: f64 = MEGA * 1024.0; + +fn format_bytes(bytes: usize) -> String { + let bytes = bytes as f64; + if bytes >= GIGA { + format!("{:.2} GiB", bytes / GIGA) + } else if bytes >= MEGA { + format!("{:.2} MiB", bytes / MEGA) + } else if bytes >= KILO { + format!("{:.2} KiB", bytes / KILO) + } else { + format!("{} B", bytes) + } +} + +fn format_bandwidth(duration: Duration, bytes: usize) -> String { + const KILO: f64 = 1024.0; + const MEGA: f64 = KILO * 1024.0; + const GIGA: f64 = MEGA * 1024.0; + + let bandwidth = (bytes as f64 * 8.0) / duration.as_secs_f64(); + + if bandwidth >= GIGA { + format!("{:.2} Gbit/s", bandwidth / GIGA) + } else if bandwidth >= MEGA { + format!("{:.2} Mbit/s", bandwidth / MEGA) + } else if bandwidth >= KILO { + format!("{:.2} Kbit/s", bandwidth / KILO) + } else { + format!("{:.2} bit/s", bandwidth) + } +} + +impl Display for Run { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let Run { + params: RunParams { + to_send, + to_receive, + }, + duration: RunDuration { upload, download }, + } = self; + + write!( + f, + "uploaded {} in {:.4} s ({}), downloaded {} in {:.4} s ({})", + format_bytes(*to_send), + upload.as_secs_f64(), + 
format_bandwidth(*upload, *to_send), + format_bytes(*to_receive), + download.as_secs_f64(), + format_bandwidth(*download, *to_receive), + )?; + + Ok(()) + } +} diff --git a/protocols/perf/src/protocol.rs b/protocols/perf/src/protocol.rs index 7f9c5137c9a..d2d65b42303 100644 --- a/protocols/perf/src/protocol.rs +++ b/protocols/perf/src/protocol.rs @@ -18,58 +18,144 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. +use futures_timer::Delay; use instant::Instant; +use std::time::Duration; -use futures::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt}; +use futures::{ + future::{select, Either}, + AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt, FutureExt, SinkExt, Stream, StreamExt, +}; -use crate::{client, server}; +use crate::{Final, Intermediate, Run, RunDuration, RunParams, RunUpdate}; const BUF: [u8; 1024] = [0; 1024]; +const REPORT_INTERVAL: Duration = Duration::from_secs(1); + +pub(crate) fn send_receive( + params: RunParams, + stream: S, +) -> impl Stream> { + // Use a channel to simulate a generator. `send_receive_inner` can `yield` events through the + // channel. 
+ let (sender, receiver) = futures::channel::mpsc::channel(0); + let receiver = receiver.fuse(); + let inner = send_receive_inner(params, stream, sender).fuse(); + + futures::stream::select( + receiver.map(|progressed| Ok(RunUpdate::Intermediate(progressed))), + inner + .map(|finished| finished.map(RunUpdate::Final)) + .into_stream(), + ) +} -pub(crate) async fn send_receive( - params: client::RunParams, +async fn send_receive_inner( + params: RunParams, mut stream: S, -) -> Result { - let client::RunParams { + mut progress: futures::channel::mpsc::Sender, +) -> Result { + let mut delay = Delay::new(REPORT_INTERVAL); + + let RunParams { to_send, to_receive, } = params; let mut receive_buf = vec![0; 1024]; - - stream.write_all(&(to_receive as u64).to_be_bytes()).await?; + let to_receive_bytes = (to_receive as u64).to_be_bytes(); + stream.write_all(&to_receive_bytes).await?; let write_start = Instant::now(); - + let mut intermittant_start = Instant::now(); let mut sent = 0; + let mut intermittent_sent = 0; + while sent < to_send { let n = std::cmp::min(to_send - sent, BUF.len()); let buf = &BUF[..n]; - sent += stream.write(buf).await?; + let mut write = stream.write(buf); + sent += loop { + match select(&mut delay, &mut write).await { + Either::Left((_, _)) => { + delay.reset(REPORT_INTERVAL); + progress + .send(Intermediate { + duration: intermittant_start.elapsed(), + sent: sent - intermittent_sent, + received: 0, + }) + .await + .expect("receiver not to be dropped"); + intermittant_start = Instant::now(); + intermittent_sent = sent; + } + Either::Right((n, _)) => break n?, + } + } } - stream.close().await?; + loop { + match select(&mut delay, stream.close()).await { + Either::Left((_, _)) => { + delay.reset(REPORT_INTERVAL); + progress + .send(Intermediate { + duration: intermittant_start.elapsed(), + sent: sent - intermittent_sent, + received: 0, + }) + .await + .expect("receiver not to be dropped"); + intermittant_start = Instant::now(); + intermittent_sent = 
sent; + } + Either::Right((Ok(_), _)) => break, + Either::Right((Err(e), _)) => return Err(e), + } + } let write_done = Instant::now(); - let mut received = 0; + let mut intermittend_received = 0; + while received < to_receive { - received += stream.read(&mut receive_buf).await?; + let mut read = stream.read(&mut receive_buf); + received += loop { + match select(&mut delay, &mut read).await { + Either::Left((_, _)) => { + delay.reset(REPORT_INTERVAL); + progress + .send(Intermediate { + duration: intermittant_start.elapsed(), + sent: sent - intermittent_sent, + received: received - intermittend_received, + }) + .await + .expect("receiver not to be dropped"); + intermittant_start = Instant::now(); + intermittent_sent = sent; + intermittend_received = received; + } + Either::Right((n, _)) => break n?, + } + } } let read_done = Instant::now(); - Ok(client::RunTimers { - write_start, - write_done, - read_done, + Ok(Final { + duration: RunDuration { + upload: write_done.duration_since(write_start), + download: read_done.duration_since(write_done), + }, }) } pub(crate) async fn receive_send( mut stream: S, -) -> Result { +) -> Result { let to_send = { let mut buf = [0; 8]; stream.read_exact(&mut buf).await?; @@ -102,105 +188,14 @@ pub(crate) async fn receive_send( stream.close().await?; let write_done = Instant::now(); - Ok(server::RunStats { - params: server::RunParams { sent, received }, - timers: server::RunTimers { - read_start, - read_done, - write_done, + Ok(Run { + params: RunParams { + to_send: sent, + to_receive: received, + }, + duration: RunDuration { + upload: write_done.duration_since(read_done), + download: read_done.duration_since(read_start), }, }) } - -#[cfg(test)] -mod tests { - use super::*; - use futures::{executor::block_on, AsyncRead, AsyncWrite}; - use std::{ - pin::Pin, - sync::{Arc, Mutex}, - task::Poll, - }; - - #[derive(Clone)] - struct DummyStream { - inner: Arc>, - } - - struct DummyStreamInner { - read: Vec, - write: Vec, - } - - impl 
DummyStream { - fn new(read: Vec) -> Self { - Self { - inner: Arc::new(Mutex::new(DummyStreamInner { - read, - write: Vec::new(), - })), - } - } - } - - impl Unpin for DummyStream {} - - impl AsyncWrite for DummyStream { - fn poll_write( - self: std::pin::Pin<&mut Self>, - cx: &mut std::task::Context<'_>, - buf: &[u8], - ) -> std::task::Poll> { - Pin::new(&mut self.inner.lock().unwrap().write).poll_write(cx, buf) - } - - fn poll_flush( - self: std::pin::Pin<&mut Self>, - cx: &mut std::task::Context<'_>, - ) -> std::task::Poll> { - Pin::new(&mut self.inner.lock().unwrap().write).poll_flush(cx) - } - - fn poll_close( - self: std::pin::Pin<&mut Self>, - cx: &mut std::task::Context<'_>, - ) -> std::task::Poll> { - Pin::new(&mut self.inner.lock().unwrap().write).poll_close(cx) - } - } - - impl AsyncRead for DummyStream { - fn poll_read( - self: Pin<&mut Self>, - _cx: &mut std::task::Context<'_>, - buf: &mut [u8], - ) -> std::task::Poll> { - let amt = std::cmp::min(buf.len(), self.inner.lock().unwrap().read.len()); - let new = self.inner.lock().unwrap().read.split_off(amt); - - buf[..amt].copy_from_slice(self.inner.lock().unwrap().read.as_slice()); - - self.inner.lock().unwrap().read = new; - Poll::Ready(Ok(amt)) - } - } - - #[test] - fn test_client() { - let stream = DummyStream::new(vec![0]); - - block_on(send_receive( - client::RunParams { - to_send: 0, - to_receive: 0, - }, - stream.clone(), - )) - .unwrap(); - - assert_eq!( - stream.inner.lock().unwrap().write, - 0u64.to_be_bytes().to_vec() - ); - } -} diff --git a/protocols/perf/src/server.rs b/protocols/perf/src/server.rs index fd0643a0079..9671b43878b 100644 --- a/protocols/perf/src/server.rs +++ b/protocols/perf/src/server.rs @@ -21,28 +21,4 @@ mod behaviour; mod handler; -use instant::Instant; - pub use behaviour::{Behaviour, Event}; - -/// Parameters for a single run, i.e. one stream, sending and receiving data. 
-#[derive(Debug, Clone, Copy)] -pub struct RunParams { - pub sent: usize, - pub received: usize, -} - -/// Timers for a single run, i.e. one stream, sending and receiving data. -#[derive(Debug, Clone, Copy)] -pub struct RunTimers { - pub read_start: Instant, - pub read_done: Instant, - pub write_done: Instant, -} - -/// Statistics for a single run, i.e. one stream, sending and receiving data. -#[derive(Debug)] -pub struct RunStats { - pub params: RunParams, - pub timers: RunTimers, -} diff --git a/protocols/perf/src/server/behaviour.rs b/protocols/perf/src/server/behaviour.rs index 5d63475c999..da24d763606 100644 --- a/protocols/perf/src/server/behaviour.rs +++ b/protocols/perf/src/server/behaviour.rs @@ -27,18 +27,16 @@ use std::{ use libp2p_identity::PeerId; use libp2p_swarm::{ - ConnectionId, FromSwarm, NetworkBehaviour, PollParameters, THandlerInEvent, THandlerOutEvent, - ToSwarm, + ConnectionId, FromSwarm, NetworkBehaviour, THandlerInEvent, THandlerOutEvent, ToSwarm, }; use crate::server::handler::Handler; - -use super::RunStats; +use crate::Run; #[derive(Debug)] pub struct Event { pub remote_peer_id: PeerId, - pub stats: RunStats, + pub stats: Run, } #[derive(Default)] @@ -55,7 +53,7 @@ impl Behaviour { impl NetworkBehaviour for Behaviour { type ConnectionHandler = Handler; - type OutEvent = Event; + type ToSwarm = Event; fn handle_established_inbound_connection( &mut self, @@ -77,22 +75,7 @@ impl NetworkBehaviour for Behaviour { Ok(Handler::default()) } - fn on_swarm_event(&mut self, event: FromSwarm) { - match event { - FromSwarm::ConnectionEstablished(_) => {} - FromSwarm::ConnectionClosed(_) => {} - FromSwarm::AddressChange(_) => {} - FromSwarm::DialFailure(_) => {} - FromSwarm::ListenFailure(_) => {} - FromSwarm::NewListener(_) => {} - FromSwarm::NewListenAddr(_) => {} - FromSwarm::ExpiredListenAddr(_) => {} - FromSwarm::ListenerError(_) => {} - FromSwarm::ListenerClosed(_) => {} - FromSwarm::NewExternalAddr(_) => {} - FromSwarm::ExpiredExternalAddr(_) 
=> {} - } - } + fn on_swarm_event(&mut self, _event: FromSwarm) {} fn on_connection_handler_event( &mut self, @@ -106,11 +89,8 @@ impl NetworkBehaviour for Behaviour { })) } - fn poll( - &mut self, - _cx: &mut Context<'_>, - _: &mut impl PollParameters, - ) -> Poll>> { + #[tracing::instrument(level = "trace", name = "NetworkBehaviour::poll", skip(self))] + fn poll(&mut self, _: &mut Context<'_>) -> Poll>> { if let Some(event) = self.queued_events.pop_front() { return Poll::Ready(event); } diff --git a/protocols/perf/src/server/handler.rs b/protocols/perf/src/server/handler.rs index 95e93dd171a..ddfe8f881e5 100644 --- a/protocols/perf/src/server/handler.rs +++ b/protocols/perf/src/server/handler.rs @@ -18,41 +18,38 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use std::{ - task::{Context, Poll}, - time::{Duration, Instant}, -}; +use std::task::{Context, Poll}; -use futures::{future::BoxFuture, stream::FuturesUnordered, FutureExt, StreamExt}; +use futures::FutureExt; use libp2p_core::upgrade::{DeniedUpgrade, ReadyUpgrade}; use libp2p_swarm::{ handler::{ ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound, ListenUpgradeError, }, - ConnectionHandler, ConnectionHandlerEvent, ConnectionHandlerUpgrErr, KeepAlive, StreamProtocol, - SubstreamProtocol, + ConnectionHandler, ConnectionHandlerEvent, StreamProtocol, SubstreamProtocol, }; -use log::error; +use tracing::error; use void::Void; -use super::RunStats; +use crate::Run; #[derive(Debug)] pub struct Event { - pub stats: RunStats, + pub stats: Run, } pub struct Handler { - inbound: FuturesUnordered>>, - keep_alive: KeepAlive, + inbound: futures_bounded::FuturesSet>, } impl Handler { pub fn new() -> Self { Self { - inbound: Default::default(), - keep_alive: KeepAlive::Yes, + inbound: futures_bounded::FuturesSet::new( + crate::RUN_TIMEOUT, + crate::MAX_PARALLEL_RUNS_PER_CONNECTION, + ), } } } @@ -64,9 +61,8 @@ impl Default for Handler { 
} impl ConnectionHandler for Handler { - type InEvent = Void; - type OutEvent = Event; - type Error = Void; + type FromBehaviour = Void; + type ToBehaviour = Event; type InboundProtocol = ReadyUpgrade; type OutboundProtocol = DeniedUpgrade; type OutboundOpenInfo = Void; @@ -76,7 +72,7 @@ impl ConnectionHandler for Handler { SubstreamProtocol::new(ReadyUpgrade::new(crate::PROTOCOL_NAME), ()) } - fn on_behaviour_event(&mut self, v: Self::InEvent) { + fn on_behaviour_event(&mut self, v: Self::FromBehaviour) { void::unreachable(v) } @@ -94,8 +90,13 @@ impl ConnectionHandler for Handler { protocol, info: _, }) => { - self.inbound - .push(crate::protocol::receive_send(protocol).boxed()); + if self + .inbound + .try_push(crate::protocol::receive_send(protocol).boxed()) + .is_err() + { + tracing::warn!("Dropping inbound stream because we are at capacity"); + } } ConnectionEvent::FullyNegotiatedOutbound(FullyNegotiatedOutbound { info, .. }) => { void::unreachable(info) @@ -104,56 +105,40 @@ impl ConnectionHandler for Handler { ConnectionEvent::DialUpgradeError(DialUpgradeError { info, .. 
}) => { void::unreachable(info) } - ConnectionEvent::AddressChange(_) => {} + ConnectionEvent::AddressChange(_) + | ConnectionEvent::LocalProtocolsChange(_) + | ConnectionEvent::RemoteProtocolsChange(_) => {} ConnectionEvent::ListenUpgradeError(ListenUpgradeError { info: (), error }) => { - match error { - ConnectionHandlerUpgrErr::Timeout => {} - ConnectionHandlerUpgrErr::Timer => {} - ConnectionHandlerUpgrErr::Upgrade(error) => match error { - libp2p_core::UpgradeError::Select(_) => {} - libp2p_core::UpgradeError::Apply(v) => void::unreachable(v), - }, - } + void::unreachable(error) } + _ => {} } } - fn connection_keep_alive(&self) -> KeepAlive { - self.keep_alive - } - + #[tracing::instrument(level = "trace", name = "ConnectionHandler::poll", skip(self, cx))] fn poll( &mut self, cx: &mut Context<'_>, ) -> Poll< - ConnectionHandlerEvent< - Self::OutboundProtocol, - Self::OutboundOpenInfo, - Self::OutEvent, - Self::Error, - >, + ConnectionHandlerEvent, > { - while let Poll::Ready(Some(result)) = self.inbound.poll_next_unpin(cx) { - match result { - Ok(stats) => return Poll::Ready(ConnectionHandlerEvent::Custom(Event { stats })), - Err(e) => { - error!("{e:?}") + loop { + match self.inbound.poll_unpin(cx) { + Poll::Ready(Ok(Ok(stats))) => { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(Event { stats })) } - } - } - - if self.inbound.is_empty() { - match self.keep_alive { - KeepAlive::Yes => { - self.keep_alive = KeepAlive::Until(Instant::now() + Duration::from_secs(10)); + Poll::Ready(Ok(Err(e))) => { + error!("{e:?}"); + continue; } - KeepAlive::Until(_) => {} - KeepAlive::No => panic!("Handler never sets KeepAlive::No."), + Poll::Ready(Err(e @ futures_bounded::Timeout { .. 
})) => { + error!("inbound perf request timed out: {e}"); + continue; + } + Poll::Pending => {} } - } else { - self.keep_alive = KeepAlive::Yes - } - Poll::Pending + return Poll::Pending; + } } } diff --git a/protocols/perf/tests/lib.rs b/protocols/perf/tests/lib.rs index 1d93ce3ee8d..017d475befd 100644 --- a/protocols/perf/tests/lib.rs +++ b/protocols/perf/tests/lib.rs @@ -19,24 +19,27 @@ // DEALINGS IN THE SOFTWARE. use libp2p_perf::{ - client::{self, RunParams}, - server, + client::{self}, + server, RunParams, }; use libp2p_swarm::{Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt; +use tracing_subscriber::EnvFilter; -#[async_std::test] +#[tokio::test] async fn perf() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let mut server = Swarm::new_ephemeral(|_| server::Behaviour::new()); let server_peer_id = *server.local_peer_id(); let mut client = Swarm::new_ephemeral(|_| client::Behaviour::new()); - server.listen().await; + server.listen().with_memory_addr_external().await; client.connect(&mut server).await; - async_std::task::spawn(server.loop_on_next()); + tokio::task::spawn(server.loop_on_next()); client .behaviour_mut() @@ -53,7 +56,7 @@ async fn perf() { .wait(|e| match e { SwarmEvent::IncomingConnection { .. } => panic!(), SwarmEvent::ConnectionEstablished { .. } => None, - SwarmEvent::Dialing(_) => None, + SwarmEvent::Dialing { .. } => None, SwarmEvent::Behaviour(client::Event { result, .. }) => Some(result), e => panic!("{e:?}"), }) diff --git a/protocols/ping/CHANGELOG.md b/protocols/ping/CHANGELOG.md index 06610c840ff..33e0139b996 100644 --- a/protocols/ping/CHANGELOG.md +++ b/protocols/ping/CHANGELOG.md @@ -1,9 +1,29 @@ -## 0.43.0 - unreleased +## 0.44.0 + + +## 0.43.1 + +- Honor ping interval in case of errors. + Previously, we would immediately open another ping stream if the current one failed. + See [PR 4423]. 
+ +[PR 4423]: https://github.com/libp2p/rust-libp2p/pull/4423 + +## 0.43.0 - Raise MSRV to 1.65. See [PR 3715]. +- Remove deprecated items. See [PR 3702]. + +- Don't close connections on ping failures. + To restore the previous behaviour, users should call `Swarm::close_connection` upon receiving a `ping::Event` with a `ping::Failure`. + This also removes the `max_failures` config option. + See [PR 3947]. + [PR 3715]: https://github.com/libp2p/rust-libp2p/pull/3715 +[PR 3702]: https://github.com/libp2p/rust-libp2p/pull/3702 +[PR 3947]: https://github.com/libp2p/rust-libp2p/pull/3947 ## 0.42.0 diff --git a/protocols/ping/Cargo.toml b/protocols/ping/Cargo.toml index 573a1170e86..a4d9259e9aa 100644 --- a/protocols/ping/Cargo.toml +++ b/protocols/ping/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-ping" edition = "2021" rust-version = { workspace = true } description = "Ping protocol for libp2p" -version = "0.43.0" +version = "0.44.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -11,23 +11,23 @@ keywords = ["peer-to-peer", "libp2p", "networking"] categories = ["network-programming", "asynchronous"] [dependencies] -either = "1.8.0" -futures = "0.3.28" +either = "1.9.0" +futures = "0.3.30" futures-timer = "3.0.2" -instant = "0.1.11" +instant = "0.1.12" libp2p-core = { workspace = true } libp2p-swarm = { workspace = true } libp2p-identity = { workspace = true } -log = "0.4.1" rand = "0.8" +tracing = "0.1.37" void = "1.0" [dev-dependencies] async-std = "1.6.2" -env_logger = "0.10.0" libp2p-swarm = { workspace = true, features = ["macros"] } -libp2p-swarm-test = { workspace = true } +libp2p-swarm-test = { path = "../../swarm-test" } quickcheck = { workspace = true } +tracing-subscriber = { version = "0.3", features = ["env-filter"] } # Passing arguments to the docsrs builder in order to properly document cfg's. 
# More information: https://docs.rs/about/builds#cross-compiling @@ -35,3 +35,6 @@ quickcheck = { workspace = true } all-features = true rustdoc-args = ["--cfg", "docsrs"] rustc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true diff --git a/protocols/ping/src/handler.rs b/protocols/ping/src/handler.rs index 1f6ca00dec7..5e6fc2cd2cf 100644 --- a/protocols/ping/src/handler.rs +++ b/protocols/ping/src/handler.rs @@ -19,23 +19,21 @@ // DEALINGS IN THE SOFTWARE. use crate::{protocol, PROTOCOL_NAME}; -use futures::future::BoxFuture; +use futures::future::{BoxFuture, Either}; use futures::prelude::*; use futures_timer::Delay; use libp2p_core::upgrade::ReadyUpgrade; -use libp2p_core::{upgrade::NegotiationError, UpgradeError}; use libp2p_swarm::handler::{ ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound, }; use libp2p_swarm::{ - ConnectionHandler, ConnectionHandlerEvent, ConnectionHandlerUpgrErr, KeepAlive, - NegotiatedSubstream, StreamProtocol, SubstreamProtocol, + ConnectionHandler, ConnectionHandlerEvent, Stream, StreamProtocol, StreamUpgradeError, + SubstreamProtocol, }; use std::collections::VecDeque; use std::{ error::Error, fmt, io, - num::NonZeroU32, task::{Context, Poll}, time::Duration, }; @@ -46,16 +44,8 @@ use void::Void; pub struct Config { /// The timeout of an outbound ping. timeout: Duration, - /// The duration between the last successful outbound or inbound ping - /// and the next outbound ping. + /// The duration between outbound pings. interval: Duration, - /// The maximum number of failed outbound pings before the associated - /// connection is deemed unhealthy, indicating to the `Swarm` that it - /// should be closed. - max_failures: NonZeroU32, - /// Whether the connection should generally be kept alive unless - /// `max_failures` occur. 
- keep_alive: bool, } impl Config { @@ -63,25 +53,16 @@ impl Config { /// /// * [`Config::with_interval`] 15s /// * [`Config::with_timeout`] 20s - /// * [`Config::with_max_failures`] 1 - /// * [`Config::with_keep_alive`] false /// /// These settings have the following effect: /// /// * A ping is sent every 15 seconds on a healthy connection. /// * Every ping sent must yield a response within 20 seconds in order to /// be successful. - /// * A single ping failure is sufficient for the connection to be subject - /// to being closed. - /// * The connection may be closed at any time as far as the ping protocol - /// is concerned, i.e. the ping protocol itself does not keep the - /// connection alive. pub fn new() -> Self { Self { timeout: Duration::from_secs(20), interval: Duration::from_secs(15), - max_failures: NonZeroU32::new(1).expect("1 != 0"), - keep_alive: false, } } @@ -96,32 +77,6 @@ impl Config { self.interval = d; self } - - /// Sets the maximum number of consecutive ping failures upon which the remote - /// peer is considered unreachable and the connection closed. - pub fn with_max_failures(mut self, n: NonZeroU32) -> Self { - self.max_failures = n; - self - } - - /// Sets whether the ping protocol itself should keep the connection alive, - /// apart from the maximum allowed failures. - /// - /// By default, the ping protocol itself allows the connection to be closed - /// at any time, i.e. in the absence of ping failures the connection lifetime - /// is determined by other protocol handlers. - /// - /// If the maximum number of allowed ping failures is reached, the - /// connection is always terminated as a result of [`ConnectionHandler::poll`] - /// returning an error, regardless of the keep-alive setting. - #[deprecated( - since = "0.40.0", - note = "Use `libp2p::swarm::behaviour::KeepAlive` if you need to keep connections alive unconditionally." 
- )] - pub fn with_keep_alive(mut self, b: bool) -> Self { - self.keep_alive = b; - self - } } impl Default for Config { @@ -130,17 +85,6 @@ impl Default for Config { } } -/// The successful result of processing an inbound or outbound ping. -#[derive(Debug)] -pub enum Success { - /// Received a ping and sent back a pong. - Pong, - /// Sent a ping and received back a pong. - /// - /// Includes the round-trip time. - Ping { rtt: Duration }, -} - /// An outbound ping failure. #[derive(Debug)] pub enum Failure { @@ -155,6 +99,12 @@ pub enum Failure { }, } +impl Failure { + fn other(e: impl std::error::Error + Send + 'static) -> Self { + Self::Other { error: Box::new(e) } + } +} + impl fmt::Display for Failure { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { @@ -177,14 +127,11 @@ impl Error for Failure { /// Protocol handler that handles pinging the remote at a regular period /// and answering ping queries. -/// -/// If the remote doesn't respond, produces an error that closes the connection. pub struct Handler { /// Configuration options. config: Config, - /// The timer used for the delay to the next ping as well as - /// the ping timeout. - timer: Delay, + /// The timer used for the delay to the next ping. + interval: Delay, /// Outbound ping failures that are pending to be processed by `poll()`. pending_errors: VecDeque, /// The number of consecutive ping failures that occurred. @@ -219,7 +166,7 @@ impl Handler { pub fn new(config: Config) -> Self { Handler { config, - timer: Delay::new(Duration::new(0, 0)), + interval: Delay::new(Duration::new(0, 0)), pending_errors: VecDeque::with_capacity(2), failures: 0, outbound: None, @@ -238,15 +185,21 @@ impl Handler { self.outbound = None; // Request a new substream on the next `poll`. 
let error = match error { - ConnectionHandlerUpgrErr::Upgrade(UpgradeError::Select(NegotiationError::Failed)) => { + StreamUpgradeError::NegotiationFailed => { debug_assert_eq!(self.state, State::Active); self.state = State::Inactive { reported: false }; return; } // Note: This timeout only covers protocol negotiation. - ConnectionHandlerUpgrErr::Timeout => Failure::Timeout, - e => Failure::Other { error: Box::new(e) }, + StreamUpgradeError::Timeout => Failure::Other { + error: Box::new(std::io::Error::new( + std::io::ErrorKind::TimedOut, + "ping protocol negotiation timed out", + )), + }, + StreamUpgradeError::Apply(e) => void::unreachable(e), + StreamUpgradeError::Io(e) => Failure::Other { error: Box::new(e) }, }; self.pending_errors.push_front(error); @@ -254,9 +207,8 @@ impl Handler { } impl ConnectionHandler for Handler { - type InEvent = Void; - type OutEvent = crate::Result; - type Error = Failure; + type FromBehaviour = Void; + type ToBehaviour = Result; type InboundProtocol = ReadyUpgrade; type OutboundProtocol = ReadyUpgrade; type OutboundOpenInfo = (); @@ -268,18 +220,11 @@ impl ConnectionHandler for Handler { fn on_behaviour_event(&mut self, _: Void) {} - fn connection_keep_alive(&self) -> KeepAlive { - if self.config.keep_alive { - KeepAlive::Yes - } else { - KeepAlive::No - } - } - + #[tracing::instrument(level = "trace", name = "ConnectionHandler::poll", skip(self, cx))] fn poll( &mut self, cx: &mut Context<'_>, - ) -> Poll, (), crate::Result, Self::Error>> + ) -> Poll, (), Result>> { match self.state { State::Inactive { reported: true } => { @@ -287,7 +232,9 @@ impl ConnectionHandler for Handler { } State::Inactive { reported: false } => { self.state = State::Inactive { reported: true }; - return Poll::Ready(ConnectionHandlerEvent::Custom(Err(Failure::Unsupported))); + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(Err( + Failure::Unsupported, + ))); } State::Active => {} } @@ -297,13 +244,14 @@ impl ConnectionHandler for Handler { match 
fut.poll_unpin(cx) { Poll::Pending => {} Poll::Ready(Err(e)) => { - log::debug!("Inbound ping error: {:?}", e); + tracing::debug!("Inbound ping error: {:?}", e); self.inbound = None; } Poll::Ready(Ok(stream)) => { + tracing::trace!("answered inbound ping from peer"); + // A ping from a remote peer has been answered, wait for the next. self.inbound = Some(protocol::recv_ping(stream).boxed()); - return Poll::Ready(ConnectionHandlerEvent::Custom(Ok(Success::Pong))); } } } @@ -311,24 +259,17 @@ impl ConnectionHandler for Handler { loop { // Check for outbound ping failures. if let Some(error) = self.pending_errors.pop_back() { - log::debug!("Ping failure: {:?}", error); + tracing::debug!("Ping failure: {:?}", error); self.failures += 1; - // Note: For backward-compatibility, with configured - // `max_failures == 1`, the first failure is always "free" - // and silent. This allows peers who still use a new substream + // Note: For backward-compatibility the first failure is always "free" + // and silent. This allows peers who use a new substream // for each ping to have successful ping exchanges with peers // that use a single substream, since every successful ping - // resets `failures` to `0`, while at the same time emitting - // events only for `max_failures - 1` failures, as before. - if self.failures > 1 || self.config.max_failures.get() > 1 { - if self.failures >= self.config.max_failures.get() { - log::debug!("Too many failures ({}). Closing connection.", self.failures); - return Poll::Ready(ConnectionHandlerEvent::Close(error)); - } - - return Poll::Ready(ConnectionHandlerEvent::Custom(Err(error))); + // resets `failures` to `0`. 
+ if self.failures > 1 { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(Err(error))); } } @@ -336,49 +277,46 @@ impl ConnectionHandler for Handler { match self.outbound.take() { Some(OutboundState::Ping(mut ping)) => match ping.poll_unpin(cx) { Poll::Pending => { - if self.timer.poll_unpin(cx).is_ready() { - self.pending_errors.push_front(Failure::Timeout); - } else { - self.outbound = Some(OutboundState::Ping(ping)); - break; - } + self.outbound = Some(OutboundState::Ping(ping)); + break; } Poll::Ready(Ok((stream, rtt))) => { + tracing::debug!(?rtt, "ping succeeded"); self.failures = 0; - self.timer.reset(self.config.interval); + self.interval.reset(self.config.interval); self.outbound = Some(OutboundState::Idle(stream)); - return Poll::Ready(ConnectionHandlerEvent::Custom(Ok(Success::Ping { - rtt, - }))); + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(Ok(rtt))); } Poll::Ready(Err(e)) => { - self.pending_errors - .push_front(Failure::Other { error: Box::new(e) }); + self.interval.reset(self.config.interval); + self.pending_errors.push_front(e); } }, - Some(OutboundState::Idle(stream)) => match self.timer.poll_unpin(cx) { + Some(OutboundState::Idle(stream)) => match self.interval.poll_unpin(cx) { Poll::Pending => { self.outbound = Some(OutboundState::Idle(stream)); break; } Poll::Ready(()) => { - self.timer.reset(self.config.timeout); - self.outbound = - Some(OutboundState::Ping(protocol::send_ping(stream).boxed())); + self.outbound = Some(OutboundState::Ping( + send_ping(stream, self.config.timeout).boxed(), + )); } }, Some(OutboundState::OpenStream) => { self.outbound = Some(OutboundState::OpenStream); break; } - None => { - self.outbound = Some(OutboundState::OpenStream); - let protocol = SubstreamProtocol::new(ReadyUpgrade::new(PROTOCOL_NAME), ()) - .with_timeout(self.config.timeout); - return Poll::Ready(ConnectionHandlerEvent::OutboundSubstreamRequest { - protocol, - }); - } + None => match self.interval.poll_unpin(cx) { + 
Poll::Pending => break, + Poll::Ready(()) => { + self.outbound = Some(OutboundState::OpenStream); + let protocol = SubstreamProtocol::new(ReadyUpgrade::new(PROTOCOL_NAME), ()); + return Poll::Ready(ConnectionHandlerEvent::OutboundSubstreamRequest { + protocol, + }); + } + }, } } @@ -396,35 +334,50 @@ impl ConnectionHandler for Handler { ) { match event { ConnectionEvent::FullyNegotiatedInbound(FullyNegotiatedInbound { - protocol: stream, + protocol: mut stream, .. }) => { + stream.ignore_for_keep_alive(); self.inbound = Some(protocol::recv_ping(stream).boxed()); } ConnectionEvent::FullyNegotiatedOutbound(FullyNegotiatedOutbound { - protocol: stream, + protocol: mut stream, .. }) => { - self.timer.reset(self.config.timeout); - self.outbound = Some(OutboundState::Ping(protocol::send_ping(stream).boxed())); + stream.ignore_for_keep_alive(); + self.outbound = Some(OutboundState::Ping( + send_ping(stream, self.config.timeout).boxed(), + )); } ConnectionEvent::DialUpgradeError(dial_upgrade_error) => { self.on_dial_upgrade_error(dial_upgrade_error) } - ConnectionEvent::AddressChange(_) | ConnectionEvent::ListenUpgradeError(_) => {} + _ => {} } } } -type PingFuture = BoxFuture<'static, Result<(NegotiatedSubstream, Duration), io::Error>>; -type PongFuture = BoxFuture<'static, Result>; +type PingFuture = BoxFuture<'static, Result<(Stream, Duration), Failure>>; +type PongFuture = BoxFuture<'static, Result>; /// The current state w.r.t. outbound pings. enum OutboundState { /// A new substream is being negotiated for the ping protocol. OpenStream, /// The substream is idle, waiting to send the next ping. - Idle(NegotiatedSubstream), + Idle(Stream), /// A ping is being sent and the response awaited. Ping(PingFuture), } + +/// A wrapper around [`protocol::send_ping`] that enforces a time out. 
+async fn send_ping(stream: Stream, timeout: Duration) -> Result<(Stream, Duration), Failure> { + let ping = protocol::send_ping(stream); + futures::pin_mut!(ping); + + match future::select(ping, Delay::new(timeout)).await { + Either::Left((Ok((stream, rtt)), _)) => Ok((stream, rtt)), + Either::Left((Err(e), _)) => Err(Failure::other(e)), + Either::Right(((), _)) => Err(Failure::Timeout), + } +} diff --git a/protocols/ping/src/lib.rs b/protocols/ping/src/lib.rs index 23fe2ba600d..5eaa6d4952a 100644 --- a/protocols/ping/src/lib.rs +++ b/protocols/ping/src/lib.rs @@ -26,16 +26,21 @@ //! //! # Usage //! -//! The [`Behaviour`] struct implements the [`NetworkBehaviour`] trait. When used with a [`Swarm`], -//! it will respond to inbound ping requests and as necessary periodically send outbound -//! ping requests on every established connection. If a configurable number of consecutive -//! pings fail, the connection will be closed. +//! The [`Behaviour`] struct implements the [`NetworkBehaviour`] trait. +//! It will respond to inbound ping requests and periodically send outbound ping requests on every established connection. //! -//! The [`Behaviour`] network behaviour produces [`Event`]s, which may be consumed from the [`Swarm`] -//! by an application, e.g. to collect statistics. +//! It is up to the user to implement a health-check / connection management policy based on the ping protocol. //! -//! > **Note**: The ping protocol does not keep otherwise idle connections alive -//! > by default, see [`Config::with_keep_alive`] for changing this behaviour. +//! For example: +//! +//! - Disconnect from peers with an RTT > 200ms +//! - Disconnect from peers which don't support the ping protocol +//! - Disconnect from peers upon the first ping failure +//! +//! Users should inspect emitted [`Event`]s and call APIs on [`Swarm`]: +//! +//! - [`Swarm::close_connection`](libp2p_swarm::Swarm::close_connection) to close a specific connection +//! 
- [`Swarm::disconnect_peer_id`](libp2p_swarm::Swarm::disconnect_peer_id) to close all connections to a peer //! //! [`Swarm`]: libp2p_swarm::Swarm //! [`Transport`]: libp2p_core::Transport @@ -46,40 +51,20 @@ mod handler; mod protocol; use handler::Handler; -pub use handler::{Config, Failure, Success}; use libp2p_core::{Endpoint, Multiaddr}; use libp2p_identity::PeerId; use libp2p_swarm::{ - behaviour::FromSwarm, ConnectionDenied, ConnectionId, NetworkBehaviour, PollParameters, - THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, + behaviour::FromSwarm, ConnectionDenied, ConnectionId, NetworkBehaviour, THandler, + THandlerInEvent, THandlerOutEvent, ToSwarm, }; +use std::time::Duration; use std::{ collections::VecDeque, task::{Context, Poll}, }; -#[deprecated(since = "0.39.1", note = "Use libp2p::ping::Config instead.")] -pub type PingConfig = Config; - -#[deprecated(since = "0.39.1", note = "Use libp2p::ping::Event instead.")] -pub type PingEvent = Event; - -#[deprecated(since = "0.39.1", note = "Use libp2p::ping::Success instead.")] -pub type PingSuccess = Success; - -#[deprecated(since = "0.39.1", note = "Use libp2p::ping::Failure instead.")] -pub type PingFailure = Failure; - -#[deprecated(since = "0.39.1", note = "Use libp2p::ping::Result instead.")] -pub type PingResult = Result; - -#[deprecated(since = "0.39.1", note = "Use libp2p::ping::Behaviour instead.")] -pub type Ping = Behaviour; - pub use self::protocol::PROTOCOL_NAME; - -/// The result of an inbound or outbound ping. -pub type Result = std::result::Result; +pub use handler::{Config, Failure}; /// A [`NetworkBehaviour`] that responds to inbound pings and /// periodically sends outbound pings on every established connection. @@ -97,8 +82,10 @@ pub struct Behaviour { pub struct Event { /// The peer ID of the remote. pub peer: PeerId, + /// The connection the ping was executed on. + pub connection: ConnectionId, /// The result of an inbound or outbound ping. 
- pub result: Result, + pub result: Result, } impl Behaviour { @@ -119,7 +106,7 @@ impl Default for Behaviour { impl NetworkBehaviour for Behaviour { type ConnectionHandler = Handler; - type OutEvent = Event; + type ToSwarm = Event; fn handle_established_inbound_connection( &mut self, @@ -127,7 +114,7 @@ impl NetworkBehaviour for Behaviour { _: PeerId, _: &Multiaddr, _: &Multiaddr, - ) -> std::result::Result, ConnectionDenied> { + ) -> Result, ConnectionDenied> { Ok(Handler::new(self.config.clone())) } @@ -137,56 +124,31 @@ impl NetworkBehaviour for Behaviour { _: PeerId, _: &Multiaddr, _: Endpoint, - ) -> std::result::Result, ConnectionDenied> { + ) -> Result, ConnectionDenied> { Ok(Handler::new(self.config.clone())) } fn on_connection_handler_event( &mut self, peer: PeerId, - _: ConnectionId, + connection: ConnectionId, result: THandlerOutEvent, ) { - self.events.push_front(Event { peer, result }) + self.events.push_front(Event { + peer, + connection, + result, + }) } - fn poll( - &mut self, - _: &mut Context<'_>, - _: &mut impl PollParameters, - ) -> Poll>> { + #[tracing::instrument(level = "trace", name = "NetworkBehaviour::poll", skip(self))] + fn poll(&mut self, _: &mut Context<'_>) -> Poll>> { if let Some(e) = self.events.pop_back() { - let Event { result, peer } = &e; - - match result { - Ok(Success::Ping { .. 
}) => log::debug!("Ping sent to {:?}", peer), - Ok(Success::Pong) => log::debug!("Ping received from {:?}", peer), - _ => {} - } - Poll::Ready(ToSwarm::GenerateEvent(e)) } else { Poll::Pending } } - fn on_swarm_event( - &mut self, - event: libp2p_swarm::behaviour::FromSwarm, - ) { - match event { - FromSwarm::ConnectionEstablished(_) - | FromSwarm::ConnectionClosed(_) - | FromSwarm::AddressChange(_) - | FromSwarm::DialFailure(_) - | FromSwarm::ListenFailure(_) - | FromSwarm::NewListener(_) - | FromSwarm::NewListenAddr(_) - | FromSwarm::ExpiredListenAddr(_) - | FromSwarm::ListenerError(_) - | FromSwarm::ListenerClosed(_) - | FromSwarm::NewExternalAddr(_) - | FromSwarm::ExpiredExternalAddr(_) => {} - } - } + fn on_swarm_event(&mut self, _event: FromSwarm) {} } diff --git a/protocols/ping/src/protocol.rs b/protocols/ping/src/protocol.rs index 34f816522d9..28549e1c198 100644 --- a/protocols/ping/src/protocol.rs +++ b/protocols/ping/src/protocol.rs @@ -35,8 +35,7 @@ pub const PROTOCOL_NAME: StreamProtocol = StreamProtocol::new("/ipfs/ping/1.0.0" /// /// At most a single inbound and outbound substream is kept open at /// any time. In case of a ping timeout or another error on a substream, the -/// substream is dropped. If a configurable number of consecutive -/// outbound pings fail, the connection is closed. +/// substream is dropped. /// /// Successful pings report the round-trip time. 
/// @@ -88,7 +87,7 @@ mod tests { use futures::StreamExt; use libp2p_core::{ multiaddr::multiaddr, - transport::{memory::MemoryTransport, Transport}, + transport::{memory::MemoryTransport, ListenerId, Transport}, }; use rand::{thread_rng, Rng}; use std::time::Duration; @@ -97,7 +96,7 @@ mod tests { fn ping_pong() { let mem_addr = multiaddr![Memory(thread_rng().gen::())]; let mut transport = MemoryTransport::new().boxed(); - transport.listen_on(mem_addr).unwrap(); + transport.listen_on(ListenerId::next(), mem_addr).unwrap(); let listener_addr = transport .select_next_some() diff --git a/protocols/ping/tests/ping.rs b/protocols/ping/tests/ping.rs index 4616b4fc81b..3ca469f16a8 100644 --- a/protocols/ping/tests/ping.rs +++ b/protocols/ping/tests/ping.rs @@ -20,10 +20,9 @@ //! Integration tests for the `Ping` network behaviour. -use futures::prelude::*; use libp2p_ping as ping; -use libp2p_swarm::keep_alive; -use libp2p_swarm::{NetworkBehaviour, Swarm, SwarmEvent}; +use libp2p_swarm::dummy; +use libp2p_swarm::{Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt; use quickcheck::*; use std::{num::NonZeroU8, time::Duration}; @@ -33,18 +32,16 @@ fn ping_pong() { fn prop(count: NonZeroU8) { let cfg = ping::Config::new().with_interval(Duration::from_millis(10)); - let mut swarm1 = Swarm::new_ephemeral(|_| Behaviour::new(cfg.clone())); - let mut swarm2 = Swarm::new_ephemeral(|_| Behaviour::new(cfg.clone())); + let mut swarm1 = Swarm::new_ephemeral(|_| ping::Behaviour::new(cfg.clone())); + let mut swarm2 = Swarm::new_ephemeral(|_| ping::Behaviour::new(cfg.clone())); async_std::task::block_on(async { - swarm1.listen().await; + swarm1.listen().with_memory_addr_external().await; swarm2.connect(&mut swarm1).await; for _ in 0..count.get() { - let (e1, e2) = match libp2p_swarm_test::drive(&mut swarm1, &mut swarm2).await { - ([BehaviourEvent::Ping(e1)], [BehaviourEvent::Ping(e2)]) => (e1, e2), - events => panic!("Unexpected events: {events:?}"), - }; + let ([e1], [e2]): 
([ping::Event; 1], [ping::Event; 1]) = + libp2p_swarm_test::drive(&mut swarm1, &mut swarm2).await; assert_eq!(&e1.peer, swarm2.local_peer_id()); assert_eq!(&e2.peer, swarm1.local_peer_id()); @@ -59,82 +56,28 @@ fn ping_pong() { } fn assert_ping_rtt_less_than_50ms(e: ping::Event) { - let success = e.result.expect("a ping success"); + let rtt = e.result.expect("a ping success"); - if let ping::Success::Ping { rtt } = success { - assert!(rtt < Duration::from_millis(50)) - } -} - -/// Tests that the connection is closed upon a configurable -/// number of consecutive ping failures. -#[test] -fn max_failures() { - fn prop(max_failures: NonZeroU8) { - let cfg = ping::Config::new() - .with_interval(Duration::from_millis(10)) - .with_timeout(Duration::from_millis(0)) - .with_max_failures(max_failures.into()); - - let mut swarm1 = Swarm::new_ephemeral(|_| Behaviour::new(cfg.clone())); - let mut swarm2 = Swarm::new_ephemeral(|_| Behaviour::new(cfg.clone())); - - let (count1, count2) = async_std::task::block_on(async { - swarm1.listen().await; - swarm2.connect(&mut swarm1).await; - - future::join( - count_ping_failures_until_connection_closed(swarm1), - count_ping_failures_until_connection_closed(swarm2), - ) - .await - }); - - assert_eq!(u8::max(count1, count2), max_failures.get() - 1); - } - - QuickCheck::new().tests(10).quickcheck(prop as fn(_)) -} - -async fn count_ping_failures_until_connection_closed(mut swarm: Swarm) -> u8 { - let mut failure_count = 0; - - loop { - match swarm.next_swarm_event().await { - SwarmEvent::Behaviour(BehaviourEvent::Ping(ping::Event { - result: Ok(ping::Success::Ping { .. }), - .. - })) => { - failure_count = 0; // there may be an occasional success - } - SwarmEvent::Behaviour(BehaviourEvent::Ping(ping::Event { result: Err(_), .. })) => { - failure_count += 1; - } - SwarmEvent::ConnectionClosed { .. 
} => { - return failure_count; - } - _ => {} - } - } + assert!(rtt < Duration::from_millis(50)) } #[test] fn unsupported_doesnt_fail() { - let mut swarm1 = Swarm::new_ephemeral(|_| keep_alive::Behaviour); - let mut swarm2 = Swarm::new_ephemeral(|_| Behaviour::new(ping::Config::new())); + let mut swarm1 = Swarm::new_ephemeral(|_| dummy::Behaviour); + let mut swarm2 = Swarm::new_ephemeral(|_| ping::Behaviour::new(ping::Config::new())); let result = async_std::task::block_on(async { - swarm1.listen().await; + swarm1.listen().with_memory_addr_external().await; swarm2.connect(&mut swarm1).await; let swarm1_peer_id = *swarm1.local_peer_id(); async_std::task::spawn(swarm1.loop_on_next()); loop { match swarm2.next_swarm_event().await { - SwarmEvent::Behaviour(BehaviourEvent::Ping(ping::Event { + SwarmEvent::Behaviour(ping::Event { result: Err(ping::Failure::Unsupported), .. - })) => { + }) => { swarm2.disconnect_peer_id(swarm1_peer_id).unwrap(); } SwarmEvent::ConnectionClosed { cause: Some(e), .. } => { @@ -150,19 +93,3 @@ fn unsupported_doesnt_fail() { result.expect("node with ping should not fail connection due to unsupported protocol"); } - -#[derive(NetworkBehaviour, Default)] -#[behaviour(prelude = "libp2p_swarm::derive_prelude")] -struct Behaviour { - keep_alive: keep_alive::Behaviour, - ping: ping::Behaviour, -} - -impl Behaviour { - fn new(config: ping::Config) -> Self { - Self { - keep_alive: keep_alive::Behaviour, - ping: ping::Behaviour::new(config), - } - } -} diff --git a/protocols/relay/CHANGELOG.md b/protocols/relay/CHANGELOG.md index 4b3b9778439..aaade5e48f9 100644 --- a/protocols/relay/CHANGELOG.md +++ b/protocols/relay/CHANGELOG.md @@ -1,4 +1,50 @@ -## 0.16.0 - unreleased +## 0.17.1 + +- Automatically register relayed addresses as external addresses. + See [PR 4809](https://github.com/libp2p/rust-libp2p/pull/4809). +- Fix an error where performing too many reservations at once could lead to inconsistent internal state. 
+ See [PR 4841](https://github.com/libp2p/rust-libp2p/pull/4841). + +## 0.17.0 +- Don't close connections on protocol failures within the relay-server. + To achieve this, error handling was restructured: + - `libp2p::relay::outbound::stop::FatalUpgradeError` has been removed. + - `libp2p::relay::outbound::stop::{Error, ProtocolViolation}` have been introduced. + - Several variants of `libp2p::relay::Event` have been deprecated. + + See [PR 4718](https://github.com/libp2p/rust-libp2p/pull/4718). +- Fix a rare race condition when making a reservation on a relay that could lead to a failed reservation. + See [PR 4747](https://github.com/libp2p/rust-libp2p/pull/4747). +- Propagate errors of relay client to the listener / dialer. + A failed reservation will now appear as `SwarmEvent::ListenerClosed` with the `ListenerId` of the corresponding `Swarm::listen_on` call. + A failed circuit request will now appear as `SwarmEvent::OutgoingConnectionError` with the `ConnectionId` of the corresponding `Swarm::dial` call. + Lastly, a failed reservation or circuit request will **no longer** close the underlying relay connection. + As a result, we remove the following enum variants: + - `relay::client::Event::ReservationReqFailed` + - `relay::client::Event::OutboundCircuitReqFailed` + - `relay::client::Event::InboundCircuitReqDenied` + - `relay::client::Event::InboundCircuitReqDenyFailed` + + See [PR 4745](https://github.com/libp2p/rust-libp2p/pull/4745). + +## 0.16.2 + +## 0.16.1 + +- Export `RateLimiter` type. + See [PR 3742]. + +- Add functions to access data within `Limit`. + See [PR 4162]. + +- Remove unconditional `async-std` dependency. + See [PR 4283]. + +[PR 3742]: https://github.com/libp2p/rust-libp2p/pull/3742 +[PR 4162]: https://github.com/libp2p/rust-libp2p/pull/4162 +[PR 4283]: https://github.com/libp2p/rust-libp2p/pull/4283 + +## 0.16.0 - Raise MSRV to 1.65. See [PR 3715]. 
@@ -6,8 +52,16 @@ - Hide internals of `Connection` and expose only `AsyncRead` and `AsyncWrite`. See [PR 3829]. +- Remove `Event::CircuitReqReceiveFailed` and `Event::InboundCircuitReqFailed` variants. + These variants are no longer constructed. + See [PR 3605]. + +- Remove deprecated items. See [PR 3948]. + +[PR 3605]: https://github.com/libp2p/rust-libp2p/pull/3605 [PR 3715]: https://github.com/libp2p/rust-libp2p/pull/3715 [PR 3829]: https://github.com/libp2p/rust-libp2p/pull/3829 +[PR 3948]: https://github.com/libp2p/rust-libp2p/pull/3948 ## 0.15.2 diff --git a/protocols/relay/Cargo.toml b/protocols/relay/Cargo.toml index f352358561b..94b9deb1a64 100644 --- a/protocols/relay/Cargo.toml +++ b/protocols/relay/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-relay" edition = "2021" rust-version = { workspace = true } description = "Communications relaying for libp2p" -version = "0.16.0" +version = "0.17.1" authors = ["Parity Technologies ", "Max Inden "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -11,30 +11,34 @@ keywords = ["peer-to-peer", "libp2p", "networking"] categories = ["network-programming", "asynchronous"] [dependencies] -asynchronous-codec = "0.6" +asynchronous-codec = { workspace = true } bytes = "1" -either = "1.6.0" -futures = "0.3.28" +either = "1.9.0" +futures = "0.3.30" futures-timer = "3" -instant = "0.1.11" +futures-bounded = { workspace = true } +instant = "0.1.12" libp2p-core = { workspace = true } -libp2p-swarm = { workspace = true, features = ["async-std"] } +libp2p-swarm = { workspace = true } libp2p-identity = { workspace = true } -log = "0.4" quick-protobuf = "0.8" quick-protobuf-codec = { workspace = true } rand = "0.8.4" static_assertions = "1" thiserror = "1.0" +tracing = "0.1.37" void = "1" [dev-dependencies] -env_logger = "0.10.0" +libp2p-identity = { workspace = true, features = ["rand"] } libp2p-ping = { workspace = true } libp2p-plaintext = { workspace = true } -libp2p-swarm = { workspace = true, features = 
["macros"] } +libp2p-swarm = { workspace = true, features = ["macros", "async-std"] } +libp2p-swarm-test = { workspace = true } libp2p-yamux = { workspace = true } quickcheck = { workspace = true } +tracing-subscriber = { version = "0.3", features = ["env-filter"] } + # Passing arguments to the docsrs builder in order to properly document cfg's. # More information: https://docs.rs/about/builds#cross-compiling @@ -42,3 +46,6 @@ quickcheck = { workspace = true } all-features = true rustdoc-args = ["--cfg", "docsrs"] rustc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true diff --git a/protocols/relay/src/behaviour.rs b/protocols/relay/src/behaviour.rs index 9f2852dd19e..df8443e8359 100644 --- a/protocols/relay/src/behaviour.rs +++ b/protocols/relay/src/behaviour.rs @@ -20,7 +20,7 @@ //! [`NetworkBehaviour`] to act as a circuit relay v2 **relay**. -mod handler; +pub(crate) mod handler; pub(crate) mod rate_limiter; use crate::behaviour::handler::Handler; use crate::multiaddr_ext::MultiaddrExt; @@ -33,16 +33,14 @@ use libp2p_core::{ConnectedPoint, Endpoint, Multiaddr}; use libp2p_identity::PeerId; use libp2p_swarm::behaviour::{ConnectionClosed, FromSwarm}; use libp2p_swarm::{ - dummy, ConnectionDenied, ConnectionHandlerUpgrErr, ConnectionId, ExternalAddresses, - NetworkBehaviour, NotifyHandler, PollParameters, THandler, THandlerInEvent, THandlerOutEvent, - ToSwarm, + dummy, ConnectionDenied, ConnectionId, ExternalAddresses, NetworkBehaviour, NotifyHandler, + THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }; use std::collections::{hash_map, HashMap, HashSet, VecDeque}; use std::num::NonZeroU32; use std::ops::Add; use std::task::{Context, Poll}; use std::time::Duration; -use void::Void; /// Configuration for the relay [`Behaviour`]. 
/// @@ -62,6 +60,40 @@ pub struct Config { pub circuit_src_rate_limiters: Vec>, } +impl Config { + pub fn reservation_rate_per_peer(mut self, limit: NonZeroU32, interval: Duration) -> Self { + self.reservation_rate_limiters + .push(rate_limiter::new_per_peer( + rate_limiter::GenericRateLimiterConfig { limit, interval }, + )); + self + } + + pub fn circuit_src_per_peer(mut self, limit: NonZeroU32, interval: Duration) -> Self { + self.circuit_src_rate_limiters + .push(rate_limiter::new_per_peer( + rate_limiter::GenericRateLimiterConfig { limit, interval }, + )); + self + } + + pub fn reservation_rate_per_ip(mut self, limit: NonZeroU32, interval: Duration) -> Self { + self.reservation_rate_limiters + .push(rate_limiter::new_per_ip( + rate_limiter::GenericRateLimiterConfig { limit, interval }, + )); + self + } + + pub fn circuit_src_per_ip(mut self, limit: NonZeroU32, interval: Duration) -> Self { + self.circuit_src_rate_limiters + .push(rate_limiter::new_per_ip( + rate_limiter::GenericRateLimiterConfig { limit, interval }, + )); + self + } +} + impl std::fmt::Debug for Config { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("Config") @@ -137,50 +169,61 @@ pub enum Event { renewed: bool, }, /// Accepting an inbound reservation request failed. + #[deprecated( + note = "Will be removed in favor of logging them internally, see for details." + )] ReservationReqAcceptFailed { src_peer_id: PeerId, - error: inbound_hop::UpgradeError, + error: inbound_hop::Error, }, /// An inbound reservation request has been denied. ReservationReqDenied { src_peer_id: PeerId }, /// Denying an inbound reservation request has failed. + #[deprecated( + note = "Will be removed in favor of logging them internally, see for details." + )] ReservationReqDenyFailed { src_peer_id: PeerId, - error: inbound_hop::UpgradeError, + error: inbound_hop::Error, }, /// An inbound reservation has timed out. 
ReservationTimedOut { src_peer_id: PeerId }, - CircuitReqReceiveFailed { - src_peer_id: PeerId, - error: ConnectionHandlerUpgrErr, - }, /// An inbound circuit request has been denied. CircuitReqDenied { src_peer_id: PeerId, dst_peer_id: PeerId, }, /// Denying an inbound circuit request failed. + #[deprecated( + note = "Will be removed in favor of logging them internally, see for details." + )] CircuitReqDenyFailed { src_peer_id: PeerId, dst_peer_id: PeerId, - error: inbound_hop::UpgradeError, + error: inbound_hop::Error, }, - /// An inbound cirucit request has been accepted. + /// An inbound circuit request has been accepted. CircuitReqAccepted { src_peer_id: PeerId, dst_peer_id: PeerId, }, - /// An outbound connect for an inbound cirucit request failed. + /// An outbound connect for an inbound circuit request failed. + #[deprecated( + note = "Will be removed in favor of logging them internally, see for details." + )] CircuitReqOutboundConnectFailed { src_peer_id: PeerId, dst_peer_id: PeerId, - error: ConnectionHandlerUpgrErr, + error: outbound_stop::Error, }, /// Accepting an inbound circuit request failed. + #[deprecated( + note = "Will be removed in favor of logging them internally, see for details." + )] CircuitReqAcceptFailed { src_peer_id: PeerId, dst_peer_id: PeerId, - error: inbound_hop::UpgradeError, + error: inbound_hop::Error, }, /// An inbound circuit has closed. CircuitClosed { @@ -201,7 +244,7 @@ pub struct Behaviour { circuits: CircuitsTracker, /// Queue of actions to return when polled. - queued_actions: VecDeque, + queued_actions: VecDeque>>, external_addresses: ExternalAddresses, } @@ -224,7 +267,7 @@ impl Behaviour { peer_id, connection_id, .. - }: ConnectionClosed<::ConnectionHandler>, + }: ConnectionClosed, ) { if let hash_map::Entry::Occupied(mut peer) = self.reservations.entry(peer_id) { peer.get_mut().remove(&connection_id); @@ -240,21 +283,19 @@ impl Behaviour { // Only emit [`CircuitClosed`] for accepted requests. 
.filter(|c| matches!(c.status, CircuitStatus::Accepted)) { - self.queued_actions.push_back( - ToSwarm::GenerateEvent(Event::CircuitClosed { + self.queued_actions + .push_back(ToSwarm::GenerateEvent(Event::CircuitClosed { src_peer_id: circuit.src_peer_id, dst_peer_id: circuit.dst_peer_id, error: Some(std::io::ErrorKind::ConnectionAborted.into()), - }) - .into(), - ); + })); } } } impl NetworkBehaviour for Behaviour { type ConnectionHandler = Either; - type OutEvent = Event; + type ToSwarm = Event; fn handle_established_inbound_connection( &mut self, @@ -306,24 +347,11 @@ impl NetworkBehaviour for Behaviour { ))) } - fn on_swarm_event(&mut self, event: FromSwarm) { + fn on_swarm_event(&mut self, event: FromSwarm) { self.external_addresses.on_swarm_event(&event); - match event { - FromSwarm::ConnectionClosed(connection_closed) => { - self.on_connection_closed(connection_closed) - } - FromSwarm::ConnectionEstablished(_) - | FromSwarm::DialFailure(_) - | FromSwarm::AddressChange(_) - | FromSwarm::ListenFailure(_) - | FromSwarm::NewListener(_) - | FromSwarm::NewListenAddr(_) - | FromSwarm::ExpiredListenAddr(_) - | FromSwarm::ListenerError(_) - | FromSwarm::ListenerClosed(_) - | FromSwarm::NewExternalAddr(_) - | FromSwarm::ExpiredExternalAddr(_) => {} + if let FromSwarm::ConnectionClosed(connection_closed) = event { + self.on_connection_closed(connection_closed) } } @@ -384,7 +412,6 @@ impl NetworkBehaviour for Behaviour { status: proto::Status::RESOURCE_LIMIT_EXCEEDED, }), } - .into() } else { // Accept reservation. 
self.reservations @@ -392,10 +419,22 @@ impl NetworkBehaviour for Behaviour { .or_default() .insert(connection); - Action::AcceptReservationPrototype { + ToSwarm::NotifyHandler { handler: NotifyHandler::One(connection), peer_id: event_source, - inbound_reservation_req, + event: Either::Left(handler::In::AcceptReservationReq { + inbound_reservation_req, + addrs: self + .external_addresses + .iter() + .cloned() + // Add local peer ID in case it isn't present yet. + .filter_map(|a| match a.iter().last()? { + Protocol::P2p(_) => Some(a), + _ => Some(a.with(Protocol::P2p(self.local_peer_id))), + }) + .collect(), + }), } }; @@ -409,39 +448,37 @@ impl NetworkBehaviour for Behaviour { .or_default() .insert(connection); - self.queued_actions.push_back( - ToSwarm::GenerateEvent(Event::ReservationReqAccepted { + self.queued_actions.push_back(ToSwarm::GenerateEvent( + Event::ReservationReqAccepted { src_peer_id: event_source, renewed, - }) - .into(), - ); + }, + )); } handler::Event::ReservationReqAcceptFailed { error } => { - self.queued_actions.push_back( - ToSwarm::GenerateEvent(Event::ReservationReqAcceptFailed { + #[allow(deprecated)] + self.queued_actions.push_back(ToSwarm::GenerateEvent( + Event::ReservationReqAcceptFailed { src_peer_id: event_source, error, - }) - .into(), - ); + }, + )); } handler::Event::ReservationReqDenied {} => { - self.queued_actions.push_back( - ToSwarm::GenerateEvent(Event::ReservationReqDenied { + self.queued_actions.push_back(ToSwarm::GenerateEvent( + Event::ReservationReqDenied { src_peer_id: event_source, - }) - .into(), - ); + }, + )); } handler::Event::ReservationReqDenyFailed { error } => { - self.queued_actions.push_back( - ToSwarm::GenerateEvent(Event::ReservationReqDenyFailed { + #[allow(deprecated)] + self.queued_actions.push_back(ToSwarm::GenerateEvent( + Event::ReservationReqDenyFailed { src_peer_id: event_source, error, - }) - .into(), - ); + }, + )); } handler::Event::ReservationTimedOut {} => { match 
self.reservations.entry(event_source) { @@ -460,12 +497,10 @@ impl NetworkBehaviour for Behaviour { } } - self.queued_actions.push_back( - ToSwarm::GenerateEvent(Event::ReservationTimedOut { + self.queued_actions + .push_back(ToSwarm::GenerateEvent(Event::ReservationTimedOut { src_peer_id: event_source, - }) - .into(), - ); + })); } handler::Event::CircuitReqReceived { inbound_circuit_req, @@ -535,16 +570,7 @@ impl NetworkBehaviour for Behaviour { }), } }; - self.queued_actions.push_back(action.into()); - } - handler::Event::CircuitReqReceiveFailed { error } => { - self.queued_actions.push_back( - ToSwarm::GenerateEvent(Event::CircuitReqReceiveFailed { - src_peer_id: event_source, - error, - }) - .into(), - ); + self.queued_actions.push_back(action); } handler::Event::CircuitReqDenied { circuit_id, @@ -554,13 +580,11 @@ impl NetworkBehaviour for Behaviour { self.circuits.remove(circuit_id); } - self.queued_actions.push_back( - ToSwarm::GenerateEvent(Event::CircuitReqDenied { + self.queued_actions + .push_back(ToSwarm::GenerateEvent(Event::CircuitReqDenied { src_peer_id: event_source, dst_peer_id, - }) - .into(), - ); + })); } handler::Event::CircuitReqDenyFailed { circuit_id, @@ -571,39 +595,34 @@ impl NetworkBehaviour for Behaviour { self.circuits.remove(circuit_id); } - self.queued_actions.push_back( - ToSwarm::GenerateEvent(Event::CircuitReqDenyFailed { + #[allow(deprecated)] + self.queued_actions.push_back(ToSwarm::GenerateEvent( + Event::CircuitReqDenyFailed { src_peer_id: event_source, dst_peer_id, error, - }) - .into(), - ); + }, + )); } handler::Event::OutboundConnectNegotiated { circuit_id, src_peer_id, src_connection_id, inbound_circuit_req, - dst_handler_notifier, dst_stream, dst_pending_data, } => { - self.queued_actions.push_back( - ToSwarm::NotifyHandler { - handler: NotifyHandler::One(src_connection_id), - peer_id: src_peer_id, - event: Either::Left(handler::In::AcceptAndDriveCircuit { - circuit_id, - dst_peer_id: event_source, - inbound_circuit_req, 
- dst_handler_notifier, - dst_stream, - dst_pending_data, - }), - } - .into(), - ); + self.queued_actions.push_back(ToSwarm::NotifyHandler { + handler: NotifyHandler::One(src_connection_id), + peer_id: src_peer_id, + event: Either::Left(handler::In::AcceptAndDriveCircuit { + circuit_id, + dst_peer_id: event_source, + inbound_circuit_req, + dst_stream, + dst_pending_data, + }), + }); } handler::Event::OutboundConnectNegotiationFailed { circuit_id, @@ -613,39 +632,34 @@ impl NetworkBehaviour for Behaviour { status, error, } => { - self.queued_actions.push_back( - ToSwarm::NotifyHandler { - handler: NotifyHandler::One(src_connection_id), - peer_id: src_peer_id, - event: Either::Left(handler::In::DenyCircuitReq { - circuit_id: Some(circuit_id), - inbound_circuit_req, - status, - }), - } - .into(), - ); - self.queued_actions.push_back( - ToSwarm::GenerateEvent(Event::CircuitReqOutboundConnectFailed { + self.queued_actions.push_back(ToSwarm::NotifyHandler { + handler: NotifyHandler::One(src_connection_id), + peer_id: src_peer_id, + event: Either::Left(handler::In::DenyCircuitReq { + circuit_id: Some(circuit_id), + inbound_circuit_req, + status, + }), + }); + #[allow(deprecated)] + self.queued_actions.push_back(ToSwarm::GenerateEvent( + Event::CircuitReqOutboundConnectFailed { src_peer_id, dst_peer_id: event_source, error, - }) - .into(), - ); + }, + )); } handler::Event::CircuitReqAccepted { dst_peer_id, circuit_id, } => { self.circuits.accepted(circuit_id); - self.queued_actions.push_back( - ToSwarm::GenerateEvent(Event::CircuitReqAccepted { + self.queued_actions + .push_back(ToSwarm::GenerateEvent(Event::CircuitReqAccepted { src_peer_id: event_source, dst_peer_id, - }) - .into(), - ); + })); } handler::Event::CircuitReqAcceptFailed { dst_peer_id, @@ -653,14 +667,14 @@ impl NetworkBehaviour for Behaviour { error, } => { self.circuits.remove(circuit_id); - self.queued_actions.push_back( - ToSwarm::GenerateEvent(Event::CircuitReqAcceptFailed { + #[allow(deprecated)] + 
self.queued_actions.push_back(ToSwarm::GenerateEvent( + Event::CircuitReqAcceptFailed { src_peer_id: event_source, dst_peer_id, error, - }) - .into(), - ); + }, + )); } handler::Event::CircuitClosed { dst_peer_id, @@ -669,25 +683,20 @@ impl NetworkBehaviour for Behaviour { } => { self.circuits.remove(circuit_id); - self.queued_actions.push_back( - ToSwarm::GenerateEvent(Event::CircuitClosed { + self.queued_actions + .push_back(ToSwarm::GenerateEvent(Event::CircuitClosed { src_peer_id: event_source, dst_peer_id, error, - }) - .into(), - ); + })); } } } - fn poll( - &mut self, - _cx: &mut Context<'_>, - _: &mut impl PollParameters, - ) -> Poll>> { - if let Some(action) = self.queued_actions.pop_front() { - return Poll::Ready(action.build(self.local_peer_id, &self.external_addresses)); + #[tracing::instrument(level = "trace", name = "NetworkBehaviour::poll", skip(self))] + fn poll(&mut self, _: &mut Context<'_>) -> Poll>> { + if let Some(to_swarm) = self.queued_actions.pop_front() { + return Poll::Ready(to_swarm); } Poll::Pending @@ -783,53 +792,3 @@ impl Add for CircuitId { CircuitId(self.0 + rhs) } } - -/// A [`ToSwarm`], either complete, or still requiring data from [`PollParameters`] -/// before being returned in [`Behaviour::poll`]. 
-#[allow(clippy::large_enum_variant)] -enum Action { - Done(ToSwarm>), - AcceptReservationPrototype { - inbound_reservation_req: inbound_hop::ReservationReq, - handler: NotifyHandler, - peer_id: PeerId, - }, -} - -impl From>> for Action { - fn from(action: ToSwarm>) -> Self { - Self::Done(action) - } -} - -impl Action { - fn build( - self, - local_peer_id: PeerId, - external_addresses: &ExternalAddresses, - ) -> ToSwarm> { - match self { - Action::Done(action) => action, - Action::AcceptReservationPrototype { - inbound_reservation_req, - handler, - peer_id, - } => ToSwarm::NotifyHandler { - handler, - peer_id, - event: Either::Left(handler::In::AcceptReservationReq { - inbound_reservation_req, - addrs: external_addresses - .iter() - .cloned() - // Add local peer ID in case it isn't present yet. - .filter_map(|a| match a.iter().last()? { - Protocol::P2p(_) => Some(a), - _ => Some(a.with(Protocol::P2p(local_peer_id.into()))), - }) - .collect(), - }), - }, - } - } -} diff --git a/protocols/relay/src/behaviour/handler.rs b/protocols/relay/src/behaviour/handler.rs index 29b5c4b9dbd..958c6a9b906 100644 --- a/protocols/relay/src/behaviour/handler.rs +++ b/protocols/relay/src/behaviour/handler.rs @@ -20,30 +20,32 @@ use crate::behaviour::CircuitId; use crate::copy_future::CopyFuture; -use crate::proto; use crate::protocol::{inbound_hop, outbound_stop}; +use crate::{proto, HOP_PROTOCOL_NAME, STOP_PROTOCOL_NAME}; use bytes::Bytes; use either::Either; -use futures::channel::oneshot::{self, Canceled}; use futures::future::{BoxFuture, FutureExt, TryFutureExt}; use futures::io::AsyncWriteExt; use futures::stream::{FuturesUnordered, StreamExt}; use futures_timer::Delay; use instant::Instant; -use libp2p_core::{upgrade, ConnectedPoint, Multiaddr}; +use libp2p_core::upgrade::ReadyUpgrade; +use libp2p_core::{ConnectedPoint, Multiaddr}; use libp2p_identity::PeerId; use libp2p_swarm::handler::{ ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound, - 
ListenUpgradeError, }; use libp2p_swarm::{ - ConnectionHandler, ConnectionHandlerEvent, ConnectionHandlerUpgrErr, ConnectionId, KeepAlive, - NegotiatedSubstream, SubstreamProtocol, + ConnectionHandler, ConnectionHandlerEvent, ConnectionId, Stream, StreamProtocol, + StreamUpgradeError, SubstreamProtocol, }; -use std::collections::VecDeque; -use std::fmt; +use std::collections::{HashMap, VecDeque}; use std::task::{Context, Poll}; use std::time::Duration; +use std::{fmt, io}; + +const MAX_CONCURRENT_STREAMS_PER_CONNECTION: usize = 10; +const STREAM_TIMEOUT: Duration = Duration::from_secs(60); #[derive(Debug, Clone)] pub struct Config { @@ -76,8 +78,7 @@ pub enum In { circuit_id: CircuitId, dst_peer_id: PeerId, inbound_circuit_req: inbound_hop::CircuitReq, - dst_handler_notifier: oneshot::Sender<()>, - dst_stream: NegotiatedSubstream, + dst_stream: Stream, dst_pending_data: Bytes, }, } @@ -123,7 +124,6 @@ impl fmt::Debug for In { circuit_id, inbound_circuit_req: _, dst_peer_id, - dst_handler_notifier: _, dst_stream: _, dst_pending_data: _, } => f @@ -151,11 +151,11 @@ pub enum Event { renewed: bool, }, /// Accepting an inbound reservation request failed. - ReservationReqAcceptFailed { error: inbound_hop::UpgradeError }, + ReservationReqAcceptFailed { error: inbound_hop::Error }, /// An inbound reservation request has been denied. ReservationReqDenied {}, /// Denying an inbound reservation request has failed. - ReservationReqDenyFailed { error: inbound_hop::UpgradeError }, + ReservationReqDenyFailed { error: inbound_hop::Error }, /// An inbound reservation has timed out. ReservationTimedOut {}, /// An inbound circuit request has been received. @@ -163,10 +163,6 @@ pub enum Event { inbound_circuit_req: inbound_hop::CircuitReq, endpoint: ConnectedPoint, }, - /// Receiving an inbound circuit request failed. - CircuitReqReceiveFailed { - error: ConnectionHandlerUpgrErr, - }, /// An inbound circuit request has been denied. 
CircuitReqDenied { circuit_id: Option, @@ -176,9 +172,9 @@ pub enum Event { CircuitReqDenyFailed { circuit_id: Option, dst_peer_id: PeerId, - error: inbound_hop::UpgradeError, + error: inbound_hop::Error, }, - /// An inbound cirucit request has been accepted. + /// An inbound circuit request has been accepted. CircuitReqAccepted { circuit_id: CircuitId, dst_peer_id: PeerId, @@ -187,7 +183,7 @@ pub enum Event { CircuitReqAcceptFailed { circuit_id: CircuitId, dst_peer_id: PeerId, - error: inbound_hop::UpgradeError, + error: inbound_hop::Error, }, /// An outbound substream for an inbound circuit request has been /// negotiated. @@ -196,8 +192,7 @@ pub enum Event { src_peer_id: PeerId, src_connection_id: ConnectionId, inbound_circuit_req: inbound_hop::CircuitReq, - dst_handler_notifier: oneshot::Sender<()>, - dst_stream: NegotiatedSubstream, + dst_stream: Stream, dst_pending_data: Bytes, }, /// Negotiating an outbound substream for an inbound circuit request failed. @@ -207,7 +202,7 @@ pub enum Event { src_connection_id: ConnectionId, inbound_circuit_req: inbound_hop::CircuitReq, status: proto::Status, - error: ConnectionHandlerUpgrErr, + error: outbound_stop::Error, }, /// An inbound circuit has closed. CircuitClosed { @@ -252,10 +247,6 @@ impl fmt::Debug for Event { .debug_struct("Event::CircuitReqReceived") .field("endpoint", endpoint) .finish(), - Event::CircuitReqReceiveFailed { error } => f - .debug_struct("Event::CircuitReqReceiveFailed") - .field("error", error) - .finish(), Event::CircuitReqDenied { circuit_id, dst_peer_id, @@ -297,7 +288,6 @@ impl fmt::Debug for Event { src_peer_id, src_connection_id, inbound_circuit_req: _, - dst_handler_notifier: _, dst_stream: _, dst_pending_data: _, } => f @@ -348,20 +338,12 @@ pub struct Handler { ConnectionHandlerEvent< ::OutboundProtocol, ::OutboundOpenInfo, - ::OutEvent, - ::Error, - >, - >, - - /// A pending fatal error that results in the connection being closed. 
- pending_error: Option< - ConnectionHandlerUpgrErr< - Either, + ::ToBehaviour, >, >, - /// Until when to keep the connection alive. - keep_alive: KeepAlive, + /// The point in time when this connection started idleing. + idle_at: Option, /// Future handling inbound reservation request. reservation_request_future: Option, @@ -369,257 +351,146 @@ pub struct Handler { active_reservation: Option, /// Futures accepting an inbound circuit request. - circuit_accept_futures: - Futures>, - /// Futures deying an inbound circuit request. - circuit_deny_futures: Futures<( - Option, - PeerId, - Result<(), inbound_hop::UpgradeError>, - )>, - /// Tracks substreams lend out to other [`Handler`]s. - /// - /// Contains a [`futures::future::Future`] for each lend out substream that - /// resolves once the substream is dropped. - /// - /// Once all substreams are dropped and this handler has no other work, - /// [`KeepAlive::Until`] can be set, allowing the connection to be closed - /// eventually. - alive_lend_out_substreams: FuturesUnordered>, + circuit_accept_futures: Futures>, + /// Futures denying an inbound circuit request. + circuit_deny_futures: Futures<(Option, PeerId, Result<(), inbound_hop::Error>)>, /// Futures relaying data for circuit between two peers. circuits: Futures<(CircuitId, PeerId, Result<(), std::io::Error>)>, + + /// We issue a stream upgrade for each [`PendingConnect`] request. + pending_connect_requests: VecDeque, + + /// A `CONNECT` request is in flight for these circuits. 
+ active_connect_requests: HashMap, + + inbound_workers: futures_bounded::FuturesSet< + Result, inbound_hop::Error>, + >, + outbound_workers: futures_bounded::FuturesMap< + CircuitId, + Result, + >, } impl Handler { pub fn new(config: Config, endpoint: ConnectedPoint) -> Handler { Handler { + inbound_workers: futures_bounded::FuturesSet::new( + STREAM_TIMEOUT, + MAX_CONCURRENT_STREAMS_PER_CONNECTION, + ), + outbound_workers: futures_bounded::FuturesMap::new( + STREAM_TIMEOUT, + MAX_CONCURRENT_STREAMS_PER_CONNECTION, + ), endpoint, config, queued_events: Default::default(), - pending_error: Default::default(), + idle_at: None, reservation_request_future: Default::default(), circuit_accept_futures: Default::default(), circuit_deny_futures: Default::default(), - alive_lend_out_substreams: Default::default(), circuits: Default::default(), active_reservation: Default::default(), - keep_alive: KeepAlive::Yes, + pending_connect_requests: Default::default(), + active_connect_requests: Default::default(), } } - fn on_fully_negotiated_inbound( - &mut self, - FullyNegotiatedInbound { - protocol: request, .. 
- }: FullyNegotiatedInbound< - ::InboundProtocol, - ::InboundOpenInfo, - >, - ) { - match request { - inbound_hop::Req::Reserve(inbound_reservation_req) => { - self.queued_events.push_back(ConnectionHandlerEvent::Custom( - Event::ReservationReqReceived { - inbound_reservation_req, - endpoint: self.endpoint.clone(), - renewed: self.active_reservation.is_some(), - }, - )); - } - inbound_hop::Req::Connect(inbound_circuit_req) => { - self.queued_events.push_back(ConnectionHandlerEvent::Custom( - Event::CircuitReqReceived { - inbound_circuit_req, - endpoint: self.endpoint.clone(), - }, - )); - } + fn on_fully_negotiated_inbound(&mut self, stream: Stream) { + if self + .inbound_workers + .try_push(inbound_hop::handle_inbound_request( + stream, + self.config.reservation_duration, + self.config.max_circuit_duration, + self.config.max_circuit_bytes, + )) + .is_err() + { + tracing::warn!("Dropping inbound stream because we are at capacity") } } - fn on_fully_negotiated_outbound( - &mut self, - FullyNegotiatedOutbound { - protocol: (dst_stream, dst_pending_data), - info: outbound_open_info, - }: FullyNegotiatedOutbound< - ::OutboundProtocol, - ::OutboundOpenInfo, - >, - ) { - let OutboundOpenInfo { - circuit_id, - inbound_circuit_req, - src_peer_id, - src_connection_id, - } = outbound_open_info; - let (tx, rx) = oneshot::channel(); - self.alive_lend_out_substreams.push(rx); - - self.queued_events.push_back(ConnectionHandlerEvent::Custom( - Event::OutboundConnectNegotiated { - circuit_id, - src_peer_id, - src_connection_id, - inbound_circuit_req, - dst_handler_notifier: tx, - dst_stream, - dst_pending_data, - }, - )); - } - - fn on_listen_upgrade_error( - &mut self, - ListenUpgradeError { error, .. 
}: ListenUpgradeError< - ::InboundOpenInfo, - ::InboundProtocol, - >, - ) { - let non_fatal_error = match error { - ConnectionHandlerUpgrErr::Timeout => ConnectionHandlerUpgrErr::Timeout, - ConnectionHandlerUpgrErr::Timer => ConnectionHandlerUpgrErr::Timer, - ConnectionHandlerUpgrErr::Upgrade(upgrade::UpgradeError::Select( - upgrade::NegotiationError::Failed, - )) => ConnectionHandlerUpgrErr::Upgrade(upgrade::UpgradeError::Select( - upgrade::NegotiationError::Failed, - )), - ConnectionHandlerUpgrErr::Upgrade(upgrade::UpgradeError::Select( - upgrade::NegotiationError::ProtocolError(e), - )) => { - self.pending_error = Some(ConnectionHandlerUpgrErr::Upgrade( - upgrade::UpgradeError::Select(upgrade::NegotiationError::ProtocolError(e)), - )); - return; - } - ConnectionHandlerUpgrErr::Upgrade(upgrade::UpgradeError::Apply( - inbound_hop::UpgradeError::Fatal(error), - )) => { - self.pending_error = Some(ConnectionHandlerUpgrErr::Upgrade( - upgrade::UpgradeError::Apply(Either::Left(error)), - )); - return; - } - }; + fn on_fully_negotiated_outbound(&mut self, stream: Stream) { + let connect = self + .pending_connect_requests + .pop_front() + .expect("opened a stream without a pending stop command"); + + if self + .outbound_workers + .try_push( + connect.circuit_id, + outbound_stop::connect( + stream, + connect.src_peer_id, + connect.max_circuit_duration, + connect.max_circuit_bytes, + ), + ) + .is_err() + { + tracing::warn!("Dropping outbound stream because we are at capacity") + } - self.queued_events.push_back(ConnectionHandlerEvent::Custom( - Event::CircuitReqReceiveFailed { - error: non_fatal_error, - }, - )); + self.active_connect_requests + .insert(connect.circuit_id, connect); } fn on_dial_upgrade_error( &mut self, - DialUpgradeError { - info: open_info, - error, - }: DialUpgradeError< + DialUpgradeError { error, .. 
}: DialUpgradeError< ::OutboundOpenInfo, ::OutboundProtocol, >, ) { - let (non_fatal_error, status) = match error { - ConnectionHandlerUpgrErr::Timeout => ( - ConnectionHandlerUpgrErr::Timeout, - proto::Status::CONNECTION_FAILED, - ), - ConnectionHandlerUpgrErr::Timer => ( - ConnectionHandlerUpgrErr::Timer, - proto::Status::CONNECTION_FAILED, - ), - ConnectionHandlerUpgrErr::Upgrade(upgrade::UpgradeError::Select( - upgrade::NegotiationError::Failed, - )) => { - // The remote has previously done a reservation. Doing a reservation but not - // supporting the stop protocol is pointless, thus disconnecting. - self.pending_error = Some(ConnectionHandlerUpgrErr::Upgrade( - upgrade::UpgradeError::Select(upgrade::NegotiationError::Failed), - )); - return; - } - ConnectionHandlerUpgrErr::Upgrade(upgrade::UpgradeError::Select( - upgrade::NegotiationError::ProtocolError(e), - )) => { - self.pending_error = Some(ConnectionHandlerUpgrErr::Upgrade( - upgrade::UpgradeError::Select(upgrade::NegotiationError::ProtocolError(e)), - )); - return; - } - ConnectionHandlerUpgrErr::Upgrade(upgrade::UpgradeError::Apply(error)) => match error { - outbound_stop::UpgradeError::Fatal(error) => { - self.pending_error = Some(ConnectionHandlerUpgrErr::Upgrade( - upgrade::UpgradeError::Apply(Either::Right(error)), - )); - return; - } - outbound_stop::UpgradeError::CircuitFailed(error) => { - let status = match error { - outbound_stop::CircuitFailedReason::ResourceLimitExceeded => { - proto::Status::RESOURCE_LIMIT_EXCEEDED - } - outbound_stop::CircuitFailedReason::PermissionDenied => { - proto::Status::PERMISSION_DENIED - } - }; - ( - ConnectionHandlerUpgrErr::Upgrade(upgrade::UpgradeError::Apply(error)), - status, - ) - } - }, + let error = match error { + StreamUpgradeError::Timeout => outbound_stop::Error::Io(io::ErrorKind::TimedOut.into()), + StreamUpgradeError::NegotiationFailed => outbound_stop::Error::Unsupported, + StreamUpgradeError::Io(e) => outbound_stop::Error::Io(e), + 
StreamUpgradeError::Apply(v) => void::unreachable(v), }; - let OutboundOpenInfo { - circuit_id, - inbound_circuit_req, - src_peer_id, - src_connection_id, - } = open_info; - - self.queued_events.push_back(ConnectionHandlerEvent::Custom( - Event::OutboundConnectNegotiationFailed { - circuit_id, - src_peer_id, - src_connection_id, - inbound_circuit_req, - status, - error: non_fatal_error, - }, - )); + let stop_command = self + .pending_connect_requests + .pop_front() + .expect("failed to open a stream without a pending stop command"); + + self.queued_events + .push_back(ConnectionHandlerEvent::NotifyBehaviour( + Event::OutboundConnectNegotiationFailed { + circuit_id: stop_command.circuit_id, + src_peer_id: stop_command.src_peer_id, + src_connection_id: stop_command.src_connection_id, + inbound_circuit_req: stop_command.inbound_circuit_req, + status: proto::Status::CONNECTION_FAILED, + error, + }, + )); } } enum ReservationRequestFuture { - Accepting(BoxFuture<'static, Result<(), inbound_hop::UpgradeError>>), - Denying(BoxFuture<'static, Result<(), inbound_hop::UpgradeError>>), + Accepting(BoxFuture<'static, Result<(), inbound_hop::Error>>), + Denying(BoxFuture<'static, Result<(), inbound_hop::Error>>), } type Futures = FuturesUnordered>; impl ConnectionHandler for Handler { - type InEvent = In; - type OutEvent = Event; - type Error = ConnectionHandlerUpgrErr< - Either, - >; - type InboundProtocol = inbound_hop::Upgrade; - type OutboundProtocol = outbound_stop::Upgrade; - type OutboundOpenInfo = OutboundOpenInfo; + type FromBehaviour = In; + type ToBehaviour = Event; + type InboundProtocol = ReadyUpgrade; type InboundOpenInfo = (); + type OutboundProtocol = ReadyUpgrade; + type OutboundOpenInfo = (); fn listen_protocol(&self) -> SubstreamProtocol { - SubstreamProtocol::new( - inbound_hop::Upgrade { - reservation_duration: self.config.reservation_duration, - max_circuit_duration: self.config.max_circuit_duration, - max_circuit_bytes: self.config.max_circuit_bytes, - }, 
- (), - ) + SubstreamProtocol::new(ReadyUpgrade::new(HOP_PROTOCOL_NAME), ()) } - fn on_behaviour_event(&mut self, event: Self::InEvent) { + fn on_behaviour_event(&mut self, event: Self::FromBehaviour) { match event { In::AcceptReservationReq { inbound_reservation_req, @@ -628,11 +499,11 @@ impl ConnectionHandler for Handler { if self .reservation_request_future .replace(ReservationRequestFuture::Accepting( - inbound_reservation_req.accept(addrs).boxed(), + inbound_reservation_req.accept(addrs).err_into().boxed(), )) .is_some() { - log::warn!("Dropping existing deny/accept future in favor of new one.") + tracing::warn!("Dropping existing deny/accept future in favor of new one") } } In::DenyReservationReq { @@ -642,11 +513,11 @@ impl ConnectionHandler for Handler { if self .reservation_request_future .replace(ReservationRequestFuture::Denying( - inbound_reservation_req.deny(status).boxed(), + inbound_reservation_req.deny(status).err_into().boxed(), )) .is_some() { - log::warn!("Dropping existing deny/accept future in favor of new one.") + tracing::warn!("Dropping existing deny/accept future in favor of new one") } } In::NegotiateOutboundConnect { @@ -655,21 +526,16 @@ impl ConnectionHandler for Handler { src_peer_id, src_connection_id, } => { + self.pending_connect_requests.push_back(PendingConnect::new( + circuit_id, + inbound_circuit_req, + src_peer_id, + src_connection_id, + &self.config, + )); self.queued_events .push_back(ConnectionHandlerEvent::OutboundSubstreamRequest { - protocol: SubstreamProtocol::new( - outbound_stop::Upgrade { - src_peer_id, - max_circuit_duration: self.config.max_circuit_duration, - max_circuit_bytes: self.config.max_circuit_bytes, - }, - OutboundOpenInfo { - circuit_id, - inbound_circuit_req, - src_peer_id, - src_connection_id, - }, - ), + protocol: SubstreamProtocol::new(ReadyUpgrade::new(STOP_PROTOCOL_NAME), ()), }); } In::DenyCircuitReq { @@ -681,6 +547,7 @@ impl ConnectionHandler for Handler { self.circuit_deny_futures.push( 
inbound_circuit_req .deny(status) + .err_into() .map(move |result| (circuit_id, dst_peer_id, result)) .boxed(), ); @@ -689,19 +556,18 @@ impl ConnectionHandler for Handler { circuit_id, dst_peer_id, inbound_circuit_req, - dst_handler_notifier, dst_stream, dst_pending_data, } => { self.circuit_accept_futures.push( inbound_circuit_req .accept() + .err_into() .map_ok(move |(src_stream, src_pending_data)| CircuitParts { circuit_id, src_stream, src_pending_data, dst_peer_id, - dst_handler_notifier, dst_stream, dst_pending_data, }) @@ -712,27 +578,21 @@ impl ConnectionHandler for Handler { } } - fn connection_keep_alive(&self) -> KeepAlive { - self.keep_alive + fn connection_keep_alive(&self) -> bool { + let Some(idle_at) = self.idle_at else { + return true; + }; + + Instant::now().duration_since(idle_at) <= Duration::from_secs(10) } + #[tracing::instrument(level = "trace", name = "ConnectionHandler::poll", skip(self, cx))] fn poll( &mut self, cx: &mut Context<'_>, ) -> Poll< - ConnectionHandlerEvent< - Self::OutboundProtocol, - Self::OutboundOpenInfo, - Self::OutEvent, - Self::Error, - >, + ConnectionHandlerEvent, > { - // Check for a pending (fatal) error. - if let Some(err) = self.pending_error.take() { - // The handler will not be polled again by the `Swarm`. - return Poll::Ready(ConnectionHandlerEvent::Close(err)); - } - // Return queued events. 
if let Some(event) = self.queued_events.pop_front() { return Poll::Ready(event); @@ -744,35 +604,131 @@ impl ConnectionHandler for Handler { { match result { Ok(()) => { - return Poll::Ready(ConnectionHandlerEvent::Custom(Event::CircuitClosed { - circuit_id, - dst_peer_id, - error: None, - })) + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + Event::CircuitClosed { + circuit_id, + dst_peer_id, + error: None, + }, + )) } Err(e) => { - return Poll::Ready(ConnectionHandlerEvent::Custom(Event::CircuitClosed { - circuit_id, - dst_peer_id, - error: Some(e), - })) + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + Event::CircuitClosed { + circuit_id, + dst_peer_id, + error: Some(e), + }, + )) } } } + // Process inbound protocol workers + loop { + match self.inbound_workers.poll_unpin(cx) { + Poll::Ready(Ok(Ok(Either::Left(inbound_reservation_req)))) => { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + Event::ReservationReqReceived { + inbound_reservation_req, + endpoint: self.endpoint.clone(), + renewed: self.active_reservation.is_some(), + }, + )); + } + Poll::Ready(Ok(Ok(Either::Right(inbound_circuit_req)))) => { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + Event::CircuitReqReceived { + inbound_circuit_req, + endpoint: self.endpoint.clone(), + }, + )); + } + Poll::Ready(Err(e)) => { + tracing::debug!("Inbound stream operation timed out: {e}"); + continue; + } + Poll::Ready(Ok(Err(e))) => { + tracing::debug!("Inbound stream operation failed: {e}"); + continue; + } + Poll::Pending => { + break; + } + } + } + + // Process outbound protocol workers + match self.outbound_workers.poll_unpin(cx) { + Poll::Ready((id, Ok(Ok(circuit)))) => { + let connect = self + .active_connect_requests + .remove(&id) + .expect("must have pending connect"); + + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + Event::OutboundConnectNegotiated { + circuit_id: id, + src_peer_id: connect.src_peer_id, + src_connection_id: 
connect.src_connection_id, + inbound_circuit_req: connect.inbound_circuit_req, + dst_stream: circuit.dst_stream, + dst_pending_data: circuit.dst_pending_data, + }, + )); + } + Poll::Ready((id, Ok(Err(error)))) => { + let connect = self + .active_connect_requests + .remove(&id) + .expect("must have pending connect"); + + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + Event::OutboundConnectNegotiationFailed { + circuit_id: connect.circuit_id, + src_peer_id: connect.src_peer_id, + src_connection_id: connect.src_connection_id, + inbound_circuit_req: connect.inbound_circuit_req, + status: error.to_status(), + error, + }, + )); + } + Poll::Ready((id, Err(futures_bounded::Timeout { .. }))) => { + let connect = self + .active_connect_requests + .remove(&id) + .expect("must have pending connect"); + + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + Event::OutboundConnectNegotiationFailed { + circuit_id: connect.circuit_id, + src_peer_id: connect.src_peer_id, + src_connection_id: connect.src_connection_id, + inbound_circuit_req: connect.inbound_circuit_req, + status: proto::Status::CONNECTION_FAILED, // Best fit? + error: outbound_stop::Error::Io(io::ErrorKind::TimedOut.into()), + }, + )); + } + Poll::Pending => {} + } + // Deny new circuits. 
if let Poll::Ready(Some((circuit_id, dst_peer_id, result))) = self.circuit_deny_futures.poll_next_unpin(cx) { match result { Ok(()) => { - return Poll::Ready(ConnectionHandlerEvent::Custom(Event::CircuitReqDenied { - circuit_id, - dst_peer_id, - })); + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + Event::CircuitReqDenied { + circuit_id, + dst_peer_id, + }, + )); } Err(error) => { - return Poll::Ready(ConnectionHandlerEvent::Custom( + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( Event::CircuitReqDenyFailed { circuit_id, dst_peer_id, @@ -792,7 +748,6 @@ impl ConnectionHandler for Handler { mut src_stream, src_pending_data, dst_peer_id, - dst_handler_notifier, mut dst_stream, dst_pending_data, } = parts; @@ -816,8 +771,6 @@ impl ConnectionHandler for Handler { ) .await?; - // Inform destination handler that the stream to the destination is dropped. - drop(dst_handler_notifier); Ok(()) } .map(move |r| (circuit_id, dst_peer_id, r)) @@ -825,7 +778,7 @@ impl ConnectionHandler for Handler { self.circuits.push(circuit); - return Poll::Ready(ConnectionHandlerEvent::Custom( + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( Event::CircuitReqAccepted { circuit_id, dst_peer_id, @@ -833,7 +786,7 @@ impl ConnectionHandler for Handler { )); } Err((circuit_id, dst_peer_id, error)) => { - return Poll::Ready(ConnectionHandlerEvent::Custom( + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( Event::CircuitReqAcceptFailed { circuit_id, dst_peer_id, @@ -851,7 +804,7 @@ impl ConnectionHandler for Handler { .map(|fut| fut.poll_unpin(cx)) { self.active_reservation = None; - return Poll::Ready(ConnectionHandlerEvent::Custom( + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( Event::ReservationTimedOut {}, )); } @@ -868,12 +821,12 @@ impl ConnectionHandler for Handler { .active_reservation .replace(Delay::new(self.config.reservation_duration)) .is_some(); - return Poll::Ready(ConnectionHandlerEvent::Custom( + return 
Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( Event::ReservationReqAccepted { renewed }, )); } Err(error) => { - return Poll::Ready(ConnectionHandlerEvent::Custom( + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( Event::ReservationReqAcceptFailed { error }, )); } @@ -886,12 +839,12 @@ impl ConnectionHandler for Handler { match result { Ok(()) => { - return Poll::Ready(ConnectionHandlerEvent::Custom( + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( Event::ReservationReqDenied {}, )) } Err(error) => { - return Poll::Ready(ConnectionHandlerEvent::Custom( + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( Event::ReservationReqDenyFailed { error }, )); } @@ -901,28 +854,13 @@ impl ConnectionHandler for Handler { None => {} } - // Check lend out substreams. - while let Poll::Ready(Some(Err(Canceled))) = - self.alive_lend_out_substreams.poll_next_unpin(cx) - {} - // Check keep alive status. - if self.reservation_request_future.is_none() - && self.circuit_accept_futures.is_empty() - && self.circuit_deny_futures.is_empty() - && self.alive_lend_out_substreams.is_empty() - && self.circuits.is_empty() - && self.active_reservation.is_none() - { - match self.keep_alive { - KeepAlive::Yes => { - self.keep_alive = KeepAlive::Until(Instant::now() + Duration::from_secs(10)); - } - KeepAlive::Until(_) => {} - KeepAlive::No => panic!("Handler never sets KeepAlive::No."), + if self.active_reservation.is_none() { + if self.idle_at.is_none() { + self.idle_at = Some(Instant::now()); } } else { - self.keep_alive = KeepAlive::Yes; + self.idle_at = None; } Poll::Pending @@ -938,36 +876,60 @@ impl ConnectionHandler for Handler { >, ) { match event { - ConnectionEvent::FullyNegotiatedInbound(fully_negotiated_inbound) => { - self.on_fully_negotiated_inbound(fully_negotiated_inbound) - } - ConnectionEvent::FullyNegotiatedOutbound(fully_negotiated_outbound) => { - self.on_fully_negotiated_outbound(fully_negotiated_outbound) + 
ConnectionEvent::FullyNegotiatedInbound(FullyNegotiatedInbound { + protocol: stream, + .. + }) => { + self.on_fully_negotiated_inbound(stream); } - ConnectionEvent::ListenUpgradeError(listen_upgrade_error) => { - self.on_listen_upgrade_error(listen_upgrade_error) + ConnectionEvent::FullyNegotiatedOutbound(FullyNegotiatedOutbound { + protocol: stream, + .. + }) => { + self.on_fully_negotiated_outbound(stream); } ConnectionEvent::DialUpgradeError(dial_upgrade_error) => { - self.on_dial_upgrade_error(dial_upgrade_error) + self.on_dial_upgrade_error(dial_upgrade_error); } - ConnectionEvent::AddressChange(_) => {} + _ => {} } } } -pub struct OutboundOpenInfo { +struct CircuitParts { + circuit_id: CircuitId, + src_stream: Stream, + src_pending_data: Bytes, + dst_peer_id: PeerId, + dst_stream: Stream, + dst_pending_data: Bytes, +} + +/// Holds everything we know about a to-be-issued `CONNECT` request to a peer. +struct PendingConnect { circuit_id: CircuitId, inbound_circuit_req: inbound_hop::CircuitReq, src_peer_id: PeerId, src_connection_id: ConnectionId, + max_circuit_duration: Duration, + max_circuit_bytes: u64, } -pub(crate) struct CircuitParts { - circuit_id: CircuitId, - src_stream: NegotiatedSubstream, - src_pending_data: Bytes, - dst_peer_id: PeerId, - dst_handler_notifier: oneshot::Sender<()>, - dst_stream: NegotiatedSubstream, - dst_pending_data: Bytes, +impl PendingConnect { + fn new( + circuit_id: CircuitId, + inbound_circuit_req: inbound_hop::CircuitReq, + src_peer_id: PeerId, + src_connection_id: ConnectionId, + config: &Config, + ) -> Self { + Self { + circuit_id, + inbound_circuit_req, + src_peer_id, + src_connection_id, + max_circuit_duration: config.max_circuit_duration, + max_circuit_bytes: config.max_circuit_bytes, + } + } } diff --git a/protocols/relay/src/behaviour/rate_limiter.rs b/protocols/relay/src/behaviour/rate_limiter.rs index 31223c309f2..a4a127e1253 100644 --- a/protocols/relay/src/behaviour/rate_limiter.rs +++ 
b/protocols/relay/src/behaviour/rate_limiter.rs @@ -30,10 +30,10 @@ use std::time::Duration; /// Allows rate limiting access to some resource based on the [`PeerId`] and /// [`Multiaddr`] of a remote peer. -/// -/// See [`new_per_peer`] and [`new_per_ip`] for precast implementations. Use -/// [`GenericRateLimiter`] to build your own, e.g. based on the autonomous system -/// number of a peers IP address. +// +// See [`new_per_peer`] and [`new_per_ip`] for precast implementations. Use +// [`GenericRateLimiter`] to build your own, e.g. based on the autonomous system +// number of a peers IP address. pub trait RateLimiter: Send { fn try_next(&mut self, peer: PeerId, addr: &Multiaddr, now: Instant) -> bool; } @@ -80,9 +80,9 @@ pub(crate) struct GenericRateLimiter { /// Configuration for a [`GenericRateLimiter`]. #[derive(Debug, Clone, Copy)] pub(crate) struct GenericRateLimiterConfig { - /// The maximum number of tokens in the bucket at any point in time. + // The maximum number of tokens in the bucket at any point in time. pub(crate) limit: NonZeroU32, - /// The interval at which a single token is added to the bucket. + // The interval at which a single token is added to the bucket. pub(crate) interval: Duration, } diff --git a/protocols/relay/src/lib.rs b/protocols/relay/src/lib.rs index 8421026d984..eca3578d599 100644 --- a/protocols/relay/src/lib.rs +++ b/protocols/relay/src/lib.rs @@ -28,7 +28,6 @@ mod copy_future; mod multiaddr_ext; mod priv_client; mod protocol; -pub mod v2; mod proto { #![allow(unreachable_pub)] @@ -40,26 +39,26 @@ mod proto { }; } -pub use behaviour::{Behaviour, CircuitId, Config, Event}; +pub use behaviour::{rate_limiter::RateLimiter, Behaviour, CircuitId, Config, Event}; pub use protocol::{HOP_PROTOCOL_NAME, STOP_PROTOCOL_NAME}; /// Types related to the relay protocol inbound. 
pub mod inbound { pub mod hop { - pub use crate::protocol::inbound_hop::FatalUpgradeError; - } - pub mod stop { - pub use crate::protocol::inbound_stop::FatalUpgradeError; + #[deprecated(note = "Renamed to `Error`.")] + pub type FatalUpgradeError = Error; + + pub use crate::protocol::inbound_hop::Error; } } /// Types related to the relay protocol outbound. pub mod outbound { pub mod hop { - pub use crate::protocol::outbound_hop::FatalUpgradeError; + pub use crate::protocol::outbound_hop::{ConnectError, ProtocolViolation, ReserveError}; } pub mod stop { - pub use crate::protocol::outbound_stop::FatalUpgradeError; + pub use crate::protocol::outbound_stop::{Error, ProtocolViolation}; } } diff --git a/protocols/relay/src/priv_client.rs b/protocols/relay/src/priv_client.rs index 9f250adf8d8..e414852ef81 100644 --- a/protocols/relay/src/priv_client.rs +++ b/protocols/relay/src/priv_client.rs @@ -20,28 +20,27 @@ //! [`NetworkBehaviour`] to act as a circuit relay v2 **client**. -mod handler; +pub(crate) mod handler; pub(crate) mod transport; use crate::multiaddr_ext::MultiaddrExt; use crate::priv_client::handler::Handler; -use crate::protocol::{self, inbound_stop, outbound_hop}; +use crate::protocol::{self, inbound_stop}; use bytes::Bytes; use either::Either; use futures::channel::mpsc::Receiver; -use futures::channel::oneshot; use futures::future::{BoxFuture, FutureExt}; use futures::io::{AsyncRead, AsyncWrite}; use futures::ready; use futures::stream::StreamExt; +use libp2p_core::multiaddr::Protocol; use libp2p_core::{Endpoint, Multiaddr}; use libp2p_identity::PeerId; use libp2p_swarm::behaviour::{ConnectionClosed, ConnectionEstablished, FromSwarm}; use libp2p_swarm::dial_opts::DialOpts; use libp2p_swarm::{ - dummy, ConnectionDenied, ConnectionHandler, ConnectionHandlerUpgrErr, ConnectionId, - DialFailure, NegotiatedSubstream, NetworkBehaviour, NotifyHandler, PollParameters, THandler, - THandlerInEvent, THandlerOutEvent, ToSwarm, + dummy, ConnectionDenied, 
ConnectionHandler, ConnectionId, DialFailure, NetworkBehaviour, + NotifyHandler, Stream, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }; use std::collections::{hash_map, HashMap, VecDeque}; use std::io::{Error, ErrorKind, IoSlice}; @@ -60,36 +59,21 @@ pub enum Event { renewal: bool, limit: Option, }, - ReservationReqFailed { - relay_peer_id: PeerId, - /// Indicates whether the request replaces an existing reservation. - renewal: bool, - error: ConnectionHandlerUpgrErr, - }, OutboundCircuitEstablished { relay_peer_id: PeerId, limit: Option, }, - OutboundCircuitReqFailed { - relay_peer_id: PeerId, - error: ConnectionHandlerUpgrErr, - }, /// An inbound circuit has been established. InboundCircuitEstablished { src_peer_id: PeerId, limit: Option, }, - InboundCircuitReqFailed { - relay_peer_id: PeerId, - error: ConnectionHandlerUpgrErr, - }, - /// An inbound circuit request has been denied. - InboundCircuitReqDenied { src_peer_id: PeerId }, - /// Denying an inbound circuit request failed. - InboundCircuitReqDenyFailed { - src_peer_id: PeerId, - error: inbound_stop::UpgradeError, - }, +} + +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +enum ReservationStatus { + Pending, + Confirmed, } /// [`NetworkBehaviour`] implementation of the relay client @@ -102,6 +86,11 @@ pub struct Behaviour { /// connection. directly_connected_peers: HashMap>, + /// Stores the address of a pending or confirmed reservation. + /// + /// This is indexed by the [`ConnectionId`] to a relay server and the address is the `/p2p-circuit` address we reserved on it. + reservation_addresses: HashMap, + /// Queue of actions to return when polled. 
queued_actions: VecDeque>>, @@ -115,6 +104,7 @@ pub fn new(local_peer_id: PeerId) -> (Transport, Behaviour) { local_peer_id, from_transport, directly_connected_peers: Default::default(), + reservation_addresses: Default::default(), queued_actions: Default::default(), pending_handler_commands: Default::default(), }; @@ -122,11 +112,6 @@ pub fn new(local_peer_id: PeerId) -> (Transport, Behaviour) { } impl Behaviour { - #[deprecated(since = "0.15.0", note = "Use libp2p_relay::client::new instead.")] - pub fn new_transport_and_behaviour(local_peer_id: PeerId) -> (transport::Transport, Self) { - new(local_peer_id) - } - fn on_connection_closed( &mut self, ConnectionClosed { @@ -134,7 +119,7 @@ impl Behaviour { connection_id, endpoint, .. - }: ConnectionClosed<::ConnectionHandler>, + }: ConnectionClosed, ) { if !endpoint.is_relayed() { match self.directly_connected_peers.entry(peer_id) { @@ -154,13 +139,19 @@ impl Behaviour { unreachable!("`on_connection_closed` for unconnected peer.") } }; + if let Some((addr, ReservationStatus::Confirmed)) = + self.reservation_addresses.remove(&connection_id) + { + self.queued_actions + .push_back(ToSwarm::ExternalAddrExpired(addr)); + } } } } impl NetworkBehaviour for Behaviour { type ConnectionHandler = Either; - type OutEvent = Event; + type ToSwarm = Event; fn handle_established_inbound_connection( &mut self, @@ -172,7 +163,6 @@ impl NetworkBehaviour for Behaviour { if local_addr.is_relayed() { return Ok(Either::Right(dummy::ConnectionHandler)); } - let mut handler = Handler::new(self.local_peer_id, peer, remote_addr.clone()); if let Some(event) = self.pending_handler_commands.remove(&connection_id) { @@ -202,7 +192,7 @@ impl NetworkBehaviour for Behaviour { Ok(Either::Left(handler)) } - fn on_swarm_event(&mut self, event: FromSwarm) { + fn on_swarm_event(&mut self, event: FromSwarm) { match event { FromSwarm::ConnectionEstablished(ConnectionEstablished { peer_id, @@ -229,24 +219,17 @@ impl NetworkBehaviour for Behaviour { 
self.on_connection_closed(connection_closed) } FromSwarm::DialFailure(DialFailure { connection_id, .. }) => { + self.reservation_addresses.remove(&connection_id); self.pending_handler_commands.remove(&connection_id); } - FromSwarm::AddressChange(_) - | FromSwarm::ListenFailure(_) - | FromSwarm::NewListener(_) - | FromSwarm::NewListenAddr(_) - | FromSwarm::ExpiredListenAddr(_) - | FromSwarm::ListenerError(_) - | FromSwarm::ListenerClosed(_) - | FromSwarm::NewExternalAddr(_) - | FromSwarm::ExpiredExternalAddr(_) => {} + _ => {} } } fn on_connection_handler_event( &mut self, event_source: PeerId, - _connection: ConnectionId, + connection: ConnectionId, handler_event: THandlerOutEvent, ) { let handler_event = match handler_event { @@ -256,52 +239,42 @@ impl NetworkBehaviour for Behaviour { let event = match handler_event { handler::Event::ReservationReqAccepted { renewal, limit } => { + let (addr, status) = self + .reservation_addresses + .get_mut(&connection) + .expect("Relay connection exist"); + + if !renewal && *status == ReservationStatus::Pending { + *status = ReservationStatus::Confirmed; + self.queued_actions + .push_back(ToSwarm::ExternalAddrConfirmed(addr.clone())); + } + Event::ReservationReqAccepted { relay_peer_id: event_source, renewal, limit, } } - handler::Event::ReservationReqFailed { renewal, error } => { - Event::ReservationReqFailed { - relay_peer_id: event_source, - renewal, - error, - } - } handler::Event::OutboundCircuitEstablished { limit } => { Event::OutboundCircuitEstablished { relay_peer_id: event_source, limit, } } - handler::Event::OutboundCircuitReqFailed { error } => Event::OutboundCircuitReqFailed { - relay_peer_id: event_source, - error, - }, handler::Event::InboundCircuitEstablished { src_peer_id, limit } => { Event::InboundCircuitEstablished { src_peer_id, limit } } - handler::Event::InboundCircuitReqFailed { error } => Event::InboundCircuitReqFailed { - relay_peer_id: event_source, - error, - }, - 
handler::Event::InboundCircuitReqDenied { src_peer_id } => { - Event::InboundCircuitReqDenied { src_peer_id } - } - handler::Event::InboundCircuitReqDenyFailed { src_peer_id, error } => { - Event::InboundCircuitReqDenyFailed { src_peer_id, error } - } }; - self.queued_actions.push_back(ToSwarm::GenerateEvent(event)) + self.queued_actions.push_back(ToSwarm::GenerateEvent(event)); } + #[tracing::instrument(level = "trace", name = "NetworkBehaviour::poll", skip(self, cx))] fn poll( &mut self, cx: &mut Context<'_>, - _poll_parameters: &mut impl PollParameters, - ) -> Poll>> { + ) -> Poll>> { if let Some(action) = self.queued_actions.pop_front() { return Poll::Ready(action); } @@ -315,20 +288,44 @@ impl NetworkBehaviour for Behaviour { match self .directly_connected_peers .get(&relay_peer_id) - .and_then(|cs| cs.get(0)) + .and_then(|cs| cs.first()) { - Some(connection_id) => ToSwarm::NotifyHandler { - peer_id: relay_peer_id, - handler: NotifyHandler::One(*connection_id), - event: Either::Left(handler::In::Reserve { to_listener }), - }, + Some(connection_id) => { + self.reservation_addresses.insert( + *connection_id, + ( + relay_addr + .with(Protocol::P2p(relay_peer_id)) + .with(Protocol::P2pCircuit) + .with(Protocol::P2p(self.local_peer_id)), + ReservationStatus::Pending, + ), + ); + + ToSwarm::NotifyHandler { + peer_id: relay_peer_id, + handler: NotifyHandler::One(*connection_id), + event: Either::Left(handler::In::Reserve { to_listener }), + } + } None => { let opts = DialOpts::peer_id(relay_peer_id) - .addresses(vec![relay_addr]) + .addresses(vec![relay_addr.clone()]) .extend_addresses_through_behaviour() .build(); let relayed_connection_id = opts.connection_id(); + self.reservation_addresses.insert( + relayed_connection_id, + ( + relay_addr + .with(Protocol::P2p(relay_peer_id)) + .with(Protocol::P2pCircuit) + .with(Protocol::P2p(self.local_peer_id)), + ReservationStatus::Pending, + ), + ); + self.pending_handler_commands .insert(relayed_connection_id, 
handler::In::Reserve { to_listener }); ToSwarm::Dial { opts } @@ -345,13 +342,13 @@ impl NetworkBehaviour for Behaviour { match self .directly_connected_peers .get(&relay_peer_id) - .and_then(|cs| cs.get(0)) + .and_then(|cs| cs.first()) { Some(connection_id) => ToSwarm::NotifyHandler { peer_id: relay_peer_id, handler: NotifyHandler::One(*connection_id), event: Either::Left(handler::In::EstablishCircuit { - send_back, + to_dial: send_back, dst_peer_id, }), }, @@ -365,7 +362,7 @@ impl NetworkBehaviour for Behaviour { self.pending_handler_commands.insert( connection_id, handler::In::EstablishCircuit { - send_back, + to_dial: send_back, dst_peer_id, }, ); @@ -390,32 +387,23 @@ impl NetworkBehaviour for Behaviour { /// /// Internally, this uses a stream to the relay. pub struct Connection { - state: ConnectionState, + pub(crate) state: ConnectionState, } -enum ConnectionState { +pub(crate) enum ConnectionState { InboundAccepting { accept: BoxFuture<'static, Result>, }, Operational { read_buffer: Bytes, - substream: NegotiatedSubstream, - /// "Drop notifier" pattern to signal to the transport that the connection has been dropped. - /// - /// This is flagged as "dead-code" by the compiler because we never read from it here. - /// However, it is actual use is to trigger the `Canceled` error in the `Transport` when this `Sender` is dropped. 
- #[allow(dead_code)] - drop_notifier: oneshot::Sender, + substream: Stream, }, } impl Unpin for ConnectionState {} impl ConnectionState { - pub(crate) fn new_inbound( - circuit: inbound_stop::Circuit, - drop_notifier: oneshot::Sender, - ) -> Self { + pub(crate) fn new_inbound(circuit: inbound_stop::Circuit) -> Self { ConnectionState::InboundAccepting { accept: async { let (substream, read_buffer) = circuit @@ -425,22 +413,16 @@ impl ConnectionState { Ok(ConnectionState::Operational { read_buffer, substream, - drop_notifier, }) } .boxed(), } } - pub(crate) fn new_outbound( - substream: NegotiatedSubstream, - read_buffer: Bytes, - drop_notifier: oneshot::Sender, - ) -> Self { + pub(crate) fn new_outbound(substream: Stream, read_buffer: Bytes) -> Self { ConnectionState::Operational { substream, read_buffer, - drop_notifier, } } } diff --git a/protocols/relay/src/priv_client/handler.rs b/protocols/relay/src/priv_client/handler.rs index 290b72e94af..662d63cc742 100644 --- a/protocols/relay/src/priv_client/handler.rs +++ b/protocols/relay/src/priv_client/handler.rs @@ -18,37 +18,38 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
+use crate::client::Connection; use crate::priv_client::transport; -use crate::proto; +use crate::priv_client::transport::ToListenerMsg; use crate::protocol::{self, inbound_stop, outbound_hop}; -use either::Either; +use crate::{priv_client, proto, HOP_PROTOCOL_NAME, STOP_PROTOCOL_NAME}; +use futures::channel::mpsc::Sender; use futures::channel::{mpsc, oneshot}; -use futures::future::{BoxFuture, FutureExt}; -use futures::sink::SinkExt; -use futures::stream::{FuturesUnordered, StreamExt}; +use futures::future::FutureExt; use futures_timer::Delay; -use instant::Instant; use libp2p_core::multiaddr::Protocol; -use libp2p_core::{upgrade, Multiaddr}; +use libp2p_core::upgrade::ReadyUpgrade; +use libp2p_core::Multiaddr; use libp2p_identity::PeerId; -use libp2p_swarm::handler::{ - ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound, - ListenUpgradeError, -}; +use libp2p_swarm::handler::{ConnectionEvent, FullyNegotiatedInbound}; use libp2p_swarm::{ - ConnectionHandler, ConnectionHandlerEvent, ConnectionHandlerUpgrErr, KeepAlive, + ConnectionHandler, ConnectionHandlerEvent, Stream, StreamProtocol, StreamUpgradeError, SubstreamProtocol, }; -use log::debug; -use std::collections::{HashMap, VecDeque}; -use std::fmt; +use std::collections::VecDeque; use std::task::{Context, Poll}; use std::time::Duration; +use std::{fmt, io}; +use void::Void; /// The maximum number of circuits being denied concurrently. /// /// Circuits to be denied exceeding the limit are dropped. 
const MAX_NUMBER_DENYING_CIRCUIT: usize = 8; +const DENYING_CIRCUIT_TIMEOUT: Duration = Duration::from_secs(60); + +const MAX_CONCURRENT_STREAMS_PER_CONNECTION: usize = 10; +const STREAM_TIMEOUT: Duration = Duration::from_secs(60); pub enum In { Reserve { @@ -56,7 +57,7 @@ pub enum In { }, EstablishCircuit { dst_peer_id: PeerId, - send_back: oneshot::Sender>, + to_dial: oneshot::Sender>, }, } @@ -66,7 +67,7 @@ impl fmt::Debug for In { In::Reserve { to_listener: _ } => f.debug_struct("In::Reserve").finish(), In::EstablishCircuit { dst_peer_id, - send_back: _, + to_dial: _, } => f .debug_struct("In::EstablishCircuit") .field("dst_peer_id", dst_peer_id) @@ -82,77 +83,48 @@ pub enum Event { renewal: bool, limit: Option, }, - ReservationReqFailed { - /// Indicates whether the request replaces an existing reservation. - renewal: bool, - error: ConnectionHandlerUpgrErr, - }, /// An outbound circuit has been established. OutboundCircuitEstablished { limit: Option }, - OutboundCircuitReqFailed { - error: ConnectionHandlerUpgrErr, - }, /// An inbound circuit has been established. InboundCircuitEstablished { src_peer_id: PeerId, limit: Option, }, - /// An inbound circuit request has failed. - InboundCircuitReqFailed { - error: ConnectionHandlerUpgrErr, - }, - /// An inbound circuit request has been denied. - InboundCircuitReqDenied { src_peer_id: PeerId }, - /// Denying an inbound circuit request failed. - InboundCircuitReqDenyFailed { - src_peer_id: PeerId, - error: inbound_stop::UpgradeError, - }, } pub struct Handler { local_peer_id: PeerId, remote_peer_id: PeerId, remote_addr: Multiaddr, - /// A pending fatal error that results in the connection being closed. - pending_error: Option< - ConnectionHandlerUpgrErr< - Either, - >, - >, - /// Until when to keep the connection alive. - keep_alive: KeepAlive, /// Queue of events to return when polled. 
queued_events: VecDeque< ConnectionHandlerEvent< - ::OutboundProtocol, - ::OutboundOpenInfo, - ::OutEvent, - ::Error, + ::OutboundProtocol, + ::OutboundOpenInfo, + ::ToBehaviour, >, >, - reservation: Reservation, + pending_streams: VecDeque>>>, + + inflight_reserve_requests: futures_bounded::FuturesTupleSet< + Result, + mpsc::Sender, + >, + + inflight_outbound_connect_requests: futures_bounded::FuturesTupleSet< + Result, + oneshot::Sender>, + >, + + inflight_inbound_circuit_requests: + futures_bounded::FuturesSet>, - /// Tracks substreams lent out to the transport. - /// - /// Contains a [`futures::future::Future`] for each lend out substream that - /// resolves once the substream is dropped. - /// - /// Once all substreams are dropped and this handler has no other work, - /// [`KeepAlive::Until`] can be set, allowing the connection to be closed - /// eventually. - alive_lend_out_substreams: FuturesUnordered>, - - circuit_deny_futs: - HashMap>>, - - /// Futures that try to send errors to the transport. - /// - /// We may drop errors if this handler ends up in a terminal state (by returning - /// [`ConnectionHandlerEvent::Close`]). 
- send_error_futs: FuturesUnordered>, + inflight_outbound_circuit_deny_requests: + futures_bounded::FuturesSet>, + + reservation: Reservation, } impl Handler { @@ -162,429 +134,288 @@ impl Handler { remote_peer_id, remote_addr, queued_events: Default::default(), - pending_error: Default::default(), + pending_streams: Default::default(), + inflight_reserve_requests: futures_bounded::FuturesTupleSet::new( + STREAM_TIMEOUT, + MAX_CONCURRENT_STREAMS_PER_CONNECTION, + ), + inflight_inbound_circuit_requests: futures_bounded::FuturesSet::new( + STREAM_TIMEOUT, + MAX_CONCURRENT_STREAMS_PER_CONNECTION, + ), + inflight_outbound_connect_requests: futures_bounded::FuturesTupleSet::new( + STREAM_TIMEOUT, + MAX_CONCURRENT_STREAMS_PER_CONNECTION, + ), + inflight_outbound_circuit_deny_requests: futures_bounded::FuturesSet::new( + DENYING_CIRCUIT_TIMEOUT, + MAX_NUMBER_DENYING_CIRCUIT, + ), reservation: Reservation::None, - alive_lend_out_substreams: Default::default(), - circuit_deny_futs: Default::default(), - send_error_futs: Default::default(), - keep_alive: KeepAlive::Yes, } } - fn on_fully_negotiated_inbound( - &mut self, - FullyNegotiatedInbound { - protocol: inbound_circuit, - .. - }: FullyNegotiatedInbound< - ::InboundProtocol, - ::InboundOpenInfo, - >, - ) { - match &mut self.reservation { - Reservation::Accepted { pending_msgs, .. } - | Reservation::Renewing { pending_msgs, .. 
} => { - let src_peer_id = inbound_circuit.src_peer_id(); - let limit = inbound_circuit.limit(); - - let (tx, rx) = oneshot::channel(); - self.alive_lend_out_substreams.push(rx); - let connection = super::ConnectionState::new_inbound(inbound_circuit, tx); - - pending_msgs.push_back(transport::ToListenerMsg::IncomingRelayedConnection { - // stream: connection, - stream: super::Connection { state: connection }, - src_peer_id, - relay_peer_id: self.remote_peer_id, - relay_addr: self.remote_addr.clone(), - }); - - self.queued_events.push_back(ConnectionHandlerEvent::Custom( - Event::InboundCircuitEstablished { src_peer_id, limit }, - )); - } - Reservation::None => { - let src_peer_id = inbound_circuit.src_peer_id(); + fn insert_to_deny_futs(&mut self, circuit: inbound_stop::Circuit) { + let src_peer_id = circuit.src_peer_id(); - if self.circuit_deny_futs.len() == MAX_NUMBER_DENYING_CIRCUIT - && !self.circuit_deny_futs.contains_key(&src_peer_id) - { - log::warn!( - "Dropping inbound circuit request to be denied from {:?} due to exceeding limit.", - src_peer_id, - ); - } else if self - .circuit_deny_futs - .insert( - src_peer_id, - inbound_circuit.deny(proto::Status::NO_RESERVATION).boxed(), - ) - .is_some() - { - log::warn!( - "Dropping existing inbound circuit request to be denied from {:?} in favor of new one.", - src_peer_id - ) - } - } + if self + .inflight_outbound_circuit_deny_requests + .try_push(circuit.deny(proto::Status::NO_RESERVATION)) + .is_err() + { + tracing::warn!( + peer=%src_peer_id, + "Dropping existing inbound circuit request to be denied from peer in favor of new one" + ) } } - fn on_fully_negotiated_outbound( - &mut self, - FullyNegotiatedOutbound { - protocol: output, - info, - }: FullyNegotiatedOutbound< - ::OutboundProtocol, - ::OutboundOpenInfo, - >, - ) { - match (output, info) { - // Outbound reservation - ( - outbound_hop::Output::Reservation { - renewal_timeout, - addrs, - limit, - }, - OutboundOpenInfo::Reserve { to_listener }, - ) => { - 
let event = self.reservation.accepted( - renewal_timeout, - addrs, - to_listener, - self.local_peer_id, - limit, - ); + fn make_new_reservation(&mut self, to_listener: Sender) { + let (sender, receiver) = oneshot::channel(); - self.queued_events - .push_back(ConnectionHandlerEvent::Custom(event)); - } + self.pending_streams.push_back(sender); + self.queued_events + .push_back(ConnectionHandlerEvent::OutboundSubstreamRequest { + protocol: SubstreamProtocol::new(ReadyUpgrade::new(HOP_PROTOCOL_NAME), ()), + }); + let result = self.inflight_reserve_requests.try_push( + async move { + let stream = receiver + .await + .map_err(|_| io::Error::from(io::ErrorKind::BrokenPipe))? + .map_err(into_reserve_error)?; - // Outbound circuit - ( - outbound_hop::Output::Circuit { - substream, - read_buffer, - limit, - }, - OutboundOpenInfo::Connect { send_back }, - ) => { - let (tx, rx) = oneshot::channel(); - match send_back.send(Ok(super::Connection { - state: super::ConnectionState::new_outbound(substream, read_buffer, tx), - })) { - Ok(()) => { - self.alive_lend_out_substreams.push(rx); - self.queued_events.push_back(ConnectionHandlerEvent::Custom( - Event::OutboundCircuitEstablished { limit }, - )); - } - Err(_) => debug!( - "Oneshot to `client::transport::Dial` future dropped. \ - Dropping established relayed connection to {:?}.", - self.remote_peer_id, - ), - } - } + let reservation = outbound_hop::make_reservation(stream).await?; - _ => unreachable!(), + Ok(reservation) + }, + to_listener, + ); + + if result.is_err() { + tracing::warn!("Dropping in-flight reservation request because we are at capacity"); } } - fn on_listen_upgrade_error( + fn establish_new_circuit( &mut self, - ListenUpgradeError { error, .. 
}: ListenUpgradeError< - ::InboundOpenInfo, - ::InboundProtocol, - >, + to_dial: oneshot::Sender>, + dst_peer_id: PeerId, ) { - let non_fatal_error = match error { - ConnectionHandlerUpgrErr::Timeout => ConnectionHandlerUpgrErr::Timeout, - ConnectionHandlerUpgrErr::Timer => ConnectionHandlerUpgrErr::Timer, - ConnectionHandlerUpgrErr::Upgrade(upgrade::UpgradeError::Select( - upgrade::NegotiationError::Failed, - )) => ConnectionHandlerUpgrErr::Upgrade(upgrade::UpgradeError::Select( - upgrade::NegotiationError::Failed, - )), - ConnectionHandlerUpgrErr::Upgrade(upgrade::UpgradeError::Select( - upgrade::NegotiationError::ProtocolError(e), - )) => { - self.pending_error = Some(ConnectionHandlerUpgrErr::Upgrade( - upgrade::UpgradeError::Select(upgrade::NegotiationError::ProtocolError(e)), - )); - return; - } - ConnectionHandlerUpgrErr::Upgrade(upgrade::UpgradeError::Apply( - inbound_stop::UpgradeError::Fatal(error), - )) => { - self.pending_error = Some(ConnectionHandlerUpgrErr::Upgrade( - upgrade::UpgradeError::Apply(Either::Left(error)), - )); - return; - } - }; - - self.queued_events.push_back(ConnectionHandlerEvent::Custom( - Event::InboundCircuitReqFailed { - error: non_fatal_error, + let (sender, receiver) = oneshot::channel(); + + self.pending_streams.push_back(sender); + self.queued_events + .push_back(ConnectionHandlerEvent::OutboundSubstreamRequest { + protocol: SubstreamProtocol::new(ReadyUpgrade::new(HOP_PROTOCOL_NAME), ()), + }); + let result = self.inflight_outbound_connect_requests.try_push( + async move { + let stream = receiver + .await + .map_err(|_| io::Error::from(io::ErrorKind::BrokenPipe))? 
+ .map_err(into_connect_error)?; + + outbound_hop::open_circuit(stream, dst_peer_id).await }, - )); - } - - fn on_dial_upgrade_error( - &mut self, - DialUpgradeError { - info: open_info, - error, - }: DialUpgradeError< - ::OutboundOpenInfo, - ::OutboundProtocol, - >, - ) { - match open_info { - OutboundOpenInfo::Reserve { mut to_listener } => { - let non_fatal_error = match error { - ConnectionHandlerUpgrErr::Timeout => ConnectionHandlerUpgrErr::Timeout, - ConnectionHandlerUpgrErr::Timer => ConnectionHandlerUpgrErr::Timer, - ConnectionHandlerUpgrErr::Upgrade(upgrade::UpgradeError::Select( - upgrade::NegotiationError::Failed, - )) => ConnectionHandlerUpgrErr::Upgrade(upgrade::UpgradeError::Select( - upgrade::NegotiationError::Failed, - )), - ConnectionHandlerUpgrErr::Upgrade(upgrade::UpgradeError::Select( - upgrade::NegotiationError::ProtocolError(e), - )) => { - self.pending_error = Some(ConnectionHandlerUpgrErr::Upgrade( - upgrade::UpgradeError::Select( - upgrade::NegotiationError::ProtocolError(e), - ), - )); - return; - } - ConnectionHandlerUpgrErr::Upgrade(upgrade::UpgradeError::Apply(error)) => { - match error { - outbound_hop::UpgradeError::Fatal(error) => { - self.pending_error = Some(ConnectionHandlerUpgrErr::Upgrade( - upgrade::UpgradeError::Apply(Either::Right(error)), - )); - return; - } - outbound_hop::UpgradeError::ReservationFailed(error) => { - ConnectionHandlerUpgrErr::Upgrade(upgrade::UpgradeError::Apply( - error, - )) - } - outbound_hop::UpgradeError::CircuitFailed(_) => { - unreachable!( - "Do not emitt `CircuitFailed` for outgoing reservation." - ) - } - } - } - }; - - if self.pending_error.is_none() { - self.send_error_futs.push( - async move { - let _ = to_listener - .send(transport::ToListenerMsg::Reservation(Err(()))) - .await; - } - .boxed(), - ); - } else { - // Fatal error occured, thus handler is closing as quickly as possible. - // Transport is notified through dropping `to_listener`. 
- } - - let renewal = self.reservation.failed(); - self.queued_events.push_back(ConnectionHandlerEvent::Custom( - Event::ReservationReqFailed { - renewal, - error: non_fatal_error, - }, - )); - } - OutboundOpenInfo::Connect { send_back } => { - let non_fatal_error = match error { - ConnectionHandlerUpgrErr::Timeout => ConnectionHandlerUpgrErr::Timeout, - ConnectionHandlerUpgrErr::Timer => ConnectionHandlerUpgrErr::Timer, - ConnectionHandlerUpgrErr::Upgrade(upgrade::UpgradeError::Select( - upgrade::NegotiationError::Failed, - )) => ConnectionHandlerUpgrErr::Upgrade(upgrade::UpgradeError::Select( - upgrade::NegotiationError::Failed, - )), - ConnectionHandlerUpgrErr::Upgrade(upgrade::UpgradeError::Select( - upgrade::NegotiationError::ProtocolError(e), - )) => { - self.pending_error = Some(ConnectionHandlerUpgrErr::Upgrade( - upgrade::UpgradeError::Select( - upgrade::NegotiationError::ProtocolError(e), - ), - )); - return; - } - ConnectionHandlerUpgrErr::Upgrade(upgrade::UpgradeError::Apply(error)) => { - match error { - outbound_hop::UpgradeError::Fatal(error) => { - self.pending_error = Some(ConnectionHandlerUpgrErr::Upgrade( - upgrade::UpgradeError::Apply(Either::Right(error)), - )); - return; - } - outbound_hop::UpgradeError::CircuitFailed(error) => { - ConnectionHandlerUpgrErr::Upgrade(upgrade::UpgradeError::Apply( - error, - )) - } - outbound_hop::UpgradeError::ReservationFailed(_) => { - unreachable!( - "Do not emitt `ReservationFailed` for outgoing circuit." 
- ) - } - } - } - }; - - let _ = send_back.send(Err(())); + to_dial, + ); - self.queued_events.push_back(ConnectionHandlerEvent::Custom( - Event::OutboundCircuitReqFailed { - error: non_fatal_error, - }, - )); - } + if result.is_err() { + tracing::warn!("Dropping in-flight connect request because we are at capacity") } } } impl ConnectionHandler for Handler { - type InEvent = In; - type OutEvent = Event; - type Error = ConnectionHandlerUpgrErr< - Either, - >; - type InboundProtocol = inbound_stop::Upgrade; - type OutboundProtocol = outbound_hop::Upgrade; - type OutboundOpenInfo = OutboundOpenInfo; + type FromBehaviour = In; + type ToBehaviour = Event; + type InboundProtocol = ReadyUpgrade; type InboundOpenInfo = (); + type OutboundProtocol = ReadyUpgrade; + type OutboundOpenInfo = (); fn listen_protocol(&self) -> SubstreamProtocol { - SubstreamProtocol::new(inbound_stop::Upgrade {}, ()) + SubstreamProtocol::new(ReadyUpgrade::new(STOP_PROTOCOL_NAME), ()) } - fn on_behaviour_event(&mut self, event: Self::InEvent) { + fn on_behaviour_event(&mut self, event: Self::FromBehaviour) { match event { In::Reserve { to_listener } => { - self.queued_events - .push_back(ConnectionHandlerEvent::OutboundSubstreamRequest { - protocol: SubstreamProtocol::new( - outbound_hop::Upgrade::Reserve, - OutboundOpenInfo::Reserve { to_listener }, - ), - }); + self.make_new_reservation(to_listener); } In::EstablishCircuit { - send_back, + to_dial, dst_peer_id, } => { - self.queued_events - .push_back(ConnectionHandlerEvent::OutboundSubstreamRequest { - protocol: SubstreamProtocol::new( - outbound_hop::Upgrade::Connect { dst_peer_id }, - OutboundOpenInfo::Connect { send_back }, - ), - }); + self.establish_new_circuit(to_dial, dst_peer_id); } } } - fn connection_keep_alive(&self) -> KeepAlive { - self.keep_alive + fn connection_keep_alive(&self) -> bool { + self.reservation.is_some() } + #[tracing::instrument(level = "trace", name = "ConnectionHandler::poll", skip(self, cx))] fn poll( &mut self, 
cx: &mut Context<'_>, ) -> Poll< - ConnectionHandlerEvent< - Self::OutboundProtocol, - Self::OutboundOpenInfo, - Self::OutEvent, - Self::Error, - >, + ConnectionHandlerEvent, > { - // Check for a pending (fatal) error. - if let Some(err) = self.pending_error.take() { - // The handler will not be polled again by the `Swarm`. - return Poll::Ready(ConnectionHandlerEvent::Close(err)); - } + loop { + // Reservations + match self.inflight_reserve_requests.poll_unpin(cx) { + Poll::Ready(( + Ok(Ok(outbound_hop::Reservation { + renewal_timeout, + addrs, + limit, + })), + to_listener, + )) => { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + self.reservation.accepted( + renewal_timeout, + addrs, + to_listener, + self.local_peer_id, + limit, + ), + )); + } + Poll::Ready((Ok(Err(error)), mut to_listener)) => { + if let Err(e) = + to_listener.try_send(transport::ToListenerMsg::Reservation(Err(error))) + { + tracing::debug!("Unable to send error to listener: {}", e.into_send_error()) + } + self.reservation.failed(); + continue; + } + Poll::Ready((Err(futures_bounded::Timeout { .. }), mut to_listener)) => { + if let Err(e) = + to_listener.try_send(transport::ToListenerMsg::Reservation(Err( + outbound_hop::ReserveError::Io(io::ErrorKind::TimedOut.into()), + ))) + { + tracing::debug!("Unable to send error to listener: {}", e.into_send_error()) + } + self.reservation.failed(); + continue; + } + Poll::Pending => {} + } - // Return queued events. 
- if let Some(event) = self.queued_events.pop_front() { - return Poll::Ready(event); - } + // Circuits + match self.inflight_outbound_connect_requests.poll_unpin(cx) { + Poll::Ready(( + Ok(Ok(outbound_hop::Circuit { + limit, + read_buffer, + stream, + })), + to_dialer, + )) => { + if to_dialer + .send(Ok(priv_client::Connection { + state: priv_client::ConnectionState::new_outbound(stream, read_buffer), + })) + .is_err() + { + tracing::debug!( + "Dropping newly established circuit because the listener is gone" + ); + continue; + } - if let Poll::Ready(Some(protocol)) = self.reservation.poll(cx) { - return Poll::Ready(ConnectionHandlerEvent::OutboundSubstreamRequest { protocol }); - } + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + Event::OutboundCircuitEstablished { limit }, + )); + } + Poll::Ready((Ok(Err(error)), to_dialer)) => { + let _ = to_dialer.send(Err(error)); + continue; + } + Poll::Ready((Err(futures_bounded::Timeout { .. }), to_dialer)) => { + if to_dialer + .send(Err(outbound_hop::ConnectError::Io( + io::ErrorKind::TimedOut.into(), + ))) + .is_err() + { + tracing::debug!("Unable to send error to dialer") + } + self.reservation.failed(); + continue; + } + Poll::Pending => {} + } - // Deny incoming circuit requests. - let maybe_event = - self.circuit_deny_futs - .iter_mut() - .find_map(|(src_peer_id, fut)| match fut.poll_unpin(cx) { - Poll::Ready(Ok(())) => Some(( - *src_peer_id, - Event::InboundCircuitReqDenied { - src_peer_id: *src_peer_id, - }, - )), - Poll::Ready(Err(error)) => Some(( - *src_peer_id, - Event::InboundCircuitReqDenyFailed { - src_peer_id: *src_peer_id, - error, - }, - )), - Poll::Pending => None, - }); - if let Some((src_peer_id, event)) = maybe_event { - self.circuit_deny_futs.remove(&src_peer_id); - return Poll::Ready(ConnectionHandlerEvent::Custom(event)); - } + // Return queued events. + if let Some(event) = self.queued_events.pop_front() { + return Poll::Ready(event); + } - // Send errors to transport. 
- while let Poll::Ready(Some(())) = self.send_error_futs.poll_next_unpin(cx) {} + match self.inflight_inbound_circuit_requests.poll_unpin(cx) { + Poll::Ready(Ok(Ok(circuit))) => match &mut self.reservation { + Reservation::Accepted { pending_msgs, .. } + | Reservation::Renewing { pending_msgs, .. } => { + let src_peer_id = circuit.src_peer_id(); + let limit = circuit.limit(); + + let connection = super::ConnectionState::new_inbound(circuit); + + pending_msgs.push_back( + transport::ToListenerMsg::IncomingRelayedConnection { + stream: super::Connection { state: connection }, + src_peer_id, + relay_peer_id: self.remote_peer_id, + relay_addr: self.remote_addr.clone(), + }, + ); + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + Event::InboundCircuitEstablished { src_peer_id, limit }, + )); + } + Reservation::None => { + self.insert_to_deny_futs(circuit); + continue; + } + }, + Poll::Ready(Ok(Err(e))) => { + tracing::debug!("An inbound circuit request failed: {e}"); + continue; + } + Poll::Ready(Err(e)) => { + tracing::debug!("An inbound circuit request timed out: {e}"); + continue; + } + Poll::Pending => {} + } - // Check status of lend out substreams. - loop { - match self.alive_lend_out_substreams.poll_next_unpin(cx) { - Poll::Ready(Some(Err(oneshot::Canceled))) => {} - Poll::Ready(Some(Ok(v))) => void::unreachable(v), - Poll::Ready(None) | Poll::Pending => break, + if let Poll::Ready(Some(to_listener)) = self.reservation.poll(cx) { + self.make_new_reservation(to_listener); + continue; } - } - // Update keep-alive handling. - if matches!(self.reservation, Reservation::None) - && self.alive_lend_out_substreams.is_empty() - && self.circuit_deny_futs.is_empty() - { - match self.keep_alive { - KeepAlive::Yes => { - self.keep_alive = KeepAlive::Until(Instant::now() + Duration::from_secs(10)); + // Deny incoming circuit requests. 
+ match self.inflight_outbound_circuit_deny_requests.poll_unpin(cx) { + Poll::Ready(Ok(Ok(()))) => continue, + Poll::Ready(Ok(Err(error))) => { + tracing::debug!("Denying inbound circuit failed: {error}"); + continue; + } + Poll::Ready(Err(futures_bounded::Timeout { .. })) => { + tracing::debug!("Denying inbound circuit timed out"); + continue; } - KeepAlive::Until(_) => {} - KeepAlive::No => panic!("Handler never sets KeepAlive::No."), + Poll::Pending => {} } - } else { - self.keep_alive = KeepAlive::Yes; - } - Poll::Pending + return Poll::Pending; + } } fn on_connection_event( @@ -597,19 +428,30 @@ impl ConnectionHandler for Handler { >, ) { match event { - ConnectionEvent::FullyNegotiatedInbound(fully_negotiated_inbound) => { - self.on_fully_negotiated_inbound(fully_negotiated_inbound) - } - ConnectionEvent::FullyNegotiatedOutbound(fully_negotiated_outbound) => { - self.on_fully_negotiated_outbound(fully_negotiated_outbound) + ConnectionEvent::FullyNegotiatedInbound(FullyNegotiatedInbound { + protocol: stream, + .. 
+ }) => { + if self + .inflight_inbound_circuit_requests + .try_push(inbound_stop::handle_open_circuit(stream)) + .is_err() + { + tracing::warn!("Dropping inbound stream because we are at capacity") + } } - ConnectionEvent::ListenUpgradeError(listen_upgrade_error) => { - self.on_listen_upgrade_error(listen_upgrade_error) + ConnectionEvent::FullyNegotiatedOutbound(ev) => { + if let Some(next) = self.pending_streams.pop_front() { + let _ = next.send(Ok(ev.protocol)); + } } - ConnectionEvent::DialUpgradeError(dial_upgrade_error) => { - self.on_dial_upgrade_error(dial_upgrade_error) + ConnectionEvent::ListenUpgradeError(ev) => void::unreachable(ev.error), + ConnectionEvent::DialUpgradeError(ev) => { + if let Some(next) = self.pending_streams.pop_front() { + let _ = next.send(Err(ev.error)); + } } - ConnectionEvent::AddressChange(_) => {} + _ => {} } } } @@ -651,7 +493,7 @@ impl Reservation { .into_iter() .map(|a| { a.with(Protocol::P2pCircuit) - .with(Protocol::P2p(local_peer_id.into())) + .with(Protocol::P2p(local_peer_id)) }) .collect(), }, @@ -666,18 +508,13 @@ impl Reservation { Event::ReservationReqAccepted { renewal, limit } } - /// Marks the current reservation as failed. - /// - /// Returns whether the reservation request was a renewal. - fn failed(&mut self) -> bool { - let renewal = matches!( - self, - Reservation::Accepted { .. } | Reservation::Renewing { .. } - ); + fn is_some(&self) -> bool { + matches!(self, Self::Accepted { .. } | Self::Renewing { .. }) + } + /// Marks the current reservation as failed. 
+ fn failed(&mut self) { *self = Reservation::None; - - renewal } fn forward_messages_to_transport_listener(&mut self, cx: &mut Context<'_>) { @@ -693,12 +530,12 @@ impl Reservation { if let Err(e) = to_listener .start_send(pending_msgs.pop_front().expect("Called !is_empty().")) { - debug!("Failed to sent pending message to listener: {:?}", e); + tracing::debug!("Failed to sent pending message to listener: {:?}", e); *self = Reservation::None; } } Poll::Ready(Err(e)) => { - debug!("Channel to listener failed: {:?}", e); + tracing::debug!("Channel to listener failed: {:?}", e); *self = Reservation::None; } Poll::Pending => {} @@ -710,7 +547,7 @@ impl Reservation { fn poll( &mut self, cx: &mut Context<'_>, - ) -> Poll>> { + ) -> Poll>> { self.forward_messages_to_transport_listener(cx); // Check renewal timeout if any. @@ -722,10 +559,7 @@ impl Reservation { } => match renewal_timeout.poll_unpin(cx) { Poll::Ready(()) => ( Reservation::Renewing { pending_msgs }, - Poll::Ready(Some(SubstreamProtocol::new( - outbound_hop::Upgrade::Reserve, - OutboundOpenInfo::Reserve { to_listener }, - ))), + Poll::Ready(Some(to_listener)), ), Poll::Pending => ( Reservation::Accepted { @@ -744,11 +578,24 @@ impl Reservation { } } -pub enum OutboundOpenInfo { - Reserve { - to_listener: mpsc::Sender, - }, - Connect { - send_back: oneshot::Sender>, - }, +fn into_reserve_error(e: StreamUpgradeError) -> outbound_hop::ReserveError { + match e { + StreamUpgradeError::Timeout => { + outbound_hop::ReserveError::Io(io::ErrorKind::TimedOut.into()) + } + StreamUpgradeError::Apply(never) => void::unreachable(never), + StreamUpgradeError::NegotiationFailed => outbound_hop::ReserveError::Unsupported, + StreamUpgradeError::Io(e) => outbound_hop::ReserveError::Io(e), + } +} + +fn into_connect_error(e: StreamUpgradeError) -> outbound_hop::ConnectError { + match e { + StreamUpgradeError::Timeout => { + outbound_hop::ConnectError::Io(io::ErrorKind::TimedOut.into()) + } + StreamUpgradeError::Apply(never) => 
void::unreachable(never), + StreamUpgradeError::NegotiationFailed => outbound_hop::ConnectError::Unsupported, + StreamUpgradeError::Io(e) => outbound_hop::ConnectError::Io(e), + } } diff --git a/protocols/relay/src/priv_client/transport.rs b/protocols/relay/src/priv_client/transport.rs index 6dceefd8661..7147f0b5e55 100644 --- a/protocols/relay/src/priv_client/transport.rs +++ b/protocols/relay/src/priv_client/transport.rs @@ -21,6 +21,8 @@ use crate::multiaddr_ext::MultiaddrExt; use crate::priv_client::Connection; +use crate::protocol::outbound_hop; +use crate::protocol::outbound_hop::{ConnectError, ReserveError}; use crate::RequestId; use futures::channel::mpsc; use futures::channel::oneshot; @@ -48,13 +50,14 @@ use thiserror::Error; /// 1. Establish relayed connections by dialing `/p2p-circuit` addresses. /// /// ``` -/// # use libp2p_core::{Multiaddr, multiaddr::{Protocol}, Transport, PeerId}; +/// # use libp2p_core::{Multiaddr, multiaddr::{Protocol}, Transport}; /// # use libp2p_core::transport::memory::MemoryTransport; /// # use libp2p_core::transport::choice::OrTransport; /// # use libp2p_relay as relay; +/// # use libp2p_identity::PeerId; /// let actual_transport = MemoryTransport::default(); /// let (relay_transport, behaviour) = relay::client::new( -/// PeerId::random(), +/// PeerId::random() /// ); /// let mut transport = OrTransport::new(relay_transport, actual_transport); /// # let relay_id = PeerId::random(); @@ -70,22 +73,23 @@ use thiserror::Error; /// 3. Listen for incoming relayed connections via specific relay. 
/// /// ``` -/// # use libp2p_core::{Multiaddr, multiaddr::{Protocol}, Transport, PeerId}; +/// # use libp2p_core::{Multiaddr, multiaddr::{Protocol}, transport::ListenerId, Transport}; /// # use libp2p_core::transport::memory::MemoryTransport; /// # use libp2p_core::transport::choice::OrTransport; /// # use libp2p_relay as relay; +/// # use libp2p_identity::PeerId; /// # let relay_id = PeerId::random(); /// # let local_peer_id = PeerId::random(); /// let actual_transport = MemoryTransport::default(); /// let (relay_transport, behaviour) = relay::client::new( -/// local_peer_id, +/// local_peer_id /// ); /// let mut transport = OrTransport::new(relay_transport, actual_transport); /// let relay_addr = Multiaddr::empty() /// .with(Protocol::Memory(40)) // Relay address. /// .with(Protocol::P2p(relay_id.into())) // Relay peer id. /// .with(Protocol::P2pCircuit); // Signal to listen via remote relay node. -/// transport.listen_on(relay_addr).unwrap(); +/// transport.listen_on(ListenerId::next(), relay_addr).unwrap(); /// ``` pub struct Transport { to_behaviour: mpsc::Sender, @@ -95,7 +99,7 @@ pub struct Transport { impl Transport { pub(crate) fn new() -> (Self, mpsc::Receiver) { - let (to_behaviour, from_transport) = mpsc::channel(0); + let (to_behaviour, from_transport) = mpsc::channel(1000); let transport = Transport { to_behaviour, pending_to_behaviour: VecDeque::new(), @@ -111,7 +115,11 @@ impl libp2p_core::Transport for Transport { type ListenerUpgrade = Ready>; type Dial = BoxFuture<'static, Result>; - fn listen_on(&mut self, addr: Multiaddr) -> Result> { + fn listen_on( + &mut self, + listener_id: ListenerId, + addr: Multiaddr, + ) -> Result<(), TransportError> { let (relay_peer_id, relay_addr) = match parse_relayed_multiaddr(addr)? 
{ RelayedMultiaddr { relay_peer_id: None, @@ -138,7 +146,6 @@ impl libp2p_core::Transport for Transport { to_listener, }); - let listener_id = ListenerId::new(); let listener = Listener { listener_id, queued_events: Default::default(), @@ -146,7 +153,7 @@ impl libp2p_core::Transport for Transport { is_closed: false, }; self.listeners.push(listener); - Ok(listener_id) + Ok(()) } fn remove_listener(&mut self, id: ListenerId) -> bool { @@ -184,7 +191,8 @@ impl libp2p_core::Transport for Transport { send_back: tx, }) .await?; - let stream = rx.await?.map_err(|()| Error::Connect)?; + let stream = rx.await??; + Ok(stream) } .boxed()) @@ -264,9 +272,7 @@ fn parse_relayed_multiaddr(addr: Multiaddr) -> Result { - let peer_id = PeerId::from_multihash(hash).map_err(|_| Error::InvalidHash)?; - + Protocol::P2p(peer_id) => { if before_circuit { if relayed_multiaddr.relay_peer_id.is_some() { return Err(Error::MalformedMultiaddr.into()); @@ -298,7 +304,7 @@ fn parse_relayed_multiaddr(addr: Multiaddr) -> Result::Item>, @@ -339,13 +345,10 @@ impl Stream for Listener { return Poll::Ready(None); } - let msg = match ready!(self.from_behaviour.poll_next_unpin(cx)) { - Some(msg) => msg, - None => { - // Sender of `from_behaviour` has been dropped, signaling listener to close. - self.close(Ok(())); - continue; - } + let Some(msg) = ready!(self.from_behaviour.poll_next_unpin(cx)) else { + // Sender of `from_behaviour` has been dropped, signaling listener to close. 
+ self.close(Ok(())); + continue; }; match msg { @@ -375,10 +378,10 @@ impl Stream for Listener { upgrade: ready(Ok(stream)), listener_id, local_addr: relay_addr.with(Protocol::P2pCircuit), - send_back_addr: Protocol::P2p(src_peer_id.into()).into(), + send_back_addr: Protocol::P2p(src_peer_id).into(), }) } - ToListenerMsg::Reservation(Err(())) => self.close(Err(Error::Reservation)), + ToListenerMsg::Reservation(Err(e)) => self.close(Err(Error::Reservation(e))), }; } } @@ -406,9 +409,9 @@ pub enum Error { #[error("One of the provided multiaddresses is malformed.")] MalformedMultiaddr, #[error("Failed to get Reservation.")] - Reservation, + Reservation(#[from] ReserveError), #[error("Failed to connect to destination.")] - Connect, + Connect(#[from] ConnectError), } impl From for TransportError { @@ -419,15 +422,16 @@ impl From for TransportError { /// Message from the [`Transport`] to the [`Behaviour`](crate::Behaviour) /// [`NetworkBehaviour`](libp2p_swarm::NetworkBehaviour). -pub enum TransportToBehaviourMsg { +pub(crate) enum TransportToBehaviourMsg { /// Dial destination node via relay node. + #[allow(dead_code)] DialReq { request_id: RequestId, relay_addr: Multiaddr, relay_peer_id: PeerId, dst_addr: Option, dst_peer_id: PeerId, - send_back: oneshot::Sender>, + send_back: oneshot::Sender>, }, /// Listen for incoming relayed connections via relay node. 
ListenReq { @@ -439,7 +443,7 @@ pub enum TransportToBehaviourMsg { #[allow(clippy::large_enum_variant)] pub enum ToListenerMsg { - Reservation(Result), + Reservation(Result), IncomingRelayedConnection { stream: Connection, src_peer_id: PeerId, diff --git a/protocols/relay/src/protocol.rs b/protocols/relay/src/protocol.rs index 4376f64cc0b..b94151259cd 100644 --- a/protocols/relay/src/protocol.rs +++ b/protocols/relay/src/protocol.rs @@ -31,7 +31,7 @@ pub const HOP_PROTOCOL_NAME: StreamProtocol = pub const STOP_PROTOCOL_NAME: StreamProtocol = StreamProtocol::new("/libp2p/circuit/relay/0.2.0/stop"); -const MAX_MESSAGE_SIZE: usize = 4096; +pub(crate) const MAX_MESSAGE_SIZE: usize = 4096; #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub struct Limit { @@ -39,6 +39,16 @@ pub struct Limit { data_in_bytes: Option, } +impl Limit { + pub fn duration(&self) -> Option { + self.duration + } + + pub fn data_in_bytes(&self) -> Option { + self.data_in_bytes + } +} + impl From for Limit { fn from(limit: proto::Limit) -> Self { Limit { diff --git a/protocols/relay/src/protocol/inbound_hop.rs b/protocols/relay/src/protocol/inbound_hop.rs index 1af258fc25b..41fe2675dce 100644 --- a/protocols/relay/src/protocol/inbound_hop.rs +++ b/protocols/relay/src/protocol/inbound_hop.rs @@ -18,100 +18,28 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::proto; -use crate::protocol::{HOP_PROTOCOL_NAME, MAX_MESSAGE_SIZE}; +use std::time::{Duration, SystemTime}; + use asynchronous_codec::{Framed, FramedParts}; use bytes::Bytes; -use futures::{future::BoxFuture, prelude::*}; -use instant::{Duration, SystemTime}; -use libp2p_core::{upgrade, Multiaddr}; -use libp2p_identity::PeerId; -use libp2p_swarm::{NegotiatedSubstream, StreamProtocol}; -use std::convert::TryInto; -use std::iter; +use either::Either; +use futures::prelude::*; use thiserror::Error; -pub struct Upgrade { - pub reservation_duration: Duration, - pub max_circuit_duration: Duration, - pub max_circuit_bytes: u64, -} - -impl upgrade::UpgradeInfo for Upgrade { - type Info = StreamProtocol; - type InfoIter = iter::Once; - - fn protocol_info(&self) -> Self::InfoIter { - iter::once(HOP_PROTOCOL_NAME) - } -} - -impl upgrade::InboundUpgrade for Upgrade { - type Output = Req; - type Error = UpgradeError; - type Future = BoxFuture<'static, Result>; - - fn upgrade_inbound(self, substream: NegotiatedSubstream, _: Self::Info) -> Self::Future { - let mut substream = Framed::new( - substream, - quick_protobuf_codec::Codec::new(MAX_MESSAGE_SIZE), - ); - - async move { - let proto::HopMessage { - type_pb, - peer, - reservation: _, - limit: _, - status: _, - } = substream - .next() - .await - .ok_or(FatalUpgradeError::StreamClosed)??; - - let req = match type_pb { - proto::HopMessageType::RESERVE => Req::Reserve(ReservationReq { - substream, - reservation_duration: self.reservation_duration, - max_circuit_duration: self.max_circuit_duration, - max_circuit_bytes: self.max_circuit_bytes, - }), - proto::HopMessageType::CONNECT => { - let dst = PeerId::from_bytes(&peer.ok_or(FatalUpgradeError::MissingPeer)?.id) - .map_err(|_| FatalUpgradeError::ParsePeerId)?; - Req::Connect(CircuitReq { dst, substream }) - } - proto::HopMessageType::STATUS => { - return Err(FatalUpgradeError::UnexpectedTypeStatus.into()) - } - }; - - Ok(req) - } - .boxed() - } -} - -#[derive(Debug, 
Error)] -pub enum UpgradeError { - #[error("Fatal")] - Fatal(#[from] FatalUpgradeError), -} +use libp2p_core::Multiaddr; +use libp2p_identity::PeerId; +use libp2p_swarm::Stream; -impl From for UpgradeError { - fn from(error: quick_protobuf_codec::Error) -> Self { - Self::Fatal(error.into()) - } -} +use crate::proto; +use crate::proto::message_v2::pb::mod_HopMessage::Type; +use crate::protocol::MAX_MESSAGE_SIZE; #[derive(Debug, Error)] -pub enum FatalUpgradeError { +pub enum Error { #[error(transparent)] Codec(#[from] quick_protobuf_codec::Error), #[error("Stream closed")] StreamClosed, - #[error("Failed to parse response type field.")] - ParseTypeField, #[error("Failed to parse peer id.")] ParsePeerId, #[error("Expected 'peer' field to be set.")] @@ -120,20 +48,22 @@ pub enum FatalUpgradeError { UnexpectedTypeStatus, } -pub enum Req { - Reserve(ReservationReq), - Connect(CircuitReq), -} - pub struct ReservationReq { - substream: Framed>, + substream: Framed>, reservation_duration: Duration, max_circuit_duration: Duration, max_circuit_bytes: u64, } impl ReservationReq { - pub async fn accept(self, addrs: Vec) -> Result<(), UpgradeError> { + pub async fn accept(self, addrs: Vec) -> Result<(), Error> { + if addrs.is_empty() { + tracing::debug!( + "Accepting relay reservation without providing external addresses of local node. \ + Thus the remote node might not be able to advertise its relayed address." 
+ ) + } + let msg = proto::HopMessage { type_pb: proto::HopMessageType::STATUS, peer: None, @@ -160,7 +90,7 @@ impl ReservationReq { self.send(msg).await } - pub async fn deny(self, status: proto::Status) -> Result<(), UpgradeError> { + pub async fn deny(self, status: proto::Status) -> Result<(), Error> { let msg = proto::HopMessage { type_pb: proto::HopMessageType::STATUS, peer: None, @@ -172,7 +102,7 @@ impl ReservationReq { self.send(msg).await } - async fn send(mut self, msg: proto::HopMessage) -> Result<(), UpgradeError> { + async fn send(mut self, msg: proto::HopMessage) -> Result<(), Error> { self.substream.send(msg).await?; self.substream.flush().await?; self.substream.close().await?; @@ -183,7 +113,7 @@ impl ReservationReq { pub struct CircuitReq { dst: PeerId, - substream: Framed>, + substream: Framed>, } impl CircuitReq { @@ -191,7 +121,7 @@ impl CircuitReq { self.dst } - pub async fn accept(mut self) -> Result<(NegotiatedSubstream, Bytes), UpgradeError> { + pub async fn accept(mut self) -> Result<(Stream, Bytes), Error> { let msg = proto::HopMessage { type_pb: proto::HopMessageType::STATUS, peer: None, @@ -216,7 +146,7 @@ impl CircuitReq { Ok((io, read_buffer.freeze())) } - pub async fn deny(mut self, status: proto::Status) -> Result<(), UpgradeError> { + pub async fn deny(mut self, status: proto::Status) -> Result<(), Error> { let msg = proto::HopMessage { type_pb: proto::HopMessageType::STATUS, peer: None, @@ -235,3 +165,48 @@ impl CircuitReq { Ok(()) } } + +pub(crate) async fn handle_inbound_request( + io: Stream, + reservation_duration: Duration, + max_circuit_duration: Duration, + max_circuit_bytes: u64, +) -> Result, Error> { + let mut substream = Framed::new(io, quick_protobuf_codec::Codec::new(MAX_MESSAGE_SIZE)); + + let res = substream.next().await; + + if let None | Some(Err(_)) = res { + return Err(Error::StreamClosed); + } + + let proto::HopMessage { + type_pb, + peer, + reservation: _, + limit: _, + status: _, + } = 
res.unwrap().expect("should be ok"); + + let req = match type_pb { + Type::RESERVE => Either::Left(ReservationReq { + substream, + reservation_duration, + max_circuit_duration, + max_circuit_bytes, + }), + Type::CONNECT => { + let peer_id_res = match peer { + Some(r) => PeerId::from_bytes(&r.id), + None => return Err(Error::MissingPeer), + }; + + let dst = peer_id_res.map_err(|_| Error::ParsePeerId)?; + + Either::Right(CircuitReq { dst, substream }) + } + Type::STATUS => return Err(Error::UnexpectedTypeStatus), + }; + + Ok(req) +} diff --git a/protocols/relay/src/protocol/inbound_stop.rs b/protocols/relay/src/protocol/inbound_stop.rs index bfffb6a1e9c..b698a5ff769 100644 --- a/protocols/relay/src/protocol/inbound_stop.rs +++ b/protocols/relay/src/protocol/inbound_stop.rs @@ -19,89 +19,62 @@ // DEALINGS IN THE SOFTWARE. use crate::proto; -use crate::protocol::{self, MAX_MESSAGE_SIZE, STOP_PROTOCOL_NAME}; +use crate::protocol::{self, MAX_MESSAGE_SIZE}; use asynchronous_codec::{Framed, FramedParts}; use bytes::Bytes; -use futures::{future::BoxFuture, prelude::*}; -use libp2p_core::upgrade; +use futures::prelude::*; use libp2p_identity::PeerId; -use libp2p_swarm::{NegotiatedSubstream, StreamProtocol}; -use std::iter; +use libp2p_swarm::Stream; +use std::io; use thiserror::Error; -pub struct Upgrade {} - -impl upgrade::UpgradeInfo for Upgrade { - type Info = StreamProtocol; - type InfoIter = iter::Once; - - fn protocol_info(&self) -> Self::InfoIter { - iter::once(STOP_PROTOCOL_NAME) - } -} - -impl upgrade::InboundUpgrade for Upgrade { - type Output = Circuit; - type Error = UpgradeError; - type Future = BoxFuture<'static, Result>; - - fn upgrade_inbound(self, substream: NegotiatedSubstream, _: Self::Info) -> Self::Future { - let mut substream = Framed::new( - substream, - quick_protobuf_codec::Codec::new(MAX_MESSAGE_SIZE), - ); - - async move { - let proto::StopMessage { - type_pb, - peer, - limit, - status: _, - } = substream - .next() - .await - 
.ok_or(FatalUpgradeError::StreamClosed)??; - - match type_pb { - proto::StopMessageType::CONNECT => { - let src_peer_id = - PeerId::from_bytes(&peer.ok_or(FatalUpgradeError::MissingPeer)?.id) - .map_err(|_| FatalUpgradeError::ParsePeerId)?; - Ok(Circuit { - substream, - src_peer_id, - limit: limit.map(Into::into), - }) - } - proto::StopMessageType::STATUS => { - Err(FatalUpgradeError::UnexpectedTypeStatus.into()) - } - } +pub(crate) async fn handle_open_circuit(io: Stream) -> Result { + let mut substream = Framed::new(io, quick_protobuf_codec::Codec::new(MAX_MESSAGE_SIZE)); + + let proto::StopMessage { + type_pb, + peer, + limit, + status: _, + } = substream + .next() + .await + .ok_or(Error::Io(io::ErrorKind::UnexpectedEof.into()))??; + + match type_pb { + proto::StopMessageType::CONNECT => { + let src_peer_id = PeerId::from_bytes(&peer.ok_or(ProtocolViolation::MissingPeer)?.id) + .map_err(|_| ProtocolViolation::ParsePeerId)?; + Ok(Circuit { + substream, + src_peer_id, + limit: limit.map(Into::into), + }) + } + proto::StopMessageType::STATUS => { + Err(Error::Protocol(ProtocolViolation::UnexpectedTypeStatus)) } - .boxed() } } #[derive(Debug, Error)] -pub enum UpgradeError { - #[error("Fatal")] - Fatal(#[from] FatalUpgradeError), +pub(crate) enum Error { + #[error("Protocol error")] + Protocol(#[from] ProtocolViolation), + #[error("IO error")] + Io(#[from] io::Error), } -impl From for UpgradeError { +impl From for Error { fn from(error: quick_protobuf_codec::Error) -> Self { - Self::Fatal(error.into()) + Self::Protocol(ProtocolViolation::Codec(error)) } } #[derive(Debug, Error)] -pub enum FatalUpgradeError { +pub(crate) enum ProtocolViolation { #[error(transparent)] Codec(#[from] quick_protobuf_codec::Error), - #[error("Stream closed")] - StreamClosed, - #[error("Failed to parse response type field.")] - ParseTypeField, #[error("Failed to parse peer id.")] ParsePeerId, #[error("Expected 'peer' field to be set.")] @@ -110,22 +83,22 @@ pub enum FatalUpgradeError { 
UnexpectedTypeStatus, } -pub struct Circuit { - substream: Framed>, +pub(crate) struct Circuit { + substream: Framed>, src_peer_id: PeerId, limit: Option, } impl Circuit { - pub fn src_peer_id(&self) -> PeerId { + pub(crate) fn src_peer_id(&self) -> PeerId { self.src_peer_id } - pub fn limit(&self) -> Option { + pub(crate) fn limit(&self) -> Option { self.limit } - pub async fn accept(mut self) -> Result<(NegotiatedSubstream, Bytes), UpgradeError> { + pub(crate) async fn accept(mut self) -> Result<(Stream, Bytes), Error> { let msg = proto::StopMessage { type_pb: proto::StopMessageType::STATUS, peer: None, @@ -149,7 +122,7 @@ impl Circuit { Ok((io, read_buffer.freeze())) } - pub async fn deny(mut self, status: proto::Status) -> Result<(), UpgradeError> { + pub(crate) async fn deny(mut self, status: proto::Status) -> Result<(), Error> { let msg = proto::StopMessage { type_pb: proto::StopMessageType::STATUS, peer: None, @@ -157,10 +130,12 @@ impl Circuit { status: Some(status), }; - self.send(msg).await.map_err(Into::into) + self.send(msg).await?; + + Ok(()) } - async fn send(&mut self, msg: proto::StopMessage) -> Result<(), quick_protobuf_codec::Error> { + async fn send(&mut self, msg: proto::StopMessage) -> Result<(), Error> { self.substream.send(msg).await?; self.substream.flush().await?; diff --git a/protocols/relay/src/protocol/outbound_hop.rs b/protocols/relay/src/protocol/outbound_hop.rs index 07d09157404..3ae824be167 100644 --- a/protocols/relay/src/protocol/outbound_hop.rs +++ b/protocols/relay/src/protocol/outbound_hop.rs @@ -18,204 +18,24 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::proto; -use crate::protocol::{Limit, HOP_PROTOCOL_NAME, MAX_MESSAGE_SIZE}; +use std::io; +use std::time::{Duration, SystemTime}; + use asynchronous_codec::{Framed, FramedParts}; use bytes::Bytes; -use futures::{future::BoxFuture, prelude::*}; +use futures::prelude::*; use futures_timer::Delay; -use instant::{Duration, SystemTime}; -use libp2p_core::{upgrade, Multiaddr}; -use libp2p_identity::PeerId; -use libp2p_swarm::{NegotiatedSubstream, StreamProtocol}; -use std::convert::TryFrom; -use std::iter; use thiserror::Error; -pub enum Upgrade { - Reserve, - Connect { dst_peer_id: PeerId }, -} - -impl upgrade::UpgradeInfo for Upgrade { - type Info = StreamProtocol; - type InfoIter = iter::Once; - - fn protocol_info(&self) -> Self::InfoIter { - iter::once(HOP_PROTOCOL_NAME) - } -} - -impl upgrade::OutboundUpgrade for Upgrade { - type Output = Output; - type Error = UpgradeError; - type Future = BoxFuture<'static, Result>; - - fn upgrade_outbound(self, substream: NegotiatedSubstream, _: Self::Info) -> Self::Future { - let msg = match self { - Upgrade::Reserve => proto::HopMessage { - type_pb: proto::HopMessageType::RESERVE, - peer: None, - reservation: None, - limit: None, - status: None, - }, - Upgrade::Connect { dst_peer_id } => proto::HopMessage { - type_pb: proto::HopMessageType::CONNECT, - peer: Some(proto::Peer { - id: dst_peer_id.to_bytes(), - addrs: vec![], - }), - reservation: None, - limit: None, - status: None, - }, - }; - - let mut substream = Framed::new( - substream, - quick_protobuf_codec::Codec::new(MAX_MESSAGE_SIZE), - ); - - async move { - substream.send(msg).await?; - let proto::HopMessage { - type_pb, - peer: _, - reservation, - limit, - status, - } = substream - .next() - .await - .ok_or(FatalUpgradeError::StreamClosed)??; - - match type_pb { - proto::HopMessageType::CONNECT => { - return Err(FatalUpgradeError::UnexpectedTypeConnect.into()) - } - proto::HopMessageType::RESERVE => { - return 
Err(FatalUpgradeError::UnexpectedTypeReserve.into()) - } - proto::HopMessageType::STATUS => {} - } - - let limit = limit.map(Into::into); - - let output = match self { - Upgrade::Reserve => { - match status - .ok_or(UpgradeError::Fatal(FatalUpgradeError::MissingStatusField))? - { - proto::Status::OK => {} - proto::Status::RESERVATION_REFUSED => { - return Err(ReservationFailedReason::Refused.into()) - } - proto::Status::RESOURCE_LIMIT_EXCEEDED => { - return Err(ReservationFailedReason::ResourceLimitExceeded.into()) - } - s => return Err(FatalUpgradeError::UnexpectedStatus(s).into()), - } - - let reservation = - reservation.ok_or(FatalUpgradeError::MissingReservationField)?; - - if reservation.addrs.is_empty() { - return Err(FatalUpgradeError::NoAddressesInReservation.into()); - } - - let addrs = reservation - .addrs - .into_iter() - .map(|b| Multiaddr::try_from(b.to_vec())) - .collect::, _>>() - .map_err(|_| FatalUpgradeError::InvalidReservationAddrs)?; - - let renewal_timeout = reservation - .expire - .checked_sub( - SystemTime::now() - .duration_since(SystemTime::UNIX_EPOCH) - .unwrap() - .as_secs(), - ) - // Renew the reservation after 3/4 of the reservation expiration timestamp. - .and_then(|duration| duration.checked_sub(duration / 4)) - .map(Duration::from_secs) - .map(Delay::new) - .ok_or(FatalUpgradeError::InvalidReservationExpiration)?; - - substream.close().await?; - - Output::Reservation { - renewal_timeout, - addrs, - limit, - } - } - Upgrade::Connect { .. } => { - match status - .ok_or(UpgradeError::Fatal(FatalUpgradeError::MissingStatusField))? 
- { - proto::Status::OK => {} - proto::Status::RESOURCE_LIMIT_EXCEEDED => { - return Err(CircuitFailedReason::ResourceLimitExceeded.into()) - } - proto::Status::CONNECTION_FAILED => { - return Err(CircuitFailedReason::ConnectionFailed.into()) - } - proto::Status::NO_RESERVATION => { - return Err(CircuitFailedReason::NoReservation.into()) - } - proto::Status::PERMISSION_DENIED => { - return Err(CircuitFailedReason::PermissionDenied.into()) - } - s => return Err(FatalUpgradeError::UnexpectedStatus(s).into()), - } - - let FramedParts { - io, - read_buffer, - write_buffer, - .. - } = substream.into_parts(); - assert!( - write_buffer.is_empty(), - "Expect a flushed Framed to have empty write buffer." - ); - - Output::Circuit { - substream: io, - read_buffer: read_buffer.freeze(), - limit, - } - } - }; - - Ok(output) - } - .boxed() - } -} - -#[derive(Debug, Error)] -pub enum UpgradeError { - #[error("Reservation failed")] - ReservationFailed(#[from] ReservationFailedReason), - #[error("Circuit failed")] - CircuitFailed(#[from] CircuitFailedReason), - #[error("Fatal")] - Fatal(#[from] FatalUpgradeError), -} +use libp2p_core::Multiaddr; +use libp2p_identity::PeerId; +use libp2p_swarm::Stream; -impl From for UpgradeError { - fn from(error: quick_protobuf_codec::Error) -> Self { - Self::Fatal(error.into()) - } -} +use crate::protocol::{Limit, MAX_MESSAGE_SIZE}; +use crate::{proto, HOP_PROTOCOL_NAME}; #[derive(Debug, Error)] -pub enum CircuitFailedReason { +pub enum ConnectError { #[error("Remote reported resource limit exceeded.")] ResourceLimitExceeded, #[error("Relay failed to connect to destination.")] @@ -224,22 +44,32 @@ pub enum CircuitFailedReason { NoReservation, #[error("Remote denied permission.")] PermissionDenied, + #[error("Remote does not support the `{HOP_PROTOCOL_NAME}` protocol")] + Unsupported, + #[error("IO error")] + Io(#[from] io::Error), + #[error("Protocol error")] + Protocol(#[from] ProtocolViolation), } #[derive(Debug, Error)] -pub enum 
ReservationFailedReason { +pub enum ReserveError { #[error("Reservation refused.")] Refused, #[error("Remote reported resource limit exceeded.")] ResourceLimitExceeded, + #[error("Remote does not support the `{HOP_PROTOCOL_NAME}` protocol")] + Unsupported, + #[error("IO error")] + Io(#[from] io::Error), + #[error("Protocol error")] + Protocol(#[from] ProtocolViolation), } #[derive(Debug, Error)] -pub enum FatalUpgradeError { +pub enum ProtocolViolation { #[error(transparent)] Codec(#[from] quick_protobuf_codec::Error), - #[error("Stream closed")] - StreamClosed, #[error("Expected 'status' field to be set.")] MissingStatusField, #[error("Expected 'reservation' field to be set.")] @@ -250,27 +80,222 @@ pub enum FatalUpgradeError { InvalidReservationExpiration, #[error("Invalid addresses in reservation.")] InvalidReservationAddrs, - #[error("Failed to parse response type field.")] - ParseTypeField, #[error("Unexpected message type 'connect'")] UnexpectedTypeConnect, #[error("Unexpected message type 'reserve'")] UnexpectedTypeReserve, - #[error("Failed to parse response type field.")] - ParseStatusField, #[error("Unexpected message status '{0:?}'")] UnexpectedStatus(proto::Status), } -pub enum Output { - Reservation { - renewal_timeout: Delay, - addrs: Vec, - limit: Option, - }, - Circuit { - substream: NegotiatedSubstream, - read_buffer: Bytes, - limit: Option, - }, +impl From for ConnectError { + fn from(e: quick_protobuf_codec::Error) -> Self { + ConnectError::Protocol(ProtocolViolation::Codec(e)) + } +} + +impl From for ReserveError { + fn from(e: quick_protobuf_codec::Error) -> Self { + ReserveError::Protocol(ProtocolViolation::Codec(e)) + } +} + +pub(crate) struct Reservation { + pub(crate) renewal_timeout: Delay, + pub(crate) addrs: Vec, + pub(crate) limit: Option, +} + +pub(crate) struct Circuit { + pub(crate) stream: Stream, + pub(crate) read_buffer: Bytes, + pub(crate) limit: Option, +} + +pub(crate) async fn make_reservation(stream: Stream) -> Result { + let 
msg = proto::HopMessage { + type_pb: proto::HopMessageType::RESERVE, + peer: None, + reservation: None, + limit: None, + status: None, + }; + let mut substream = Framed::new(stream, quick_protobuf_codec::Codec::new(MAX_MESSAGE_SIZE)); + + substream.send(msg).await?; + + substream.close().await?; + + let proto::HopMessage { + type_pb, + peer: _, + reservation, + limit, + status, + } = substream + .next() + .await + .ok_or(ReserveError::Io(io::ErrorKind::UnexpectedEof.into()))??; + + match type_pb { + proto::HopMessageType::CONNECT => { + return Err(ReserveError::Protocol( + ProtocolViolation::UnexpectedTypeConnect, + )); + } + proto::HopMessageType::RESERVE => { + return Err(ReserveError::Protocol( + ProtocolViolation::UnexpectedTypeReserve, + )); + } + proto::HopMessageType::STATUS => {} + } + + let limit = limit.map(Into::into); + + match status.ok_or(ProtocolViolation::MissingStatusField)? { + proto::Status::OK => {} + proto::Status::RESERVATION_REFUSED => { + return Err(ReserveError::Refused); + } + proto::Status::RESOURCE_LIMIT_EXCEEDED => { + return Err(ReserveError::ResourceLimitExceeded); + } + s => { + return Err(ReserveError::Protocol(ProtocolViolation::UnexpectedStatus( + s, + ))) + } + } + + let reservation = reservation.ok_or(ReserveError::Protocol( + ProtocolViolation::MissingReservationField, + ))?; + + if reservation.addrs.is_empty() { + return Err(ReserveError::Protocol( + ProtocolViolation::NoAddressesInReservation, + )); + } + + let addrs = reservation + .addrs + .into_iter() + .map(|b| Multiaddr::try_from(b.to_vec())) + .collect::, _>>() + .map_err(|_| ReserveError::Protocol(ProtocolViolation::InvalidReservationAddrs))?; + + let renewal_timeout = reservation + .expire + .checked_sub( + SystemTime::now() + .duration_since(SystemTime::UNIX_EPOCH) + .unwrap() + .as_secs(), + ) + // Renew the reservation after 3/4 of the reservation expiration timestamp. 
+ .and_then(|duration| duration.checked_sub(duration / 4)) + .map(Duration::from_secs) + .map(Delay::new) + .ok_or(ReserveError::Protocol( + ProtocolViolation::InvalidReservationExpiration, + ))?; + + Ok(Reservation { + renewal_timeout, + addrs, + limit, + }) +} + +pub(crate) async fn open_circuit( + protocol: Stream, + dst_peer_id: PeerId, +) -> Result { + let msg = proto::HopMessage { + type_pb: proto::HopMessageType::CONNECT, + peer: Some(proto::Peer { + id: dst_peer_id.to_bytes(), + addrs: vec![], + }), + reservation: None, + limit: None, + status: None, + }; + + let mut substream = Framed::new(protocol, quick_protobuf_codec::Codec::new(MAX_MESSAGE_SIZE)); + + substream.send(msg).await?; + + let proto::HopMessage { + type_pb, + peer: _, + reservation: _, + limit, + status, + } = substream + .next() + .await + .ok_or(ConnectError::Io(io::ErrorKind::UnexpectedEof.into()))??; + + match type_pb { + proto::HopMessageType::CONNECT => { + return Err(ConnectError::Protocol( + ProtocolViolation::UnexpectedTypeConnect, + )); + } + proto::HopMessageType::RESERVE => { + return Err(ConnectError::Protocol( + ProtocolViolation::UnexpectedTypeReserve, + )); + } + proto::HopMessageType::STATUS => {} + } + + match status { + Some(proto::Status::OK) => {} + Some(proto::Status::RESOURCE_LIMIT_EXCEEDED) => { + return Err(ConnectError::ResourceLimitExceeded); + } + Some(proto::Status::CONNECTION_FAILED) => { + return Err(ConnectError::ConnectionFailed); + } + Some(proto::Status::NO_RESERVATION) => { + return Err(ConnectError::NoReservation); + } + Some(proto::Status::PERMISSION_DENIED) => { + return Err(ConnectError::PermissionDenied); + } + Some(s) => { + return Err(ConnectError::Protocol(ProtocolViolation::UnexpectedStatus( + s, + ))); + } + None => { + return Err(ConnectError::Protocol( + ProtocolViolation::MissingStatusField, + )); + } + } + + let limit = limit.map(Into::into); + + let FramedParts { + io, + read_buffer, + write_buffer, + .. 
+ } = substream.into_parts(); + assert!( + write_buffer.is_empty(), + "Expect a flushed Framed to have empty write buffer." + ); + + let circuit = Circuit { + stream: io, + read_buffer: read_buffer.freeze(), + limit, + }; + + Ok(circuit) } diff --git a/protocols/relay/src/protocol/outbound_stop.rs b/protocols/relay/src/protocol/outbound_stop.rs index 782808acc57..525ebc10821 100644 --- a/protocols/relay/src/protocol/outbound_stop.rs +++ b/protocols/relay/src/protocol/outbound_stop.rs @@ -18,146 +18,140 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::proto; -use crate::protocol::{MAX_MESSAGE_SIZE, STOP_PROTOCOL_NAME}; +use std::io; +use std::time::Duration; + use asynchronous_codec::{Framed, FramedParts}; use bytes::Bytes; -use futures::{future::BoxFuture, prelude::*}; -use libp2p_core::upgrade; -use libp2p_identity::PeerId; -use libp2p_swarm::{NegotiatedSubstream, StreamProtocol}; -use std::convert::TryInto; -use std::iter; -use std::time::Duration; +use futures::prelude::*; use thiserror::Error; -pub struct Upgrade { - pub src_peer_id: PeerId, - pub max_circuit_duration: Duration, - pub max_circuit_bytes: u64, -} - -impl upgrade::UpgradeInfo for Upgrade { - type Info = StreamProtocol; - type InfoIter = iter::Once; - - fn protocol_info(&self) -> Self::InfoIter { - iter::once(STOP_PROTOCOL_NAME) - } -} +use libp2p_identity::PeerId; +use libp2p_swarm::Stream; -impl upgrade::OutboundUpgrade for Upgrade { - type Output = (NegotiatedSubstream, Bytes); - type Error = UpgradeError; - type Future = BoxFuture<'static, Result>; - - fn upgrade_outbound(self, substream: NegotiatedSubstream, _: Self::Info) -> Self::Future { - let msg = proto::StopMessage { - type_pb: proto::StopMessageType::CONNECT, - peer: Some(proto::Peer { - id: self.src_peer_id.to_bytes(), - addrs: vec![], - }), - limit: Some(proto::Limit { - duration: Some( - self.max_circuit_duration - .as_secs() - .try_into() - 
.expect("`max_circuit_duration` not to exceed `u32::MAX`."), - ), - data: Some(self.max_circuit_bytes), - }), - status: None, - }; - - let mut substream = Framed::new( - substream, - quick_protobuf_codec::Codec::new(MAX_MESSAGE_SIZE), - ); - - async move { - substream.send(msg).await?; - let proto::StopMessage { - type_pb, - peer: _, - limit: _, - status, - } = substream - .next() - .await - .ok_or(FatalUpgradeError::StreamClosed)??; - - match type_pb { - proto::StopMessageType::CONNECT => { - return Err(FatalUpgradeError::UnexpectedTypeConnect.into()) - } - proto::StopMessageType::STATUS => {} - } - - match status.ok_or(UpgradeError::Fatal(FatalUpgradeError::MissingStatusField))? { - proto::Status::OK => {} - proto::Status::RESOURCE_LIMIT_EXCEEDED => { - return Err(CircuitFailedReason::ResourceLimitExceeded.into()) - } - proto::Status::PERMISSION_DENIED => { - return Err(CircuitFailedReason::PermissionDenied.into()) - } - s => return Err(FatalUpgradeError::UnexpectedStatus(s).into()), - } - - let FramedParts { - io, - read_buffer, - write_buffer, - .. - } = substream.into_parts(); - assert!( - write_buffer.is_empty(), - "Expect a flushed Framed to have an empty write buffer." 
- ); - - Ok((io, read_buffer.freeze())) - } - .boxed() - } -} +use crate::protocol::MAX_MESSAGE_SIZE; +use crate::{proto, STOP_PROTOCOL_NAME}; #[derive(Debug, Error)] -pub enum UpgradeError { - #[error("Circuit failed")] - CircuitFailed(#[from] CircuitFailedReason), - #[error("Fatal")] - Fatal(#[from] FatalUpgradeError), -} - -impl From for UpgradeError { - fn from(error: quick_protobuf_codec::Error) -> Self { - Self::Fatal(error.into()) - } -} - -#[derive(Debug, Error)] -pub enum CircuitFailedReason { +pub enum Error { #[error("Remote reported resource limit exceeded.")] ResourceLimitExceeded, #[error("Remote reported permission denied.")] PermissionDenied, + #[error("Remote does not support the `{STOP_PROTOCOL_NAME}` protocol")] + Unsupported, + #[error("IO error")] + Io(#[source] io::Error), + #[error("Protocol error")] + Protocol(#[from] ProtocolViolation), +} + +impl Error { + pub(crate) fn to_status(&self) -> proto::Status { + match self { + Error::ResourceLimitExceeded => proto::Status::RESOURCE_LIMIT_EXCEEDED, + Error::PermissionDenied => proto::Status::PERMISSION_DENIED, + Error::Unsupported => proto::Status::CONNECTION_FAILED, + Error::Io(_) => proto::Status::CONNECTION_FAILED, + Error::Protocol( + ProtocolViolation::UnexpectedStatus(_) | ProtocolViolation::UnexpectedTypeConnect, + ) => proto::Status::UNEXPECTED_MESSAGE, + Error::Protocol(_) => proto::Status::MALFORMED_MESSAGE, + } + } } +/// Depicts all forms of protocol violations. 
#[derive(Debug, Error)] -pub enum FatalUpgradeError { +pub enum ProtocolViolation { #[error(transparent)] Codec(#[from] quick_protobuf_codec::Error), - #[error("Stream closed")] - StreamClosed, #[error("Expected 'status' field to be set.")] MissingStatusField, - #[error("Failed to parse response type field.")] - ParseTypeField, #[error("Unexpected message type 'connect'")] UnexpectedTypeConnect, - #[error("Failed to parse response type field.")] - ParseStatusField, #[error("Unexpected message status '{0:?}'")] UnexpectedStatus(proto::Status), } + +impl From for Error { + fn from(e: quick_protobuf_codec::Error) -> Self { + Error::Protocol(ProtocolViolation::Codec(e)) + } +} + +/// Attempts to _connect_ to a peer via the given stream. +pub(crate) async fn connect( + io: Stream, + src_peer_id: PeerId, + max_duration: Duration, + max_bytes: u64, +) -> Result { + let msg = proto::StopMessage { + type_pb: proto::StopMessageType::CONNECT, + peer: Some(proto::Peer { + id: src_peer_id.to_bytes(), + addrs: vec![], + }), + limit: Some(proto::Limit { + duration: Some( + max_duration + .as_secs() + .try_into() + .expect("`max_circuit_duration` not to exceed `u32::MAX`."), + ), + data: Some(max_bytes), + }), + status: None, + }; + + let mut substream = Framed::new(io, quick_protobuf_codec::Codec::new(MAX_MESSAGE_SIZE)); + + substream.send(msg).await?; + + let proto::StopMessage { + type_pb, + peer: _, + limit: _, + status, + } = substream + .next() + .await + .ok_or(Error::Io(io::ErrorKind::UnexpectedEof.into()))??; + + match type_pb { + proto::StopMessageType::CONNECT => { + return Err(Error::Protocol(ProtocolViolation::UnexpectedTypeConnect)) + } + proto::StopMessageType::STATUS => {} + } + + match status { + Some(proto::Status::OK) => {} + Some(proto::Status::RESOURCE_LIMIT_EXCEEDED) => return Err(Error::ResourceLimitExceeded), + Some(proto::Status::PERMISSION_DENIED) => return Err(Error::PermissionDenied), + Some(s) => return 
Err(Error::Protocol(ProtocolViolation::UnexpectedStatus(s))), + None => return Err(Error::Protocol(ProtocolViolation::MissingStatusField)), + } + + let FramedParts { + io, + read_buffer, + write_buffer, + .. + } = substream.into_parts(); + assert!( + write_buffer.is_empty(), + "Expect a flushed Framed to have an empty write buffer." + ); + + Ok(Circuit { + dst_stream: io, + dst_pending_data: read_buffer.freeze(), + }) +} + +pub(crate) struct Circuit { + pub(crate) dst_stream: Stream, + pub(crate) dst_pending_data: Bytes, +} diff --git a/protocols/relay/src/v2.rs b/protocols/relay/src/v2.rs deleted file mode 100644 index ab222062eec..00000000000 --- a/protocols/relay/src/v2.rs +++ /dev/null @@ -1,163 +0,0 @@ -// Copyright 2021 Protocol Labs. -// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - -//! Implementation of the [libp2p circuit relay v2 -//! specification](https://github.com/libp2p/specs/blob/master/relay/circuit-v2.md). 
- -pub mod client { - #[deprecated(since = "0.15.0", note = "Use libp2p_relay::client::Event instead.")] - pub type Event = crate::client::Event; - - #[deprecated( - since = "0.15.0", - note = "Use libp2p_relay::client::Behaviour instead." - )] - pub type Client = crate::client::Behaviour; - - #[deprecated( - since = "0.15.0", - note = "Use libp2p_relay::client::Connection instead." - )] - pub type RelayedConnection = crate::client::Connection; - - pub mod transport { - use futures::future::BoxFuture; - - #[deprecated( - since = "0.15.0", - note = "Use libp2p_relay::client::Transport instead." - )] - pub type ClientTransport = crate::client::Transport; - - #[deprecated( - since = "0.15.0", - note = "RelayListener will become crate-private in the future - as it shouldn't be required by end users." - )] - pub type RelayListener = crate::priv_client::transport::Listener; - - #[deprecated( - since = "0.15.0", - note = "RelayedDial type alias will be deprecated, - users should create the alias themselves if needed." - )] - pub type RelayedDial = BoxFuture< - 'static, - Result, - >; - - #[deprecated( - since = "0.15.0", - note = "Use libp2p_relay::client::transport::Error instead." - )] - pub type RelayError = crate::client::transport::Error; - - #[deprecated( - since = "0.15.0", - note = "TransportToBehaviourMsg will become crate-private in the future - as it shouldn't be required by end users." - )] - pub type TransportToBehaviourMsg = crate::priv_client::transport::TransportToBehaviourMsg; - - #[deprecated( - since = "0.15.0", - note = "ToListenerMsg will become crate-private in the future - as it shouldn't be required by end users." - )] - pub type ToListenerMsg = crate::priv_client::transport::ToListenerMsg; - - #[deprecated( - since = "0.15.0", - note = "Reservation will become crate-private in the future - as it shouldn't be required by end users." 
- )] - pub type Reservation = crate::priv_client::transport::Reservation; - } -} - -pub mod relay { - #[deprecated(since = "0.15.0", note = "Use libp2p_relay::Config instead.")] - pub type Config = crate::Config; - - #[deprecated(since = "0.15.0", note = "Use libp2p_relay::Event instead.")] - pub type Event = crate::Event; - - #[deprecated(since = "0.15.0", note = "Use libp2p_relay::Behaviour instead.")] - pub type Relay = crate::Behaviour; - - #[deprecated(since = "0.15.0", note = "Use libp2p_relay::CircuitId instead.")] - pub type CircuitId = crate::CircuitId; - - pub mod rate_limiter { - use instant::Instant; - use libp2p_core::Multiaddr; - use libp2p_identity::PeerId; - - #[deprecated( - since = "0.15.0", - note = "Use libp2p_relay::behaviour::rate_limiter::RateLimiter instead." - )] - pub trait RateLimiter: Send { - fn try_next(&mut self, peer: PeerId, addr: &Multiaddr, now: Instant) -> bool; - } - - #[allow(deprecated)] - impl RateLimiter for T - where - T: crate::behaviour::rate_limiter::RateLimiter, - { - fn try_next(&mut self, peer: PeerId, addr: &Multiaddr, now: Instant) -> bool { - self.try_next(peer, addr, now) - } - } - } -} - -pub mod protocol { - #[deprecated( - since = "0.15.0", - note = "Use libp2p_relay::inbound::hop::FatalUpgradeError instead." - )] - pub type InboundHopFatalUpgradeError = crate::inbound::hop::FatalUpgradeError; - - #[deprecated( - since = "0.15.0", - note = "Use libp2p_relay::inbound::stop::FatalUpgradeError instead." - )] - pub type InboundStopFatalUpgradeError = crate::inbound::stop::FatalUpgradeError; - - #[deprecated( - since = "0.15.0", - note = "Use libp2p_relay::outbound::hop::FatalUpgradeError instead." - )] - pub type OutboundHopFatalUpgradeError = crate::outbound::hop::FatalUpgradeError; - - #[deprecated( - since = "0.15.0", - note = "Use libp2p_relay::outbound::stop::FatalUpgradeError instead." 
- )] - pub type OutboundStopFatalUpgradeError = crate::outbound::stop::FatalUpgradeError; -} - -#[deprecated( - since = "0.15.0", - note = "RequestId will be deprecated as it isn't used" -)] -pub type RequestId = super::RequestId; diff --git a/protocols/relay/tests/lib.rs b/protocols/relay/tests/lib.rs index 2103893ba12..2b28d5a50cd 100644 --- a/protocols/relay/tests/lib.rs +++ b/protocols/relay/tests/lib.rs @@ -30,16 +30,21 @@ use libp2p_core::transport::{Boxed, MemoryTransport, Transport}; use libp2p_core::upgrade; use libp2p_identity as identity; use libp2p_identity::PeerId; -use libp2p_identity::PublicKey; use libp2p_ping as ping; -use libp2p_plaintext::PlainText2Config; +use libp2p_plaintext as plaintext; use libp2p_relay as relay; -use libp2p_swarm::{AddressScore, NetworkBehaviour, Swarm, SwarmBuilder, SwarmEvent}; +use libp2p_swarm::dial_opts::DialOpts; +use libp2p_swarm::{Config, DialError, NetworkBehaviour, Swarm, SwarmEvent}; +use libp2p_swarm_test::SwarmExt; +use std::error::Error; use std::time::Duration; +use tracing_subscriber::EnvFilter; #[test] fn reservation() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let mut pool = LocalPool::new(); let relay_addr = Multiaddr::empty().with(Protocol::Memory(rand::random::())); @@ -47,11 +52,11 @@ fn reservation() { let relay_peer_id = *relay.local_peer_id(); relay.listen_on(relay_addr.clone()).unwrap(); - relay.add_external_address(relay_addr.clone(), AddressScore::Infinite); + relay.add_external_address(relay_addr.clone()); spawn_swarm_on_pool(&pool, relay); let client_addr = relay_addr - .with(Protocol::P2p(relay_peer_id.into())) + .with(Protocol::P2p(relay_peer_id)) .with(Protocol::P2pCircuit); let mut client = build_client(); let client_peer_id = *client.local_peer_id(); @@ -64,9 +69,7 @@ fn reservation() { // Wait for initial reservation. 
pool.run_until(wait_for_reservation( &mut client, - client_addr - .clone() - .with(Protocol::P2p(client_peer_id.into())), + client_addr.clone().with(Protocol::P2p(client_peer_id)), relay_peer_id, false, // No renewal. )); @@ -74,7 +77,7 @@ fn reservation() { // Wait for renewal. pool.run_until(wait_for_reservation( &mut client, - client_addr.with(Protocol::P2p(client_peer_id.into())), + client_addr.with(Protocol::P2p(client_peer_id)), relay_peer_id, true, // Renewal. )); @@ -82,7 +85,9 @@ fn reservation() { #[test] fn new_reservation_to_same_relay_replaces_old() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let mut pool = LocalPool::new(); let relay_addr = Multiaddr::empty().with(Protocol::Memory(rand::random::())); @@ -90,17 +95,15 @@ fn new_reservation_to_same_relay_replaces_old() { let relay_peer_id = *relay.local_peer_id(); relay.listen_on(relay_addr.clone()).unwrap(); - relay.add_external_address(relay_addr.clone(), AddressScore::Infinite); + relay.add_external_address(relay_addr.clone()); spawn_swarm_on_pool(&pool, relay); let mut client = build_client(); let client_peer_id = *client.local_peer_id(); let client_addr = relay_addr - .with(Protocol::P2p(relay_peer_id.into())) + .with(Protocol::P2p(relay_peer_id)) .with(Protocol::P2pCircuit); - let client_addr_with_peer_id = client_addr - .clone() - .with(Protocol::P2p(client_peer_id.into())); + let client_addr_with_peer_id = client_addr.clone().with(Protocol::P2p(client_peer_id)); let old_listener = client.listen_on(client_addr.clone()).unwrap(); @@ -116,7 +119,7 @@ fn new_reservation_to_same_relay_replaces_old() { )); // Trigger new reservation. 
- let new_listener = client.listen_on(client_addr).unwrap(); + let new_listener = client.listen_on(client_addr.clone()).unwrap(); // Wait for // - listener of old reservation to close @@ -166,6 +169,12 @@ fn new_reservation_to_same_relay_replaces_old() { break; } } + SwarmEvent::ExternalAddrConfirmed { address } => { + assert_eq!( + address, + client_addr.clone().with(Protocol::P2p(client_peer_id)) + ); + } SwarmEvent::Behaviour(ClientEvent::Ping(_)) => {} e => panic!("{e:?}"), } @@ -175,7 +184,9 @@ fn new_reservation_to_same_relay_replaces_old() { #[test] fn connect() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let mut pool = LocalPool::new(); let relay_addr = Multiaddr::empty().with(Protocol::Memory(rand::random::())); @@ -183,15 +194,15 @@ fn connect() { let relay_peer_id = *relay.local_peer_id(); relay.listen_on(relay_addr.clone()).unwrap(); - relay.add_external_address(relay_addr.clone(), AddressScore::Infinite); + relay.add_external_address(relay_addr.clone()); spawn_swarm_on_pool(&pool, relay); let mut dst = build_client(); let dst_peer_id = *dst.local_peer_id(); let dst_addr = relay_addr - .with(Protocol::P2p(relay_peer_id.into())) + .with(Protocol::P2p(relay_peer_id)) .with(Protocol::P2pCircuit) - .with(Protocol::P2p(dst_peer_id.into())); + .with(Protocol::P2p(dst_peer_id)); dst.listen_on(dst_addr.clone()).unwrap(); @@ -222,7 +233,10 @@ async fn connection_established_to( ) { loop { match swarm.select_next_some().await { - SwarmEvent::Dialing(peer_id) if peer_id == relay_peer_id => {} + SwarmEvent::Dialing { + peer_id: Some(peer_id), + .. + } if peer_id == relay_peer_id => {} SwarmEvent::ConnectionEstablished { peer_id, .. } if peer_id == relay_peer_id => {} SwarmEvent::Behaviour(ClientEvent::Ping(ping::Event { peer, .. 
})) if peer == other => { break @@ -239,8 +253,13 @@ async fn connection_established_to( if peer == relay_peer_id => {} SwarmEvent::ConnectionEstablished { peer_id, .. } if peer_id == other => break, SwarmEvent::IncomingConnection { send_back_addr, .. } => { - let peer_id_from_addr = - PeerId::try_from_multiaddr(&send_back_addr).expect("to have /p2p"); + let peer_id_from_addr = send_back_addr + .iter() + .find_map(|protocol| match protocol { + Protocol::P2p(peer_id) => Some(peer_id), + _ => None, + }) + .expect("to have /p2p"); assert_eq!(peer_id_from_addr, other) } @@ -251,7 +270,9 @@ async fn connection_established_to( #[test] fn handle_dial_failure() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let mut pool = LocalPool::new(); let relay_addr = Multiaddr::empty().with(Protocol::Memory(rand::random::())); @@ -260,17 +281,124 @@ fn handle_dial_failure() { let mut client = build_client(); let client_peer_id = *client.local_peer_id(); let client_addr = relay_addr - .with(Protocol::P2p(relay_peer_id.into())) + .with(Protocol::P2p(relay_peer_id)) .with(Protocol::P2pCircuit) - .with(Protocol::P2p(client_peer_id.into())); + .with(Protocol::P2p(client_peer_id)); client.listen_on(client_addr).unwrap(); assert!(!pool.run_until(wait_for_dial(&mut client, relay_peer_id))); } +#[test] +fn propagate_reservation_error_to_listener() { + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); + let mut pool = LocalPool::new(); + + let relay_addr = Multiaddr::empty().with(Protocol::Memory(rand::random::())); + let mut relay = build_relay_with_config(relay::Config { + max_reservations: 0, // Will make us fail to make the reservation + ..relay::Config::default() + }); + let relay_peer_id = *relay.local_peer_id(); + + relay.listen_on(relay_addr.clone()).unwrap(); + relay.add_external_address(relay_addr.clone()); + spawn_swarm_on_pool(&pool, relay); + + 
let client_addr = relay_addr + .with(Protocol::P2p(relay_peer_id)) + .with(Protocol::P2pCircuit); + let mut client = build_client(); + + let reservation_listener = client.listen_on(client_addr.clone()).unwrap(); + + // Wait for connection to relay. + assert!(pool.run_until(wait_for_dial(&mut client, relay_peer_id))); + + let error = pool.run_until(client.wait(|e| match e { + SwarmEvent::ListenerClosed { + listener_id, + reason: Err(e), + .. + } if listener_id == reservation_listener => Some(e), + _ => None, + })); + + let error = error + .source() + .unwrap() + .downcast_ref::() + .unwrap(); + + assert!(matches!( + error, + relay::outbound::hop::ReserveError::ResourceLimitExceeded + )); +} + +#[test] +fn propagate_connect_error_to_unknown_peer_to_dialer() { + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); + let mut pool = LocalPool::new(); + + let relay_addr = Multiaddr::empty().with(Protocol::Memory(rand::random::())); + let mut relay = build_relay(); + let relay_peer_id = *relay.local_peer_id(); + + relay.listen_on(relay_addr.clone()).unwrap(); + relay.add_external_address(relay_addr.clone()); + spawn_swarm_on_pool(&pool, relay); + + let mut src = build_client(); + + let dst_peer_id = PeerId::random(); // We don't have a destination peer in this test, so the CONNECT request will fail. + let dst_addr = relay_addr + .with(Protocol::P2p(relay_peer_id)) + .with(Protocol::P2pCircuit) + .with(Protocol::P2p(dst_peer_id)); + + let opts = DialOpts::from(dst_addr.clone()); + let circuit_connection_id = opts.connection_id(); + + src.dial(opts).unwrap(); + + let (failed_address, error) = pool.run_until(src.wait(|e| match e { + SwarmEvent::OutgoingConnectionError { + connection_id, + error: DialError::Transport(mut errors), + .. 
+ } if connection_id == circuit_connection_id => { + assert_eq!(errors.len(), 1); + Some(errors.remove(0)) + } + _ => None, + })); + + // This is a bit wonky but we need to get the _actual_ source error :) + let error = error + .source() + .unwrap() + .source() + .unwrap() + .downcast_ref::() + .unwrap(); + + assert_eq!(failed_address, dst_addr); + assert!(matches!( + error, + relay::outbound::hop::ConnectError::NoReservation + )); +} + #[test] fn reuse_connection() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let mut pool = LocalPool::new(); let relay_addr = Multiaddr::empty().with(Protocol::Memory(rand::random::())); @@ -278,14 +406,18 @@ fn reuse_connection() { let relay_peer_id = *relay.local_peer_id(); relay.listen_on(relay_addr.clone()).unwrap(); - relay.add_external_address(relay_addr.clone(), AddressScore::Infinite); + relay.add_external_address(relay_addr.clone()); spawn_swarm_on_pool(&pool, relay); let client_addr = relay_addr .clone() - .with(Protocol::P2p(relay_peer_id.into())) + .with(Protocol::P2p(relay_peer_id)) .with(Protocol::P2pCircuit); - let mut client = build_client(); + + // To reuse the connection, we need to ensure it is not shut down due to being idle. + let mut client = build_client_with_config( + Config::with_async_std_executor().with_idle_connection_timeout(Duration::from_secs(1)), + ); let client_peer_id = *client.local_peer_id(); client.dial(relay_addr).unwrap(); @@ -295,68 +427,71 @@ fn reuse_connection() { pool.run_until(wait_for_reservation( &mut client, - client_addr.with(Protocol::P2p(client_peer_id.into())), + client_addr.with(Protocol::P2p(client_peer_id)), relay_peer_id, false, // No renewal. 
)); } fn build_relay() -> Swarm { + build_relay_with_config(relay::Config { + reservation_duration: Duration::from_secs(2), + ..Default::default() + }) +} + +fn build_relay_with_config(config: relay::Config) -> Swarm { let local_key = identity::Keypair::generate_ed25519(); - let local_public_key = local_key.public(); - let local_peer_id = local_public_key.to_peer_id(); + let local_peer_id = local_key.public().to_peer_id(); - let transport = upgrade_transport(MemoryTransport::default().boxed(), local_public_key); + let transport = upgrade_transport(MemoryTransport::default().boxed(), &local_key); - SwarmBuilder::with_async_std_executor( + Swarm::new( transport, Relay { ping: ping::Behaviour::new(ping::Config::new()), - relay: relay::Behaviour::new( - local_peer_id, - relay::Config { - reservation_duration: Duration::from_secs(2), - ..Default::default() - }, - ), + relay: relay::Behaviour::new(local_peer_id, config), }, local_peer_id, + Config::with_async_std_executor(), ) - .build() } fn build_client() -> Swarm { + build_client_with_config(Config::with_async_std_executor()) +} + +fn build_client_with_config(config: Config) -> Swarm { let local_key = identity::Keypair::generate_ed25519(); - let local_public_key = local_key.public(); - let local_peer_id = local_public_key.to_peer_id(); + let local_peer_id = local_key.public().to_peer_id(); let (relay_transport, behaviour) = relay::client::new(local_peer_id); let transport = upgrade_transport( OrTransport::new(relay_transport, MemoryTransport::default()).boxed(), - local_public_key, + &local_key, ); - SwarmBuilder::with_async_std_executor( + Swarm::new( transport, Client { ping: ping::Behaviour::new(ping::Config::new()), relay: behaviour, }, local_peer_id, + config, ) - .build() } fn upgrade_transport( transport: Boxed, - local_public_key: PublicKey, + identity: &identity::Keypair, ) -> Boxed<(PeerId, StreamMuxerBox)> where StreamSink: AsyncRead + AsyncWrite + Send + Unpin + 'static, { transport 
.upgrade(upgrade::Version::V1) - .authenticate(PlainText2Config { local_public_key }) + .authenticate(plaintext::Config::new(identity)) .multiplex(libp2p_yamux::Config::default()) .boxed() } @@ -392,6 +527,9 @@ async fn wait_for_reservation( loop { match client.select_next_some().await { + SwarmEvent::ExternalAddrConfirmed { address } if !is_renewal => { + assert_eq!(address, client_addr); + } SwarmEvent::Behaviour(ClientEvent::Relay( relay::client::Event::ReservationReqAccepted { relay_peer_id: peer_id, @@ -419,7 +557,10 @@ async fn wait_for_reservation( async fn wait_for_dial(client: &mut Swarm, remote: PeerId) -> bool { loop { match client.select_next_some().await { - SwarmEvent::Dialing(peer_id) if peer_id == remote => {} + SwarmEvent::Dialing { + peer_id: Some(peer_id), + .. + } if peer_id == remote => {} SwarmEvent::ConnectionEstablished { peer_id, .. } if peer_id == remote => return true, SwarmEvent::OutgoingConnectionError { peer_id, .. } if peer_id == Some(remote) => { return false diff --git a/protocols/rendezvous/CHANGELOG.md b/protocols/rendezvous/CHANGELOG.md index 8f2bd968e94..e60699da734 100644 --- a/protocols/rendezvous/CHANGELOG.md +++ b/protocols/rendezvous/CHANGELOG.md @@ -1,8 +1,24 @@ -## 0.13.0 - unreleased +## 0.14.0 + + +## 0.13.1 +- Refresh registration upon a change in external addresses. + See [PR 4629]. + +[PR 4629]: https://github.com/libp2p/rust-libp2p/pull/4629 + +## 0.13.0 + +- Changed the signature of the function `client::Behavior::register()`, + it returns `Result<(), RegisterError>` now. + Remove the `Remote` variant from `RegisterError` and instead put the information from `Remote` + directly into the variant from the `Event` enum. + See [PR 4073]. - Raise MSRV to 1.65. See [PR 3715]. +[PR 4073]: https://github.com/libp2p/rust-libp2p/pull/4073 [PR 3715]: https://github.com/libp2p/rust-libp2p/pull/3715 ## 0.12.1 @@ -25,7 +41,7 @@ - Update to `libp2p-swarm` `v0.41.0`. 
-- Replace `Client` and `Server`'s `NetworkBehaviour` implemention `inject_*` methods with the new `on_*` methods. +- Replace `Client` and `Server`'s `NetworkBehaviour` implementation `inject_*` methods with the new `on_*` methods. See [PR 3011]. - Update `rust-version` to reflect the actual MSRV: 1.62.0. See [PR 3090]. diff --git a/protocols/rendezvous/Cargo.toml b/protocols/rendezvous/Cargo.toml index ce5e9dfbeaf..edc0ec8a38a 100644 --- a/protocols/rendezvous/Cargo.toml +++ b/protocols/rendezvous/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-rendezvous" edition = "2021" rust-version = { workspace = true } description = "Rendezvous protocol for libp2p" -version = "0.13.0" +version = "0.14.0" authors = ["The COMIT guys "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -11,34 +11,34 @@ keywords = ["peer-to-peer", "libp2p", "networking"] categories = ["network-programming", "asynchronous"] [dependencies] -asynchronous-codec = "0.6" +asynchronous-codec = { workspace = true } +async-trait = "0.1" bimap = "0.6.3" futures = { version = "0.3", default-features = false, features = ["std"] } futures-timer = "3.0.2" -instant = "0.1.11" +instant = "0.1.12" libp2p-core = { workspace = true } libp2p-swarm = { workspace = true } libp2p-identity = { workspace = true } -log = "0.4" +libp2p-request-response = { workspace = true } quick-protobuf = "0.8" quick-protobuf-codec = { workspace = true } rand = "0.8" thiserror = "1" +tracing = "0.1.37" void = "1" [dev-dependencies] -async-trait = "0.1" -env_logger = "0.10.0" libp2p-swarm = { workspace = true, features = ["macros", "tokio"] } -libp2p-mplex = { workspace = true } libp2p-noise = { workspace = true } libp2p-ping = { workspace = true } libp2p-identify = { workspace = true } -libp2p-yamux = { workspace = true } +libp2p-swarm-test = { path = "../../swarm-test" } libp2p-tcp = { workspace = true, features = ["tokio"] } +libp2p-yamux = { workspace = true } rand = "0.8" -tokio = { version = "1.28", features = [ 
"rt-multi-thread", "time", "macros", "sync", "process", "fs", "net" ] } -libp2p-swarm-test = { workspace = true } +tokio = { version = "1.35", features = [ "rt-multi-thread", "time", "macros", "sync", "process", "fs", "net" ] } +tracing-subscriber = { version = "0.3", features = ["env-filter"] } # Passing arguments to the docsrs builder in order to properly document cfg's. # More information: https://docs.rs/about/builds#cross-compiling @@ -46,3 +46,6 @@ libp2p-swarm-test = { workspace = true } all-features = true rustdoc-args = ["--cfg", "docsrs"] rustc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true diff --git a/protocols/rendezvous/src/client.rs b/protocols/rendezvous/src/client.rs index 324d352c0f7..92d7884758b 100644 --- a/protocols/rendezvous/src/client.rs +++ b/protocols/rendezvous/src/client.rs @@ -18,38 +18,39 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::codec::{Cookie, ErrorCode, Namespace, NewRegistration, Registration, Ttl}; -use crate::handler; -use crate::handler::outbound; -use crate::handler::outbound::OpenInfo; -use crate::substream_handler::{InEvent, SubstreamConnectionHandler}; +use crate::codec::Message::*; +use crate::codec::{Cookie, ErrorCode, Message, Namespace, NewRegistration, Registration, Ttl}; use futures::future::BoxFuture; use futures::future::FutureExt; use futures::stream::FuturesUnordered; use futures::stream::StreamExt; -use instant::Duration; use libp2p_core::{Endpoint, Multiaddr, PeerRecord}; use libp2p_identity::{Keypair, PeerId, SigningError}; -use libp2p_swarm::behaviour::FromSwarm; +use libp2p_request_response::{OutboundRequestId, ProtocolSupport}; use libp2p_swarm::{ - CloseConnection, ConnectionDenied, ConnectionId, ExternalAddresses, NetworkBehaviour, - NotifyHandler, PollParameters, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, + ConnectionDenied, ConnectionId, ExternalAddresses, FromSwarm, NetworkBehaviour, THandler, + THandlerInEvent, 
THandlerOutEvent, ToSwarm, }; -use std::collections::{HashMap, VecDeque}; -use std::iter::FromIterator; +use std::collections::HashMap; +use std::iter; use std::task::{Context, Poll}; -use void::Void; +use std::time::Duration; pub struct Behaviour { - events: VecDeque>>, + inner: libp2p_request_response::Behaviour, + keypair: Keypair, - pending_register_requests: Vec<(Namespace, PeerId, Option)>, + + waiting_for_register: HashMap, + waiting_for_discovery: HashMap)>, /// Hold addresses of all peers that we have discovered so far. /// - /// Storing these internally allows us to assist the [`libp2p_swarm::Swarm`] in dialing by returning addresses from [`NetworkBehaviour::addresses_of_peer`]. + /// Storing these internally allows us to assist the [`libp2p_swarm::Swarm`] in dialing by returning addresses from [`NetworkBehaviour::handle_pending_outbound_connection`]. discovered_peers: HashMap<(PeerId, Namespace), Vec>, + registered_namespaces: HashMap<(PeerId, Namespace), Ttl>, + /// Tracks the expiry of registrations that we have discovered and stored in `discovered_peers` otherwise we have a memory leak. expiring_registrations: FuturesUnordered>, @@ -60,10 +61,16 @@ impl Behaviour { /// Create a new instance of the rendezvous [`NetworkBehaviour`]. pub fn new(keypair: Keypair) -> Self { Self { - events: Default::default(), + inner: libp2p_request_response::Behaviour::with_codec( + crate::codec::Codec::default(), + iter::once((crate::PROTOCOL_IDENT, ProtocolSupport::Outbound)), + libp2p_request_response::Config::default(), + ), keypair, - pending_register_requests: vec![], + waiting_for_register: Default::default(), + waiting_for_discovery: Default::default(), discovered_peers: Default::default(), + registered_namespaces: Default::default(), expiring_registrations: FuturesUnordered::from_iter(vec![ futures::future::pending().boxed() ]), @@ -74,21 +81,36 @@ impl Behaviour { /// Register our external addresses in the given namespace with the given rendezvous peer. 
/// /// External addresses are either manually added via [`libp2p_swarm::Swarm::add_external_address`] or reported - /// by other [`NetworkBehaviour`]s via [`ToSwarm::ReportObservedAddr`]. - pub fn register(&mut self, namespace: Namespace, rendezvous_node: PeerId, ttl: Option) { - self.pending_register_requests - .push((namespace, rendezvous_node, ttl)); + /// by other [`NetworkBehaviour`]s via [`ToSwarm::ExternalAddrConfirmed`]. + pub fn register( + &mut self, + namespace: Namespace, + rendezvous_node: PeerId, + ttl: Option, + ) -> Result<(), RegisterError> { + let external_addresses = self.external_addresses.iter().cloned().collect::>(); + if external_addresses.is_empty() { + return Err(RegisterError::NoExternalAddresses); + } + + let peer_record = PeerRecord::new(&self.keypair, external_addresses)?; + let req_id = self.inner.send_request( + &rendezvous_node, + Register(NewRegistration::new(namespace.clone(), peer_record, ttl)), + ); + self.waiting_for_register + .insert(req_id, (rendezvous_node, namespace)); + + Ok(()) } /// Unregister ourselves from the given namespace with the given rendezvous peer. pub fn unregister(&mut self, namespace: Namespace, rendezvous_node: PeerId) { - self.events.push_back(ToSwarm::NotifyHandler { - peer_id: rendezvous_node, - event: handler::OutboundInEvent::NewSubstream { - open_info: OpenInfo::UnregisterRequest(namespace), - }, - handler: NotifyHandler::Any, - }); + self.registered_namespaces + .retain(|(rz_node, ns), _| rz_node.ne(&rendezvous_node) && ns.ne(&namespace)); + + self.inner + .send_request(&rendezvous_node, Unregister(namespace)); } /// Discover other peers at a given rendezvous peer. @@ -100,22 +122,22 @@ impl Behaviour { /// the cookie was acquired. 
pub fn discover( &mut self, - ns: Option, + namespace: Option, cookie: Option, limit: Option, rendezvous_node: PeerId, ) { - self.events.push_back(ToSwarm::NotifyHandler { - peer_id: rendezvous_node, - event: handler::OutboundInEvent::NewSubstream { - open_info: OpenInfo::DiscoverRequest { - namespace: ns, - cookie, - limit, - }, + let req_id = self.inner.send_request( + &rendezvous_node, + Discover { + namespace: namespace.clone(), + cookie, + limit, }, - handler: NotifyHandler::Any, - }); + ); + + self.waiting_for_discovery + .insert(req_id, (rendezvous_node, namespace)); } } @@ -125,12 +147,6 @@ pub enum RegisterError { NoExternalAddresses, #[error("Failed to make a new PeerRecord")] FailedToMakeRecord(#[from] SigningError), - #[error("Failed to register with Rendezvous node")] - Remote { - rendezvous_node: PeerId, - namespace: Namespace, - error: ErrorCode, - }, } #[derive(Debug)] @@ -155,26 +171,136 @@ pub enum Event { namespace: Namespace, }, /// We failed to register with the contained rendezvous node. - RegisterFailed(RegisterError), + RegisterFailed { + rendezvous_node: PeerId, + namespace: Namespace, + error: ErrorCode, + }, /// The connection details we learned from this node expired. 
Expired { peer: PeerId }, } impl NetworkBehaviour for Behaviour { - type ConnectionHandler = - SubstreamConnectionHandler; - type OutEvent = Event; + type ConnectionHandler = as NetworkBehaviour>::ConnectionHandler; + + type ToSwarm = Event; fn handle_established_inbound_connection( &mut self, - _: ConnectionId, - _: PeerId, - _: &Multiaddr, - _: &Multiaddr, + connection_id: ConnectionId, + peer: PeerId, + local_addr: &Multiaddr, + remote_addr: &Multiaddr, ) -> Result, ConnectionDenied> { - Ok(SubstreamConnectionHandler::new_outbound_only( - Duration::from_secs(30), - )) + self.inner.handle_established_inbound_connection( + connection_id, + peer, + local_addr, + remote_addr, + ) + } + + fn handle_established_outbound_connection( + &mut self, + connection_id: ConnectionId, + peer: PeerId, + addr: &Multiaddr, + role_override: Endpoint, + ) -> Result, ConnectionDenied> { + self.inner + .handle_established_outbound_connection(connection_id, peer, addr, role_override) + } + + fn on_connection_handler_event( + &mut self, + peer_id: PeerId, + connection_id: ConnectionId, + event: THandlerOutEvent, + ) { + self.inner + .on_connection_handler_event(peer_id, connection_id, event); + } + + fn on_swarm_event(&mut self, event: FromSwarm) { + let changed = self.external_addresses.on_swarm_event(&event); + + self.inner.on_swarm_event(event); + + if changed && self.external_addresses.iter().count() > 0 { + let registered = self.registered_namespaces.clone(); + for ((rz_node, ns), ttl) in registered { + if let Err(e) = self.register(ns, rz_node, Some(ttl)) { + tracing::warn!("refreshing registration failed: {e}") + } + } + } + } + + #[tracing::instrument(level = "trace", name = "NetworkBehaviour::poll", skip(self, cx))] + fn poll( + &mut self, + cx: &mut Context<'_>, + ) -> Poll>> { + use libp2p_request_response as req_res; + + loop { + match self.inner.poll(cx) { + Poll::Ready(ToSwarm::GenerateEvent(req_res::Event::Message { + message: + req_res::Message::Response { + request_id, 
+ response, + }, + .. + })) => { + if let Some(event) = self.handle_response(&request_id, response) { + return Poll::Ready(ToSwarm::GenerateEvent(event)); + } + + continue; // not a request we care about + } + Poll::Ready(ToSwarm::GenerateEvent(req_res::Event::OutboundFailure { + request_id, + .. + })) => { + if let Some(event) = self.event_for_outbound_failure(&request_id) { + return Poll::Ready(ToSwarm::GenerateEvent(event)); + } + + continue; // not a request we care about + } + Poll::Ready(ToSwarm::GenerateEvent( + req_res::Event::InboundFailure { .. } + | req_res::Event::ResponseSent { .. } + | req_res::Event::Message { + message: req_res::Message::Request { .. }, + .. + }, + )) => { + unreachable!("rendezvous clients never receive requests") + } + Poll::Ready(other) => { + let new_to_swarm = + other.map_out(|_| unreachable!("we manually map `GenerateEvent` variants")); + + return Poll::Ready(new_to_swarm); + } + Poll::Pending => {} + } + + if let Poll::Ready(Some(expired_registration)) = + self.expiring_registrations.poll_next_unpin(cx) + { + self.discovered_peers.remove(&expired_registration); + return Poll::Ready(ToSwarm::GenerateEvent(Event::Expired { + peer: expired_registration.0, + })); + } + + return Poll::Pending; + } } fn handle_pending_outbound_connection( @@ -199,177 +325,110 @@ impl NetworkBehaviour for Behaviour { Ok(addresses) } +} - fn handle_established_outbound_connection( - &mut self, - _: ConnectionId, - _: PeerId, - _: &Multiaddr, - _: Endpoint, - ) -> Result, ConnectionDenied> { - Ok(SubstreamConnectionHandler::new_outbound_only( - Duration::from_secs(30), - )) - } +impl Behaviour { + fn event_for_outbound_failure(&mut self, req_id: &OutboundRequestId) -> Option { + if let Some((rendezvous_node, namespace)) = self.waiting_for_register.remove(req_id) { + return Some(Event::RegisterFailed { + rendezvous_node, + namespace, + error: ErrorCode::Unavailable, + }); + }; - fn on_connection_handler_event( - &mut self, - peer_id: PeerId, - 
connection_id: ConnectionId, - event: THandlerOutEvent, - ) { - let new_events = match event { - handler::OutboundOutEvent::InboundEvent { message, .. } => void::unreachable(message), - handler::OutboundOutEvent::OutboundEvent { message, .. } => handle_outbound_event( - message, - peer_id, - &mut self.discovered_peers, - &mut self.expiring_registrations, - ), - handler::OutboundOutEvent::InboundError { error, .. } => void::unreachable(error), - handler::OutboundOutEvent::OutboundError { error, .. } => { - log::warn!("Connection with peer {} failed: {}", peer_id, error); - - vec![ToSwarm::CloseConnection { - peer_id, - connection: CloseConnection::One(connection_id), - }] - } + if let Some((rendezvous_node, namespace)) = self.waiting_for_discovery.remove(req_id) { + return Some(Event::DiscoverFailed { + rendezvous_node, + namespace, + error: ErrorCode::Unavailable, + }); }; - self.events.extend(new_events); + None } - fn poll( + fn handle_response( &mut self, - cx: &mut Context<'_>, - _: &mut impl PollParameters, - ) -> Poll>> { - if let Some(event) = self.events.pop_front() { - return Poll::Ready(event); - } - - if let Some((namespace, rendezvous_node, ttl)) = self.pending_register_requests.pop() { - // Update our external addresses based on the Swarm's current knowledge. - // It doesn't make sense to register addresses on which we are not reachable, hence this should not be configurable from the outside. 
- - let external_addresses = self.external_addresses.iter().cloned().collect::>(); + request_id: &OutboundRequestId, + response: Message, + ) -> Option { + match response { + RegisterResponse(Ok(ttl)) => { + if let Some((rendezvous_node, namespace)) = + self.waiting_for_register.remove(request_id) + { + self.registered_namespaces + .insert((rendezvous_node, namespace.clone()), ttl); + + return Some(Event::Registered { + rendezvous_node, + ttl, + namespace, + }); + } - if external_addresses.is_empty() { - return Poll::Ready(ToSwarm::GenerateEvent(Event::RegisterFailed( - RegisterError::NoExternalAddresses, - ))); + None } + RegisterResponse(Err(error_code)) => { + if let Some((rendezvous_node, namespace)) = + self.waiting_for_register.remove(request_id) + { + return Some(Event::RegisterFailed { + rendezvous_node, + namespace, + error: error_code, + }); + } - let action = match PeerRecord::new(&self.keypair, external_addresses) { - Ok(peer_record) => ToSwarm::NotifyHandler { - peer_id: rendezvous_node, - event: handler::OutboundInEvent::NewSubstream { - open_info: OpenInfo::RegisterRequest(NewRegistration { - namespace, - record: peer_record, - ttl, - }), - }, - handler: NotifyHandler::Any, - }, - Err(signing_error) => ToSwarm::GenerateEvent(Event::RegisterFailed( - RegisterError::FailedToMakeRecord(signing_error), - )), - }; - - return Poll::Ready(action); - } - - if let Some(expired_registration) = - futures::ready!(self.expiring_registrations.poll_next_unpin(cx)) - { - self.discovered_peers.remove(&expired_registration); - return Poll::Ready(ToSwarm::GenerateEvent(Event::Expired { - peer: expired_registration.0, - })); - } - - Poll::Pending - } - - fn on_swarm_event(&mut self, event: FromSwarm) { - self.external_addresses.on_swarm_event(&event); - - match event { - FromSwarm::ConnectionEstablished(_) - | FromSwarm::ConnectionClosed(_) - | FromSwarm::AddressChange(_) - | FromSwarm::DialFailure(_) - | FromSwarm::ListenFailure(_) - | FromSwarm::NewListener(_) - | 
FromSwarm::NewListenAddr(_) - | FromSwarm::ExpiredListenAddr(_) - | FromSwarm::ListenerError(_) - | FromSwarm::ListenerClosed(_) - | FromSwarm::NewExternalAddr(_) - | FromSwarm::ExpiredExternalAddr(_) => {} - } - } -} + None + } + DiscoverResponse(Ok((registrations, cookie))) => { + if let Some((rendezvous_node, _ns)) = self.waiting_for_discovery.remove(request_id) + { + self.discovered_peers + .extend(registrations.iter().map(|registration| { + let peer_id = registration.record.peer_id(); + let namespace = registration.namespace.clone(); + + let addresses = registration.record.addresses().to_vec(); + + ((peer_id, namespace), addresses) + })); + + self.expiring_registrations + .extend(registrations.iter().cloned().map(|registration| { + async move { + // if the timer errors we consider it expired + futures_timer::Delay::new(Duration::from_secs(registration.ttl)) + .await; + + (registration.record.peer_id(), registration.namespace) + } + .boxed() + })); + + return Some(Event::Discovered { + rendezvous_node, + registrations, + cookie, + }); + } -fn handle_outbound_event( - event: outbound::OutEvent, - peer_id: PeerId, - discovered_peers: &mut HashMap<(PeerId, Namespace), Vec>, - expiring_registrations: &mut FuturesUnordered>, -) -> Vec>> { - match event { - outbound::OutEvent::Registered { namespace, ttl } => { - vec![ToSwarm::GenerateEvent(Event::Registered { - rendezvous_node: peer_id, - ttl, - namespace, - })] - } - outbound::OutEvent::RegisterFailed(namespace, error) => { - vec![ToSwarm::GenerateEvent(Event::RegisterFailed( - RegisterError::Remote { - rendezvous_node: peer_id, - namespace, - error, - }, - ))] - } - outbound::OutEvent::Discovered { - registrations, - cookie, - } => { - discovered_peers.extend(registrations.iter().map(|registration| { - let peer_id = registration.record.peer_id(); - let namespace = registration.namespace.clone(); - - let addresses = registration.record.addresses().to_vec(); - - ((peer_id, namespace), addresses) - })); - 
expiring_registrations.extend(registrations.iter().cloned().map(|registration| { - async move { - // if the timer errors we consider it expired - futures_timer::Delay::new(Duration::from_secs(registration.ttl)).await; - - (registration.record.peer_id(), registration.namespace) + None + } + DiscoverResponse(Err(error_code)) => { + if let Some((rendezvous_node, ns)) = self.waiting_for_discovery.remove(request_id) { + return Some(Event::DiscoverFailed { + rendezvous_node, + namespace: ns, + error: error_code, + }); } - .boxed() - })); - vec![ToSwarm::GenerateEvent(Event::Discovered { - rendezvous_node: peer_id, - registrations, - cookie, - })] - } - outbound::OutEvent::DiscoverFailed { namespace, error } => { - vec![ToSwarm::GenerateEvent(Event::DiscoverFailed { - rendezvous_node: peer_id, - namespace, - error, - })] + None + } + _ => unreachable!("rendezvous clients never receive requests"), } } } diff --git a/protocols/rendezvous/src/codec.rs b/protocols/rendezvous/src/codec.rs index 716ad79893f..41432a91d8c 100644 --- a/protocols/rendezvous/src/codec.rs +++ b/protocols/rendezvous/src/codec.rs @@ -19,16 +19,24 @@ // DEALINGS IN THE SOFTWARE. 
use crate::DEFAULT_TTL; +use async_trait::async_trait; use asynchronous_codec::{BytesMut, Decoder, Encoder}; +use asynchronous_codec::{FramedRead, FramedWrite}; +use futures::{AsyncRead, AsyncWrite, SinkExt, StreamExt}; use libp2p_core::{peer_record, signed_envelope, PeerRecord, SignedEnvelope}; +use libp2p_swarm::StreamProtocol; +use quick_protobuf_codec::Codec as ProtobufCodec; use rand::RngCore; use std::convert::{TryFrom, TryInto}; -use std::fmt; +use std::{fmt, io}; pub type Ttl = u64; +pub(crate) type Limit = u64; + +const MAX_MESSAGE_LEN_BYTES: usize = 1024 * 1024; #[allow(clippy::large_enum_variant)] -#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq)] pub enum Message { Register(NewRegistration), RegisterResponse(Result), @@ -36,7 +44,7 @@ pub enum Message { Discover { namespace: Option, cookie: Option, - limit: Option, + limit: Option, }, DiscoverResponse(Result<(Vec, Cookie), ErrorCode>), } @@ -49,7 +57,7 @@ impl Namespace { /// /// This will panic if the namespace is too long. We accepting panicking in this case because we are enforcing a `static lifetime which means this value can only be a constant in the program and hence we hope the developer checked that it is of an acceptable length. 
pub fn from_static(value: &'static str) -> Self { - if value.len() > 255 { + if value.len() > crate::MAX_NAMESPACE { panic!("Namespace '{value}' is too long!") } @@ -57,7 +65,7 @@ impl Namespace { } pub fn new(value: String) -> Result { - if value.len() > 255 { + if value.len() > crate::MAX_NAMESPACE { return Err(NamespaceTooLong); } @@ -160,7 +168,7 @@ impl Cookie { #[error("The cookie was malformed")] pub struct InvalidCookie; -#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq)] pub struct NewRegistration { pub namespace: Namespace, pub record: PeerRecord, @@ -199,43 +207,100 @@ pub enum ErrorCode { Unavailable, } -pub struct RendezvousCodec { - inner: quick_protobuf_codec::Codec, -} - -impl Default for RendezvousCodec { - fn default() -> Self { - Self { - inner: quick_protobuf_codec::Codec::new(1024 * 1024), // 1MB - } - } -} - -impl Encoder for RendezvousCodec { - type Item = Message; +impl Encoder for Codec { + type Item<'a> = Message; type Error = Error; - fn encode(&mut self, item: Self::Item, dst: &mut BytesMut) -> Result<(), Self::Error> { - self.inner.encode(proto::Message::from(item), dst)?; + fn encode(&mut self, item: Self::Item<'_>, dst: &mut BytesMut) -> Result<(), Self::Error> { + let mut pb: ProtobufCodec = ProtobufCodec::new(MAX_MESSAGE_LEN_BYTES); + + pb.encode(proto::Message::from(item), dst)?; Ok(()) } } -impl Decoder for RendezvousCodec { +impl Decoder for Codec { type Item = Message; type Error = Error; fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { - let message = match self.inner.decode(src)? { - Some(p) => p, - None => return Ok(None), + let mut pb: ProtobufCodec = ProtobufCodec::new(MAX_MESSAGE_LEN_BYTES); + + let Some(message) = pb.decode(src)? 
else { + return Ok(None); }; Ok(Some(message.try_into()?)) } } +#[derive(Clone, Default)] +pub struct Codec {} + +#[async_trait] +impl libp2p_request_response::Codec for Codec { + type Protocol = StreamProtocol; + type Request = Message; + type Response = Message; + + async fn read_request(&mut self, _: &Self::Protocol, io: &mut T) -> io::Result + where + T: AsyncRead + Unpin + Send, + { + let message = FramedRead::new(io, self.clone()) + .next() + .await + .ok_or(io::ErrorKind::UnexpectedEof)??; + + Ok(message) + } + + async fn read_response( + &mut self, + _: &Self::Protocol, + io: &mut T, + ) -> io::Result + where + T: AsyncRead + Unpin + Send, + { + let message = FramedRead::new(io, self.clone()) + .next() + .await + .ok_or(io::ErrorKind::UnexpectedEof)??; + + Ok(message) + } + + async fn write_request( + &mut self, + _: &Self::Protocol, + io: &mut T, + req: Self::Request, + ) -> io::Result<()> + where + T: AsyncWrite + Unpin + Send, + { + FramedWrite::new(io, self.clone()).send(req).await?; + + Ok(()) + } + + async fn write_response( + &mut self, + _: &Self::Protocol, + io: &mut T, + res: Self::Response, + ) -> io::Result<()> + where + T: AsyncWrite + Unpin + Send, + { + FramedWrite::new(io, self.clone()).send(res).await?; + + Ok(()) + } +} + #[derive(Debug, thiserror::Error)] pub enum Error { #[error(transparent)] @@ -246,6 +311,16 @@ pub enum Error { Conversion(#[from] ConversionError), } +impl From for std::io::Error { + fn from(value: Error) -> Self { + match value { + Error::Io(e) => e, + Error::Codec(e) => io::Error::from(e), + Error::Conversion(e) => io::Error::new(io::ErrorKind::InvalidInput, e), + } + } +} + impl From for proto::Message { fn from(message: Message) -> Self { match message { @@ -528,7 +603,7 @@ impl TryFrom for ErrorCode { E_UNAVAILABLE => ErrorCode::Unavailable, }; - Result::Ok(code) + Ok(code) } } @@ -567,6 +642,7 @@ mod proto { #[cfg(test)] mod tests { use super::*; + use crate::Namespace; #[test] fn cookie_wire_encoding_roundtrip() 
{ diff --git a/protocols/rendezvous/src/handler.rs b/protocols/rendezvous/src/handler.rs deleted file mode 100644 index ccf765c9c65..00000000000 --- a/protocols/rendezvous/src/handler.rs +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2021 COMIT Network. -// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - -use crate::codec; -use crate::codec::Message; -use libp2p_swarm::StreamProtocol; -use void::Void; - -const PROTOCOL_IDENT: StreamProtocol = StreamProtocol::new("/rendezvous/1.0.0"); - -pub(crate) mod inbound; -pub(crate) mod outbound; -/// Errors that can occur while interacting with a substream. 
-#[allow(clippy::large_enum_variant)] -#[derive(Debug, thiserror::Error)] -pub enum Error { - #[error("Reading message {0:?} at this stage is a protocol violation")] - BadMessage(Message), - #[error("Failed to write message to substream")] - WriteMessage(#[source] codec::Error), - #[error("Failed to read message from substream")] - ReadMessage(#[source] codec::Error), - #[error("Substream ended unexpectedly mid-protocol")] - UnexpectedEndOfStream, -} - -pub(crate) type OutboundInEvent = crate::substream_handler::InEvent; -pub(crate) type OutboundOutEvent = - crate::substream_handler::OutEvent; - -pub(crate) type InboundInEvent = crate::substream_handler::InEvent<(), inbound::InEvent, Void>; -pub(crate) type InboundOutEvent = - crate::substream_handler::OutEvent; diff --git a/protocols/rendezvous/src/handler/inbound.rs b/protocols/rendezvous/src/handler/inbound.rs deleted file mode 100644 index 5ed2e4052ab..00000000000 --- a/protocols/rendezvous/src/handler/inbound.rs +++ /dev/null @@ -1,192 +0,0 @@ -// Copyright 2021 COMIT Network. -// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - -use crate::codec::{ - Cookie, ErrorCode, Message, Namespace, NewRegistration, Registration, RendezvousCodec, Ttl, -}; -use crate::handler::Error; -use crate::handler::PROTOCOL_IDENT; -use crate::substream_handler::{Next, PassthroughProtocol, SubstreamHandler}; -use asynchronous_codec::Framed; -use futures::{SinkExt, StreamExt}; -use libp2p_swarm::{NegotiatedSubstream, SubstreamProtocol}; -use std::fmt; -use std::task::{Context, Poll}; - -/// The state of an inbound substream (i.e. the remote node opened it). -#[allow(clippy::large_enum_variant)] -#[allow(clippy::enum_variant_names)] -pub enum Stream { - /// We are in the process of reading a message from the substream. - PendingRead(Framed), - /// We read a message, dispatched it to the behaviour and are waiting for the response. - PendingBehaviour(Framed), - /// We are in the process of sending a response. - PendingSend(Framed, Message), - /// We've sent the message and are now closing down the substream. 
- PendingClose(Framed), -} - -impl fmt::Debug for Stream { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - Stream::PendingRead(_) => write!(f, "Inbound::PendingRead"), - Stream::PendingBehaviour(_) => write!(f, "Inbound::PendingBehaviour"), - Stream::PendingSend(_, _) => write!(f, "Inbound::PendingSend"), - Stream::PendingClose(_) => write!(f, "Inbound::PendingClose"), - } - } -} - -#[allow(clippy::large_enum_variant)] -#[allow(clippy::enum_variant_names)] -#[derive(Debug, Clone)] -pub enum OutEvent { - RegistrationRequested(NewRegistration), - UnregisterRequested(Namespace), - DiscoverRequested { - namespace: Option, - cookie: Option, - limit: Option, - }, -} - -#[derive(Debug)] -pub enum InEvent { - RegisterResponse { - ttl: Ttl, - }, - DeclineRegisterRequest(ErrorCode), - DiscoverResponse { - discovered: Vec, - cookie: Cookie, - }, - DeclineDiscoverRequest(ErrorCode), -} - -impl SubstreamHandler for Stream { - type InEvent = InEvent; - type OutEvent = OutEvent; - type Error = Error; - type OpenInfo = (); - - fn upgrade( - open_info: Self::OpenInfo, - ) -> SubstreamProtocol { - SubstreamProtocol::new(PassthroughProtocol::new(PROTOCOL_IDENT), open_info) - } - - fn new(substream: NegotiatedSubstream, _: Self::OpenInfo) -> Self { - Stream::PendingRead(Framed::new(substream, RendezvousCodec::default())) - } - - fn on_event(self, event: Self::InEvent) -> Self { - match (event, self) { - (InEvent::RegisterResponse { ttl }, Stream::PendingBehaviour(substream)) => { - Stream::PendingSend(substream, Message::RegisterResponse(Ok(ttl))) - } - (InEvent::DeclineRegisterRequest(error), Stream::PendingBehaviour(substream)) => { - Stream::PendingSend(substream, Message::RegisterResponse(Err(error))) - } - ( - InEvent::DiscoverResponse { discovered, cookie }, - Stream::PendingBehaviour(substream), - ) => Stream::PendingSend( - substream, - Message::DiscoverResponse(Ok((discovered, cookie))), - ), - (InEvent::DeclineDiscoverRequest(error), 
Stream::PendingBehaviour(substream)) => { - Stream::PendingSend(substream, Message::DiscoverResponse(Err(error))) - } - (event, inbound) => { - debug_assert!(false, "{inbound:?} cannot handle event {event:?}"); - - inbound - } - } - } - - fn advance(self, cx: &mut Context<'_>) -> Result, Self::Error> { - let next_state = match self { - Stream::PendingRead(mut substream) => { - match substream.poll_next_unpin(cx).map_err(Error::ReadMessage)? { - Poll::Ready(Some(msg)) => { - let event = match msg { - Message::Register(registration) => { - OutEvent::RegistrationRequested(registration) - } - Message::Discover { - cookie, - namespace, - limit, - } => OutEvent::DiscoverRequested { - cookie, - namespace, - limit, - }, - Message::Unregister(namespace) => { - OutEvent::UnregisterRequested(namespace) - } - other => return Err(Error::BadMessage(other)), - }; - - Next::EmitEvent { - event, - next_state: Stream::PendingBehaviour(substream), - } - } - Poll::Ready(None) => return Err(Error::UnexpectedEndOfStream), - Poll::Pending => Next::Pending { - next_state: Stream::PendingRead(substream), - }, - } - } - Stream::PendingBehaviour(substream) => Next::Pending { - next_state: Stream::PendingBehaviour(substream), - }, - Stream::PendingSend(mut substream, message) => match substream - .poll_ready_unpin(cx) - .map_err(Error::WriteMessage)? 
- { - Poll::Ready(()) => { - substream - .start_send_unpin(message) - .map_err(Error::WriteMessage)?; - - Next::Continue { - next_state: Stream::PendingClose(substream), - } - } - Poll::Pending => Next::Pending { - next_state: Stream::PendingSend(substream, message), - }, - }, - Stream::PendingClose(mut substream) => match substream.poll_close_unpin(cx) { - Poll::Ready(Ok(())) => Next::Done, - Poll::Ready(Err(_)) => Next::Done, // there is nothing we can do about an error during close - Poll::Pending => Next::Pending { - next_state: Stream::PendingClose(substream), - }, - }, - }; - - Ok(next_state) - } -} diff --git a/protocols/rendezvous/src/handler/outbound.rs b/protocols/rendezvous/src/handler/outbound.rs deleted file mode 100644 index d80bcdeb82a..00000000000 --- a/protocols/rendezvous/src/handler/outbound.rs +++ /dev/null @@ -1,134 +0,0 @@ -// Copyright 2021 COMIT Network. -// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. 
- -use crate::codec::{Cookie, Message, NewRegistration, RendezvousCodec}; -use crate::handler::Error; -use crate::handler::PROTOCOL_IDENT; -use crate::substream_handler::{FutureSubstream, Next, PassthroughProtocol, SubstreamHandler}; -use crate::{ErrorCode, Namespace, Registration, Ttl}; -use asynchronous_codec::Framed; -use futures::{SinkExt, TryFutureExt, TryStreamExt}; -use libp2p_swarm::{NegotiatedSubstream, SubstreamProtocol}; -use std::task::Context; -use void::Void; - -pub struct Stream(FutureSubstream); - -impl SubstreamHandler for Stream { - type InEvent = Void; - type OutEvent = OutEvent; - type Error = Error; - type OpenInfo = OpenInfo; - - fn upgrade( - open_info: Self::OpenInfo, - ) -> SubstreamProtocol { - SubstreamProtocol::new(PassthroughProtocol::new(PROTOCOL_IDENT), open_info) - } - - fn new(substream: NegotiatedSubstream, info: Self::OpenInfo) -> Self { - let mut stream = Framed::new(substream, RendezvousCodec::default()); - let sent_message = match info { - OpenInfo::RegisterRequest(new_registration) => Message::Register(new_registration), - OpenInfo::UnregisterRequest(namespace) => Message::Unregister(namespace), - OpenInfo::DiscoverRequest { - namespace, - cookie, - limit, - } => Message::Discover { - namespace, - cookie, - limit, - }, - }; - - Self(FutureSubstream::new(async move { - use Message::*; - use OutEvent::*; - - stream - .send(sent_message.clone()) - .map_err(Error::WriteMessage) - .await?; - let received_message = stream.try_next().map_err(Error::ReadMessage).await?; - let received_message = received_message.ok_or(Error::UnexpectedEndOfStream)?; - - let event = match (sent_message, received_message) { - (Register(registration), RegisterResponse(Ok(ttl))) => Registered { - namespace: registration.namespace, - ttl, - }, - (Register(registration), RegisterResponse(Err(error))) => { - RegisterFailed(registration.namespace, error) - } - (Discover { .. 
}, DiscoverResponse(Ok((registrations, cookie)))) => Discovered { - registrations, - cookie, - }, - (Discover { namespace, .. }, DiscoverResponse(Err(error))) => { - DiscoverFailed { namespace, error } - } - (.., other) => return Err(Error::BadMessage(other)), - }; - - stream.close().map_err(Error::WriteMessage).await?; - - Ok(event) - })) - } - - fn on_event(self, event: Self::InEvent) -> Self { - void::unreachable(event) - } - - fn advance(self, cx: &mut Context<'_>) -> Result, Self::Error> { - Ok(self.0.advance(cx)?.map_state(Stream)) - } -} - -#[derive(Debug, Clone)] -pub enum OutEvent { - Registered { - namespace: Namespace, - ttl: Ttl, - }, - RegisterFailed(Namespace, ErrorCode), - Discovered { - registrations: Vec, - cookie: Cookie, - }, - DiscoverFailed { - namespace: Option, - error: ErrorCode, - }, -} - -#[allow(clippy::large_enum_variant)] -#[allow(clippy::enum_variant_names)] -#[derive(Debug)] -pub enum OpenInfo { - RegisterRequest(NewRegistration), - UnregisterRequest(Namespace), - DiscoverRequest { - namespace: Option, - cookie: Option, - limit: Option, - }, -} diff --git a/protocols/rendezvous/src/lib.rs b/protocols/rendezvous/src/lib.rs index 337e554ea00..7c607085f20 100644 --- a/protocols/rendezvous/src/lib.rs +++ b/protocols/rendezvous/src/lib.rs @@ -23,10 +23,9 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] pub use self::codec::{Cookie, ErrorCode, Namespace, NamespaceTooLong, Registration, Ttl}; +use libp2p_swarm::StreamProtocol; mod codec; -mod handler; -mod substream_handler; /// If unspecified, rendezvous nodes should assume a TTL of 2h. /// @@ -43,5 +42,12 @@ pub const MIN_TTL: Ttl = 60 * 60 * 2; /// . pub const MAX_TTL: Ttl = 60 * 60 * 72; +/// The maximum namespace length. +/// +/// . 
+pub const MAX_NAMESPACE: usize = 255; + +pub(crate) const PROTOCOL_IDENT: StreamProtocol = StreamProtocol::new("/rendezvous/1.0.0"); + pub mod client; pub mod server; diff --git a/protocols/rendezvous/src/server.rs b/protocols/rendezvous/src/server.rs index 1311d4f903f..667c71e20e3 100644 --- a/protocols/rendezvous/src/server.rs +++ b/protocols/rendezvous/src/server.rs @@ -18,30 +18,29 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::codec::{Cookie, ErrorCode, Namespace, NewRegistration, Registration, Ttl}; -use crate::handler::inbound; -use crate::substream_handler::{InEvent, InboundSubstreamId, SubstreamConnectionHandler}; -use crate::{handler, MAX_TTL, MIN_TTL}; +use crate::codec::{Cookie, ErrorCode, Message, Namespace, NewRegistration, Registration, Ttl}; +use crate::{MAX_TTL, MIN_TTL}; use bimap::BiMap; use futures::future::BoxFuture; -use futures::ready; use futures::stream::FuturesUnordered; use futures::{FutureExt, StreamExt}; use libp2p_core::{Endpoint, Multiaddr}; use libp2p_identity::PeerId; +use libp2p_request_response::ProtocolSupport; use libp2p_swarm::behaviour::FromSwarm; use libp2p_swarm::{ - CloseConnection, ConnectionDenied, ConnectionId, NetworkBehaviour, NotifyHandler, - PollParameters, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, + ConnectionDenied, ConnectionId, NetworkBehaviour, THandler, THandlerInEvent, THandlerOutEvent, + ToSwarm, }; -use std::collections::{HashMap, HashSet, VecDeque}; +use std::collections::{HashMap, HashSet}; +use std::iter; use std::iter::FromIterator; -use std::task::{Context, Poll}; +use std::task::{ready, Context, Poll}; use std::time::Duration; -use void::Void; pub struct Behaviour { - events: VecDeque>>, + inner: libp2p_request_response::Behaviour, + registrations: Registrations, } @@ -75,7 +74,12 @@ impl Behaviour { /// Create a new instance of the rendezvous [`NetworkBehaviour`]. 
pub fn new(config: Config) -> Self { Self { - events: Default::default(), + inner: libp2p_request_response::Behaviour::with_codec( + crate::codec::Codec::default(), + iter::once((crate::PROTOCOL_IDENT, ProtocolSupport::Inbound)), + libp2p_request_response::Config::default(), + ), + registrations: Registrations::with_config(config), } } @@ -109,31 +113,36 @@ pub enum Event { } impl NetworkBehaviour for Behaviour { - type ConnectionHandler = SubstreamConnectionHandler; - type OutEvent = Event; + type ConnectionHandler = as NetworkBehaviour>::ConnectionHandler; + + type ToSwarm = Event; fn handle_established_inbound_connection( &mut self, - _: ConnectionId, - _: PeerId, - _: &Multiaddr, - _: &Multiaddr, + connection_id: ConnectionId, + peer: PeerId, + local_addr: &Multiaddr, + remote_addr: &Multiaddr, ) -> Result, ConnectionDenied> { - Ok(SubstreamConnectionHandler::new_inbound_only( - Duration::from_secs(30), - )) + self.inner.handle_established_inbound_connection( + connection_id, + peer, + local_addr, + remote_addr, + ) } fn handle_established_outbound_connection( &mut self, - _: ConnectionId, - _: PeerId, - _: &Multiaddr, - _: Endpoint, + connection_id: ConnectionId, + peer: PeerId, + addr: &Multiaddr, + role_override: Endpoint, ) -> Result, ConnectionDenied> { - Ok(SubstreamConnectionHandler::new_inbound_only( - Duration::from_secs(30), - )) + self.inner + .handle_established_outbound_connection(connection_id, peer, addr, role_override) } fn on_connection_handler_event( @@ -142,135 +151,146 @@ impl NetworkBehaviour for Behaviour { connection: ConnectionId, event: THandlerOutEvent, ) { - let new_events = match event { - handler::InboundOutEvent::InboundEvent { id, message } => { - handle_inbound_event(message, peer_id, connection, id, &mut self.registrations) - } - handler::InboundOutEvent::OutboundEvent { message, .. } => void::unreachable(message), - handler::InboundOutEvent::InboundError { error, .. 
} => { - log::warn!("Connection with peer {} failed: {}", peer_id, error); - - vec![ToSwarm::CloseConnection { - peer_id, - connection: CloseConnection::One(connection), - }] - } - handler::InboundOutEvent::OutboundError { error, .. } => void::unreachable(error), - }; - - self.events.extend(new_events); + self.inner + .on_connection_handler_event(peer_id, connection, event); } + #[tracing::instrument(level = "trace", name = "NetworkBehaviour::poll", skip(self, cx))] fn poll( &mut self, cx: &mut Context<'_>, - _: &mut impl PollParameters, - ) -> Poll>> { + ) -> Poll>> { if let Poll::Ready(ExpiredRegistration(registration)) = self.registrations.poll(cx) { return Poll::Ready(ToSwarm::GenerateEvent(Event::RegistrationExpired( registration, ))); } - if let Some(event) = self.events.pop_front() { - return Poll::Ready(event); - } + loop { + if let Poll::Ready(to_swarm) = self.inner.poll(cx) { + match to_swarm { + ToSwarm::GenerateEvent(libp2p_request_response::Event::Message { + peer: peer_id, + message: + libp2p_request_response::Message::Request { + request, channel, .. + }, + }) => { + if let Some((event, response)) = + handle_request(peer_id, request, &mut self.registrations) + { + if let Some(resp) = response { + self.inner + .send_response(channel, resp) + .expect("Send response"); + } + + return Poll::Ready(ToSwarm::GenerateEvent(event)); + } + + continue; + } + ToSwarm::GenerateEvent(libp2p_request_response::Event::InboundFailure { + peer, + request_id, + error, + }) => { + tracing::warn!( + %peer, + request=%request_id, + "Inbound request with peer failed: {error}" + ); + + continue; + } + ToSwarm::GenerateEvent(libp2p_request_response::Event::ResponseSent { + .. + }) + | ToSwarm::GenerateEvent(libp2p_request_response::Event::Message { + peer: _, + message: libp2p_request_response::Message::Response { .. }, + }) + | ToSwarm::GenerateEvent(libp2p_request_response::Event::OutboundFailure { + .. 
+ }) => { + continue; + } + other => { + let new_to_swarm = other + .map_out(|_| unreachable!("we manually map `GenerateEvent` variants")); + + return Poll::Ready(new_to_swarm); + } + }; + } - Poll::Pending - } - - fn on_swarm_event(&mut self, event: FromSwarm) { - match event { - FromSwarm::ConnectionEstablished(_) - | FromSwarm::ConnectionClosed(_) - | FromSwarm::AddressChange(_) - | FromSwarm::DialFailure(_) - | FromSwarm::ListenFailure(_) - | FromSwarm::NewListener(_) - | FromSwarm::NewListenAddr(_) - | FromSwarm::ExpiredListenAddr(_) - | FromSwarm::ListenerError(_) - | FromSwarm::ListenerClosed(_) - | FromSwarm::NewExternalAddr(_) - | FromSwarm::ExpiredExternalAddr(_) => {} + return Poll::Pending; } } + + fn on_swarm_event(&mut self, event: FromSwarm) { + self.inner.on_swarm_event(event); + } } -fn handle_inbound_event( - event: inbound::OutEvent, +fn handle_request( peer_id: PeerId, - connection: ConnectionId, - id: InboundSubstreamId, + message: Message, registrations: &mut Registrations, -) -> Vec>> { - match event { - // bad registration - inbound::OutEvent::RegistrationRequested(registration) - if registration.record.peer_id() != peer_id => - { - let error = ErrorCode::NotAuthorized; - - vec![ - ToSwarm::NotifyHandler { - peer_id, - handler: NotifyHandler::One(connection), - event: handler::InboundInEvent::NotifyInboundSubstream { - id, - message: inbound::InEvent::DeclineRegisterRequest(error), - }, - }, - ToSwarm::GenerateEvent(Event::PeerNotRegistered { +) -> Option<(Event, Option)> { + match message { + Message::Register(registration) => { + if registration.record.peer_id() != peer_id { + let error = ErrorCode::NotAuthorized; + + let event = Event::PeerNotRegistered { peer: peer_id, namespace: registration.namespace, error, - }), - ] - } - inbound::OutEvent::RegistrationRequested(registration) => { + }; + + return Some((event, Some(Message::RegisterResponse(Err(error))))); + } + let namespace = registration.namespace.clone(); match 
registrations.add(registration) { Ok(registration) => { - vec![ - ToSwarm::NotifyHandler { - peer_id, - handler: NotifyHandler::One(connection), - event: handler::InboundInEvent::NotifyInboundSubstream { - id, - message: inbound::InEvent::RegisterResponse { - ttl: registration.ttl, - }, - }, - }, - ToSwarm::GenerateEvent(Event::PeerRegistered { - peer: peer_id, - registration, - }), - ] + let response = Message::RegisterResponse(Ok(registration.ttl)); + + let event = Event::PeerRegistered { + peer: peer_id, + registration, + }; + + Some((event, Some(response))) } Err(TtlOutOfRange::TooLong { .. }) | Err(TtlOutOfRange::TooShort { .. }) => { let error = ErrorCode::InvalidTtl; - vec![ - ToSwarm::NotifyHandler { - peer_id, - handler: NotifyHandler::One(connection), - event: handler::InboundInEvent::NotifyInboundSubstream { - id, - message: inbound::InEvent::DeclineRegisterRequest(error), - }, - }, - ToSwarm::GenerateEvent(Event::PeerNotRegistered { - peer: peer_id, - namespace, - error, - }), - ] + let response = Message::RegisterResponse(Err(error)); + + let event = Event::PeerNotRegistered { + peer: peer_id, + namespace, + error, + }; + + Some((event, Some(response))) } } } - inbound::OutEvent::DiscoverRequested { + Message::Unregister(namespace) => { + registrations.remove(namespace.clone(), peer_id); + + let event = Event::PeerUnregistered { + peer: peer_id, + namespace, + }; + + Some((event, None)) + } + Message::Discover { namespace, cookie, limit, @@ -278,51 +298,30 @@ fn handle_inbound_event( Ok((registrations, cookie)) => { let discovered = registrations.cloned().collect::>(); - vec![ - ToSwarm::NotifyHandler { - peer_id, - handler: NotifyHandler::One(connection), - event: handler::InboundInEvent::NotifyInboundSubstream { - id, - message: inbound::InEvent::DiscoverResponse { - discovered: discovered.clone(), - cookie, - }, - }, - }, - ToSwarm::GenerateEvent(Event::DiscoverServed { - enquirer: peer_id, - registrations: discovered, - }), - ] + let response = 
Message::DiscoverResponse(Ok((discovered.clone(), cookie))); + + let event = Event::DiscoverServed { + enquirer: peer_id, + registrations: discovered, + }; + + Some((event, Some(response))) } Err(_) => { let error = ErrorCode::InvalidCookie; - vec![ - ToSwarm::NotifyHandler { - peer_id, - handler: NotifyHandler::One(connection), - event: handler::InboundInEvent::NotifyInboundSubstream { - id, - message: inbound::InEvent::DeclineDiscoverRequest(error), - }, - }, - ToSwarm::GenerateEvent(Event::DiscoverNotServed { - enquirer: peer_id, - error, - }), - ] + let response = Message::DiscoverResponse(Err(error)); + + let event = Event::DiscoverNotServed { + enquirer: peer_id, + error, + }; + + Some((event, Some(response))) } }, - inbound::OutEvent::UnregisterRequested(namespace) => { - registrations.remove(namespace.clone(), peer_id); - - vec![ToSwarm::GenerateEvent(Event::PeerUnregistered { - peer: peer_id, - namespace, - })] - } + Message::RegisterResponse(_) => None, + Message::DiscoverResponse(_) => None, } } @@ -487,32 +486,38 @@ impl Registrations { self.cookies .insert(new_cookie.clone(), reggos_of_last_discover); - let reggos = &self.registrations; + let regs = &self.registrations; let registrations = ids .into_iter() - .map(move |id| reggos.get(&id).expect("bad internal datastructure")); + .map(move |id| regs.get(&id).expect("bad internal data structure")); Ok((registrations, new_cookie)) } fn poll(&mut self, cx: &mut Context<'_>) -> Poll { - let expired_registration = ready!(self.next_expiry.poll_next_unpin(cx)).expect( - "This stream should never finish because it is initialised with a pending future", - ); + loop { + let expired_registration = ready!(self.next_expiry.poll_next_unpin(cx)).expect( + "This stream should never finish because it is initialised with a pending future", + ); - // clean up our cookies - self.cookies.retain(|_, registrations| { - registrations.remove(&expired_registration); + // clean up our cookies + self.cookies.retain(|_, 
registrations| { + registrations.remove(&expired_registration); - // retain all cookies where there are still registrations left - !registrations.is_empty() - }); + // retain all cookies where there are still registrations left + !registrations.is_empty() + }); - self.registrations_for_peer - .remove_by_right(&expired_registration); - match self.registrations.remove(&expired_registration) { - None => self.poll(cx), - Some(registration) => Poll::Ready(ExpiredRegistration(registration)), + self.registrations_for_peer + .remove_by_right(&expired_registration); + match self.registrations.remove(&expired_registration) { + None => { + continue; + } + Some(registration) => { + return Poll::Ready(ExpiredRegistration(registration)); + } + } } } } diff --git a/protocols/rendezvous/src/substream_handler.rs b/protocols/rendezvous/src/substream_handler.rs deleted file mode 100644 index d2a1651cd52..00000000000 --- a/protocols/rendezvous/src/substream_handler.rs +++ /dev/null @@ -1,559 +0,0 @@ -// Copyright 2021 COMIT Network. -// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - -//! A generic [`ConnectionHandler`] that delegates the handling of substreams to [`SubstreamHandler`]s. -//! -//! This module is an attempt to simplify the implementation of protocols by freeing implementations from dealing with aspects such as concurrent substreams. -//! Particularly for outbound substreams, it greatly simplifies the definition of protocols through the [`FutureSubstream`] helper. -//! -//! At the moment, this module is an implementation detail of the rendezvous protocol but the intent is for it to be provided as a generic module that is accessible to other protocols as well. - -use futures::future::{self, BoxFuture, Fuse, FusedFuture}; -use futures::FutureExt; -use instant::Instant; -use libp2p_core::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}; -use libp2p_swarm::handler::{ConnectionEvent, FullyNegotiatedInbound, FullyNegotiatedOutbound}; -use libp2p_swarm::{ - ConnectionHandler, ConnectionHandlerEvent, KeepAlive, NegotiatedSubstream, StreamProtocol, - SubstreamProtocol, -}; -use std::collections::{HashMap, VecDeque}; -use std::fmt; -use std::future::Future; -use std::hash::Hash; -use std::task::{Context, Poll}; -use std::time::Duration; -use void::Void; - -/// Handles a substream throughout its lifetime. -pub trait SubstreamHandler: Sized { - type InEvent; - type OutEvent; - type Error; - type OpenInfo; - - fn upgrade(open_info: Self::OpenInfo) - -> SubstreamProtocol; - fn new(substream: NegotiatedSubstream, info: Self::OpenInfo) -> Self; - fn on_event(self, event: Self::InEvent) -> Self; - fn advance(self, cx: &mut Context<'_>) -> Result, Self::Error>; -} - -/// The result of advancing a [`SubstreamHandler`]. 
-pub enum Next { - /// Return the given event and set the handler into `next_state`. - EmitEvent { event: TEvent, next_state: TState }, - /// The handler currently cannot do any more work, set its state back into `next_state`. - Pending { next_state: TState }, - /// The handler performed some work and wants to continue in the given state. - /// - /// This variant is useful because it frees the handler from implementing a loop internally. - Continue { next_state: TState }, - /// The handler finished. - Done, -} - -impl Next { - pub fn map_state( - self, - map: impl FnOnce(TState) -> TNextState, - ) -> Next { - match self { - Next::EmitEvent { event, next_state } => Next::EmitEvent { - event, - next_state: map(next_state), - }, - Next::Pending { next_state } => Next::Pending { - next_state: map(next_state), - }, - Next::Continue { next_state } => Next::Pending { - next_state: map(next_state), - }, - Next::Done => Next::Done, - } - } -} - -#[derive(Debug, Hash, Eq, PartialEq, Clone, Copy)] -pub struct InboundSubstreamId(u64); - -impl InboundSubstreamId { - fn fetch_and_increment(&mut self) -> Self { - let next_id = *self; - self.0 += 1; - - next_id - } -} - -impl fmt::Display for InboundSubstreamId { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}", self.0) - } -} - -#[derive(Debug, Hash, Eq, PartialEq, Clone, Copy)] -pub struct OutboundSubstreamId(u64); - -impl OutboundSubstreamId { - fn fetch_and_increment(&mut self) -> Self { - let next_id = *self; - self.0 += 1; - - next_id - } -} - -impl fmt::Display for OutboundSubstreamId { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}", self.0) - } -} - -pub struct PassthroughProtocol { - ident: Option, -} - -impl PassthroughProtocol { - pub fn new(ident: StreamProtocol) -> Self { - Self { ident: Some(ident) } - } -} - -impl UpgradeInfo for PassthroughProtocol { - type Info = StreamProtocol; - type InfoIter = std::option::IntoIter; - - fn protocol_info(&self) -> 
Self::InfoIter { - self.ident.clone().into_iter() - } -} - -impl InboundUpgrade for PassthroughProtocol { - type Output = C; - type Error = Void; - type Future = BoxFuture<'static, Result>; - - fn upgrade_inbound(self, socket: C, _: Self::Info) -> Self::Future { - match self.ident { - Some(_) => future::ready(Ok(socket)).boxed(), - None => future::pending().boxed(), - } - } -} - -impl OutboundUpgrade for PassthroughProtocol { - type Output = C; - type Error = Void; - type Future = BoxFuture<'static, Result>; - - fn upgrade_outbound(self, socket: C, _: Self::Info) -> Self::Future { - match self.ident { - Some(_) => future::ready(Ok(socket)).boxed(), - None => future::pending().boxed(), - } - } -} - -/// An implementation of [`ConnectionHandler`] that delegates to individual [`SubstreamHandler`]s. -pub struct SubstreamConnectionHandler { - inbound_substreams: HashMap, - outbound_substreams: HashMap, - next_inbound_substream_id: InboundSubstreamId, - next_outbound_substream_id: OutboundSubstreamId, - - new_substreams: VecDeque, - - initial_keep_alive_deadline: Instant, -} - -impl - SubstreamConnectionHandler -{ - pub fn new(initial_keep_alive: Duration) -> Self { - Self { - inbound_substreams: Default::default(), - outbound_substreams: Default::default(), - next_inbound_substream_id: InboundSubstreamId(0), - next_outbound_substream_id: OutboundSubstreamId(0), - new_substreams: Default::default(), - initial_keep_alive_deadline: Instant::now() + initial_keep_alive, - } - } -} - -impl - SubstreamConnectionHandler -{ - pub fn new_outbound_only(initial_keep_alive: Duration) -> Self { - Self { - inbound_substreams: Default::default(), - outbound_substreams: Default::default(), - next_inbound_substream_id: InboundSubstreamId(0), - next_outbound_substream_id: OutboundSubstreamId(0), - new_substreams: Default::default(), - initial_keep_alive_deadline: Instant::now() + initial_keep_alive, - } - } -} - -impl - SubstreamConnectionHandler -{ - pub fn 
new_inbound_only(initial_keep_alive: Duration) -> Self { - Self { - inbound_substreams: Default::default(), - outbound_substreams: Default::default(), - next_inbound_substream_id: InboundSubstreamId(0), - next_outbound_substream_id: OutboundSubstreamId(0), - new_substreams: Default::default(), - initial_keep_alive_deadline: Instant::now() + initial_keep_alive, - } - } -} - -/// Poll all substreams within the given HashMap. -/// -/// This is defined as a separate function because we call it with two different fields stored within [`SubstreamConnectionHandler`]. -fn poll_substreams( - substreams: &mut HashMap, - cx: &mut Context<'_>, -) -> Poll> -where - TSubstream: SubstreamHandler, - TId: Copy + Eq + Hash + fmt::Display, -{ - let substream_ids = substreams.keys().copied().collect::>(); - - 'loop_substreams: for id in substream_ids { - let mut handler = substreams - .remove(&id) - .expect("we just got the key out of the map"); - - let (next_state, poll) = 'loop_handler: loop { - match handler.advance(cx) { - Ok(Next::EmitEvent { next_state, event }) => { - break (next_state, Poll::Ready(Ok((id, event)))) - } - Ok(Next::Pending { next_state }) => break (next_state, Poll::Pending), - Ok(Next::Continue { next_state }) => { - handler = next_state; - continue 'loop_handler; - } - Ok(Next::Done) => { - log::debug!("Substream handler {} finished", id); - continue 'loop_substreams; - } - Err(e) => return Poll::Ready(Err((id, e))), - } - }; - - substreams.insert(id, next_state); - - return poll; - } - - Poll::Pending -} - -/// Event sent from the [`libp2p_swarm::NetworkBehaviour`] to the [`SubstreamConnectionHandler`]. -#[allow(clippy::enum_variant_names)] -#[derive(Debug)] -pub enum InEvent { - /// Open a new substream using the provided `open_info`. - /// - /// For "client-server" protocols, this is typically the initial message to be sent to the other party. 
- NewSubstream { open_info: I }, - NotifyInboundSubstream { - id: InboundSubstreamId, - message: TInboundEvent, - }, - NotifyOutboundSubstream { - id: OutboundSubstreamId, - message: TOutboundEvent, - }, -} - -/// Event produced by the [`SubstreamConnectionHandler`] for the corresponding [`libp2p_swarm::NetworkBehaviour`]. -#[derive(Debug)] -pub enum OutEvent { - /// An inbound substream produced an event. - InboundEvent { - id: InboundSubstreamId, - message: TInbound, - }, - /// An outbound substream produced an event. - OutboundEvent { - id: OutboundSubstreamId, - message: TOutbound, - }, - /// An inbound substream errored irrecoverably. - InboundError { - id: InboundSubstreamId, - error: TInboundError, - }, - /// An outbound substream errored irrecoverably. - OutboundError { - id: OutboundSubstreamId, - error: TOutboundError, - }, -} - -impl< - TInboundInEvent, - TInboundOutEvent, - TOutboundInEvent, - TOutboundOutEvent, - TOutboundOpenInfo, - TInboundError, - TOutboundError, - TInboundSubstreamHandler, - TOutboundSubstreamHandler, - > ConnectionHandler - for SubstreamConnectionHandler< - TInboundSubstreamHandler, - TOutboundSubstreamHandler, - TOutboundOpenInfo, - > -where - TInboundSubstreamHandler: SubstreamHandler< - InEvent = TInboundInEvent, - OutEvent = TInboundOutEvent, - Error = TInboundError, - OpenInfo = (), - >, - TOutboundSubstreamHandler: SubstreamHandler< - InEvent = TOutboundInEvent, - OutEvent = TOutboundOutEvent, - Error = TOutboundError, - OpenInfo = TOutboundOpenInfo, - >, - TInboundInEvent: fmt::Debug + Send + 'static, - TInboundOutEvent: fmt::Debug + Send + 'static, - TOutboundInEvent: fmt::Debug + Send + 'static, - TOutboundOutEvent: fmt::Debug + Send + 'static, - TOutboundOpenInfo: fmt::Debug + Send + 'static, - TInboundError: fmt::Debug + Send + 'static, - TOutboundError: fmt::Debug + Send + 'static, - TInboundSubstreamHandler: Send + 'static, - TOutboundSubstreamHandler: Send + 'static, -{ - type InEvent = InEvent; - type OutEvent = 
OutEvent; - type Error = Void; - type InboundProtocol = PassthroughProtocol; - type OutboundProtocol = PassthroughProtocol; - type InboundOpenInfo = (); - type OutboundOpenInfo = TOutboundOpenInfo; - - fn listen_protocol(&self) -> SubstreamProtocol { - TInboundSubstreamHandler::upgrade(()) - } - - fn on_connection_event( - &mut self, - event: ConnectionEvent< - Self::InboundProtocol, - Self::OutboundProtocol, - Self::InboundOpenInfo, - Self::OutboundOpenInfo, - >, - ) { - match event { - ConnectionEvent::FullyNegotiatedInbound(FullyNegotiatedInbound { - protocol, .. - }) => { - self.inbound_substreams.insert( - self.next_inbound_substream_id.fetch_and_increment(), - TInboundSubstreamHandler::new(protocol, ()), - ); - } - ConnectionEvent::FullyNegotiatedOutbound(FullyNegotiatedOutbound { - protocol, - info, - }) => { - self.outbound_substreams.insert( - self.next_outbound_substream_id.fetch_and_increment(), - TOutboundSubstreamHandler::new(protocol, info), - ); - } - // TODO: Handle upgrade errors properly - ConnectionEvent::AddressChange(_) - | ConnectionEvent::ListenUpgradeError(_) - | ConnectionEvent::DialUpgradeError(_) => {} - } - } - - fn on_behaviour_event(&mut self, event: Self::InEvent) { - match event { - InEvent::NewSubstream { open_info } => self.new_substreams.push_back(open_info), - InEvent::NotifyInboundSubstream { id, message } => { - match self.inbound_substreams.remove(&id) { - Some(handler) => { - let new_handler = handler.on_event(message); - - self.inbound_substreams.insert(id, new_handler); - } - None => { - log::debug!("Substream with ID {} not found", id); - } - } - } - InEvent::NotifyOutboundSubstream { id, message } => { - match self.outbound_substreams.remove(&id) { - Some(handler) => { - let new_handler = handler.on_event(message); - - self.outbound_substreams.insert(id, new_handler); - } - None => { - log::debug!("Substream with ID {} not found", id); - } - } - } - } - } - - fn connection_keep_alive(&self) -> KeepAlive { - // Rudimentary 
keep-alive handling, to be extended as needed as this abstraction is used more by other protocols. - - if Instant::now() < self.initial_keep_alive_deadline { - return KeepAlive::Yes; - } - - if self.inbound_substreams.is_empty() - && self.outbound_substreams.is_empty() - && self.new_substreams.is_empty() - { - return KeepAlive::No; - } - - KeepAlive::Yes - } - - fn poll( - &mut self, - cx: &mut Context<'_>, - ) -> Poll< - ConnectionHandlerEvent< - Self::OutboundProtocol, - Self::OutboundOpenInfo, - Self::OutEvent, - Self::Error, - >, - > { - if let Some(open_info) = self.new_substreams.pop_front() { - return Poll::Ready(ConnectionHandlerEvent::OutboundSubstreamRequest { - protocol: TOutboundSubstreamHandler::upgrade(open_info), - }); - } - - match poll_substreams(&mut self.inbound_substreams, cx) { - Poll::Ready(Ok((id, message))) => { - return Poll::Ready(ConnectionHandlerEvent::Custom(OutEvent::InboundEvent { - id, - message, - })) - } - Poll::Ready(Err((id, error))) => { - return Poll::Ready(ConnectionHandlerEvent::Custom(OutEvent::InboundError { - id, - error, - })) - } - Poll::Pending => {} - } - - match poll_substreams(&mut self.outbound_substreams, cx) { - Poll::Ready(Ok((id, message))) => { - return Poll::Ready(ConnectionHandlerEvent::Custom(OutEvent::OutboundEvent { - id, - message, - })) - } - Poll::Ready(Err((id, error))) => { - return Poll::Ready(ConnectionHandlerEvent::Custom(OutEvent::OutboundError { - id, - error, - })) - } - Poll::Pending => {} - } - - Poll::Pending - } -} - -/// A helper struct for substream handlers that can be implemented as async functions. -/// -/// This only works for substreams without an `InEvent` because - once constructed - the state of an inner future is opaque. 
-pub(crate) struct FutureSubstream { - future: Fuse>>, -} - -impl FutureSubstream { - pub(crate) fn new( - future: impl Future> + Send + 'static, - ) -> Self { - Self { - future: future.boxed().fuse(), - } - } - - pub(crate) fn advance(mut self, cx: &mut Context<'_>) -> Result, TError> { - if self.future.is_terminated() { - return Ok(Next::Done); - } - - match self.future.poll_unpin(cx) { - Poll::Ready(Ok(event)) => Ok(Next::EmitEvent { - event, - next_state: self, - }), - Poll::Ready(Err(error)) => Err(error), - Poll::Pending => Ok(Next::Pending { next_state: self }), - } - } -} - -impl SubstreamHandler for void::Void { - type InEvent = void::Void; - type OutEvent = void::Void; - type Error = void::Void; - type OpenInfo = (); - - fn new(_: NegotiatedSubstream, _: Self::OpenInfo) -> Self { - unreachable!("we should never yield a substream") - } - - fn on_event(self, event: Self::InEvent) -> Self { - void::unreachable(event) - } - - fn advance(self, _: &mut Context<'_>) -> Result, Self::Error> { - void::unreachable(self) - } - - fn upgrade( - open_info: Self::OpenInfo, - ) -> SubstreamProtocol { - SubstreamProtocol::new(PassthroughProtocol { ident: None }, open_info) - } -} diff --git a/protocols/rendezvous/tests/rendezvous.rs b/protocols/rendezvous/tests/rendezvous.rs index 494f56551d8..c2de88fd615 100644 --- a/protocols/rendezvous/tests/rendezvous.rs +++ b/protocols/rendezvous/tests/rendezvous.rs @@ -20,23 +20,30 @@ use futures::stream::FuturesUnordered; use futures::StreamExt; +use libp2p_core::multiaddr::Protocol; +use libp2p_core::Multiaddr; use libp2p_identity as identity; use libp2p_rendezvous as rendezvous; +use libp2p_rendezvous::client::RegisterError; use libp2p_swarm::{DialError, Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt; use std::convert::TryInto; use std::time::Duration; +use tracing_subscriber::EnvFilter; #[tokio::test] async fn given_successful_registration_then_successful_discovery() { - let _ = env_logger::try_init(); + let _ = 
tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let namespace = rendezvous::Namespace::from_static("some-namespace"); let ([mut alice, mut bob], mut robert) = new_server_with_connected_clients(rendezvous::server::Config::default()).await; alice .behaviour_mut() - .register(namespace.clone(), *robert.local_peer_id(), None); + .register(namespace.clone(), *robert.local_peer_id(), None) + .unwrap(); match libp2p_swarm_test::drive(&mut alice, &mut robert).await { ( @@ -79,9 +86,28 @@ async fn given_successful_registration_then_successful_discovery() { } } +#[tokio::test] +async fn should_return_error_when_no_external_addresses() { + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); + let namespace = rendezvous::Namespace::from_static("some-namespace"); + let server = new_server(rendezvous::server::Config::default()).await; + let mut client = Swarm::new_ephemeral(rendezvous::client::Behaviour::new); + + let actual = client + .behaviour_mut() + .register(namespace.clone(), *server.local_peer_id(), None) + .unwrap_err(); + + assert!(matches!(actual, RegisterError::NoExternalAddresses)) +} + #[tokio::test] async fn given_successful_registration_then_refresh_ttl() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let namespace = rendezvous::Namespace::from_static("some-namespace"); let ([mut alice, mut bob], mut robert) = new_server_with_connected_clients(rendezvous::server::Config::default()).await; @@ -91,7 +117,8 @@ async fn given_successful_registration_then_refresh_ttl() { alice .behaviour_mut() - .register(namespace.clone(), roberts_peer_id, None); + .register(namespace.clone(), roberts_peer_id, None) + .unwrap(); match libp2p_swarm_test::drive(&mut alice, &mut robert).await { ( @@ -114,7 +141,8 @@ async fn given_successful_registration_then_refresh_ttl() { alice .behaviour_mut() - 
.register(namespace.clone(), roberts_peer_id, Some(refresh_ttl)); + .register(namespace.clone(), roberts_peer_id, Some(refresh_ttl)) + .unwrap(); match libp2p_swarm_test::drive(&mut alice, &mut robert).await { ( @@ -143,25 +171,80 @@ async fn given_successful_registration_then_refresh_ttl() { } } +#[tokio::test] +async fn given_successful_registration_then_refresh_external_addrs() { + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); + let namespace = rendezvous::Namespace::from_static("some-namespace"); + let ([mut alice], mut robert) = + new_server_with_connected_clients(rendezvous::server::Config::default()).await; + + let roberts_peer_id = *robert.local_peer_id(); + + alice + .behaviour_mut() + .register(namespace.clone(), roberts_peer_id, None) + .unwrap(); + + match libp2p_swarm_test::drive(&mut alice, &mut robert).await { + ( + [rendezvous::client::Event::Registered { .. }], + [rendezvous::server::Event::PeerRegistered { .. }], + ) => {} + events => panic!("Unexpected events: {events:?}"), + } + + let external_addr = Multiaddr::empty().with(Protocol::Memory(0)); + + alice.add_external_address(external_addr.clone()); + + match libp2p_swarm_test::drive(&mut alice, &mut robert).await { + ( + [rendezvous::client::Event::Registered { .. }], + [rendezvous::server::Event::PeerRegistered { registration, .. }], + ) => { + let record = registration.record; + assert!(record.addresses().contains(&external_addr)); + } + events => panic!("Unexpected events: {events:?}"), + } + + alice.remove_external_address(&external_addr); + + match libp2p_swarm_test::drive(&mut alice, &mut robert).await { + ( + [rendezvous::client::Event::Registered { .. }], + [rendezvous::server::Event::PeerRegistered { registration, .. 
}], + ) => { + let record = registration.record; + assert!(!record.addresses().contains(&external_addr)); + } + events => panic!("Unexpected events: {events:?}"), + } +} + #[tokio::test] async fn given_invalid_ttl_then_unsuccessful_registration() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let namespace = rendezvous::Namespace::from_static("some-namespace"); let ([mut alice], mut robert) = new_server_with_connected_clients(rendezvous::server::Config::default()).await; - alice.behaviour_mut().register( - namespace.clone(), - *robert.local_peer_id(), - Some(100_000_000), - ); + alice + .behaviour_mut() + .register( + namespace.clone(), + *robert.local_peer_id(), + Some(100_000_000), + ) + .unwrap(); match libp2p_swarm_test::drive(&mut alice, &mut robert).await { ( - [rendezvous::client::Event::RegisterFailed(rendezvous::client::RegisterError::Remote { - error, - .. - })], + [rendezvous::client::Event::RegisterFailed { error, .. }], [rendezvous::server::Event::PeerNotRegistered { .. }], ) => { assert_eq!(error, rendezvous::ErrorCode::InvalidTtl); @@ -172,7 +255,9 @@ async fn given_invalid_ttl_then_unsuccessful_registration() { #[tokio::test] async fn discover_allows_for_dial_by_peer_id() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let namespace = rendezvous::Namespace::from_static("some-namespace"); let ([mut alice, mut bob], robert) = new_server_with_connected_clients(rendezvous::server::Config::default()).await; @@ -182,7 +267,8 @@ async fn discover_allows_for_dial_by_peer_id() { alice .behaviour_mut() - .register(namespace.clone(), roberts_peer_id, None); + .register(namespace.clone(), roberts_peer_id, None) + .unwrap(); match alice.next_behaviour_event().await { rendezvous::client::Event::Registered { .. 
} => {} event => panic!("Unexpected event: {event:?}"), @@ -226,21 +312,23 @@ async fn discover_allows_for_dial_by_peer_id() { #[tokio::test] async fn eve_cannot_register() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let namespace = rendezvous::Namespace::from_static("some-namespace"); let mut robert = new_server(rendezvous::server::Config::default()).await; let mut eve = new_impersonating_client().await; eve.connect(&mut robert).await; eve.behaviour_mut() - .register(namespace.clone(), *robert.local_peer_id(), None); + .register(namespace.clone(), *robert.local_peer_id(), None) + .unwrap(); match libp2p_swarm_test::drive(&mut eve, &mut robert).await { ( - [rendezvous::client::Event::RegisterFailed(rendezvous::client::RegisterError::Remote { - error: err_code, - .. - })], + [rendezvous::client::Event::RegisterFailed { + error: err_code, .. + }], [rendezvous::server::Event::PeerNotRegistered { .. }], ) => { assert_eq!(err_code, rendezvous::ErrorCode::NotAuthorized); @@ -252,7 +340,9 @@ async fn eve_cannot_register() { // test if charlie can operate as client and server simultaneously #[tokio::test] async fn can_combine_client_and_server() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let namespace = rendezvous::Namespace::from_static("some-namespace"); let ([mut alice], mut robert) = new_server_with_connected_clients(rendezvous::server::Config::default()).await; @@ -263,7 +353,8 @@ async fn can_combine_client_and_server() { charlie .behaviour_mut() .client - .register(namespace.clone(), *robert.local_peer_id(), None); + .register(namespace.clone(), *robert.local_peer_id(), None) + .unwrap(); match libp2p_swarm_test::drive(&mut charlie, &mut robert).await { ( [CombinedEvent::Client(rendezvous::client::Event::Registered { .. 
})], @@ -274,7 +365,8 @@ async fn can_combine_client_and_server() { alice .behaviour_mut() - .register(namespace, *charlie.local_peer_id(), None); + .register(namespace, *charlie.local_peer_id(), None) + .unwrap(); match libp2p_swarm_test::drive(&mut charlie, &mut alice).await { ( [CombinedEvent::Server(rendezvous::server::Event::PeerRegistered { .. })], @@ -286,7 +378,9 @@ async fn can_combine_client_and_server() { #[tokio::test] async fn registration_on_clients_expire() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let namespace = rendezvous::Namespace::from_static("some-namespace"); let ([mut alice, mut bob], robert) = new_server_with_connected_clients(rendezvous::server::Config::default().with_min_ttl(1)) @@ -295,11 +389,12 @@ async fn registration_on_clients_expire() { let roberts_peer_id = *robert.local_peer_id(); tokio::spawn(robert.loop_on_next()); - let registration_ttl = 3; + let registration_ttl = 1; alice .behaviour_mut() - .register(namespace.clone(), roberts_peer_id, Some(registration_ttl)); + .register(namespace.clone(), roberts_peer_id, Some(registration_ttl)) + .unwrap(); match alice.next_behaviour_event().await { rendezvous::client::Event::Registered { .. 
} => {} event => panic!("Unexpected event: {event:?}"), @@ -313,7 +408,7 @@ async fn registration_on_clients_expire() { event => panic!("Unexpected event: {event:?}"), } - tokio::time::sleep(Duration::from_secs(registration_ttl + 5)).await; + tokio::time::sleep(Duration::from_secs(registration_ttl + 1)).await; let event = bob.select_next_some().await; let error = bob.dial(*alice.local_peer_id()).unwrap_err(); @@ -353,7 +448,7 @@ async fn new_server_with_connected_clients( async fn new_client() -> Swarm { let mut client = Swarm::new_ephemeral(rendezvous::client::Behaviour::new); - client.listen().await; // we need to listen otherwise we don't have addresses to register + client.listen().with_memory_addr_external().await; // we need to listen otherwise we don't have addresses to register client } @@ -361,7 +456,7 @@ async fn new_client() -> Swarm { async fn new_server(config: rendezvous::server::Config) -> Swarm { let mut server = Swarm::new_ephemeral(|_| rendezvous::server::Behaviour::new(config)); - server.listen().await; + server.listen().with_memory_addr_external().await; server } @@ -371,7 +466,7 @@ async fn new_combined_node() -> Swarm { client: rendezvous::client::Behaviour::new(identity), server: rendezvous::server::Behaviour::new(rendezvous::server::Config::default()), }); - node.listen().await; + node.listen().with_memory_addr_external().await; node } @@ -382,7 +477,7 @@ async fn new_impersonating_client() -> Swarm { // As such, the best we can do is hand eve a completely different keypair from what she is using to authenticate her connection. 
let someone_else = identity::Keypair::generate_ed25519(); let mut eve = Swarm::new_ephemeral(move |_| rendezvous::client::Behaviour::new(someone_else)); - eve.listen().await; + eve.listen().with_memory_addr_external().await; eve } diff --git a/protocols/request-response/CHANGELOG.md b/protocols/request-response/CHANGELOG.md index bb57385c9c6..d53ff479ee2 100644 --- a/protocols/request-response/CHANGELOG.md +++ b/protocols/request-response/CHANGELOG.md @@ -1,11 +1,63 @@ -## 0.25.0 - unreleased +## 0.26.1 + +- Derive `PartialOrd` and `Ord` for `{Out,In}boundRequestId`. + See [PR 4956](https://github.com/libp2p/rust-libp2p/pull/4956). + +## 0.26.0 + +- Remove `request_response::Config::set_connection_keep_alive` in favor of `SwarmBuilder::idle_connection_timeout`. + See [PR 4679](https://github.com/libp2p/rust-libp2p/pull/4679). +- Allow at most 100 concurrent inbound + outbound streams per instance of `request_response::Behaviour`. + This limit is configurable via `Config::with_max_concurrent_streams`. + See [PR 3914](https://github.com/libp2p/rust-libp2p/pull/3914). +- Report IO failures on inbound and outbound streams. + See [PR 3914](https://github.com/libp2p/rust-libp2p/pull/3914). +- Introduce dedicated types for `InboundRequestId` and `OutboundRequestId`. + See [PR 3914](https://github.com/libp2p/rust-libp2p/pull/3914). +- Keep peer addresses in `HashSet` instead of `SmallVec` to prevent adding duplicate addresses. + See [PR 4700](https://github.com/libp2p/rust-libp2p/pull/4700). + +## 0.25.2 + +- Deprecate `request_response::Config::set_connection_keep_alive` in favor of `SwarmBuilder::idle_connection_timeout`. + See [PR 4029](https://github.com/libp2p/rust-libp2p/pull/4029). + + + +## 0.25.1 + +- Replace unmaintained `serde_cbor` dependency with `cbor4ii`. + See [PR 4187]. 
+ +[PR 4187]: https://github.com/libp2p/rust-libp2p/pull/4187 + +## 0.25.0 + +- Add `request_response::json::Behaviour` and `request_response::cbor::Behaviour` building on top of the `serde` traits. + To conveniently construct these, we remove the `Codec` parameter from `Behaviour::new` and add `Behaviour::with_codec`. + See [PR 3952]. - Raise MSRV to 1.65. See [PR 3715]. - Remove deprecated `RequestResponse` prefixed items. See [PR 3702]. +- Remove `InboundFailure::UnsupportedProtocols` and `InboundFailure::InboundTimeout`. + These variants are no longer constructed. + See [PR 3605]. + +- Don't close connections if individual streams fail. + Log the error instead. + See [PR 3913]. + +[PR 3952]: https://github.com/libp2p/rust-libp2p/pull/3952 +[PR 3605]: https://github.com/libp2p/rust-libp2p/pull/3605 [PR 3715]: https://github.com/libp2p/rust-libp2p/pull/3715 [PR 3702]: https://github.com/libp2p/rust-libp2p/pull/3702 +[PR 3913]: https://github.com/libp2p/rust-libp2p/pull/3913 ## 0.24.1 diff --git a/protocols/request-response/Cargo.toml b/protocols/request-response/Cargo.toml index 3bde34e2cc1..e85ff691969 100644 --- a/protocols/request-response/Cargo.toml +++ b/protocols/request-response/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-request-response" edition = "2021" rust-version = { workspace = true } description = "Generic Request/Response Protocols" -version = "0.25.0" +version = "0.26.1" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -12,22 +12,36 @@ categories = ["network-programming", "asynchronous"] [dependencies] async-trait = "0.1" -futures = "0.3.28" -instant = "0.1.11" +cbor4ii = { version = "0.3.2", features = ["serde1", "use_std"], optional = true } +futures = "0.3.30" +instant = "0.1.12" libp2p-core = { workspace = true } libp2p-swarm = { workspace = true } libp2p-identity = { workspace = true } rand = "0.8" -smallvec = "1.6.1" +serde = { version = "1.0", optional = true} +serde_json = { version 
= "1.0.108", optional = true } +smallvec = "1.11.2" +tracing = "0.1.37" +void = "1.0.2" +futures-timer = "3.0.2" +futures-bounded = { workspace = true } + +[features] +json = ["dep:serde", "dep:serde_json", "libp2p-swarm/macros"] +cbor = ["dep:serde", "dep:cbor4ii", "libp2p-swarm/macros"] [dev-dependencies] +anyhow = "1.0.76" async-std = { version = "1.6.2", features = ["attributes"] } -env_logger = "0.10.0" libp2p-noise = { workspace = true } libp2p-tcp = { workspace = true, features = ["async-io"] } libp2p-yamux = { workspace = true } rand = "0.8" -libp2p-swarm-test = { workspace = true } +libp2p-swarm-test = { path = "../../swarm-test" } +futures_ringbuf = "0.4.0" +serde = { version = "1.0", features = ["derive"]} +tracing-subscriber = { version = "0.3", features = ["env-filter"] } # Passing arguments to the docsrs builder in order to properly document cfg's. # More information: https://docs.rs/about/builds#cross-compiling @@ -35,3 +49,6 @@ libp2p-swarm-test = { workspace = true } all-features = true rustdoc-args = ["--cfg", "docsrs"] rustc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true diff --git a/protocols/request-response/src/cbor.rs b/protocols/request-response/src/cbor.rs new file mode 100644 index 00000000000..f371f6149dc --- /dev/null +++ b/protocols/request-response/src/cbor.rs @@ -0,0 +1,228 @@ +// Copyright 2023 Protocol Labs +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +/// A request-response behaviour using [`cbor4ii::serde`] for serializing and +/// deserializing the messages. +/// +/// # Example +/// +/// ``` +/// # use libp2p_request_response::{cbor, ProtocolSupport, self as request_response}; +/// # use libp2p_swarm::StreamProtocol; +/// #[derive(Debug, serde::Serialize, serde::Deserialize)] +/// struct GreetRequest { +/// name: String, +/// } +/// +/// #[derive(Debug, serde::Serialize, serde::Deserialize)] +/// struct GreetResponse { +/// message: String, +/// } +/// +/// let behaviour = cbor::Behaviour::::new( +/// [(StreamProtocol::new("/my-cbor-protocol"), ProtocolSupport::Full)], +/// request_response::Config::default() +/// ); +/// ``` +pub type Behaviour = crate::Behaviour>; + +mod codec { + use async_trait::async_trait; + use cbor4ii::core::error::DecodeError; + use futures::prelude::*; + use futures::{AsyncRead, AsyncWrite}; + use libp2p_swarm::StreamProtocol; + use serde::{de::DeserializeOwned, Serialize}; + use std::{collections::TryReserveError, convert::Infallible, io, marker::PhantomData}; + + /// Max request size in bytes + const REQUEST_SIZE_MAXIMUM: u64 = 1024 * 1024; + /// Max response size in bytes + const RESPONSE_SIZE_MAXIMUM: u64 = 10 * 1024 * 1024; + + pub struct Codec { + phantom: PhantomData<(Req, Resp)>, + } + + impl Default for Codec { + fn default() -> Self { + Codec { + phantom: PhantomData, + } + } + } + + impl Clone for Codec { + fn clone(&self) -> Self { + Self::default() + } + } + + #[async_trait] + impl 
crate::Codec for Codec + where + Req: Send + Serialize + DeserializeOwned, + Resp: Send + Serialize + DeserializeOwned, + { + type Protocol = StreamProtocol; + type Request = Req; + type Response = Resp; + + async fn read_request(&mut self, _: &Self::Protocol, io: &mut T) -> io::Result + where + T: AsyncRead + Unpin + Send, + { + let mut vec = Vec::new(); + + io.take(REQUEST_SIZE_MAXIMUM).read_to_end(&mut vec).await?; + + cbor4ii::serde::from_slice(vec.as_slice()).map_err(decode_into_io_error) + } + + async fn read_response(&mut self, _: &Self::Protocol, io: &mut T) -> io::Result + where + T: AsyncRead + Unpin + Send, + { + let mut vec = Vec::new(); + + io.take(RESPONSE_SIZE_MAXIMUM).read_to_end(&mut vec).await?; + + cbor4ii::serde::from_slice(vec.as_slice()).map_err(decode_into_io_error) + } + + async fn write_request( + &mut self, + _: &Self::Protocol, + io: &mut T, + req: Self::Request, + ) -> io::Result<()> + where + T: AsyncWrite + Unpin + Send, + { + let data: Vec = + cbor4ii::serde::to_vec(Vec::new(), &req).map_err(encode_into_io_error)?; + + io.write_all(data.as_ref()).await?; + + Ok(()) + } + + async fn write_response( + &mut self, + _: &Self::Protocol, + io: &mut T, + resp: Self::Response, + ) -> io::Result<()> + where + T: AsyncWrite + Unpin + Send, + { + let data: Vec = + cbor4ii::serde::to_vec(Vec::new(), &resp).map_err(encode_into_io_error)?; + + io.write_all(data.as_ref()).await?; + + Ok(()) + } + } + + fn decode_into_io_error(err: cbor4ii::serde::DecodeError) -> io::Error { + match err { + cbor4ii::serde::DecodeError::Core(DecodeError::Read(e)) => { + io::Error::new(io::ErrorKind::Other, e) + } + cbor4ii::serde::DecodeError::Core(e @ DecodeError::Unsupported { .. }) => { + io::Error::new(io::ErrorKind::Unsupported, e) + } + cbor4ii::serde::DecodeError::Core(e @ DecodeError::Eof { .. 
}) => { + io::Error::new(io::ErrorKind::UnexpectedEof, e) + } + cbor4ii::serde::DecodeError::Core(e) => io::Error::new(io::ErrorKind::InvalidData, e), + cbor4ii::serde::DecodeError::Custom(e) => { + io::Error::new(io::ErrorKind::Other, e.to_string()) + } + } + } + + fn encode_into_io_error(err: cbor4ii::serde::EncodeError) -> io::Error { + io::Error::new(io::ErrorKind::Other, err) + } +} + +#[cfg(test)] +mod tests { + use crate::cbor::codec::Codec; + use crate::Codec as _; + use futures::AsyncWriteExt; + use futures_ringbuf::Endpoint; + use libp2p_swarm::StreamProtocol; + use serde::{Deserialize, Serialize}; + + #[async_std::test] + async fn test_codec() { + let expected_request = TestRequest { + payload: "test_payload".to_string(), + }; + let expected_response = TestResponse { + payload: "test_payload".to_string(), + }; + let protocol = StreamProtocol::new("/test_cbor/1"); + let mut codec = Codec::default(); + + let (mut a, mut b) = Endpoint::pair(124, 124); + codec + .write_request(&protocol, &mut a, expected_request.clone()) + .await + .expect("Should write request"); + a.close().await.unwrap(); + + let actual_request = codec + .read_request(&protocol, &mut b) + .await + .expect("Should read request"); + b.close().await.unwrap(); + + assert_eq!(actual_request, expected_request); + + let (mut a, mut b) = Endpoint::pair(124, 124); + codec + .write_response(&protocol, &mut a, expected_response.clone()) + .await + .expect("Should write response"); + a.close().await.unwrap(); + + let actual_response = codec + .read_response(&protocol, &mut b) + .await + .expect("Should read response"); + b.close().await.unwrap(); + + assert_eq!(actual_response, expected_response); + } + + #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] + struct TestRequest { + payload: String, + } + + #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] + struct TestResponse { + payload: String, + } +} diff --git a/protocols/request-response/src/handler.rs 
b/protocols/request-response/src/handler.rs index 65aa1f842c5..2d45e0d7dc3 100644 --- a/protocols/request-response/src/handler.rs +++ b/protocols/request-response/src/handler.rs @@ -23,18 +23,17 @@ pub(crate) mod protocol; pub use protocol::ProtocolSupport; use crate::codec::Codec; -use crate::handler::protocol::{RequestProtocol, ResponseProtocol}; -use crate::{RequestId, EMPTY_QUEUE_SHRINK_THRESHOLD}; +use crate::handler::protocol::Protocol; +use crate::{InboundRequestId, OutboundRequestId, EMPTY_QUEUE_SHRINK_THRESHOLD}; -use futures::{channel::oneshot, future::BoxFuture, prelude::*, stream::FuturesUnordered}; -use instant::Instant; -use libp2p_core::upgrade::{NegotiationError, UpgradeError}; +use futures::channel::mpsc; +use futures::{channel::oneshot, prelude::*}; use libp2p_swarm::handler::{ ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound, ListenUpgradeError, }; use libp2p_swarm::{ - handler::{ConnectionHandler, ConnectionHandlerEvent, ConnectionHandlerUpgrErr, KeepAlive}, + handler::{ConnectionHandler, ConnectionHandlerEvent, StreamUpgradeError}, SubstreamProtocol, }; use smallvec::SmallVec; @@ -58,34 +57,34 @@ where inbound_protocols: SmallVec<[TCodec::Protocol; 2]>, /// The request/response message codec. codec: TCodec, - /// The keep-alive timeout of idle connections. A connection is considered - /// idle if there are no outbound substreams. - keep_alive_timeout: Duration, - /// The timeout for inbound and outbound substreams (i.e. request - /// and response processing). - substream_timeout: Duration, - /// The current connection keep-alive. - keep_alive: KeepAlive, - /// A pending fatal error that results in the connection being closed. - pending_error: Option>, /// Queue of events to emit in `poll()`. pending_events: VecDeque>, /// Outbound upgrades waiting to be emitted as an `OutboundSubstreamRequest`. - outbound: VecDeque>, - /// Inbound upgrades waiting for the incoming request. 
- inbound: FuturesUnordered< - BoxFuture< - 'static, - Result< - ( - (RequestId, TCodec::Request), - oneshot::Sender, - ), - oneshot::Canceled, - >, - >, - >, + pending_outbound: VecDeque>, + + requested_outbound: VecDeque>, + /// A channel for receiving inbound requests. + inbound_receiver: mpsc::Receiver<( + InboundRequestId, + TCodec::Request, + oneshot::Sender, + )>, + /// The [`mpsc::Sender`] for the above receiver. Cloned for each inbound request. + inbound_sender: mpsc::Sender<( + InboundRequestId, + TCodec::Request, + oneshot::Sender, + )>, + inbound_request_id: Arc, + + worker_streams: futures_bounded::FuturesMap, io::Error>>, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +enum RequestId { + Inbound(InboundRequestId), + Outbound(OutboundRequestId), } impl Handler @@ -95,96 +94,164 @@ where pub(super) fn new( inbound_protocols: SmallVec<[TCodec::Protocol; 2]>, codec: TCodec, - keep_alive_timeout: Duration, substream_timeout: Duration, inbound_request_id: Arc, + max_concurrent_streams: usize, ) -> Self { + let (inbound_sender, inbound_receiver) = mpsc::channel(0); Self { inbound_protocols, codec, - keep_alive: KeepAlive::Yes, - keep_alive_timeout, - substream_timeout, - outbound: VecDeque::new(), - inbound: FuturesUnordered::new(), + pending_outbound: VecDeque::new(), + requested_outbound: Default::default(), + inbound_receiver, + inbound_sender, pending_events: VecDeque::new(), - pending_error: None, inbound_request_id, + worker_streams: futures_bounded::FuturesMap::new( + substream_timeout, + max_concurrent_streams, + ), } } + /// Returns the next inbound request ID. 
+ fn next_inbound_request_id(&mut self) -> InboundRequestId { + InboundRequestId(self.inbound_request_id.fetch_add(1, Ordering::Relaxed)) + } + fn on_fully_negotiated_inbound( &mut self, FullyNegotiatedInbound { - protocol: sent, - info: request_id, + protocol: (mut stream, protocol), + info: (), }: FullyNegotiatedInbound< ::InboundProtocol, ::InboundOpenInfo, >, ) { - if sent { - self.pending_events - .push_back(Event::ResponseSent(request_id)) - } else { - self.pending_events - .push_back(Event::ResponseOmission(request_id)) + let mut codec = self.codec.clone(); + let request_id = self.next_inbound_request_id(); + let mut sender = self.inbound_sender.clone(); + + let recv = async move { + // A channel for notifying the inbound upgrade when the + // response is sent. + let (rs_send, rs_recv) = oneshot::channel(); + + let read = codec.read_request(&protocol, &mut stream); + let request = read.await?; + sender + .send((request_id, request, rs_send)) + .await + .expect("`ConnectionHandler` owns both ends of the channel"); + drop(sender); + + if let Ok(response) = rs_recv.await { + let write = codec.write_response(&protocol, &mut stream, response); + write.await?; + + stream.close().await?; + Ok(Event::ResponseSent(request_id)) + } else { + stream.close().await?; + Ok(Event::ResponseOmission(request_id)) + } + }; + + if self + .worker_streams + .try_push(RequestId::Inbound(request_id), recv.boxed()) + .is_err() + { + tracing::warn!("Dropping inbound stream because we are at capacity") + } + } + + fn on_fully_negotiated_outbound( + &mut self, + FullyNegotiatedOutbound { + protocol: (mut stream, protocol), + info: (), + }: FullyNegotiatedOutbound< + ::OutboundProtocol, + ::OutboundOpenInfo, + >, + ) { + let message = self + .requested_outbound + .pop_front() + .expect("negotiated a stream without a pending message"); + + let mut codec = self.codec.clone(); + let request_id = message.request_id; + + let send = async move { + let write = codec.write_request(&protocol, 
&mut stream, message.request); + write.await?; + stream.close().await?; + let read = codec.read_response(&protocol, &mut stream); + let response = read.await?; + + Ok(Event::Response { + request_id, + response, + }) + }; + + if self + .worker_streams + .try_push(RequestId::Outbound(request_id), send.boxed()) + .is_err() + { + tracing::warn!("Dropping outbound stream because we are at capacity") } } fn on_dial_upgrade_error( &mut self, - DialUpgradeError { info, error }: DialUpgradeError< + DialUpgradeError { error, info: () }: DialUpgradeError< ::OutboundOpenInfo, ::OutboundProtocol, >, ) { + let message = self + .requested_outbound + .pop_front() + .expect("negotiated a stream without a pending message"); + match error { - ConnectionHandlerUpgrErr::Timeout => { - self.pending_events.push_back(Event::OutboundTimeout(info)); + StreamUpgradeError::Timeout => { + self.pending_events + .push_back(Event::OutboundTimeout(message.request_id)); } - ConnectionHandlerUpgrErr::Upgrade(UpgradeError::Select(NegotiationError::Failed)) => { + StreamUpgradeError::NegotiationFailed => { // The remote merely doesn't support the protocol(s) we requested. // This is no reason to close the connection, which may // successfully communicate with other protocols already. // An event is reported to permit user code to react to the fact that // the remote peer does not support the requested protocol(s). self.pending_events - .push_back(Event::OutboundUnsupportedProtocols(info)); + .push_back(Event::OutboundUnsupportedProtocols(message.request_id)); } - _ => { - // Anything else is considered a fatal error or misbehaviour of - // the remote peer and results in closing the connection. 
- self.pending_error = Some(error); + StreamUpgradeError::Apply(e) => void::unreachable(e), + StreamUpgradeError::Io(e) => { + tracing::debug!( + "outbound stream for request {} failed: {e}, retrying", + message.request_id + ); + self.requested_outbound.push_back(message); } } } fn on_listen_upgrade_error( &mut self, - ListenUpgradeError { info, error }: ListenUpgradeError< + ListenUpgradeError { error, .. }: ListenUpgradeError< ::InboundOpenInfo, ::InboundProtocol, >, ) { - match error { - ConnectionHandlerUpgrErr::Timeout => { - self.pending_events.push_back(Event::InboundTimeout(info)) - } - ConnectionHandlerUpgrErr::Upgrade(UpgradeError::Select(NegotiationError::Failed)) => { - // The local peer merely doesn't support the protocol(s) requested. - // This is no reason to close the connection, which may - // successfully communicate with other protocols already. - // An event is reported to permit user code to react to the fact that - // the local peer does not support the requested protocol(s). - self.pending_events - .push_back(Event::InboundUnsupportedProtocols(info)); - } - _ => { - // Anything else is considered a fatal error or misbehaviour of - // the remote peer and results in closing the connection. - self.pending_error = Some(error); - } - } + void::unreachable(error) } } @@ -195,30 +262,36 @@ where { /// A request has been received. Request { - request_id: RequestId, + request_id: InboundRequestId, request: TCodec::Request, sender: oneshot::Sender, }, /// A response has been received. Response { - request_id: RequestId, + request_id: OutboundRequestId, response: TCodec::Response, }, /// A response to an inbound request has been sent. - ResponseSent(RequestId), + ResponseSent(InboundRequestId), /// A response to an inbound request was omitted as a result /// of dropping the response `sender` of an inbound `Request`. 
- ResponseOmission(RequestId), + ResponseOmission(InboundRequestId), /// An outbound request timed out while sending the request /// or waiting for the response. - OutboundTimeout(RequestId), + OutboundTimeout(OutboundRequestId), /// An outbound request failed to negotiate a mutually supported protocol. - OutboundUnsupportedProtocols(RequestId), + OutboundUnsupportedProtocols(OutboundRequestId), + OutboundStreamFailed { + request_id: OutboundRequestId, + error: io::Error, + }, /// An inbound request timed out while waiting for the request /// or sending the response. - InboundTimeout(RequestId), - /// An inbound request failed to negotiate a mutually supported protocol. - InboundUnsupportedProtocols(RequestId), + InboundTimeout(InboundRequestId), + InboundStreamFailed { + request_id: InboundRequestId, + error: io::Error, + }, } impl fmt::Debug for Event { @@ -255,132 +328,133 @@ impl fmt::Debug for Event { .debug_tuple("Event::OutboundUnsupportedProtocols") .field(request_id) .finish(), + Event::OutboundStreamFailed { request_id, error } => f + .debug_struct("Event::OutboundStreamFailed") + .field("request_id", &request_id) + .field("error", &error) + .finish(), Event::InboundTimeout(request_id) => f .debug_tuple("Event::InboundTimeout") .field(request_id) .finish(), - Event::InboundUnsupportedProtocols(request_id) => f - .debug_tuple("Event::InboundUnsupportedProtocols") - .field(request_id) + Event::InboundStreamFailed { request_id, error } => f + .debug_struct("Event::InboundStreamFailed") + .field("request_id", &request_id) + .field("error", &error) .finish(), } } } +pub struct OutboundMessage { + pub(crate) request_id: OutboundRequestId, + pub(crate) request: TCodec::Request, + pub(crate) protocols: SmallVec<[TCodec::Protocol; 2]>, +} + +impl fmt::Debug for OutboundMessage +where + TCodec: Codec, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("OutboundMessage").finish_non_exhaustive() + } +} + impl ConnectionHandler for 
Handler where TCodec: Codec + Send + Clone + 'static, { - type InEvent = RequestProtocol; - type OutEvent = Event; - type Error = ConnectionHandlerUpgrErr; - type InboundProtocol = ResponseProtocol; - type OutboundProtocol = RequestProtocol; - type OutboundOpenInfo = RequestId; - type InboundOpenInfo = RequestId; + type FromBehaviour = OutboundMessage; + type ToBehaviour = Event; + type InboundProtocol = Protocol; + type OutboundProtocol = Protocol; + type OutboundOpenInfo = (); + type InboundOpenInfo = (); fn listen_protocol(&self) -> SubstreamProtocol { - // A channel for notifying the handler when the inbound - // upgrade received the request. - let (rq_send, rq_recv) = oneshot::channel(); - - // A channel for notifying the inbound upgrade when the - // response is sent. - let (rs_send, rs_recv) = oneshot::channel(); - - let request_id = RequestId(self.inbound_request_id.fetch_add(1, Ordering::Relaxed)); - - // By keeping all I/O inside the `ResponseProtocol` and thus the - // inbound substream upgrade via above channels, we ensure that it - // is all subject to the configured timeout without extra bookkeeping - // for inbound substreams as well as their timeouts and also make the - // implementation of inbound and outbound upgrades symmetric in - // this sense. - let proto = ResponseProtocol { - protocols: self.inbound_protocols.clone(), - codec: self.codec.clone(), - request_sender: rq_send, - response_receiver: rs_recv, - request_id, - }; - - // The handler waits for the request to come in. It then emits - // `Event::Request` together with a - // `ResponseChannel`. 
- self.inbound - .push(rq_recv.map_ok(move |rq| (rq, rs_send)).boxed()); - - SubstreamProtocol::new(proto, request_id).with_timeout(self.substream_timeout) + SubstreamProtocol::new( + Protocol { + protocols: self.inbound_protocols.clone(), + }, + (), + ) } - fn on_behaviour_event(&mut self, request: Self::InEvent) { - self.keep_alive = KeepAlive::Yes; - self.outbound.push_back(request); - } - - fn connection_keep_alive(&self) -> KeepAlive { - self.keep_alive + fn on_behaviour_event(&mut self, request: Self::FromBehaviour) { + self.pending_outbound.push_back(request); } + #[tracing::instrument(level = "trace", name = "ConnectionHandler::poll", skip(self, cx))] fn poll( &mut self, cx: &mut Context<'_>, - ) -> Poll, RequestId, Self::OutEvent, Self::Error>> - { - // Check for a pending (fatal) error. - if let Some(err) = self.pending_error.take() { - // The handler will not be polled again by the `Swarm`. - return Poll::Ready(ConnectionHandlerEvent::Close(err)); + ) -> Poll, (), Self::ToBehaviour>> { + match self.worker_streams.poll_unpin(cx) { + Poll::Ready((_, Ok(Ok(event)))) => { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(event)); + } + Poll::Ready((RequestId::Inbound(id), Ok(Err(e)))) => { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + Event::InboundStreamFailed { + request_id: id, + error: e, + }, + )); + } + Poll::Ready((RequestId::Outbound(id), Ok(Err(e)))) => { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + Event::OutboundStreamFailed { + request_id: id, + error: e, + }, + )); + } + Poll::Ready((RequestId::Inbound(id), Err(futures_bounded::Timeout { .. }))) => { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + Event::InboundTimeout(id), + )); + } + Poll::Ready((RequestId::Outbound(id), Err(futures_bounded::Timeout { .. }))) => { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + Event::OutboundTimeout(id), + )); + } + Poll::Pending => {} } - // Drain pending events. 
+ // Drain pending events that were produced by `worker_streams`. if let Some(event) = self.pending_events.pop_front() { - return Poll::Ready(ConnectionHandlerEvent::Custom(event)); + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(event)); } else if self.pending_events.capacity() > EMPTY_QUEUE_SHRINK_THRESHOLD { self.pending_events.shrink_to_fit(); } // Check for inbound requests. - while let Poll::Ready(Some(result)) = self.inbound.poll_next_unpin(cx) { - match result { - Ok(((id, rq), rs_sender)) => { - // We received an inbound request. - self.keep_alive = KeepAlive::Yes; - return Poll::Ready(ConnectionHandlerEvent::Custom(Event::Request { - request_id: id, - request: rq, - sender: rs_sender, - })); - } - Err(oneshot::Canceled) => { - // The inbound upgrade has errored or timed out reading - // or waiting for the request. The handler is informed - // via `on_connection_event` call with `ConnectionEvent::ListenUpgradeError`. - } - } + if let Poll::Ready(Some((id, rq, rs_sender))) = self.inbound_receiver.poll_next_unpin(cx) { + // We received an inbound request. + + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(Event::Request { + request_id: id, + request: rq, + sender: rs_sender, + })); } // Emit outbound requests. 
- if let Some(request) = self.outbound.pop_front() { - let info = request.request_id; + if let Some(request) = self.pending_outbound.pop_front() { + let protocols = request.protocols.clone(); + self.requested_outbound.push_back(request); + return Poll::Ready(ConnectionHandlerEvent::OutboundSubstreamRequest { - protocol: SubstreamProtocol::new(request, info) - .with_timeout(self.substream_timeout), + protocol: SubstreamProtocol::new(Protocol { protocols }, ()), }); } - debug_assert!(self.outbound.is_empty()); - - if self.outbound.capacity() > EMPTY_QUEUE_SHRINK_THRESHOLD { - self.outbound.shrink_to_fit(); - } + debug_assert!(self.pending_outbound.is_empty()); - if self.inbound.is_empty() && self.keep_alive.is_yes() { - // No new inbound or outbound requests. However, we may just have - // started the latest inbound or outbound upgrade(s), so make sure - // the keep-alive timeout is preceded by the substream timeout. - let until = Instant::now() + self.substream_timeout + self.keep_alive_timeout; - self.keep_alive = KeepAlive::Until(until); + if self.pending_outbound.capacity() > EMPTY_QUEUE_SHRINK_THRESHOLD { + self.pending_outbound.shrink_to_fit(); } Poll::Pending @@ -399,14 +473,8 @@ where ConnectionEvent::FullyNegotiatedInbound(fully_negotiated_inbound) => { self.on_fully_negotiated_inbound(fully_negotiated_inbound) } - ConnectionEvent::FullyNegotiatedOutbound(FullyNegotiatedOutbound { - protocol: response, - info: request_id, - }) => { - self.pending_events.push_back(Event::Response { - request_id, - response, - }); + ConnectionEvent::FullyNegotiatedOutbound(fully_negotiated_outbound) => { + self.on_fully_negotiated_outbound(fully_negotiated_outbound) } ConnectionEvent::DialUpgradeError(dial_upgrade_error) => { self.on_dial_upgrade_error(dial_upgrade_error) @@ -414,7 +482,7 @@ where ConnectionEvent::ListenUpgradeError(listen_upgrade_error) => { self.on_listen_upgrade_error(listen_upgrade_error) } - ConnectionEvent::AddressChange(_) => {} + _ => {} } } } diff 
--git a/protocols/request-response/src/handler/protocol.rs b/protocols/request-response/src/handler/protocol.rs index 84ef365734f..833cacdd6ce 100644 --- a/protocols/request-response/src/handler/protocol.rs +++ b/protocols/request-response/src/handler/protocol.rs @@ -23,14 +23,10 @@ //! receives a request and sends a response, whereas the //! outbound upgrade send a request and receives a response. -use crate::codec::Codec; -use crate::RequestId; - -use futures::{channel::oneshot, future::BoxFuture, prelude::*}; +use futures::future::{ready, Ready}; use libp2p_core::upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}; -use libp2p_swarm::NegotiatedSubstream; +use libp2p_swarm::Stream; use smallvec::SmallVec; -use std::{fmt, io}; /// The level of support for a particular protocol. #[derive(Debug, Clone)] @@ -65,22 +61,15 @@ impl ProtocolSupport { /// /// Receives a request and sends a response. #[derive(Debug)] -pub struct ResponseProtocol -where - TCodec: Codec, -{ - pub(crate) codec: TCodec, - pub(crate) protocols: SmallVec<[TCodec::Protocol; 2]>, - pub(crate) request_sender: oneshot::Sender<(RequestId, TCodec::Request)>, - pub(crate) response_receiver: oneshot::Receiver, - pub(crate) request_id: RequestId, +pub struct Protocol

{ + pub(crate) protocols: SmallVec<[P; 2]>, } -impl UpgradeInfo for ResponseProtocol +impl

UpgradeInfo for Protocol

where - TCodec: Codec, + P: AsRef + Clone, { - type Info = TCodec::Protocol; + type Info = P; type InfoIter = smallvec::IntoIter<[Self::Info; 2]>; fn protocol_info(&self) -> Self::InfoIter { @@ -88,102 +77,28 @@ where } } -impl InboundUpgrade for ResponseProtocol +impl

InboundUpgrade for Protocol

where - TCodec: Codec + Send + 'static, + P: AsRef + Clone, { - type Output = bool; - type Error = io::Error; - type Future = BoxFuture<'static, Result>; - - fn upgrade_inbound( - mut self, - mut io: NegotiatedSubstream, - protocol: Self::Info, - ) -> Self::Future { - async move { - let read = self.codec.read_request(&protocol, &mut io); - let request = read.await?; - match self.request_sender.send((self.request_id, request)) { - Ok(()) => {}, - Err(_) => panic!( - "Expect request receiver to be alive i.e. protocol handler to be alive.", - ), - } + type Output = (Stream, P); + type Error = void::Void; + type Future = Ready>; - if let Ok(response) = self.response_receiver.await { - let write = self.codec.write_response(&protocol, &mut io, response); - write.await?; - - io.close().await?; - // Response was sent. Indicate to handler to emit a `ResponseSent` event. - Ok(true) - } else { - io.close().await?; - // No response was sent. Indicate to handler to emit a `ResponseOmission` event. - Ok(false) - } - }.boxed() + fn upgrade_inbound(self, io: Stream, protocol: Self::Info) -> Self::Future { + ready(Ok((io, protocol))) } } -/// Request substream upgrade protocol. -/// -/// Sends a request and receives a response. -pub struct RequestProtocol +impl

OutboundUpgrade for Protocol

, + dh_keys: AuthenticKeypair, + params: NoiseParams, + webtransport_certhashes: Option>>, /// Prologue to use in the noise handshake. /// @@ -222,294 +91,93 @@ pub struct NoiseConfig { prologue: Vec, } -impl NoiseConfig { - /// Turn the `NoiseConfig` into an authenticated upgrade for use - /// with a `Swarm`. - pub fn into_authenticated(self) -> NoiseAuthenticated { - NoiseAuthenticated { config: self } - } +impl Config { + /// Construct a new configuration for the noise handshake using the XX handshake pattern. + pub fn new(identity: &identity::Keypair) -> Result { + let noise_keys = Keypair::new().into_authentic(identity)?; - /// Set the noise prologue. - pub fn with_prologue(self, prologue: Vec) -> Self { - Self { prologue, ..self } + Ok(Self { + dh_keys: noise_keys, + params: PARAMS_XX.clone(), + webtransport_certhashes: None, + prologue: vec![], + }) } - /// Sets the legacy configuration options to use, if any. - #[deprecated( - since = "0.42.0", - note = "`LegacyConfig` will be removed without replacement." - )] - - pub fn set_legacy_config(&mut self, cfg: LegacyConfig) -> &mut Self { - self.legacy = cfg; + /// Set the noise prologue. + pub fn with_prologue(mut self, prologue: Vec) -> Self { + self.prologue = prologue; self } -} - -/// Implement `into_responder` and `into_initiator` for all configs where `R = ()`. -/// -/// This allows us to ignore the `remote` field. 
- -impl NoiseConfig -where - C: Zeroize + Protocol + AsRef<[u8]>, -{ - fn into_responder(self, socket: S) -> Result, Error> { - let session = self - .params - .into_builder(&self.prologue, self.dh_keys.keypair.secret(), None) - .build_responder()?; - let state = State::new(socket, session, self.dh_keys.identity, None, self.legacy); - - Ok(state) - } - - fn into_initiator(self, socket: S) -> Result, Error> { - let session = self - .params - .into_builder(&self.prologue, self.dh_keys.keypair.secret(), None) - .build_initiator()?; - - let state = State::new(socket, session, self.dh_keys.identity, None, self.legacy); - - Ok(state) - } -} - -impl NoiseConfig -where - C: Protocol + Zeroize, -{ - /// Create a new `NoiseConfig` for the `IX` handshake pattern. - pub fn ix(dh_keys: AuthenticKeypair) -> Self { - NoiseConfig { - dh_keys, - params: C::params_ix(), - legacy: { LegacyConfig::default() }, - remote: (), - _marker: std::marker::PhantomData, - prologue: Vec::default(), - } - } -} - -impl NoiseConfig -where - C: Protocol + Zeroize, -{ - /// Create a new `NoiseConfig` for the `XX` handshake pattern. - pub fn xx(dh_keys: AuthenticKeypair) -> Self { - NoiseConfig { - dh_keys, - params: C::params_xx(), - legacy: { LegacyConfig::default() }, - remote: (), - _marker: std::marker::PhantomData, - prologue: Vec::default(), - } - } -} - -impl NoiseConfig -where - C: Protocol + Zeroize, -{ - /// Create a new `NoiseConfig` for the `IK` handshake pattern (recipient side). + /// Set WebTransport certhashes extension. /// - /// Since the identity of the local node is known to the remote, this configuration - /// does not transmit a static DH public key or public identity key to the remote. 
- pub fn ik_listener(dh_keys: AuthenticKeypair) -> Self { - NoiseConfig { - dh_keys, - params: C::params_ik(), - legacy: { LegacyConfig::default() }, - remote: (), - _marker: std::marker::PhantomData, - prologue: Vec::default(), - } - } -} - -impl NoiseConfig, identity::PublicKey)> -where - C: Protocol + Zeroize + AsRef<[u8]>, -{ - /// Create a new `NoiseConfig` for the `IK` handshake pattern (initiator side). + /// In case of initiator, these certhashes will be used to validate the ones reported by + /// responder. /// - /// In this configuration, the remote identity is known to the local node, - /// but the local node still needs to transmit its own public identity. - pub fn ik_dialer( - dh_keys: AuthenticKeypair, - remote_id: identity::PublicKey, - remote_dh: PublicKey, - ) -> Self { - NoiseConfig { - dh_keys, - params: C::params_ik(), - legacy: { LegacyConfig::default() }, - remote: (remote_dh, remote_id), - _marker: std::marker::PhantomData, - prologue: Vec::default(), - } + /// In case of responder, these certhashes will be reported to initiator. + pub fn with_webtransport_certhashes(mut self, certhashes: HashSet>) -> Self { + self.webtransport_certhashes = Some(certhashes).filter(|h| !h.is_empty()); + self } - /// Specialised implementation of `into_initiator` for the `IK` handshake where `R != ()`. - fn into_initiator(self, socket: S) -> Result, Error> { - let session = self - .params - .into_builder( - &self.prologue, - self.dh_keys.keypair.secret(), - Some(&self.remote.0), - ) - .build_initiator()?; + fn into_responder(self, socket: S) -> Result, Error> { + let session = noise_params_into_builder( + self.params, + &self.prologue, + self.dh_keys.keypair.secret(), + None, + ) + .build_responder()?; let state = State::new( socket, session, self.dh_keys.identity, - Some(self.remote.1), - self.legacy, + None, + self.webtransport_certhashes, ); Ok(state) } -} - -/// libp2p_noise error type. 
-#[derive(Debug, thiserror::Error)] -#[non_exhaustive] -pub enum Error { - #[error(transparent)] - Io(#[from] std::io::Error), - #[error(transparent)] - Noise(#[from] snow::Error), - #[error("Invalid public key")] - InvalidKey(#[from] libp2p_identity::DecodingError), - #[error("Only keys of length 32 bytes are supported")] - InvalidLength, - #[error("Remote authenticated with an unexpected public key")] - UnexpectedKey, - #[error("The signature of the remote identity's public key does not verify")] - BadSignature, - #[error("Authentication failed")] - AuthenticationFailed, - #[error(transparent)] - InvalidPayload(DecodeError), - #[error(transparent)] - SigningError(#[from] libp2p_identity::SigningError), -} - -#[derive(Debug, thiserror::Error)] -pub struct DecodeError(String); - -impl fmt::Display for DecodeError { - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - write!(f, "{}", self.0) - } -} - -impl From for DecodeError { - fn from(e: quick_protobuf::Error) -> Self { - Self(e.to_string()) - } -} -impl From for Error { - fn from(e: quick_protobuf::Error) -> Self { - Error::InvalidPayload(e.into()) - } -} - -// Handshake pattern IX ///////////////////////////////////////////////////// - -/// Implements the responder part of the `IX` noise handshake pattern. -/// -/// `IX` is a single round-trip (2 messages) handshake in which each party sends their identity over to the other party. 
-/// -/// ```raw -/// initiator -{id}-> responder -/// initiator <-{id}- responder -/// ``` - -impl InboundUpgrade for NoiseConfig -where - NoiseConfig: UpgradeInfo, - T: AsyncRead + AsyncWrite + Unpin + Send + 'static, - C: Protocol + AsRef<[u8]> + Zeroize + Clone + Send + 'static, -{ - type Output = (RemoteIdentity, Output); - type Error = Error; - type Future = BoxFuture<'static, Result<(RemoteIdentity, Output), Error>>; - - fn upgrade_inbound(self, socket: T, _: Self::Info) -> Self::Future { - async move { - let mut state = self.into_responder(socket)?; + fn into_initiator(self, socket: S) -> Result, Error> { + let session = noise_params_into_builder( + self.params, + &self.prologue, + self.dh_keys.keypair.secret(), + None, + ) + .build_initiator()?; - handshake::recv_identity(&mut state).await?; - handshake::send_identity(&mut state).await?; + let state = State::new( + socket, + session, + self.dh_keys.identity, + None, + self.webtransport_certhashes, + ); - state.finish() - } - .boxed() + Ok(state) } } -/// Implements the initiator part of the `IX` noise handshake pattern. -/// -/// `IX` is a single round-trip (2 messages) handshake in which each party sends their identity over to the other party. 
-/// -/// ```raw -/// initiator -{id}-> responder -/// initiator <-{id}- responder -/// ``` - -impl OutboundUpgrade for NoiseConfig -where - NoiseConfig: UpgradeInfo, - T: AsyncRead + AsyncWrite + Unpin + Send + 'static, - C: Protocol + AsRef<[u8]> + Zeroize + Clone + Send + 'static, -{ - type Output = (RemoteIdentity, Output); - type Error = Error; - type Future = BoxFuture<'static, Result<(RemoteIdentity, Output), Error>>; - - fn upgrade_outbound(self, socket: T, _: Self::Info) -> Self::Future { - async move { - let mut state = self.into_initiator(socket)?; - - handshake::send_identity(&mut state).await?; - handshake::recv_identity(&mut state).await?; +impl UpgradeInfo for Config { + type Info = &'static str; + type InfoIter = std::iter::Once; - state.finish() - } - .boxed() + fn protocol_info(&self) -> Self::InfoIter { + std::iter::once("/noise") } } -/// Implements the responder part of the `XX` noise handshake pattern. -/// -/// `XX` is a 1.5 round-trip (3 messages) handshake. -/// The first message in a noise handshake is unencrypted. In the `XX` handshake pattern, that message -/// is empty and thus does not leak any information. The identities are then exchanged in the second -/// and third message. 
-/// -/// ```raw -/// initiator --{}--> responder -/// initiator <-{id}- responder -/// initiator -{id}-> responder -/// ``` - -impl InboundUpgrade for NoiseConfig +impl InboundConnectionUpgrade for Config where - NoiseConfig: UpgradeInfo, T: AsyncRead + AsyncWrite + Unpin + Send + 'static, - C: Protocol + AsRef<[u8]> + Zeroize + Clone + Send + 'static, { - type Output = (RemoteIdentity, Output); + type Output = (PeerId, Output); type Error = Error; - type Future = BoxFuture<'static, Result<(RemoteIdentity, Output), Error>>; + type Future = Pin> + Send>>; fn upgrade_inbound(self, socket: T, _: Self::Info) -> Self::Future { async move { @@ -519,34 +187,21 @@ where handshake::send_identity(&mut state).await?; handshake::recv_identity(&mut state).await?; - state.finish() + let (pk, io) = state.finish()?; + + Ok((pk.to_peer_id(), io)) } .boxed() } } -/// Implements the initiator part of the `XX` noise handshake pattern. -/// -/// `XX` is a 1.5 round-trip (3 messages) handshake. -/// The first message in a noise handshake is unencrypted. In the `XX` handshake pattern, that message -/// is empty and thus does not leak any information. The identities are then exchanged in the second -/// and third message. 
-/// -/// ```raw -/// initiator --{}--> responder -/// initiator <-{id}- responder -/// initiator -{id}-> responder -/// ``` - -impl OutboundUpgrade for NoiseConfig +impl OutboundConnectionUpgrade for Config where - NoiseConfig: UpgradeInfo, T: AsyncRead + AsyncWrite + Unpin + Send + 'static, - C: Protocol + AsRef<[u8]> + Zeroize + Clone + Send + 'static, { - type Output = (RemoteIdentity, Output); + type Output = (PeerId, Output); type Error = Error; - type Future = BoxFuture<'static, Result<(RemoteIdentity, Output), Error>>; + type Future = Pin> + Send>>; fn upgrade_outbound(self, socket: T, _: Self::Info) -> Self::Future { async move { @@ -556,193 +211,50 @@ where handshake::recv_identity(&mut state).await?; handshake::send_identity(&mut state).await?; - state.finish() - } - .boxed() - } -} - -/// Implements the responder part of the `IK` handshake pattern. -/// -/// `IK` is a single round-trip (2 messages) handshake. -/// -/// In the `IK` handshake, the initiator is expected to know the responder's identity already, which -/// is why the responder does not send it in the second message. -/// -/// ```raw -/// initiator -{id}-> responder -/// initiator <-{id}- responder -/// ``` - -impl InboundUpgrade for NoiseConfig -where - NoiseConfig: UpgradeInfo, - T: AsyncRead + AsyncWrite + Unpin + Send + 'static, - C: Protocol + AsRef<[u8]> + Zeroize + Clone + Send + 'static, -{ - type Output = (RemoteIdentity, Output); - type Error = Error; - type Future = BoxFuture<'static, Result<(RemoteIdentity, Output), Error>>; - - fn upgrade_inbound(self, socket: T, _: Self::Info) -> Self::Future { - async move { - let mut state = self.into_responder(socket)?; - - handshake::recv_identity(&mut state).await?; - handshake::send_signature_only(&mut state).await?; - - state.finish() - } - .boxed() - } -} - -/// Implements the initiator part of the `IK` handshake pattern. -/// -/// `IK` is a single round-trip (2 messages) handshake. 
-/// -/// In the `IK` handshake, the initiator knows and pre-configures the remote's identity in the -/// [`HandshakeState`](snow::HandshakeState). -/// -/// ```raw -/// initiator -{id}-> responder -/// initiator <-{id}- responder -/// ``` - -impl OutboundUpgrade for NoiseConfig, identity::PublicKey)> -where - NoiseConfig, identity::PublicKey)>: UpgradeInfo, - T: AsyncRead + AsyncWrite + Unpin + Send + 'static, - C: Protocol + AsRef<[u8]> + Zeroize + Clone + Send + 'static, -{ - type Output = (RemoteIdentity, Output); - type Error = Error; - type Future = BoxFuture<'static, Result<(RemoteIdentity, Output), Error>>; - - fn upgrade_outbound(self, socket: T, _: Self::Info) -> Self::Future { - async move { - let mut state = self.into_initiator(socket)?; - - handshake::send_identity(&mut state).await?; - handshake::recv_identity(&mut state).await?; + let (pk, io) = state.finish()?; - state.finish() + Ok((pk.to_peer_id(), io)) } .boxed() } } -// Authenticated Upgrades ///////////////////////////////////////////////////// - -/// A `NoiseAuthenticated` transport upgrade that wraps around any -/// `NoiseConfig` handshake and verifies that the remote identified with a -/// [`RemoteIdentity::IdentityKey`], aborting otherwise. -/// -/// See [`NoiseConfig::into_authenticated`]. -/// -/// On success, the upgrade yields the [`PeerId`] obtained from the -/// `RemoteIdentity`. The output of this upgrade is thus directly suitable -/// for creating an [`authenticated`](libp2p_core::transport::upgrade::Authenticate) -/// transport for use with a `Swarm`. -#[derive(Clone)] -#[deprecated( - note = "Use `libp2p_noise::Config` instead. All other handshake patterns are deprecated and will be removed." -)] -pub struct NoiseAuthenticated { - config: NoiseConfig, -} - -impl NoiseAuthenticated { - /// Create a new [`NoiseAuthenticated`] for the `XX` handshake pattern using X25519 DH keys. 
- /// - /// For now, this is the only combination that is guaranteed to be compatible with other libp2p implementations. - #[deprecated(note = "Use `libp2p_noise::Config::new` instead.")] - pub fn xx(id_keys: &identity::Keypair) -> Result { - let dh_keys = Keypair::::new(); - let noise_keys = dh_keys.into_authentic(id_keys)?; - let config = NoiseConfig::xx(noise_keys); - - Ok(config.into_authenticated()) - } -} - -impl UpgradeInfo for NoiseAuthenticated -where - NoiseConfig: UpgradeInfo, -{ - type Info = as UpgradeInfo>::Info; - type InfoIter = as UpgradeInfo>::InfoIter; - - fn protocol_info(&self) -> Self::InfoIter { - self.config.protocol_info() - } +/// libp2p_noise error type. +#[derive(Debug, thiserror::Error)] +#[non_exhaustive] +pub enum Error { + #[error(transparent)] + Io(#[from] std::io::Error), + #[error(transparent)] + Noise(#[from] snow::Error), + #[error("Invalid public key")] + InvalidKey(#[from] libp2p_identity::DecodingError), + #[error("Only keys of length 32 bytes are supported")] + InvalidLength, + #[error("Remote authenticated with an unexpected public key")] + UnexpectedKey, + #[error("The signature of the remote identity's public key does not verify")] + BadSignature, + #[error("Authentication failed")] + AuthenticationFailed, + #[error("failed to decode protobuf ")] + InvalidPayload(#[from] DecodeError), + #[error(transparent)] + SigningError(#[from] libp2p_identity::SigningError), + #[error("Expected WebTransport certhashes ({}) are not a subset of received ones ({})", certhashes_to_string(.0), certhashes_to_string(.1))] + UnknownWebTransportCerthashes(HashSet>, HashSet>), } -impl InboundUpgrade for NoiseAuthenticated -where - NoiseConfig: UpgradeInfo - + InboundUpgrade, Output), Error = Error> - + 'static, - as InboundUpgrade>::Future: Send, - T: AsyncRead + AsyncWrite + Send + 'static, - C: Protocol + AsRef<[u8]> + Zeroize + Send + 'static, -{ - type Output = (PeerId, Output); - type Error = Error; - type Future = Pin> + Send>>; - - fn 
upgrade_inbound(self, socket: T, info: Self::Info) -> Self::Future { - Box::pin( - self.config - .upgrade_inbound(socket, info) - .and_then(|(remote, io)| match remote { - RemoteIdentity::IdentityKey(pk) => future::ok((pk.to_peer_id(), io)), - _ => future::err(Error::AuthenticationFailed), - }), - ) - } -} +#[derive(Debug, thiserror::Error)] +#[error(transparent)] +pub struct DecodeError(quick_protobuf::Error); -impl OutboundUpgrade for NoiseAuthenticated -where - NoiseConfig: UpgradeInfo - + OutboundUpgrade, Output), Error = Error> - + 'static, - as OutboundUpgrade>::Future: Send, - T: AsyncRead + AsyncWrite + Send + 'static, - C: Protocol + AsRef<[u8]> + Zeroize + Send + 'static, -{ - type Output = (PeerId, Output); - type Error = Error; - type Future = Pin> + Send>>; +fn certhashes_to_string(certhashes: &HashSet>) -> String { + let mut s = String::new(); - fn upgrade_outbound(self, socket: T, info: Self::Info) -> Self::Future { - Box::pin( - self.config - .upgrade_outbound(socket, info) - .and_then(|(remote, io)| match remote { - RemoteIdentity::IdentityKey(pk) => future::ok((pk.to_peer_id(), io)), - _ => future::err(Error::AuthenticationFailed), - }), - ) + for hash in certhashes { + write!(&mut s, "{}", Protocol::Certhash(*hash)).unwrap(); } -} -/// Legacy configuration options. -#[derive(Clone, Copy, Default)] -#[deprecated( - since = "0.42.0", - note = "`LegacyConfig` will be removed without replacement." -)] -pub struct LegacyConfig { - /// Whether to continue sending legacy handshake payloads, - /// i.e. length-prefixed protobuf payloads inside a length-prefixed - /// noise frame. These payloads are not interoperable with other - /// libp2p implementations. - pub send_legacy_handshake: bool, - /// Whether to support receiving legacy handshake payloads, - /// i.e. length-prefixed protobuf payloads inside a length-prefixed - /// noise frame. These payloads are not interoperable with other - /// libp2p implementations. 
- pub recv_legacy_handshake: bool, + s } diff --git a/transports/noise/src/protocol.rs b/transports/noise/src/protocol.rs index c8d9da3ff56..e37c55c7f10 100644 --- a/transports/noise/src/protocol.rs +++ b/transports/noise/src/protocol.rs @@ -20,174 +20,80 @@ //! Components of a Noise protocol. -pub(crate) mod x25519; -pub(crate) mod x25519_spec; use crate::Error; use libp2p_identity as identity; -use rand::SeedableRng; +use once_cell::sync::Lazy; +use rand::{Rng as _, SeedableRng}; +use snow::params::NoiseParams; +use x25519_dalek::{x25519, X25519_BASEPOINT_BYTES}; use zeroize::Zeroize; -/// The parameters of a Noise protocol, consisting of a choice -/// for a handshake pattern as well as DH, cipher and hash functions. -#[derive(Clone)] -pub struct ProtocolParams(snow::params::NoiseParams); - -impl ProtocolParams { - pub(crate) fn into_builder<'b, C>( - self, - prologue: &'b [u8], - private_key: &'b SecretKey, - remote_public_key: Option<&'b PublicKey>, - ) -> snow::Builder<'b> - where - C: Zeroize + AsRef<[u8]> + Protocol, - { - let mut builder = snow::Builder::with_resolver(self.0, Box::new(Resolver)) - .prologue(prologue.as_ref()) - .local_private_key(private_key.as_ref()); - - if let Some(remote_public_key) = remote_public_key { - builder = builder.remote_public_key(remote_public_key.as_ref()); - } - - builder - } -} - -/// Type tag for the IK handshake pattern. -#[derive(Debug, Clone)] -pub enum IK {} - -/// Type tag for the IX handshake pattern. -#[derive(Debug, Clone)] -pub enum IX {} - -/// Type tag for the XX handshake pattern. -#[derive(Debug, Clone)] -pub enum XX {} - -/// A Noise protocol over DH keys of type `C`. The choice of `C` determines the -/// protocol parameters for each handshake pattern. -#[deprecated( - note = "This type will be made private in the future. Use `libp2p_noise::Config::new` instead to use the noise protocol." -)] -pub trait Protocol { - /// The protocol parameters for the IK handshake pattern. 
- fn params_ik() -> ProtocolParams; - /// The protocol parameters for the IX handshake pattern. - fn params_ix() -> ProtocolParams; - /// The protocol parameters for the XX handshake pattern. - fn params_xx() -> ProtocolParams; - - /// Construct a DH public key from a byte slice. - fn public_from_bytes(s: &[u8]) -> Result, Error>; - - /// Determines whether the authenticity of the given DH static public key - /// and public identity key is linked, i.e. that proof of ownership of a - /// secret key for the static DH public key implies that the key is - /// authentic w.r.t. the given public identity key. - /// - /// The trivial case is when the keys are byte for byte identical. - #[allow(unused_variables)] - #[deprecated] - fn linked(id_pk: &identity::PublicKey, dh_pk: &PublicKey) -> bool { - false - } - - /// Verifies that a given static DH public key is authentic w.r.t. a - /// given public identity key in the context of an optional signature. - /// - /// The given static DH public key is assumed to already be authentic - /// in the sense that possession of a corresponding secret key has been - /// established, as is the case at the end of a Noise handshake involving - /// static DH keys. - /// - /// If the public keys are [`linked`](Protocol::linked), verification succeeds - /// without a signature, otherwise a signature over the static DH public key - /// must be given and is verified with the public identity key, establishing - /// the authenticity of the static DH public key w.r.t. the public identity key. - - fn verify(id_pk: &identity::PublicKey, dh_pk: &PublicKey, sig: &Option>) -> bool - where - C: AsRef<[u8]>, - { - Self::linked(id_pk, dh_pk) - || sig - .as_ref() - .map_or(false, |s| id_pk.verify(dh_pk.as_ref(), s)) +/// Prefix of static key signatures for domain separation. 
+pub(crate) const STATIC_KEY_DOMAIN: &str = "noise-libp2p-static-key:"; + +pub(crate) static PARAMS_XX: Lazy = Lazy::new(|| { + "Noise_XX_25519_ChaChaPoly_SHA256" + .parse() + .expect("Invalid protocol name") +}); + +pub(crate) fn noise_params_into_builder<'b>( + params: NoiseParams, + prologue: &'b [u8], + private_key: &'b SecretKey, + remote_public_key: Option<&'b PublicKey>, +) -> snow::Builder<'b> { + let mut builder = snow::Builder::with_resolver(params, Box::new(Resolver)) + .prologue(prologue.as_ref()) + .local_private_key(private_key.as_ref()); + + if let Some(remote_public_key) = remote_public_key { + builder = builder.remote_public_key(remote_public_key.as_ref()); } - fn sign(id_keys: &identity::Keypair, dh_pk: &PublicKey) -> Result, Error> - where - C: AsRef<[u8]>, - { - Ok(id_keys.sign(dh_pk.as_ref())?) - } + builder } /// DH keypair. #[derive(Clone)] -pub struct Keypair { - secret: SecretKey, - public: PublicKey, +pub(crate) struct Keypair { + secret: SecretKey, + public: PublicKey, } /// A DH keypair that is authentic w.r.t. a [`identity::PublicKey`]. #[derive(Clone)] -pub struct AuthenticKeypair { - pub(crate) keypair: Keypair, +pub(crate) struct AuthenticKeypair { + pub(crate) keypair: Keypair, pub(crate) identity: KeypairIdentity, } -impl AuthenticKeypair { - /// Returns the public DH key of this keypair. - pub fn public_dh_key(&self) -> &PublicKey { - &self.keypair.public - } - - /// Extract the public [`KeypairIdentity`] from this `AuthenticKeypair`, - /// dropping the DH `Keypair`. - #[deprecated( - since = "0.40.0", - note = "This function was only used internally and will be removed in the future unless more usecases come up." - )] - pub fn into_identity(self) -> KeypairIdentity { - self.identity - } -} - /// The associated public identity of a DH keypair. #[derive(Clone)] -pub struct KeypairIdentity { +pub(crate) struct KeypairIdentity { /// The public identity key. 
- pub public: identity::PublicKey, + pub(crate) public: identity::PublicKey, /// The signature over the public DH key. - pub signature: Option>, + pub(crate) signature: Vec, } -impl Keypair { - /// The public key of the DH keypair. - pub fn public(&self) -> &PublicKey { - &self.public - } - +impl Keypair { /// The secret key of the DH keypair. - pub fn secret(&self) -> &SecretKey { + pub(crate) fn secret(&self) -> &SecretKey { &self.secret } /// Turn this DH keypair into a [`AuthenticKeypair`], i.e. a DH keypair that /// is authentic w.r.t. the given identity keypair, by signing the DH public key. - pub fn into_authentic(self, id_keys: &identity::Keypair) -> Result, Error> - where - T: AsRef<[u8]>, - T: Protocol, - { - let sig = T::sign(id_keys, &self.public)?; + pub(crate) fn into_authentic( + self, + id_keys: &identity::Keypair, + ) -> Result { + let sig = id_keys.sign(&[STATIC_KEY_DOMAIN.as_bytes(), self.public.as_ref()].concat())?; let identity = KeypairIdentity { public: id_keys.public(), - signature: Some(sig), + signature: sig, }; Ok(AuthenticKeypair { @@ -195,37 +101,59 @@ impl Keypair { identity, }) } + + /// An "empty" keypair as a starting state for DH computations in `snow`, + /// which get manipulated through the `snow::types::Dh` interface. + pub(crate) fn empty() -> Self { + Keypair { + secret: SecretKey([0u8; 32]), + public: PublicKey([0u8; 32]), + } + } + + /// Create a new X25519 keypair. + pub(crate) fn new() -> Keypair { + let mut sk_bytes = [0u8; 32]; + rand::thread_rng().fill(&mut sk_bytes); + let sk = SecretKey(sk_bytes); // Copy + sk_bytes.zeroize(); + Self::from(sk) + } } /// DH secret key. -#[derive(Clone)] -pub struct SecretKey(T); +#[derive(Clone, Default)] +pub(crate) struct SecretKey([u8; 32]); -impl Drop for SecretKey { +impl Drop for SecretKey { fn drop(&mut self) { self.0.zeroize() } } -impl + Zeroize> AsRef<[u8]> for SecretKey { +impl AsRef<[u8]> for SecretKey { fn as_ref(&self) -> &[u8] { self.0.as_ref() } } /// DH public key. 
-#[derive(Clone)] -pub struct PublicKey(T); +#[derive(Clone, PartialEq, Default)] +pub(crate) struct PublicKey([u8; 32]); + +impl PublicKey { + pub(crate) fn from_slice(slice: &[u8]) -> Result { + if slice.len() != 32 { + return Err(Error::InvalidLength); + } -impl> PartialEq for PublicKey { - fn eq(&self, other: &PublicKey) -> bool { - self.as_ref() == other.as_ref() + let mut key = [0u8; 32]; + key.copy_from_slice(slice); + Ok(PublicKey(key)) } } -impl> Eq for PublicKey {} - -impl> AsRef<[u8]> for PublicKey { +impl AsRef<[u8]> for PublicKey { fn as_ref(&self) -> &[u8] { self.0.as_ref() } @@ -244,7 +172,7 @@ impl snow::resolvers::CryptoResolver for Resolver { fn resolve_dh(&self, choice: &snow::params::DHChoice) -> Option> { if let snow::params::DHChoice::Curve25519 = choice { - Some(Box::new(Keypair::::default())) + Some(Box::new(Keypair::empty())) } else { None } @@ -304,10 +232,67 @@ impl rand::CryptoRng for Rng {} impl snow::types::Random for Rng {} +impl Default for Keypair { + fn default() -> Self { + Self::new() + } +} + +/// Promote a X25519 secret key into a keypair. 
+impl From for Keypair { + fn from(secret: SecretKey) -> Keypair { + let public = PublicKey(x25519(secret.0, X25519_BASEPOINT_BYTES)); + Keypair { secret, public } + } +} + +#[doc(hidden)] +impl snow::types::Dh for Keypair { + fn name(&self) -> &'static str { + "25519" + } + fn pub_len(&self) -> usize { + 32 + } + fn priv_len(&self) -> usize { + 32 + } + fn pubkey(&self) -> &[u8] { + self.public.as_ref() + } + fn privkey(&self) -> &[u8] { + self.secret.as_ref() + } + + fn set(&mut self, sk: &[u8]) { + let mut secret = [0u8; 32]; + secret.copy_from_slice(sk); + self.secret = SecretKey(secret); // Copy + self.public = PublicKey(x25519(secret, X25519_BASEPOINT_BYTES)); + secret.zeroize(); + } + + fn generate(&mut self, rng: &mut dyn snow::types::Random) { + let mut secret = [0u8; 32]; + rng.fill_bytes(&mut secret); + self.secret = SecretKey(secret); // Copy + self.public = PublicKey(x25519(secret, X25519_BASEPOINT_BYTES)); + secret.zeroize(); + } + + fn dh(&self, pk: &[u8], shared_secret: &mut [u8]) -> Result<(), snow::Error> { + let mut p = [0; 32]; + p.copy_from_slice(&pk[..32]); + let ss = x25519(self.secret.0, p); + shared_secret[..32].copy_from_slice(&ss[..]); + Ok(()) + } +} + #[cfg(test)] mod tests { use super::*; - use crate::X25519Spec; + use crate::protocol::PARAMS_XX; use once_cell::sync::Lazy; #[test] @@ -333,9 +318,9 @@ mod tests { } fn xx_builder(prologue: &'static [u8]) -> snow::Builder<'static> { - X25519Spec::params_xx().into_builder(prologue, TEST_KEY.secret(), None) + noise_params_into_builder(PARAMS_XX.clone(), prologue, TEST_KEY.secret(), None) } // Hack to work around borrow-checker. 
- static TEST_KEY: Lazy> = Lazy::new(Keypair::::new); + static TEST_KEY: Lazy = Lazy::new(Keypair::new); } diff --git a/transports/noise/src/protocol/x25519.rs b/transports/noise/src/protocol/x25519.rs deleted file mode 100644 index 4a572945ef8..00000000000 --- a/transports/noise/src/protocol/x25519.rs +++ /dev/null @@ -1,296 +0,0 @@ -// Copyright 2019 Parity Technologies (UK) Ltd. -// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - -//! Legacy Noise protocols based on X25519. -//! -//! **Note**: This set of protocols is not interoperable with other -//! libp2p implementations. 
- -use crate::{Error, NoiseConfig, Protocol, ProtocolParams}; -use curve25519_dalek::edwards::CompressedEdwardsY; -use libp2p_core::UpgradeInfo; -use libp2p_identity as identity; -use libp2p_identity::ed25519; -use once_cell::sync::Lazy; -use rand::Rng; -use sha2::{Digest, Sha512}; -use x25519_dalek::{x25519, X25519_BASEPOINT_BYTES}; -use zeroize::Zeroize; - -use super::*; - -static PARAMS_IK: Lazy = Lazy::new(|| { - "Noise_IK_25519_ChaChaPoly_SHA256" - .parse() - .map(ProtocolParams) - .expect("Invalid protocol name") -}); -static PARAMS_IX: Lazy = Lazy::new(|| { - "Noise_IX_25519_ChaChaPoly_SHA256" - .parse() - .map(ProtocolParams) - .expect("Invalid protocol name") -}); -static PARAMS_XX: Lazy = Lazy::new(|| { - "Noise_XX_25519_ChaChaPoly_SHA256" - .parse() - .map(ProtocolParams) - .expect("Invalid protocol name") -}); - -/// A X25519 key. -#[derive(Clone)] -pub struct X25519([u8; 32]); - -impl AsRef<[u8]> for X25519 { - fn as_ref(&self) -> &[u8] { - self.0.as_ref() - } -} - -impl Zeroize for X25519 { - fn zeroize(&mut self) { - self.0.zeroize() - } -} - -impl UpgradeInfo for NoiseConfig { - type Info = &'static str; - type InfoIter = std::iter::Once; - - fn protocol_info(&self) -> Self::InfoIter { - std::iter::once("/noise/ix/25519/chachapoly/sha256/0.1.0") - } -} - -impl UpgradeInfo for NoiseConfig { - type Info = &'static str; - type InfoIter = std::iter::Once; - - fn protocol_info(&self) -> Self::InfoIter { - std::iter::once("/noise/xx/25519/chachapoly/sha256/0.1.0") - } -} - -impl UpgradeInfo for NoiseConfig { - type Info = &'static str; - type InfoIter = std::iter::Once; - - fn protocol_info(&self) -> Self::InfoIter { - std::iter::once("/noise/ik/25519/chachapoly/sha256/0.1.0") - } -} - -/// Legacy Noise protocol for X25519. -/// -/// **Note**: This `Protocol` provides no configuration that -/// is interoperable with other libp2p implementations. -/// See [`crate::X25519Spec`] instead. 
-impl Protocol for X25519 { - fn params_ik() -> ProtocolParams { - PARAMS_IK.clone() - } - - fn params_ix() -> ProtocolParams { - PARAMS_IX.clone() - } - - fn params_xx() -> ProtocolParams { - PARAMS_XX.clone() - } - - fn public_from_bytes(bytes: &[u8]) -> Result, Error> { - if bytes.len() != 32 { - return Err(Error::InvalidLength); - } - let mut pk = [0u8; 32]; - pk.copy_from_slice(bytes); - Ok(PublicKey(X25519(pk))) - } - - #[allow(irrefutable_let_patterns)] - fn linked(id_pk: &identity::PublicKey, dh_pk: &PublicKey) -> bool { - if let identity::PublicKey::Ed25519(ref p) = id_pk { - PublicKey::from_ed25519(p).as_ref() == dh_pk.as_ref() - } else { - false - } - } -} - -impl Keypair { - /// Create a new X25519 keypair. - pub fn new() -> Keypair { - let mut sk_bytes = [0u8; 32]; - rand::thread_rng().fill(&mut sk_bytes); - let sk = SecretKey(X25519(sk_bytes)); // Copy - sk_bytes.zeroize(); - Self::from(sk) - } - - /// Creates an X25519 `Keypair` from an [`identity::Keypair`], if possible. - /// - /// The returned keypair will be [associated with](KeypairIdentity) the - /// given identity keypair. - /// - /// Returns `None` if the given identity keypair cannot be used as an X25519 keypair. - /// - /// > **Note**: If the identity keypair is already used in the context - /// > of other cryptographic protocols outside of Noise, it should be preferred to - /// > create a new static X25519 keypair for use in the Noise protocol. 
- /// > - /// > See also: - /// > - /// > * [Noise: Static Key Reuse](http://www.noiseprotocol.org/noise.html#security-considerations) - #[allow(unreachable_patterns)] - pub fn from_identity(id_keys: &identity::Keypair) -> Option> { - match id_keys { - identity::Keypair::Ed25519(p) => { - let kp = Keypair::from(SecretKey::from_ed25519(&p.secret())); - let id = KeypairIdentity { - public: id_keys.public(), - signature: None, - }; - Some(AuthenticKeypair { - keypair: kp, - identity: id, - }) - } - _ => None, - } - } -} - -impl Default for Keypair { - fn default() -> Self { - Self::new() - } -} - -/// Promote a X25519 secret key into a keypair. -impl From> for Keypair { - fn from(secret: SecretKey) -> Keypair { - let public = PublicKey(X25519(x25519((secret.0).0, X25519_BASEPOINT_BYTES))); - Keypair { secret, public } - } -} - -impl PublicKey { - /// Construct a curve25519 public key from an Ed25519 public key. - pub fn from_ed25519(pk: &ed25519::PublicKey) -> Self { - PublicKey(X25519( - CompressedEdwardsY(pk.encode()) - .decompress() - .expect("An Ed25519 public key is a valid point by construction.") - .to_montgomery() - .0, - )) - } -} - -impl SecretKey { - /// Construct a X25519 secret key from a Ed25519 secret key. - /// - /// > **Note**: If the Ed25519 secret key is already used in the context - /// > of other cryptographic protocols outside of Noise, it should be preferred - /// > to create a new keypair for use in the Noise protocol. - /// > - /// > See also: - /// > - /// > * [Noise: Static Key Reuse](http://www.noiseprotocol.org/noise.html#security-considerations) - /// > * [Ed25519 to Curve25519](https://libsodium.gitbook.io/doc/advanced/ed25519-curve25519) - pub fn from_ed25519(ed25519_sk: &ed25519::SecretKey) -> Self { - // An Ed25519 public key is derived off the left half of the SHA512 of the - // secret scalar, hence a matching conversion of the secret key must do - // the same to yield a Curve25519 keypair with the same public key. 
- // let ed25519_sk = ed25519::SecretKey::from(ed); - let mut curve25519_sk: [u8; 32] = [0; 32]; - let hash = Sha512::digest(ed25519_sk.as_ref()); - curve25519_sk.copy_from_slice(&hash[..32]); - let sk = SecretKey(X25519(curve25519_sk)); // Copy - curve25519_sk.zeroize(); - sk - } -} - -#[doc(hidden)] -impl snow::types::Dh for Keypair { - fn name(&self) -> &'static str { - "25519" - } - fn pub_len(&self) -> usize { - 32 - } - fn priv_len(&self) -> usize { - 32 - } - fn pubkey(&self) -> &[u8] { - self.public.as_ref() - } - fn privkey(&self) -> &[u8] { - self.secret.as_ref() - } - - fn set(&mut self, sk: &[u8]) { - let mut secret = [0u8; 32]; - secret.copy_from_slice(sk); - self.secret = SecretKey(X25519(secret)); // Copy - self.public = PublicKey(X25519(x25519(secret, X25519_BASEPOINT_BYTES))); - secret.zeroize(); - } - - fn generate(&mut self, rng: &mut dyn snow::types::Random) { - let mut secret = [0u8; 32]; - rng.fill_bytes(&mut secret); - self.secret = SecretKey(X25519(secret)); // Copy - self.public = PublicKey(X25519(x25519(secret, X25519_BASEPOINT_BYTES))); - secret.zeroize(); - } - - fn dh(&self, pk: &[u8], shared_secret: &mut [u8]) -> Result<(), snow::Error> { - let mut p = [0; 32]; - p.copy_from_slice(&pk[..32]); - let ss = x25519((self.secret.0).0, p); - shared_secret[..32].copy_from_slice(&ss[..]); - Ok(()) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use libp2p_identity::ed25519; - use quickcheck::*; - - // The x25519 public key obtained through ed25519 keypair conversion - // (and thus derived from the converted secret key) must match the x25519 - // public key derived directly from the ed25519 public key. 
- #[test] - fn prop_public_ed25519_to_x25519_matches() { - fn prop() -> bool { - let ed25519 = ed25519::Keypair::generate(); - let x25519 = Keypair::from(SecretKey::from_ed25519(&ed25519.secret())); - let x25519_public = PublicKey::from_ed25519(&ed25519.public()); - x25519.public == x25519_public - } - - quickcheck(prop as fn() -> _); - } -} diff --git a/transports/noise/src/protocol/x25519_spec.rs b/transports/noise/src/protocol/x25519_spec.rs deleted file mode 100644 index 7f7a5a9c4e7..00000000000 --- a/transports/noise/src/protocol/x25519_spec.rs +++ /dev/null @@ -1,200 +0,0 @@ -// Copyright 2019 Parity Technologies (UK) Ltd. -// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - -//! [libp2p-noise-spec] compliant Noise protocols based on X25519. -//! -//! 
[libp2p-noise-spec]: https://github.com/libp2p/specs/tree/master/noise - -use crate::{Error, NoiseConfig, Protocol, ProtocolParams}; -use libp2p_core::UpgradeInfo; -use libp2p_identity as identity; -use rand::Rng; -use x25519_dalek::{x25519, X25519_BASEPOINT_BYTES}; -use zeroize::Zeroize; - -use super::*; - -/// Prefix of static key signatures for domain separation. -const STATIC_KEY_DOMAIN: &str = "noise-libp2p-static-key:"; - -/// A X25519 key. -#[derive(Clone)] -pub struct X25519Spec([u8; 32]); - -impl AsRef<[u8]> for X25519Spec { - fn as_ref(&self) -> &[u8] { - self.0.as_ref() - } -} - -impl Zeroize for X25519Spec { - fn zeroize(&mut self) { - self.0.zeroize() - } -} - -impl Keypair { - /// An "empty" keypair as a starting state for DH computations in `snow`, - /// which get manipulated through the `snow::types::Dh` interface. - pub(super) fn default() -> Self { - Keypair { - secret: SecretKey(X25519Spec([0u8; 32])), - public: PublicKey(X25519Spec([0u8; 32])), - } - } - - /// Create a new X25519 keypair. - pub fn new() -> Keypair { - let mut sk_bytes = [0u8; 32]; - rand::thread_rng().fill(&mut sk_bytes); - let sk = SecretKey(X25519Spec(sk_bytes)); // Copy - sk_bytes.zeroize(); - Self::from(sk) - } -} - -impl Default for Keypair { - fn default() -> Self { - Self::new() - } -} - -/// Promote a X25519 secret key into a keypair. -impl From> for Keypair { - fn from(secret: SecretKey) -> Keypair { - let public = PublicKey(X25519Spec(x25519((secret.0).0, X25519_BASEPOINT_BYTES))); - Keypair { secret, public } - } -} - -impl UpgradeInfo for NoiseConfig { - type Info = &'static str; - type InfoIter = std::iter::Once; - - fn protocol_info(&self) -> Self::InfoIter { - std::iter::once("/noise") - } -} - -/// **Note**: This is not currentlyy a standardised upgrade. 
- -impl UpgradeInfo for NoiseConfig { - type Info = &'static str; - type InfoIter = std::iter::Once; - - fn protocol_info(&self) -> Self::InfoIter { - std::iter::once("/noise/ix/25519/chachapoly/sha256/0.1.0") - } -} - -/// **Note**: This is not currently a standardised upgrade. - -impl UpgradeInfo for NoiseConfig { - type Info = &'static str; - type InfoIter = std::iter::Once; - - fn protocol_info(&self) -> Self::InfoIter { - std::iter::once("/noise/ik/25519/chachapoly/sha256/0.1.0") - } -} - -/// Noise protocols for X25519 with libp2p-spec compliant signatures. -/// -/// **Note**: Only the XX handshake pattern is currently guaranteed to be -/// interoperable with other libp2p implementations. -impl Protocol for X25519Spec { - fn params_ik() -> ProtocolParams { - x25519::X25519::params_ik() - } - - fn params_ix() -> ProtocolParams { - x25519::X25519::params_ix() - } - - fn params_xx() -> ProtocolParams { - x25519::X25519::params_xx() - } - - fn public_from_bytes(bytes: &[u8]) -> Result, Error> { - if bytes.len() != 32 { - return Err(Error::InvalidLength); - } - let mut pk = [0u8; 32]; - pk.copy_from_slice(bytes); - Ok(PublicKey(X25519Spec(pk))) - } - - fn verify( - id_pk: &identity::PublicKey, - dh_pk: &PublicKey, - sig: &Option>, - ) -> bool { - sig.as_ref().map_or(false, |s| { - id_pk.verify(&[STATIC_KEY_DOMAIN.as_bytes(), dh_pk.as_ref()].concat(), s) - }) - } - - fn sign(id_keys: &identity::Keypair, dh_pk: &PublicKey) -> Result, Error> { - Ok(id_keys.sign(&[STATIC_KEY_DOMAIN.as_bytes(), dh_pk.as_ref()].concat())?) 
- } -} - -#[doc(hidden)] -impl snow::types::Dh for Keypair { - fn name(&self) -> &'static str { - "25519" - } - fn pub_len(&self) -> usize { - 32 - } - fn priv_len(&self) -> usize { - 32 - } - fn pubkey(&self) -> &[u8] { - self.public.as_ref() - } - fn privkey(&self) -> &[u8] { - self.secret.as_ref() - } - - fn set(&mut self, sk: &[u8]) { - let mut secret = [0u8; 32]; - secret.copy_from_slice(sk); - self.secret = SecretKey(X25519Spec(secret)); // Copy - self.public = PublicKey(X25519Spec(x25519(secret, X25519_BASEPOINT_BYTES))); - secret.zeroize(); - } - - fn generate(&mut self, rng: &mut dyn snow::types::Random) { - let mut secret = [0u8; 32]; - rng.fill_bytes(&mut secret); - self.secret = SecretKey(X25519Spec(secret)); // Copy - self.public = PublicKey(X25519Spec(x25519(secret, X25519_BASEPOINT_BYTES))); - secret.zeroize(); - } - - fn dh(&self, pk: &[u8], shared_secret: &mut [u8]) -> Result<(), snow::Error> { - let mut p = [0; 32]; - p.copy_from_slice(&pk[..32]); - let ss = x25519((self.secret.0).0, p); - shared_secret[..32].copy_from_slice(&ss[..]); - Ok(()) - } -} diff --git a/transports/noise/tests/smoke.rs b/transports/noise/tests/smoke.rs index b862a944dfd..7100e7c7a8d 100644 --- a/transports/noise/tests/smoke.rs +++ b/transports/noise/tests/smoke.rs @@ -20,16 +20,17 @@ use futures::prelude::*; use libp2p_core::transport::{MemoryTransport, Transport}; -use libp2p_core::{upgrade, InboundUpgrade, OutboundUpgrade}; +use libp2p_core::upgrade; +use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}; use libp2p_identity as identity; use libp2p_noise as noise; -use log::info; use quickcheck::*; use std::{convert::TryInto, io}; +use tracing_subscriber::EnvFilter; #[allow(dead_code)] fn core_upgrade_compat() { - // Tests API compaibility with the libp2p-core upgrade API, + // Tests API compatibility with the libp2p-core upgrade API, // i.e. if it compiles, the "test" is considered a success. 
let id_keys = identity::Keypair::generate_ed25519(); let noise = noise::Config::new(&id_keys).unwrap(); @@ -40,7 +41,9 @@ fn core_upgrade_compat() { #[test] fn xx() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); fn prop(mut messages: Vec) -> bool { messages.truncate(5); let server_id = identity::Keypair::generate_ed25519(); @@ -50,8 +53,8 @@ fn xx() { futures::executor::block_on(async move { let ( - (reported_client_id, mut client_session), - (reported_server_id, mut server_session), + (reported_client_id, mut server_session), + (reported_server_id, mut client_session), ) = futures::future::try_join( noise::Config::new(&server_id) .unwrap() @@ -85,7 +88,7 @@ fn xx() { Err(e) => panic!("error reading len: {e}"), } }; - info!("server: reading message ({} bytes)", len); + tracing::info!(bytes=%len, "server: reading message"); let mut server_buffer = vec![0; len.try_into().unwrap()]; server_session .read_exact(&mut server_buffer) diff --git a/transports/noise/tests/webtransport_certhashes.rs b/transports/noise/tests/webtransport_certhashes.rs new file mode 100644 index 00000000000..b3c924f8188 --- /dev/null +++ b/transports/noise/tests/webtransport_certhashes.rs @@ -0,0 +1,156 @@ +use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}; +use libp2p_identity as identity; +use libp2p_noise as noise; +use multihash::Multihash; +use std::collections::HashSet; + +const SHA_256_MH: u64 = 0x12; + +#[test] +fn webtransport_same_set_of_certhashes() { + let (certhash1, certhash2, _) = certhashes(); + + handshake_with_certhashes(vec![certhash1, certhash2], vec![certhash1, certhash2]).unwrap(); +} + +#[test] +fn webtransport_subset_of_certhashes() { + let (certhash1, certhash2, _) = certhashes(); + + handshake_with_certhashes(vec![certhash1], vec![certhash1, certhash2]).unwrap(); +} + +#[test] +fn webtransport_client_without_certhashes() { + let (certhash1, certhash2, 
_) = certhashes(); + + // Valid when server uses CA-signed TLS certificate. + handshake_with_certhashes(vec![], vec![certhash1, certhash2]).unwrap(); +} + +#[test] +fn webtransport_client_and_server_without_certhashes() { + // Valid when server uses CA-signed TLS certificate. + handshake_with_certhashes(vec![], vec![]).unwrap(); +} + +#[test] +fn webtransport_server_empty_certhashes() { + let (certhash1, certhash2, _) = certhashes(); + + // Invalid case, because a MITM attacker may strip certificates of the server. + let Err(noise::Error::UnknownWebTransportCerthashes(expected, received)) = + handshake_with_certhashes(vec![certhash1, certhash2], vec![]) + else { + panic!("unexpected result"); + }; + + assert_eq!(expected, HashSet::from([certhash1, certhash2])); + assert_eq!(received, HashSet::new()); +} + +#[test] +fn webtransport_client_uninit_certhashes() { + let (certhash1, certhash2, _) = certhashes(); + + // Valid when server uses CA-signed TLS certificate. + handshake_with_certhashes(None, vec![certhash1, certhash2]).unwrap(); +} + +#[test] +fn webtransport_client_and_server_uninit_certhashes() { + // Valid when server uses CA-signed TLS certificate. + handshake_with_certhashes(None, None).unwrap(); +} + +#[test] +fn webtransport_server_uninit_certhashes() { + let (certhash1, certhash2, _) = certhashes(); + + // Invalid case, because a MITM attacker may strip certificates of the server. 
+ let Err(noise::Error::UnknownWebTransportCerthashes(expected, received)) = + handshake_with_certhashes(vec![certhash1, certhash2], None) + else { + panic!("unexpected result"); + }; + + assert_eq!(expected, HashSet::from([certhash1, certhash2])); + assert_eq!(received, HashSet::new()); +} + +#[test] +fn webtransport_different_server_certhashes() { + let (certhash1, certhash2, certhash3) = certhashes(); + + let Err(noise::Error::UnknownWebTransportCerthashes(expected, received)) = + handshake_with_certhashes(vec![certhash1, certhash3], vec![certhash1, certhash2]) + else { + panic!("unexpected result"); + }; + + assert_eq!(expected, HashSet::from([certhash1, certhash3])); + assert_eq!(received, HashSet::from([certhash1, certhash2])); +} + +#[test] +fn webtransport_superset_of_certhashes() { + let (certhash1, certhash2, _) = certhashes(); + + let Err(noise::Error::UnknownWebTransportCerthashes(expected, received)) = + handshake_with_certhashes(vec![certhash1, certhash2], vec![certhash1]) + else { + panic!("unexpected result"); + }; + + assert_eq!(expected, HashSet::from([certhash1, certhash2])); + assert_eq!(received, HashSet::from([certhash1])); +} + +fn certhashes() -> (Multihash<64>, Multihash<64>, Multihash<64>) { + ( + Multihash::wrap(SHA_256_MH, b"1").unwrap(), + Multihash::wrap(SHA_256_MH, b"2").unwrap(), + Multihash::wrap(SHA_256_MH, b"3").unwrap(), + ) +} + +// `valid_certhases` must be a strict subset of `server_certhashes`. 
+fn handshake_with_certhashes( + valid_certhases: impl Into>>>, + server_certhashes: impl Into>>>, +) -> Result<(), noise::Error> { + let valid_certhases = valid_certhases.into(); + let server_certhashes = server_certhashes.into(); + + let client_id = identity::Keypair::generate_ed25519(); + let server_id = identity::Keypair::generate_ed25519(); + + let (client, server) = futures_ringbuf::Endpoint::pair(100, 100); + + futures::executor::block_on(async move { + let mut client_config = noise::Config::new(&client_id)?; + let mut server_config = noise::Config::new(&server_id)?; + + if let Some(valid_certhases) = valid_certhases { + client_config = + client_config.with_webtransport_certhashes(valid_certhases.into_iter().collect()); + } + + if let Some(server_certhashes) = server_certhashes { + server_config = + server_config.with_webtransport_certhashes(server_certhashes.into_iter().collect()); + } + + let ((reported_client_id, mut _server_session), (reported_server_id, mut _client_session)) = + futures::future::try_join( + server_config.upgrade_inbound(server, ""), + client_config.upgrade_outbound(client, ""), + ) + .await?; + + assert_eq!(reported_client_id, client_id.public().to_peer_id()); + assert_eq!(reported_server_id, server_id.public().to_peer_id()); + + Ok(()) + }) +} diff --git a/transports/plaintext/CHANGELOG.md b/transports/plaintext/CHANGELOG.md index 5f04ca16cba..42b53d12a88 100644 --- a/transports/plaintext/CHANGELOG.md +++ b/transports/plaintext/CHANGELOG.md @@ -1,9 +1,26 @@ -## 0.40.0 - unreleased +## 0.41.0 + +- Migrate to `{In,Out}boundConnectionUpgrade` traits. + See [PR 4695](https://github.com/libp2p/rust-libp2p/pull/4695). +- Remove deprecated type-aliases and make `Config::local_public_key` private. + See [PR 4734](https://github.com/libp2p/rust-libp2p/pull/4734). + +## 0.40.1 + +- Rename `Plaintext2Config` to `Config` to follow naming conventions across repository. + See [PR 4535](https://github.com/libp2p/rust-libp2p/pull/4535). 
+ +## 0.40.0 - Raise MSRV to 1.65. See [PR 3715]. +- Remove `Plaintext1Config`. + Use `Plaintext2Config` instead. + See [PR 3915]. + [PR 3715]: https://github.com/libp2p/rust-libp2p/pull/3715 +[PR 3915]: https://github.com/libp2p/rust-libp2p/pull/3915 ## 0.39.1 diff --git a/transports/plaintext/Cargo.toml b/transports/plaintext/Cargo.toml index 6946f22558e..e3f1e280851 100644 --- a/transports/plaintext/Cargo.toml +++ b/transports/plaintext/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-plaintext" edition = "2021" rust-version = { workspace = true } description = "Plaintext encryption dummy protocol for libp2p" -version = "0.40.0" +version = "0.41.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -11,22 +11,21 @@ keywords = ["peer-to-peer", "libp2p", "networking"] categories = ["network-programming", "asynchronous"] [dependencies] -asynchronous-codec = "0.6" +asynchronous-codec = { workspace = true } bytes = "1" -futures = "0.3.28" +futures = "0.3.30" libp2p-core = { workspace = true } libp2p-identity = { workspace = true } -log = "0.4.8" quick-protobuf = "0.8" -unsigned-varint = { version = "0.7", features = ["asynchronous_codec"] } -void = "1.0.2" +tracing = "0.1.37" +quick-protobuf-codec = { workspace = true } [dev-dependencies] -env_logger = "0.10.0" -libp2p-identity = { workspace = true, features = ["ed25519"] } +libp2p-identity = { workspace = true, features = ["ed25519", "rand"] } quickcheck = { workspace = true } rand = "0.8" -futures_ringbuf = "0.3.1" +futures_ringbuf = "0.4.0" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } # Passing arguments to the docsrs builder in order to properly document cfg's. 
# More information: https://docs.rs/about/builds#cross-compiling @@ -34,3 +33,6 @@ futures_ringbuf = "0.3.1" all-features = true rustdoc-args = ["--cfg", "docsrs"] rustc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true diff --git a/transports/plaintext/src/error.rs b/transports/plaintext/src/error.rs index 133cca746af..7480874a85e 100644 --- a/transports/plaintext/src/error.rs +++ b/transports/plaintext/src/error.rs @@ -23,25 +23,25 @@ use std::fmt; use std::io::Error as IoError; #[derive(Debug)] -pub enum PlainTextError { +pub enum Error { /// I/O error. - IoError(IoError), + Io(IoError), /// Failed to parse the handshake protobuf message. InvalidPayload(DecodeError), /// Failed to parse public key from bytes in protobuf message. - InvalidPublicKey(libp2p_core::identity::error::DecodingError), + InvalidPublicKey(libp2p_identity::DecodingError), - /// Failed to parse the [`PeerId`](libp2p_core::PeerId) from bytes in the protobuf message. - InvalidPeerId(libp2p_core::multihash::Error), + /// Failed to parse the [`PeerId`](libp2p_identity::PeerId) from bytes in the protobuf message. + InvalidPeerId(libp2p_identity::ParseError), /// The peer id of the exchange isn't consistent with the remote public key. 
PeerIdMismatch, } #[derive(Debug)] -pub struct DecodeError(quick_protobuf::Error); +pub struct DecodeError(pub(crate) quick_protobuf_codec::Error); impl fmt::Display for DecodeError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { @@ -55,52 +55,52 @@ impl error::Error for DecodeError { } } -impl error::Error for PlainTextError { +impl error::Error for Error { fn cause(&self) -> Option<&dyn error::Error> { match *self { - PlainTextError::IoError(ref err) => Some(err), - PlainTextError::InvalidPayload(ref err) => Some(err), - PlainTextError::InvalidPublicKey(ref err) => Some(err), - PlainTextError::InvalidPeerId(ref err) => Some(err), + Error::Io(ref err) => Some(err), + Error::InvalidPayload(ref err) => Some(err), + Error::InvalidPublicKey(ref err) => Some(err), + Error::InvalidPeerId(ref err) => Some(err), _ => None, } } } -impl fmt::Display for PlainTextError { +impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { match self { - PlainTextError::IoError(e) => write!(f, "I/O error: {e}"), - PlainTextError::InvalidPayload(_) => f.write_str("Failed to decode protobuf"), - PlainTextError::PeerIdMismatch => f.write_str( + Error::Io(e) => write!(f, "I/O error: {e}"), + Error::InvalidPayload(_) => f.write_str("Failed to decode protobuf"), + Error::PeerIdMismatch => f.write_str( "The peer id of the exchange isn't consistent with the remote public key", ), - PlainTextError::InvalidPublicKey(_) => f.write_str("Failed to decode public key"), - PlainTextError::InvalidPeerId(_) => f.write_str("Failed to decode PeerId"), + Error::InvalidPublicKey(_) => f.write_str("Failed to decode public key"), + Error::InvalidPeerId(_) => f.write_str("Failed to decode PeerId"), } } } -impl From for PlainTextError { - fn from(err: IoError) -> PlainTextError { - PlainTextError::IoError(err) +impl From for Error { + fn from(err: IoError) -> Error { + Error::Io(err) } } -impl From for PlainTextError { - fn from(err: quick_protobuf::Error) 
-> PlainTextError { - PlainTextError::InvalidPayload(DecodeError(err)) +impl From for Error { + fn from(err: DecodeError) -> Error { + Error::InvalidPayload(err) } } -impl From for PlainTextError { - fn from(err: libp2p_core::identity::error::DecodingError) -> PlainTextError { - PlainTextError::InvalidPublicKey(err) +impl From for Error { + fn from(err: libp2p_identity::DecodingError) -> Error { + Error::InvalidPublicKey(err) } } -impl From for PlainTextError { - fn from(err: libp2p_core::multihash::Error) -> PlainTextError { - PlainTextError::InvalidPeerId(err) +impl From for Error { + fn from(err: libp2p_identity::ParseError) -> Error { + Error::InvalidPeerId(err) } } diff --git a/transports/plaintext/src/handshake.rs b/transports/plaintext/src/handshake.rs index b1e322459af..ddd5f7f8a9b 100644 --- a/transports/plaintext/src/handshake.rs +++ b/transports/plaintext/src/handshake.rs @@ -18,114 +18,56 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::error::PlainTextError; +use crate::error::{DecodeError, Error}; use crate::proto::Exchange; -use crate::PlainText2Config; - +use crate::Config; use asynchronous_codec::{Framed, FramedParts}; -use bytes::{Bytes, BytesMut}; +use bytes::Bytes; use futures::prelude::*; use libp2p_identity::{PeerId, PublicKey}; -use log::{debug, trace}; -use quick_protobuf::{BytesReader, MessageRead, MessageWrite, Writer}; use std::io::{Error as IoError, ErrorKind as IoErrorKind}; -use unsigned_varint::codec::UviBytes; - -struct HandshakeContext { - config: PlainText2Config, - state: T, -} - -// HandshakeContext<()> --with_local-> HandshakeContext -struct Local { - // Our local exchange's raw bytes: - exchange_bytes: Vec, -} - -// HandshakeContext --with_remote-> HandshakeContext -pub(crate) struct Remote { - // The remote's peer ID: - pub(crate) peer_id: PeerId, // The remote's public key: - pub(crate) public_key: PublicKey, -} - -impl HandshakeContext { - fn new(config: PlainText2Config) -> Self { - let exchange = Exchange { - id: Some(config.local_public_key.to_peer_id().to_bytes()), - pubkey: Some(config.local_public_key.encode_protobuf()), - }; - let mut buf = Vec::with_capacity(exchange.get_size()); - let mut writer = Writer::new(&mut buf); - exchange - .write_message(&mut writer) - .expect("Encoding to succeed"); - - Self { - config, - state: Local { - exchange_bytes: buf, - }, - } - } - - fn with_remote( - self, - exchange_bytes: BytesMut, - ) -> Result, PlainTextError> { - let mut reader = BytesReader::from_bytes(&exchange_bytes); - let prop = Exchange::from_reader(&mut reader, &exchange_bytes)?; - let public_key = PublicKey::try_decode_protobuf(&prop.pubkey.unwrap_or_default())?; - let peer_id = PeerId::from_bytes(&prop.id.unwrap_or_default())?; - - // Check the validity of the remote's `Exchange`. 
- if peer_id != public_key.to_peer_id() { - return Err(PlainTextError::PeerIdMismatch); - } - - Ok(HandshakeContext { - config: self.config, - state: Remote { - peer_id, - public_key, - }, - }) - } -} - -pub(crate) async fn handshake( - socket: S, - config: PlainText2Config, -) -> Result<(S, Remote, Bytes), PlainTextError> +pub(crate) async fn handshake(socket: S, config: Config) -> Result<(S, PublicKey, Bytes), Error> where S: AsyncRead + AsyncWrite + Send + Unpin, { // The handshake messages all start with a variable-length integer indicating the size. - let mut framed_socket = Framed::new(socket, UviBytes::default()); + let mut framed_socket = Framed::new(socket, quick_protobuf_codec::Codec::::new(100)); - trace!("starting handshake"); - let context = HandshakeContext::new(config); - - trace!("sending exchange to remote"); + tracing::trace!("sending exchange to remote"); framed_socket - .send(BytesMut::from(&context.state.exchange_bytes[..])) - .await?; - - trace!("receiving the remote's exchange"); - let context = match framed_socket.next().await { - Some(p) => context.with_remote(p?)?, + .send(Exchange { + id: Some(config.local_public_key.to_peer_id().to_bytes()), + pubkey: Some(config.local_public_key.encode_protobuf()), + }) + .await + .map_err(DecodeError)?; + + tracing::trace!("receiving the remote's exchange"); + let public_key = match framed_socket + .next() + .await + .transpose() + .map_err(DecodeError)? 
+ { + Some(remote) => { + let public_key = PublicKey::try_decode_protobuf(&remote.pubkey.unwrap_or_default())?; + let peer_id = PeerId::from_bytes(&remote.id.unwrap_or_default())?; + + if peer_id != public_key.to_peer_id() { + return Err(Error::PeerIdMismatch); + } + + public_key + } None => { - debug!("unexpected eof while waiting for remote's exchange"); + tracing::debug!("unexpected eof while waiting for remote's exchange"); let err = IoError::new(IoErrorKind::BrokenPipe, "unexpected eof"); return Err(err.into()); } }; - trace!( - "received exchange from remote; pubkey = {:?}", - context.state.public_key - ); + tracing::trace!(?public_key, "received exchange from remote"); let FramedParts { io, @@ -134,5 +76,5 @@ where .. } = framed_socket.into_parts(); assert!(write_buffer.is_empty()); - Ok((io, context.state, read_buffer.freeze())) + Ok((io, public_key, read_buffer.freeze())) } diff --git a/transports/plaintext/src/lib.rs b/transports/plaintext/src/lib.rs index 64aea0b82a6..4a322d63fab 100644 --- a/transports/plaintext/src/lib.rs +++ b/transports/plaintext/src/lib.rs @@ -22,23 +22,21 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -use crate::error::PlainTextError; +use crate::error::Error; use bytes::Bytes; use futures::future::BoxFuture; -use futures::future::{self, Ready}; use futures::prelude::*; -use libp2p_core::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}; +use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}; +use libp2p_core::UpgradeInfo; use libp2p_identity as identity; use libp2p_identity::PeerId; use libp2p_identity::PublicKey; -use log::debug; use std::{ io, iter, pin::Pin, task::{Context, Poll}, }; -use void::Void; mod error; mod handshake; @@ -48,72 +46,21 @@ mod proto { pub(crate) use self::structs::Exchange; } -/// `PlainText1Config` is an insecure connection handshake for testing purposes only. 
-/// -/// > **Note**: Given that `PlainText1Config` has no notion of exchanging peer identity information it is not compatible -/// > with the `libp2p_core::transport::upgrade::Builder` pattern. See -/// > [`PlainText2Config`](struct.PlainText2Config.html) if compatibility is needed. Even though not compatible with the -/// > Builder pattern one can still do an upgrade *manually*: -/// -/// ``` -/// # use libp2p_core::transport::{ Transport, memory::MemoryTransport }; -/// # use libp2p_plaintext::PlainText1Config; -/// # -/// MemoryTransport::default() -/// .and_then(move |io, endpoint| { -/// libp2p_core::upgrade::apply( -/// io, -/// PlainText1Config{}, -/// endpoint, -/// libp2p_core::transport::upgrade::Version::V1, -/// ) -/// }) -/// .map(|plaintext, _endpoint| { -/// unimplemented!(); -/// // let peer_id = somehow_derive_peer_id(); -/// // return (peer_id, plaintext); -/// }); -/// ``` -#[derive(Debug, Copy, Clone)] -pub struct PlainText1Config; - -impl UpgradeInfo for PlainText1Config { - type Info = &'static str; - type InfoIter = iter::Once; - - fn protocol_info(&self) -> Self::InfoIter { - iter::once("/plaintext/1.0.0") - } -} - -impl InboundUpgrade for PlainText1Config { - type Output = C; - type Error = Void; - type Future = Ready>; - - fn upgrade_inbound(self, i: C, _: Self::Info) -> Self::Future { - future::ready(Ok(i)) - } +/// [`Config`] is an insecure connection handshake for testing purposes only. 
+#[derive(Clone)] +pub struct Config { + local_public_key: identity::PublicKey, } -impl OutboundUpgrade for PlainText1Config { - type Output = C; - type Error = Void; - type Future = Ready>; - - fn upgrade_outbound(self, i: C, _: Self::Info) -> Self::Future { - future::ready(Ok(i)) +impl Config { + pub fn new(identity: &identity::Keypair) -> Self { + Self { + local_public_key: identity.public(), + } } } -/// `PlainText2Config` is an insecure connection handshake for testing purposes only, implementing -/// the libp2p plaintext connection handshake specification. -#[derive(Clone)] -pub struct PlainText2Config { - pub local_public_key: identity::PublicKey, -} - -impl UpgradeInfo for PlainText2Config { +impl UpgradeInfo for Config { type Info = &'static str; type InfoIter = iter::Once; @@ -122,12 +69,12 @@ impl UpgradeInfo for PlainText2Config { } } -impl InboundUpgrade for PlainText2Config +impl InboundConnectionUpgrade for Config where C: AsyncRead + AsyncWrite + Send + Unpin + 'static, { - type Output = (PeerId, PlainTextOutput); - type Error = PlainTextError; + type Output = (PeerId, Output); + type Error = Error; type Future = BoxFuture<'static, Result>; fn upgrade_inbound(self, socket: C, _: Self::Info) -> Self::Future { @@ -135,12 +82,12 @@ where } } -impl OutboundUpgrade for PlainText2Config +impl OutboundConnectionUpgrade for Config where C: AsyncRead + AsyncWrite + Send + Unpin + 'static, { - type Output = (PeerId, PlainTextOutput); - type Error = PlainTextError; + type Output = (PeerId, Output); + type Error = Error; type Future = BoxFuture<'static, Result>; fn upgrade_outbound(self, socket: C, _: Self::Info) -> Self::Future { @@ -148,20 +95,20 @@ where } } -impl PlainText2Config { - async fn handshake(self, socket: T) -> Result<(PeerId, PlainTextOutput), PlainTextError> +impl Config { + async fn handshake(self, socket: T) -> Result<(PeerId, Output), Error> where T: AsyncRead + AsyncWrite + Send + Unpin + 'static, { - debug!("Starting plaintext 
handshake."); - let (socket, remote, read_buffer) = handshake::handshake(socket, self).await?; - debug!("Finished plaintext handshake."); + tracing::debug!("Starting plaintext handshake."); + let (socket, remote_key, read_buffer) = handshake::handshake(socket, self).await?; + tracing::debug!("Finished plaintext handshake."); Ok(( - remote.peer_id, - PlainTextOutput { + remote_key.to_peer_id(), + Output { socket, - remote_key: remote.public_key, + remote_key, read_buffer, }, )) @@ -169,7 +116,7 @@ impl PlainText2Config { } /// Output of the plaintext protocol. -pub struct PlainTextOutput +pub struct Output where S: AsyncRead + AsyncWrite + Unpin, { @@ -183,7 +130,7 @@ where read_buffer: Bytes, } -impl AsyncRead for PlainTextOutput { +impl AsyncRead for Output { fn poll_read( mut self: Pin<&mut Self>, cx: &mut Context<'_>, @@ -199,7 +146,7 @@ impl AsyncRead for PlainTextOutput { } } -impl AsyncWrite for PlainTextOutput { +impl AsyncWrite for Output { fn poll_write( mut self: Pin<&mut Self>, cx: &mut Context<'_>, diff --git a/transports/plaintext/tests/smoke.rs b/transports/plaintext/tests/smoke.rs index 7147ed56686..f77f23d3ad3 100644 --- a/transports/plaintext/tests/smoke.rs +++ b/transports/plaintext/tests/smoke.rs @@ -19,25 +19,24 @@ // DEALINGS IN THE SOFTWARE. 
use futures::io::{AsyncReadExt, AsyncWriteExt}; -use libp2p_core::InboundUpgrade; +use libp2p_core::upgrade::InboundConnectionUpgrade; use libp2p_identity as identity; -use libp2p_plaintext::PlainText2Config; -use log::debug; +use libp2p_plaintext as plaintext; use quickcheck::QuickCheck; +use tracing_subscriber::EnvFilter; #[test] fn variable_msg_length() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); fn prop(msg: Vec) { let msg_to_send = msg.clone(); let msg_to_receive = msg; let server_id = identity::Keypair::generate_ed25519(); - let server_id_public = server_id.public(); - let client_id = identity::Keypair::generate_ed25519(); - let client_id_public = client_id.public(); let (server, client) = futures_ringbuf::Endpoint::pair(100, 100); @@ -46,14 +45,8 @@ fn variable_msg_length() { (received_client_id, mut server_channel), (received_server_id, mut client_channel), ) = futures::future::try_join( - PlainText2Config { - local_public_key: server_id_public, - } - .upgrade_inbound(server, ""), - PlainText2Config { - local_public_key: client_id_public, - } - .upgrade_inbound(client, ""), + plaintext::Config::new(&server_id).upgrade_inbound(server, ""), + plaintext::Config::new(&client_id).upgrade_inbound(client, ""), ) .await .unwrap(); @@ -62,18 +55,18 @@ fn variable_msg_length() { assert_eq!(received_client_id, client_id.public().to_peer_id()); let client_fut = async { - debug!("Client: writing message."); + tracing::debug!("Client: writing message."); client_channel .write_all(&msg_to_send) .await .expect("no error"); - debug!("Client: flushing channel."); + tracing::debug!("Client: flushing channel."); client_channel.flush().await.expect("no error"); }; let server_fut = async { let mut server_buffer = vec![0; msg_to_receive.len()]; - debug!("Server: reading message."); + tracing::debug!("Server: reading message."); server_channel .read_exact(&mut server_buffer) .await diff 
--git a/transports/pnet/CHANGELOG.md b/transports/pnet/CHANGELOG.md index 9a4324f8371..1fbc2d08807 100644 --- a/transports/pnet/CHANGELOG.md +++ b/transports/pnet/CHANGELOG.md @@ -1,4 +1,15 @@ -## 0.23.0 - unreleased +## 0.24.0 + + +## 0.23.1 + + + +## 0.23.0 - Raise MSRV to 1.65. See [PR 3715]. diff --git a/transports/pnet/Cargo.toml b/transports/pnet/Cargo.toml index 3c834079e93..77dda92321f 100644 --- a/transports/pnet/Cargo.toml +++ b/transports/pnet/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-pnet" edition = "2021" rust-version = { workspace = true } description = "Private swarm support for libp2p" -version = "0.23.0" +version = "0.24.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -11,23 +11,23 @@ keywords = ["peer-to-peer", "libp2p", "networking"] categories = ["network-programming", "asynchronous"] [dependencies] -futures = "0.3.28" -log = "0.4.8" +futures = "0.3.30" salsa20 = "0.10" sha3 = "0.10" +tracing = "0.1.37" rand = "0.8" -pin-project = "1.0.2" +pin-project = "1.1.3" [dev-dependencies] -libp2p-core = { workspace = true, features = ["rsa", "ecdsa", "secp256k1"] } -libp2p-identity = { workspace = true, features = ["ed25519"] } +libp2p-core = { workspace = true } +libp2p-identity = { workspace = true, features = ["ed25519", "rsa", "ecdsa","secp256k1", "rand"] } libp2p-noise = { workspace = true } libp2p-swarm = { workspace = true, features = ["tokio"] } libp2p-tcp = { workspace = true, features = ["tokio"] } libp2p-websocket = { workspace = true } libp2p-yamux = { workspace = true } quickcheck = { workspace = true } -tokio = { version = "1.28.0", features = ["full"] } +tokio = { version = "1.35.1", features = ["full"] } # Passing arguments to the docsrs builder in order to properly document cfg's. 
# More information: https://docs.rs/about/builds#cross-compiling @@ -35,3 +35,6 @@ tokio = { version = "1.28.0", features = ["full"] } all-features = true rustdoc-args = ["--cfg", "docsrs"] rustc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true diff --git a/transports/pnet/src/crypt_writer.rs b/transports/pnet/src/crypt_writer.rs index c5993548239..06f932fbe71 100644 --- a/transports/pnet/src/crypt_writer.rs +++ b/transports/pnet/src/crypt_writer.rs @@ -23,7 +23,6 @@ use futures::{ ready, task::{Context, Poll}, }; -use log::trace; use pin_project::pin_project; use salsa20::{cipher::StreamCipher, XSalsa20}; use std::{fmt, pin::Pin}; @@ -120,7 +119,7 @@ impl AsyncWrite for CryptWriter { let res = Pin::new(&mut *this.buf).poll_write(cx, buf); if let Poll::Ready(Ok(count)) = res { this.cipher.apply_keystream(&mut this.buf[0..count]); - trace!("encrypted {} bytes", count); + tracing::trace!(bytes=%count, "encrypted bytes"); } else { debug_assert!(false); }; diff --git a/transports/pnet/src/lib.rs b/transports/pnet/src/lib.rs index 15f42556c62..083ffff36a3 100644 --- a/transports/pnet/src/lib.rs +++ b/transports/pnet/src/lib.rs @@ -29,7 +29,6 @@ mod crypt_writer; use crypt_writer::CryptWriter; use futures::prelude::*; -use log::trace; use pin_project::pin_project; use rand::RngCore; use salsa20::{ @@ -159,6 +158,7 @@ impl fmt::Display for Fingerprint { /// Error when parsing a PreSharedKey #[derive(Clone, Debug, PartialEq, Eq)] +#[allow(clippy::enum_variant_names)] // Maybe fix at some stage, not important now. 
pub enum KeyParseError { /// file does not have the expected structure InvalidKeyFile, @@ -209,7 +209,7 @@ impl PnetConfig { where TSocket: AsyncRead + AsyncWrite + Send + Unpin + 'static, { - trace!("exchanging nonces"); + tracing::trace!("exchanging nonces"); let mut local_nonce = [0u8; NONCE_SIZE]; let mut remote_nonce = [0u8; NONCE_SIZE]; rand::thread_rng().fill_bytes(&mut local_nonce); @@ -222,7 +222,7 @@ impl PnetConfig { .read_exact(&mut remote_nonce) .await .map_err(PnetError::HandshakeError)?; - trace!("setting up ciphers"); + tracing::trace!("setting up ciphers"); let write_cipher = XSalsa20::new(&self.key.0.into(), &local_nonce.into()); let read_cipher = XSalsa20::new(&self.key.0.into(), &remote_nonce.into()); Ok(PnetOutput::new(socket, write_cipher, read_cipher)) @@ -256,9 +256,9 @@ impl AsyncRead for PnetOutput { let this = self.project(); let result = this.inner.get_pin_mut().poll_read(cx, buf); if let Poll::Ready(Ok(size)) = &result { - trace!("read {} bytes", size); + tracing::trace!(bytes=%size, "read bytes"); this.read_cipher.apply_keystream(&mut buf[..*size]); - trace!("decrypted {} bytes", size); + tracing::trace!(bytes=%size, "decrypted bytes"); } result } diff --git a/transports/pnet/tests/smoke.rs b/transports/pnet/tests/smoke.rs index a7635c00ca3..79ffaeab447 100644 --- a/transports/pnet/tests/smoke.rs +++ b/transports/pnet/tests/smoke.rs @@ -6,7 +6,7 @@ use libp2p_core::upgrade::Version; use libp2p_core::Transport; use libp2p_core::{multiaddr::Protocol, Multiaddr}; use libp2p_pnet::{PnetConfig, PreSharedKey}; -use libp2p_swarm::{keep_alive, NetworkBehaviour, Swarm, SwarmBuilder, SwarmEvent}; +use libp2p_swarm::{dummy, Config, NetworkBehaviour, Swarm, SwarmEvent}; const TIMEOUT: Duration = Duration::from_secs(5); @@ -98,7 +98,7 @@ where assert_eq!(&outbound_peer_id, swarm1.local_peer_id()); } -fn make_swarm(transport: T, pnet: PnetConfig) -> Swarm +fn make_swarm(transport: T, pnet: PnetConfig) -> Swarm where T: Transport + Send + Unpin + 
'static, ::Error: Send + Sync + 'static, @@ -113,12 +113,12 @@ where .authenticate(libp2p_noise::Config::new(&identity).unwrap()) .multiplex(libp2p_yamux::Config::default()) .boxed(); - SwarmBuilder::with_tokio_executor( + Swarm::new( transport, - keep_alive::Behaviour, + dummy::Behaviour, identity.public().to_peer_id(), + Config::with_tokio_executor(), ) - .build() } async fn listen_on(swarm: &mut Swarm, addr: Multiaddr) -> Multiaddr { diff --git a/transports/quic/CHANGELOG.md b/transports/quic/CHANGELOG.md index db50b7b8a31..3c34a1989f9 100644 --- a/transports/quic/CHANGELOG.md +++ b/transports/quic/CHANGELOG.md @@ -1,9 +1,65 @@ -## 0.8.0-alpha - unreleased +## 0.10.2 + +- Change `max_idle_timeout`to 10s. + See [PR 4965](https://github.com/libp2p/rust-libp2p/pull/4965). + +## 0.10.1 + +- Allow disabling path MTU discovery. + See [PR 4823](https://github.com/libp2p/rust-libp2p/pull/4823). + +## 0.10.0 + +- Improve hole-punch timing. + This should improve success rates for hole-punching QUIC connections. + See [PR 4549](https://github.com/libp2p/rust-libp2p/pull/4549). +- Remove deprecated `Error::EndpointDriverCrashed` variant. + See [PR 4738](https://github.com/libp2p/rust-libp2p/pull/4738). + +## 0.9.3 + +- No longer report error when explicit closing of a QUIC endpoint succeeds. + See [PR 4621]. + +- Support QUIC stateless resets for supported `libp2p_identity::Keypair`s. See [PR 4554]. + +[PR 4621]: https://github.com/libp2p/rust-libp2p/pull/4621 +[PR 4554]: https://github.com/libp2p/rust-libp2p/pull/4554 + +## 0.9.2 + +- Cut stable release. + +## 0.9.2-alpha + +- Add support for reusing an existing socket when dialing localhost address. + See [PR 4304]. + +[PR 4304]: https://github.com/libp2p/rust-libp2p/pull/4304 + +## 0.9.1-alpha + +- Allow listening on ipv4 and ipv6 separately. + See [PR 4289]. + +[PR 4289]: https://github.com/libp2p/rust-libp2p/pull/4289 + +## 0.9.0-alpha + +- Use `quinn` instead of `quinn-proto`. + See [PR 3454]. 
+ +[PR 3454]: https://github.com/libp2p/rust-libp2p/pull/3454 + +## 0.8.0-alpha - Raise MSRV to 1.65. See [PR 3715]. +- Add hole punching support by implementing `Transport::dial_as_listener`. See [PR 3964]. + [PR 3715]: https://github.com/libp2p/rust-libp2p/pull/3715 +[PR 3964]: https://github.com/libp2p/rust-libp2p/pull/3964 ## 0.7.0-alpha.3 diff --git a/transports/quic/Cargo.toml b/transports/quic/Cargo.toml index 1b491ce0c37..c959ab09f0f 100644 --- a/transports/quic/Cargo.toml +++ b/transports/quic/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "libp2p-quic" -version = "0.8.0-alpha" +version = "0.10.2" authors = ["Parity Technologies "] edition = "2021" rust-version = { workspace = true } @@ -10,24 +10,26 @@ license = "MIT" [dependencies] async-std = { version = "1.12.0", optional = true } -bytes = "1.4.0" -futures = "0.3.28" +bytes = "1.5.0" +futures = "0.3.30" futures-timer = "3.0.2" -if-watch = "3.0.1" +if-watch = "3.2.0" libp2p-core = { workspace = true } libp2p-tls = { workspace = true } libp2p-identity = { workspace = true } -log = "0.4" parking_lot = "0.12.0" -quinn-proto = { version = "0.9.3", default-features = false, features = ["tls-rustls"] } +quinn = { version = "0.10.2", default-features = false, features = ["tls-rustls", "futures-io"] } rand = "0.8.5" -rustls = { version = "0.20.2", default-features = false } -thiserror = "1.0.40" -tokio = { version = "1.28.0", default-features = false, features = ["net", "rt"], optional = true } +rustls = { version = "0.21.9", default-features = false } +thiserror = "1.0.51" +tokio = { version = "1.35.1", default-features = false, features = ["net", "rt", "time"], optional = true } +tracing = "0.1.37" +socket2 = "0.5.5" +ring = "0.16.20" [features] -tokio = ["dep:tokio", "if-watch/tokio"] -async-std = ["dep:async-std", "if-watch/smol"] +tokio = ["dep:tokio", "if-watch/tokio", "quinn/runtime-tokio"] +async-std = ["dep:async-std", "if-watch/smol", "quinn/runtime-async-std"] # Passing arguments to the docsrs builder 
in order to properly document cfg's. # More information: https://docs.rs/about/builds#cross-compiling @@ -38,14 +40,18 @@ rustc-args = ["--cfg", "docsrs"] [dev-dependencies] async-std = { version = "1.12.0", features = ["attributes"] } -env_logger = "0.10.0" -libp2p-muxer-test-harness = { workspace = true } +libp2p-identity = { workspace = true, features = ["rand"] } +libp2p-muxer-test-harness = { path = "../../muxers/test-harness" } libp2p-noise = { workspace = true } libp2p-tcp = { workspace = true, features = ["async-io"] } libp2p-yamux = { workspace = true } quickcheck = "1" -tokio = { version = "1.28.0", features = ["macros", "rt-multi-thread", "time"] } +tokio = { version = "1.35.1", features = ["macros", "rt-multi-thread", "time"] } +tracing-subscriber = { version = "0.3", features = ["env-filter"] } [[test]] name = "stream_compliance" required-features = ["async-std"] + +[lints] +workspace = true diff --git a/transports/quic/src/config.rs b/transports/quic/src/config.rs new file mode 100644 index 00000000000..540f13e726b --- /dev/null +++ b/transports/quic/src/config.rs @@ -0,0 +1,165 @@ +// Copyright 2017-2020 Parity Technologies (UK) Ltd. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +use quinn::{MtuDiscoveryConfig, VarInt}; +use std::{sync::Arc, time::Duration}; + +/// Config for the transport. +#[derive(Clone)] +pub struct Config { + /// Timeout for the initial handshake when establishing a connection. + /// The actual timeout is the minimum of this and the [`Config::max_idle_timeout`]. + pub handshake_timeout: Duration, + /// Maximum duration of inactivity in ms to accept before timing out the connection. + pub max_idle_timeout: u32, + /// Period of inactivity before sending a keep-alive packet. + /// Must be set lower than the idle_timeout of both + /// peers to be effective. + /// + /// See [`quinn::TransportConfig::keep_alive_interval`] for more + /// info. + pub keep_alive_interval: Duration, + /// Maximum number of incoming bidirectional streams that may be open + /// concurrently by the remote peer. + pub max_concurrent_stream_limit: u32, + + /// Max unacknowledged data in bytes that may be send on a single stream. + pub max_stream_data: u32, + + /// Max unacknowledged data in bytes that may be send in total on all streams + /// of a connection. + pub max_connection_data: u32, + + /// Support QUIC version draft-29 for dialing and listening. + /// + /// Per default only QUIC Version 1 / [`libp2p_core::multiaddr::Protocol::QuicV1`] + /// is supported. + /// + /// If support for draft-29 is enabled servers support draft-29 and version 1 on all + /// QUIC listening addresses. + /// As client the version is chosen based on the remote's address. + pub support_draft_29: bool, + + /// TLS client config for the inner [`quinn::ClientConfig`]. + client_tls_config: Arc, + /// TLS server config for the inner [`quinn::ServerConfig`]. 
+ server_tls_config: Arc, + /// Libp2p identity of the node. + keypair: libp2p_identity::Keypair, + + /// Parameters governing MTU discovery. See [`MtuDiscoveryConfig`] for details. + mtu_discovery_config: Option, +} + +impl Config { + /// Creates a new configuration object with default values. + pub fn new(keypair: &libp2p_identity::Keypair) -> Self { + let client_tls_config = Arc::new(libp2p_tls::make_client_config(keypair, None).unwrap()); + let server_tls_config = Arc::new(libp2p_tls::make_server_config(keypair).unwrap()); + Self { + client_tls_config, + server_tls_config, + support_draft_29: false, + handshake_timeout: Duration::from_secs(5), + max_idle_timeout: 10 * 1000, + max_concurrent_stream_limit: 256, + keep_alive_interval: Duration::from_secs(5), + max_connection_data: 15_000_000, + + // Ensure that one stream is not consuming the whole connection. + max_stream_data: 10_000_000, + keypair: keypair.clone(), + mtu_discovery_config: Some(Default::default()), + } + } + + /// Disable MTU path discovery (it is enabled by default). + pub fn disable_path_mtu_discovery(mut self) -> Self { + self.mtu_discovery_config = None; + self + } +} + +/// Represents the inner configuration for [`quinn`]. +#[derive(Debug, Clone)] +pub(crate) struct QuinnConfig { + pub(crate) client_config: quinn::ClientConfig, + pub(crate) server_config: quinn::ServerConfig, + pub(crate) endpoint_config: quinn::EndpointConfig, +} + +impl From for QuinnConfig { + fn from(config: Config) -> QuinnConfig { + let Config { + client_tls_config, + server_tls_config, + max_idle_timeout, + max_concurrent_stream_limit, + keep_alive_interval, + max_connection_data, + max_stream_data, + support_draft_29, + handshake_timeout: _, + keypair, + mtu_discovery_config, + } = config; + let mut transport = quinn::TransportConfig::default(); + // Disable uni-directional streams. 
+ transport.max_concurrent_uni_streams(0u32.into()); + transport.max_concurrent_bidi_streams(max_concurrent_stream_limit.into()); + // Disable datagrams. + transport.datagram_receive_buffer_size(None); + transport.keep_alive_interval(Some(keep_alive_interval)); + transport.max_idle_timeout(Some(VarInt::from_u32(max_idle_timeout).into())); + transport.allow_spin(false); + transport.stream_receive_window(max_stream_data.into()); + transport.receive_window(max_connection_data.into()); + transport.mtu_discovery_config(mtu_discovery_config); + let transport = Arc::new(transport); + + let mut server_config = quinn::ServerConfig::with_crypto(server_tls_config); + server_config.transport = Arc::clone(&transport); + // Disables connection migration. + // Long-term this should be enabled, however we then need to handle address change + // on connections in the `Connection`. + server_config.migration(false); + + let mut client_config = quinn::ClientConfig::new(client_tls_config); + client_config.transport_config(transport); + + let mut endpoint_config = keypair + .derive_secret(b"libp2p quic stateless reset key") + .map(|secret| { + let reset_key = Arc::new(ring::hmac::Key::new(ring::hmac::HMAC_SHA256, &secret)); + quinn::EndpointConfig::new(reset_key) + }) + .unwrap_or_default(); + + if !support_draft_29 { + endpoint_config.supported_versions(vec![1]); + } + + QuinnConfig { + client_config, + server_config, + endpoint_config, + } + } +} diff --git a/transports/quic/src/connection.rs b/transports/quic/src/connection.rs index 0e5727dcf21..783258a0130 100644 --- a/transports/quic/src/connection.rs +++ b/transports/quic/src/connection.rs @@ -19,409 +19,113 @@ // DEALINGS IN THE SOFTWARE. 
mod connecting; -mod substream; +mod stream; -use crate::{ - endpoint::{self, ToEndpoint}, - Error, -}; pub use connecting::Connecting; -pub use substream::Substream; -use substream::{SubstreamState, WriteState}; +pub use stream::Stream; + +use crate::{ConnectionError, Error}; -use futures::{channel::mpsc, ready, FutureExt, StreamExt}; -use futures_timer::Delay; +use futures::{future::BoxFuture, FutureExt}; use libp2p_core::muxing::{StreamMuxer, StreamMuxerEvent}; -use parking_lot::Mutex; use std::{ - any::Any, - collections::HashMap, - net::SocketAddr, pin::Pin, - sync::Arc, - task::{Context, Poll, Waker}, - time::Instant, + task::{Context, Poll}, }; /// State for a single opened QUIC connection. -#[derive(Debug)] pub struct Connection { - /// State shared with the substreams. - state: Arc>, - /// Channel to the [`endpoint::Driver`] that drives the [`quinn_proto::Endpoint`] that - /// this connection belongs to. - endpoint_channel: endpoint::Channel, - /// Pending message to be sent to the [`quinn_proto::Endpoint`] in the [`endpoint::Driver`]. - pending_to_endpoint: Option, - /// Events that the [`quinn_proto::Endpoint`] will send in destination to our local - /// [`quinn_proto::Connection`]. - from_endpoint: mpsc::Receiver, - /// Identifier for this connection according to the [`quinn_proto::Endpoint`]. - /// Used when sending messages to the endpoint. - connection_id: quinn_proto::ConnectionHandle, - /// `Future` that triggers at the [`Instant`] that [`quinn_proto::Connection::poll_timeout`] - /// indicates. - next_timeout: Option<(Delay, Instant)>, + /// Underlying connection. + connection: quinn::Connection, + /// Future for accepting a new incoming bidirectional stream. + incoming: Option< + BoxFuture<'static, Result<(quinn::SendStream, quinn::RecvStream), quinn::ConnectionError>>, + >, + /// Future for opening a new outgoing bidirectional stream. 
+ outgoing: Option< + BoxFuture<'static, Result<(quinn::SendStream, quinn::RecvStream), quinn::ConnectionError>>, + >, + /// Future to wait for the connection to be closed. + closing: Option>, } impl Connection { /// Build a [`Connection`] from raw components. /// - /// This function assumes that there exists a [`Driver`](super::endpoint::Driver) - /// that will process the messages sent to `EndpointChannel::to_endpoint` and send us messages - /// on `from_endpoint`. - /// - /// `connection_id` is used to identify the local connection in the messages sent to - /// `to_endpoint`. - /// - /// This function assumes that the [`quinn_proto::Connection`] is completely fresh and none of + /// This function assumes that the [`quinn::Connection`] is completely fresh and none of /// its methods has ever been called. Failure to comply might lead to logic errors and panics. - pub(crate) fn from_quinn_connection( - endpoint_channel: endpoint::Channel, - connection: quinn_proto::Connection, - connection_id: quinn_proto::ConnectionHandle, - from_endpoint: mpsc::Receiver, - ) -> Self { - let state = State { - connection, - substreams: HashMap::new(), - poll_connection_waker: None, - poll_inbound_waker: None, - poll_outbound_waker: None, - }; + fn new(connection: quinn::Connection) -> Self { Self { - endpoint_channel, - pending_to_endpoint: None, - next_timeout: None, - from_endpoint, - connection_id, - state: Arc::new(Mutex::new(state)), - } - } - - /// The address that the local socket is bound to. - pub(crate) fn local_addr(&self) -> &SocketAddr { - self.endpoint_channel.socket_addr() - } - - /// Returns the address of the node we're connected to. - pub(crate) fn remote_addr(&self) -> SocketAddr { - self.state.lock().connection.remote_address() - } - - /// Identity of the remote peer inferred from the handshake. - /// - /// `None` if the handshake is not complete yet, i.e. 
[`Self::poll_event`] - /// has not yet reported a [`quinn_proto::Event::Connected`] - fn peer_identity(&self) -> Option> { - self.state - .lock() - .connection - .crypto_session() - .peer_identity() - } - - /// Polls the connection for an event that happened on it. - /// - /// `quinn::proto::Connection` is polled in the order instructed in their docs: - /// 1. [`quinn_proto::Connection::poll_transmit`] - /// 2. [`quinn_proto::Connection::poll_timeout`] - /// 3. [`quinn_proto::Connection::poll_endpoint_events`] - /// 4. [`quinn_proto::Connection::poll`] - fn poll_event(&mut self, cx: &mut Context<'_>) -> Poll> { - let mut inner = self.state.lock(); - loop { - // Sending the pending event to the endpoint. If the endpoint is too busy, we just - // stop the processing here. - // We don't deliver substream-related events to the user as long as - // `to_endpoint` is full. This should propagate the back-pressure of `to_endpoint` - // being full to the user. - if let Some(to_endpoint) = self.pending_to_endpoint.take() { - match self.endpoint_channel.try_send(to_endpoint, cx) { - Ok(Ok(())) => {} - Ok(Err(to_endpoint)) => { - self.pending_to_endpoint = Some(to_endpoint); - return Poll::Pending; - } - Err(endpoint::Disconnected {}) => { - return Poll::Ready(None); - } - } - } - - match self.from_endpoint.poll_next_unpin(cx) { - Poll::Ready(Some(event)) => { - inner.connection.handle_event(event); - continue; - } - Poll::Ready(None) => { - return Poll::Ready(None); - } - Poll::Pending => {} - } - - // The maximum amount of segments which can be transmitted in a single Transmit - // if a platform supports Generic Send Offload (GSO). - // Set to 1 for now since not all platforms support GSO. - // TODO: Fix for platforms that support GSO. - let max_datagrams = 1; - // Poll the connection for packets to send on the UDP socket and try to send them on - // `to_endpoint`. 
- if let Some(transmit) = inner - .connection - .poll_transmit(Instant::now(), max_datagrams) - { - // TODO: ECN bits not handled - self.pending_to_endpoint = Some(ToEndpoint::SendUdpPacket(transmit)); - continue; - } - - match inner.connection.poll_timeout() { - Some(timeout) => match self.next_timeout { - Some((_, when)) if when == timeout => {} - _ => { - let now = Instant::now(); - // 0ns if now > when - let duration = timeout.duration_since(now); - let next_timeout = Delay::new(duration); - self.next_timeout = Some((next_timeout, timeout)) - } - }, - None => self.next_timeout = None, - } - - if let Some((timeout, when)) = self.next_timeout.as_mut() { - if timeout.poll_unpin(cx).is_ready() { - inner.connection.handle_timeout(*when); - continue; - } - } - - // The connection also needs to be able to send control messages to the endpoint. This is - // handled here, and we try to send them on `to_endpoint` as well. - if let Some(event) = inner.connection.poll_endpoint_events() { - let connection_id = self.connection_id; - self.pending_to_endpoint = Some(ToEndpoint::ProcessConnectionEvent { - connection_id, - event, - }); - continue; - } - - // The final step consists in returning the events related to the various substreams. 
- if let Some(ev) = inner.connection.poll() { - return Poll::Ready(Some(ev)); - } - - return Poll::Pending; + connection, + incoming: None, + outgoing: None, + closing: None, } } } impl StreamMuxer for Connection { - type Substream = Substream; + type Substream = Stream; type Error = Error; - fn poll( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll> { - while let Poll::Ready(event) = self.poll_event(cx) { - let mut inner = self.state.lock(); - let event = match event { - Some(event) => event, - None => return Poll::Ready(Err(Error::EndpointDriverCrashed)), - }; - match event { - quinn_proto::Event::Connected | quinn_proto::Event::HandshakeDataReady => { - debug_assert!( - false, - "Unexpected event {event:?} on established QUIC connection" - ); - } - quinn_proto::Event::ConnectionLost { reason } => { - inner - .connection - .close(Instant::now(), From::from(0u32), Default::default()); - inner.substreams.values_mut().for_each(|s| s.wake_all()); - return Poll::Ready(Err(Error::Connection(reason.into()))); - } - quinn_proto::Event::Stream(quinn_proto::StreamEvent::Opened { - dir: quinn_proto::Dir::Bi, - }) => { - if let Some(waker) = inner.poll_outbound_waker.take() { - waker.wake(); - } - } - quinn_proto::Event::Stream(quinn_proto::StreamEvent::Available { - dir: quinn_proto::Dir::Bi, - }) => { - if let Some(waker) = inner.poll_inbound_waker.take() { - waker.wake(); - } - } - quinn_proto::Event::Stream(quinn_proto::StreamEvent::Readable { id }) => { - if let Some(substream) = inner.substreams.get_mut(&id) { - if let Some(waker) = substream.read_waker.take() { - waker.wake(); - } - } - } - quinn_proto::Event::Stream(quinn_proto::StreamEvent::Writable { id }) => { - if let Some(substream) = inner.substreams.get_mut(&id) { - if let Some(waker) = substream.write_waker.take() { - waker.wake(); - } - } - } - quinn_proto::Event::Stream(quinn_proto::StreamEvent::Finished { id }) => { - if let Some(substream) = inner.substreams.get_mut(&id) { - if matches!( - 
substream.write_state, - WriteState::Open | WriteState::Closing - ) { - substream.write_state = WriteState::Closed; - } - if let Some(waker) = substream.write_waker.take() { - waker.wake(); - } - if let Some(waker) = substream.close_waker.take() { - waker.wake(); - } - } - } - quinn_proto::Event::Stream(quinn_proto::StreamEvent::Stopped { - id, - error_code: _, - }) => { - if let Some(substream) = inner.substreams.get_mut(&id) { - substream.write_state = WriteState::Stopped; - if let Some(waker) = substream.write_waker.take() { - waker.wake(); - } - if let Some(waker) = substream.close_waker.take() { - waker.wake(); - } - } - } - quinn_proto::Event::DatagramReceived - | quinn_proto::Event::Stream(quinn_proto::StreamEvent::Available { - dir: quinn_proto::Dir::Uni, - }) - | quinn_proto::Event::Stream(quinn_proto::StreamEvent::Opened { - dir: quinn_proto::Dir::Uni, - }) => { - unreachable!("We don't use datagrams or unidirectional streams.") - } - } - } - // TODO: If connection migration is enabled (currently disabled) address - // change on the connection needs to be handled. 
- - self.state.lock().poll_connection_waker = Some(cx.waker().clone()); - Poll::Pending - } - fn poll_inbound( self: Pin<&mut Self>, cx: &mut Context<'_>, ) -> Poll> { - let mut inner = self.state.lock(); + let this = self.get_mut(); - let substream_id = match inner.connection.streams().accept(quinn_proto::Dir::Bi) { - Some(id) => { - inner.poll_inbound_waker = None; - id - } - None => { - inner.poll_inbound_waker = Some(cx.waker().clone()); - return Poll::Pending; - } - }; - inner.substreams.insert(substream_id, Default::default()); - let substream = Substream::new(substream_id, self.state.clone()); + let incoming = this.incoming.get_or_insert_with(|| { + let connection = this.connection.clone(); + async move { connection.accept_bi().await }.boxed() + }); - Poll::Ready(Ok(substream)) + let (send, recv) = futures::ready!(incoming.poll_unpin(cx)).map_err(ConnectionError)?; + this.incoming.take(); + let stream = Stream::new(send, recv); + Poll::Ready(Ok(stream)) } fn poll_outbound( self: Pin<&mut Self>, cx: &mut Context<'_>, ) -> Poll> { - let mut inner = self.state.lock(); - let substream_id = match inner.connection.streams().open(quinn_proto::Dir::Bi) { - Some(id) => { - inner.poll_outbound_waker = None; - id - } - None => { - inner.poll_outbound_waker = Some(cx.waker().clone()); - return Poll::Pending; - } - }; - inner.substreams.insert(substream_id, Default::default()); - let substream = Substream::new(substream_id, self.state.clone()); - Poll::Ready(Ok(substream)) - } + let this = self.get_mut(); - fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut inner = self.state.lock(); - if inner.connection.is_drained() { - return Poll::Ready(Ok(())); - } + let outgoing = this.outgoing.get_or_insert_with(|| { + let connection = this.connection.clone(); + async move { connection.open_bi().await }.boxed() + }); - for substream in inner.substreams.keys().cloned().collect::>() { - let _ = inner.connection.send_stream(substream).finish(); - } - - 
if inner.connection.streams().send_streams() == 0 && !inner.connection.is_closed() { - inner - .connection - .close(Instant::now(), From::from(0u32), Default::default()) - } - drop(inner); - - loop { - match ready!(self.poll_event(cx)) { - Some(quinn_proto::Event::ConnectionLost { .. }) => return Poll::Ready(Ok(())), - None => return Poll::Ready(Err(Error::EndpointDriverCrashed)), - _ => {} - } - } + let (send, recv) = futures::ready!(outgoing.poll_unpin(cx)).map_err(ConnectionError)?; + this.outgoing.take(); + let stream = Stream::new(send, recv); + Poll::Ready(Ok(stream)) } -} -impl Drop for Connection { - fn drop(&mut self) { - let to_endpoint = ToEndpoint::ProcessConnectionEvent { - connection_id: self.connection_id, - event: quinn_proto::EndpointEvent::drained(), - }; - self.endpoint_channel.send_on_drop(to_endpoint); + fn poll( + self: Pin<&mut Self>, + _cx: &mut Context<'_>, + ) -> Poll> { + // TODO: If connection migration is enabled (currently disabled) address + // change on the connection needs to be handled. + Poll::Pending } -} -/// Mutex-protected state of [`Connection`]. -#[derive(Debug)] -pub struct State { - /// The QUIC inner state machine for this specific connection. - connection: quinn_proto::Connection, + fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let this = self.get_mut(); - /// State of all the substreams that the muxer reports as open. - pub substreams: HashMap, + let closing = this.closing.get_or_insert_with(|| { + this.connection.close(From::from(0u32), &[]); + let connection = this.connection.clone(); + async move { connection.closed().await }.boxed() + }); - /// Waker to wake if a new outbound substream is opened. - pub poll_outbound_waker: Option, - /// Waker to wake if a new inbound substream was happened. - pub poll_inbound_waker: Option, - /// Waker to wake if the connection should be polled again. 
- pub poll_connection_waker: Option, -} + match futures::ready!(closing.poll_unpin(cx)) { + // Expected error given that `connection.close` was called above. + quinn::ConnectionError::LocallyClosed => {} + error => return Poll::Ready(Err(Error::Connection(ConnectionError(error)))), + }; -impl State { - fn unchecked_substream_state(&mut self, id: quinn_proto::StreamId) -> &mut SubstreamState { - self.substreams - .get_mut(&id) - .expect("Substream should be known.") + Poll::Ready(Ok(())) } } diff --git a/transports/quic/src/connection/connecting.rs b/transports/quic/src/connection/connecting.rs index e9a7d3e5f2c..141f0b5542e 100644 --- a/transports/quic/src/connection/connecting.rs +++ b/transports/quic/src/connection/connecting.rs @@ -20,9 +20,12 @@ //! Future that drives a QUIC connection until is has performed its TLS handshake. -use crate::{Connection, Error}; +use crate::{Connection, ConnectionError, Error}; -use futures::prelude::*; +use futures::{ + future::{select, Either, FutureExt, Select}, + prelude::*, +}; use futures_timer::Delay; use libp2p_identity::PeerId; use std::{ @@ -34,64 +37,46 @@ use std::{ /// A QUIC connection currently being negotiated. #[derive(Debug)] pub struct Connecting { - connection: Option, - timeout: Delay, + connecting: Select, } impl Connecting { - pub(crate) fn new(connection: Connection, timeout: Duration) -> Self { + pub(crate) fn new(connection: quinn::Connecting, timeout: Duration) -> Self { Connecting { - connection: Some(connection), - timeout: Delay::new(timeout), + connecting: select(connection, Delay::new(timeout)), } } } +impl Connecting { + /// Returns the address of the node we're connected to. + /// Panics if the connection is still handshaking. 
+ fn remote_peer_id(connection: &quinn::Connection) -> PeerId { + let identity = connection + .peer_identity() + .expect("connection got identity because it passed TLS handshake; qed"); + let certificates: Box> = + identity.downcast().expect("we rely on rustls feature; qed"); + let end_entity = certificates + .first() + .expect("there should be exactly one certificate; qed"); + let p2p_cert = libp2p_tls::certificate::parse(end_entity) + .expect("the certificate was validated during TLS handshake; qed"); + p2p_cert.peer_id() + } +} + impl Future for Connecting { type Output = Result<(PeerId, Connection), Error>; fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let connection = self - .connection - .as_mut() - .expect("Future polled after it has completed"); - - loop { - let event = match connection.poll_event(cx) { - Poll::Ready(Some(event)) => event, - Poll::Ready(None) => return Poll::Ready(Err(Error::EndpointDriverCrashed)), - Poll::Pending => { - return self - .timeout - .poll_unpin(cx) - .map(|()| Err(Error::HandshakeTimedOut)); - } - }; - match event { - quinn_proto::Event::Connected => { - // Parse the remote's Id identity from the certificate. 
- let identity = connection - .peer_identity() - .expect("connection got identity because it passed TLS handshake; qed"); - let certificates: Box> = - identity.downcast().expect("we rely on rustls feature; qed"); - let end_entity = certificates - .get(0) - .expect("there should be exactly one certificate; qed"); - let p2p_cert = libp2p_tls::certificate::parse(end_entity) - .expect("the certificate was validated during TLS handshake; qed"); - let peer_id = p2p_cert.peer_id(); + let connection = match futures::ready!(self.connecting.poll_unpin(cx)) { + Either::Right(_) => return Poll::Ready(Err(Error::HandshakeTimedOut)), + Either::Left((connection, _)) => connection.map_err(ConnectionError)?, + }; - return Poll::Ready(Ok((peer_id, self.connection.take().unwrap()))); - } - quinn_proto::Event::ConnectionLost { reason } => { - return Poll::Ready(Err(Error::Connection(reason.into()))) - } - quinn_proto::Event::HandshakeDataReady | quinn_proto::Event::Stream(_) => {} - quinn_proto::Event::DatagramReceived => { - debug_assert!(false, "Datagrams are not supported") - } - } - } + let peer_id = Self::remote_peer_id(&connection); + let muxer = Connection::new(connection); + Poll::Ready(Ok((peer_id, muxer))) } } diff --git a/transports/quic/src/connection/stream.rs b/transports/quic/src/connection/stream.rs new file mode 100644 index 00000000000..b0c505bf856 --- /dev/null +++ b/transports/quic/src/connection/stream.rs @@ -0,0 +1,86 @@ +// Copyright 2022 Protocol Labs. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. 
+ +use std::{ + io::{self}, + pin::Pin, + task::{Context, Poll}, +}; + +use futures::{AsyncRead, AsyncWrite}; + +/// A single stream on a connection +pub struct Stream { + /// A send part of the stream + send: quinn::SendStream, + /// A receive part of the stream + recv: quinn::RecvStream, + /// Whether the stream is closed or not + close_result: Option>, +} + +impl Stream { + pub(super) fn new(send: quinn::SendStream, recv: quinn::RecvStream) -> Self { + Self { + send, + recv, + close_result: None, + } + } +} + +impl AsyncRead for Stream { + fn poll_read( + mut self: Pin<&mut Self>, + cx: &mut Context, + buf: &mut [u8], + ) -> Poll> { + if let Some(close_result) = self.close_result { + if close_result.is_err() { + return Poll::Ready(Ok(0)); + } + } + Pin::new(&mut self.recv).poll_read(cx, buf) + } +} + +impl AsyncWrite for Stream { + fn poll_write( + mut self: Pin<&mut Self>, + cx: &mut Context, + buf: &[u8], + ) -> Poll> { + Pin::new(&mut self.send).poll_write(cx, buf) + } + + fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + Pin::new(&mut self.send).poll_flush(cx) + } + + fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + if let Some(close_result) = self.close_result { + // For some reason poll_close needs to be 'fuse'able + return Poll::Ready(close_result.map_err(Into::into)); + } + let close_result = futures::ready!(Pin::new(&mut self.send).poll_close(cx)); + self.close_result = Some(close_result.as_ref().map_err(|e| e.kind()).copied()); + Poll::Ready(close_result) + } +} diff --git a/transports/quic/src/connection/substream.rs b/transports/quic/src/connection/substream.rs deleted file mode 100644 index b3a82542e9c..00000000000 --- a/transports/quic/src/connection/substream.rs +++ /dev/null @@ -1,257 +0,0 @@ -// Copyright 2022 Protocol Labs. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - -use std::{ - io::{self, Write}, - pin::Pin, - sync::Arc, - task::{Context, Poll, Waker}, -}; - -use futures::{AsyncRead, AsyncWrite}; -use parking_lot::Mutex; - -use super::State; - -/// Wakers for the [`AsyncRead`] and [`AsyncWrite`] on a substream. -#[derive(Debug, Default, Clone)] -pub struct SubstreamState { - /// Waker to wake if the substream becomes readable. - pub read_waker: Option, - /// Waker to wake if the substream becomes writable, closed or stopped. - pub write_waker: Option, - /// Waker to wake if the substream becomes closed or stopped. - pub close_waker: Option, - - pub write_state: WriteState, -} - -impl SubstreamState { - /// Wake all wakers for reading, writing and closed the stream. 
- pub fn wake_all(&mut self) { - if let Some(waker) = self.read_waker.take() { - waker.wake(); - } - if let Some(waker) = self.write_waker.take() { - waker.wake(); - } - if let Some(waker) = self.close_waker.take() { - waker.wake(); - } - } -} - -/// A single stream on a connection -#[derive(Debug)] -pub struct Substream { - /// The id of the stream. - id: quinn_proto::StreamId, - /// The state of the [`super::Connection`] this stream belongs to. - state: Arc>, -} - -impl Substream { - pub fn new(id: quinn_proto::StreamId, state: Arc>) -> Self { - Self { id, state } - } -} - -impl AsyncRead for Substream { - fn poll_read( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - mut buf: &mut [u8], - ) -> Poll> { - let mut state = self.state.lock(); - - let mut stream = state.connection.recv_stream(self.id); - let mut chunks = match stream.read(true) { - Ok(chunks) => chunks, - Err(quinn_proto::ReadableError::UnknownStream) => { - return Poll::Ready(Ok(0)); - } - Err(quinn_proto::ReadableError::IllegalOrderedRead) => { - unreachable!( - "Illegal ordered read can only happen if `stream.read(false)` is used." - ); - } - }; - - let mut bytes = 0; - let mut pending = false; - let mut error = None; - loop { - if buf.is_empty() { - // Chunks::next will continue returning `Ok(Some(_))` with an - // empty chunk if there is no space left in the buffer, so we - // break early here. 
- break; - } - let chunk = match chunks.next(buf.len()) { - Ok(Some(chunk)) => chunk, - Ok(None) => break, - Err(err @ quinn_proto::ReadError::Reset(_)) => { - error = Some(Err(io::Error::new(io::ErrorKind::ConnectionReset, err))); - break; - } - Err(quinn_proto::ReadError::Blocked) => { - pending = true; - break; - } - }; - - buf.write_all(&chunk.bytes).expect("enough buffer space"); - bytes += chunk.bytes.len(); - } - if chunks.finalize().should_transmit() { - if let Some(waker) = state.poll_connection_waker.take() { - waker.wake(); - } - } - if let Some(err) = error { - return Poll::Ready(err); - } - - if pending && bytes == 0 { - let substream_state = state.unchecked_substream_state(self.id); - substream_state.read_waker = Some(cx.waker().clone()); - return Poll::Pending; - } - - Poll::Ready(Ok(bytes)) - } -} - -impl AsyncWrite for Substream { - fn poll_write( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - let mut state = self.state.lock(); - - match state.connection.send_stream(self.id).write(buf) { - Ok(bytes) => { - if let Some(waker) = state.poll_connection_waker.take() { - waker.wake(); - } - Poll::Ready(Ok(bytes)) - } - Err(quinn_proto::WriteError::Blocked) => { - let substream_state = state.unchecked_substream_state(self.id); - substream_state.write_waker = Some(cx.waker().clone()); - Poll::Pending - } - Err(err @ quinn_proto::WriteError::Stopped(_)) => { - Poll::Ready(Err(io::Error::new(io::ErrorKind::ConnectionReset, err))) - } - Err(quinn_proto::WriteError::UnknownStream) => { - Poll::Ready(Err(io::ErrorKind::BrokenPipe.into())) - } - } - } - - fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { - // quinn doesn't support flushing, calling close will flush all substreams. 
- Poll::Ready(Ok(())) - } - - fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut inner = self.state.lock(); - - let substream_state = inner.unchecked_substream_state(self.id); - match substream_state.write_state { - WriteState::Open => {} - WriteState::Closing => { - substream_state.close_waker = Some(cx.waker().clone()); - return Poll::Pending; - } - WriteState::Closed => return Poll::Ready(Ok(())), - WriteState::Stopped => { - let err = quinn_proto::FinishError::Stopped(0u32.into()); - return Poll::Ready(Err(io::Error::new(io::ErrorKind::ConnectionReset, err))); - } - } - - match inner.connection.send_stream(self.id).finish() { - Ok(()) => { - let substream_state = inner.unchecked_substream_state(self.id); - substream_state.close_waker = Some(cx.waker().clone()); - substream_state.write_state = WriteState::Closing; - Poll::Pending - } - Err(err @ quinn_proto::FinishError::Stopped(_)) => { - Poll::Ready(Err(io::Error::new(io::ErrorKind::ConnectionReset, err))) - } - Err(quinn_proto::FinishError::UnknownStream) => { - // We never make up IDs so the stream must have existed at some point if we get to here. - // `UnknownStream` is also emitted in case the stream is already finished, hence just - // return `Ok(())` here. - Poll::Ready(Ok(())) - } - } - } -} - -impl Drop for Substream { - fn drop(&mut self) { - let mut state = self.state.lock(); - state.substreams.remove(&self.id); - // Send `STOP_STREAM` if the remote did not finish the stream yet. - // We have to manually check the read stream since we might have - // received a `FIN` (without any other stream data) after the last - // time we tried to read. 
- let mut is_read_done = false; - if let Ok(mut chunks) = state.connection.recv_stream(self.id).read(true) { - if let Ok(chunk) = chunks.next(0) { - is_read_done = chunk.is_none(); - } - let _ = chunks.finalize(); - } - if !is_read_done { - let _ = state.connection.recv_stream(self.id).stop(0u32.into()); - } - // Close the writing side. - let mut send_stream = state.connection.send_stream(self.id); - match send_stream.finish() { - Ok(()) => {} - // Already finished or reset, which is fine. - Err(quinn_proto::FinishError::UnknownStream) => {} - Err(quinn_proto::FinishError::Stopped(reason)) => { - let _ = send_stream.reset(reason); - } - } - } -} - -#[derive(Debug, Default, Clone)] -pub enum WriteState { - /// The stream is open for writing. - #[default] - Open, - /// The writing side of the stream is closing. - Closing, - /// All data was successfully sent to the remote and the stream closed, - /// i.e. a [`quinn_proto::StreamEvent::Finished`] was reported for it. - Closed, - /// The stream was stopped by the remote before all data could be - /// sent. - Stopped, -} diff --git a/transports/quic/src/endpoint.rs b/transports/quic/src/endpoint.rs deleted file mode 100644 index 10e650ba41a..00000000000 --- a/transports/quic/src/endpoint.rs +++ /dev/null @@ -1,667 +0,0 @@ -// Copyright 2017-2020 Parity Technologies (UK) Ltd. -// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - -use crate::{ - provider::Provider, - transport::{ProtocolVersion, SocketFamily}, - ConnectError, Connection, Error, -}; - -use bytes::BytesMut; -use futures::{ - channel::{mpsc, oneshot}, - prelude::*, -}; -use quinn_proto::VarInt; -use std::{ - collections::HashMap, - net::{Ipv4Addr, Ipv6Addr, SocketAddr}, - ops::ControlFlow, - pin::Pin, - sync::Arc, - task::{Context, Poll}, - time::{Duration, Instant}, -}; - -// The `Driver` drops packets if the channel to the connection -// or transport is full. -// Set capacity 10 to avoid unnecessary packet drops if the receiver -// is only very briefly busy, but not buffer a large amount of packets -// if it is blocked longer. -const CHANNEL_CAPACITY: usize = 10; - -/// Config for the transport. -#[derive(Clone)] -pub struct Config { - /// Timeout for the initial handshake when establishing a connection. - /// The actual timeout is the minimum of this an the [`Config::max_idle_timeout`]. - pub handshake_timeout: Duration, - /// Maximum duration of inactivity in ms to accept before timing out the connection. - pub max_idle_timeout: u32, - /// Period of inactivity before sending a keep-alive packet. - /// Must be set lower than the idle_timeout of both - /// peers to be effective. - /// - /// See [`quinn_proto::TransportConfig::keep_alive_interval`] for more - /// info. - pub keep_alive_interval: Duration, - /// Maximum number of incoming bidirectional streams that may be open - /// concurrently by the remote peer. 
- pub max_concurrent_stream_limit: u32, - - /// Max unacknowledged data in bytes that may be send on a single stream. - pub max_stream_data: u32, - - /// Max unacknowledged data in bytes that may be send in total on all streams - /// of a connection. - pub max_connection_data: u32, - - /// Support QUIC version draft-29 for dialing and listening. - /// - /// Per default only QUIC Version 1 / [`libp2p_core::multiaddr::Protocol::QuicV1`] - /// is supported. - /// - /// If support for draft-29 is enabled servers support draft-29 and version 1 on all - /// QUIC listening addresses. - /// As client the version is chosen based on the remote's address. - pub support_draft_29: bool, - - /// TLS client config for the inner [`quinn_proto::ClientConfig`]. - client_tls_config: Arc, - /// TLS server config for the inner [`quinn_proto::ServerConfig`]. - server_tls_config: Arc, -} - -impl Config { - /// Creates a new configuration object with default values. - pub fn new(keypair: &libp2p_identity::Keypair) -> Self { - let client_tls_config = Arc::new(libp2p_tls::make_client_config(keypair, None).unwrap()); - let server_tls_config = Arc::new(libp2p_tls::make_server_config(keypair).unwrap()); - Self { - client_tls_config, - server_tls_config, - support_draft_29: false, - handshake_timeout: Duration::from_secs(5), - max_idle_timeout: 30 * 1000, - max_concurrent_stream_limit: 256, - keep_alive_interval: Duration::from_secs(15), - max_connection_data: 15_000_000, - - // Ensure that one stream is not consuming the whole connection. - max_stream_data: 10_000_000, - } - } -} - -/// Represents the inner configuration for [`quinn_proto`]. 
-#[derive(Debug, Clone)] -pub(crate) struct QuinnConfig { - client_config: quinn_proto::ClientConfig, - server_config: Arc, - endpoint_config: Arc, -} - -impl From for QuinnConfig { - fn from(config: Config) -> QuinnConfig { - let Config { - client_tls_config, - server_tls_config, - max_idle_timeout, - max_concurrent_stream_limit, - keep_alive_interval, - max_connection_data, - max_stream_data, - support_draft_29, - handshake_timeout: _, - } = config; - let mut transport = quinn_proto::TransportConfig::default(); - // Disable uni-directional streams. - transport.max_concurrent_uni_streams(0u32.into()); - transport.max_concurrent_bidi_streams(max_concurrent_stream_limit.into()); - // Disable datagrams. - transport.datagram_receive_buffer_size(None); - transport.keep_alive_interval(Some(keep_alive_interval)); - transport.max_idle_timeout(Some(VarInt::from_u32(max_idle_timeout).into())); - transport.allow_spin(false); - transport.stream_receive_window(max_stream_data.into()); - transport.receive_window(max_connection_data.into()); - let transport = Arc::new(transport); - - let mut server_config = quinn_proto::ServerConfig::with_crypto(server_tls_config); - server_config.transport = Arc::clone(&transport); - // Disables connection migration. - // Long-term this should be enabled, however we then need to handle address change - // on connections in the `Connection`. - server_config.migration(false); - - let mut client_config = quinn_proto::ClientConfig::new(client_tls_config); - client_config.transport_config(transport); - - let mut endpoint_config = quinn_proto::EndpointConfig::default(); - if !support_draft_29 { - endpoint_config.supported_versions(vec![1]); - } - - QuinnConfig { - client_config, - server_config: Arc::new(server_config), - endpoint_config: Arc::new(endpoint_config), - } - } -} - -/// Channel used to send commands to the [`Driver`]. -#[derive(Debug, Clone)] -pub(crate) struct Channel { - /// Channel to the background of the endpoint. 
- to_endpoint: mpsc::Sender, - /// Address that the socket is bound to. - /// Note: this may be a wildcard ip address. - socket_addr: SocketAddr, -} - -impl Channel { - /// Builds a new endpoint that is listening on the [`SocketAddr`]. - pub(crate) fn new_bidirectional( - quinn_config: QuinnConfig, - socket_addr: SocketAddr, - ) -> Result<(Self, mpsc::Receiver), Error> { - // Channel for forwarding new inbound connections to the listener. - let (new_connections_tx, new_connections_rx) = mpsc::channel(CHANNEL_CAPACITY); - let endpoint = Self::new::

(quinn_config, socket_addr, Some(new_connections_tx))?; - Ok((endpoint, new_connections_rx)) - } - - /// Builds a new endpoint that only supports outbound connections. - pub(crate) fn new_dialer( - quinn_config: QuinnConfig, - socket_family: SocketFamily, - ) -> Result { - let socket_addr = match socket_family { - SocketFamily::Ipv4 => SocketAddr::new(Ipv4Addr::UNSPECIFIED.into(), 0), - SocketFamily::Ipv6 => SocketAddr::new(Ipv6Addr::UNSPECIFIED.into(), 0), - }; - Self::new::

(quinn_config, socket_addr, None) - } - - /// Spawn a new [`Driver`] that runs in the background. - fn new( - quinn_config: QuinnConfig, - socket_addr: SocketAddr, - new_connections: Option>, - ) -> Result { - let socket = std::net::UdpSocket::bind(socket_addr)?; - // NOT blocking, as per man:bind(2), as we pass an IP address. - socket.set_nonblocking(true)?; - // Capacity 0 to back-pressure the rest of the application if - // the udp socket is busy. - let (to_endpoint_tx, to_endpoint_rx) = mpsc::channel(0); - - let channel = Self { - to_endpoint: to_endpoint_tx, - socket_addr: socket.local_addr()?, - }; - - let server_config = new_connections - .is_some() - .then_some(quinn_config.server_config); - - let provider_socket = P::from_socket(socket)?; - - let driver = Driver::

::new( - quinn_config.endpoint_config, - quinn_config.client_config, - new_connections, - server_config, - channel.clone(), - provider_socket, - to_endpoint_rx, - ); - - // Drive the endpoint future in the background. - P::spawn(driver); - - Ok(channel) - } - - pub(crate) fn socket_addr(&self) -> &SocketAddr { - &self.socket_addr - } - - /// Try to send a message to the background task without blocking. - /// - /// This first polls the channel for capacity. - /// If the channel is full, the message is returned in `Ok(Err(_))` - /// and the context's waker is registered for wake-up. - /// - /// If the background task crashed `Err` is returned. - pub(crate) fn try_send( - &mut self, - to_endpoint: ToEndpoint, - cx: &mut Context<'_>, - ) -> Result, Disconnected> { - match self.to_endpoint.poll_ready_unpin(cx) { - Poll::Ready(Ok(())) => {} - Poll::Ready(Err(e)) => { - debug_assert!( - e.is_disconnected(), - "mpsc::Sender can only be disconnected when calling `poll_ready_unpin" - ); - - return Err(Disconnected {}); - } - Poll::Pending => return Ok(Err(to_endpoint)), - }; - - if let Err(e) = self.to_endpoint.start_send(to_endpoint) { - debug_assert!(e.is_disconnected(), "We called `Sink::poll_ready` so we are guaranteed to have a slot. If this fails, it means we are disconnected."); - - return Err(Disconnected {}); - } - - Ok(Ok(())) - } - - /// Send a message to inform the [`Driver`] about an - /// event caused by the owner of this [`Channel`] dropping. - /// This clones the sender to the endpoint to guarantee delivery. - /// This should *not* be called for regular messages. - pub(crate) fn send_on_drop(&mut self, to_endpoint: ToEndpoint) { - let _ = self.to_endpoint.clone().try_send(to_endpoint); - } -} - -#[derive(Debug, thiserror::Error, Clone, PartialEq, Eq)] -#[error("Background task disconnected")] -pub(crate) struct Disconnected {} -/// Message sent to the endpoint background task. 
-#[derive(Debug)] -pub(crate) enum ToEndpoint { - /// Instruct the [`quinn_proto::Endpoint`] to start connecting to the given address. - Dial { - /// UDP address to connect to. - addr: SocketAddr, - /// Version to dial the remote on. - version: ProtocolVersion, - /// Channel to return the result of the dialing to. - result: oneshot::Sender>, - }, - /// Send by a [`quinn_proto::Connection`] when the endpoint needs to process an event generated - /// by a connection. The event itself is opaque to us. Only `quinn_proto` knows what is in - /// there. - ProcessConnectionEvent { - connection_id: quinn_proto::ConnectionHandle, - event: quinn_proto::EndpointEvent, - }, - /// Instruct the endpoint to send a packet of data on its UDP socket. - SendUdpPacket(quinn_proto::Transmit), - /// The [`GenTransport`][crate::GenTransport] dialer or listener coupled to this endpoint - /// was dropped. - /// Once all pending connections are closed, the [`Driver`] should shut down. - Decoupled, -} - -/// Driver that runs in the background for as long as the endpoint is alive. Responsible for -/// processing messages and the UDP socket. -/// -/// # Behaviour -/// -/// This background task is responsible for the following: -/// -/// - Sending packets on the UDP socket. -/// - Receiving packets from the UDP socket and feed them to the [`quinn_proto::Endpoint`] state -/// machine. -/// - Transmitting events generated by the [`quinn_proto::Endpoint`] to the corresponding -/// [`crate::Connection`]. -/// - Receiving messages from the `rx` and processing the requested actions. This includes -/// UDP packets to send and events emitted by the [`crate::Connection`] objects. -/// - Sending new connections on `new_connection_tx`. -/// -/// When it comes to channels, there exists three main multi-producer-single-consumer channels -/// in play: -/// -/// - One channel, represented by `EndpointChannel::to_endpoint` and `Driver::rx`, -/// that communicates messages from [`Channel`] to the [`Driver`]. 
-/// - One channel for each existing connection that communicates messages from the -/// [`Driver` to that [`crate::Connection`]. -/// - One channel for the [`Driver`] to send newly-opened connections to. The receiving -/// side is processed by the [`GenTransport`][crate::GenTransport]. -/// -/// -/// ## Back-pressure -/// -/// ### If writing to the UDP socket is blocked -/// -/// In order to avoid an unbounded buffering of events, we prioritize sending data on the UDP -/// socket over everything else. Messages from the rest of the application sent through the -/// [`Channel`] are only processed if the UDP socket is ready so that we propagate back-pressure -/// in case of a busy socket. For connections, thus this eventually also back-pressures the -/// `AsyncWrite`on substreams. -/// -/// -/// ### Back-pressuring the remote if the application is busy -/// -/// If the channel to a connection is full because the connection is busy, inbound datagrams -/// for that connection are dropped so that the remote is backpressured. -/// The same applies for new connections if the transport is too busy to received it. -/// -/// -/// # Shutdown -/// -/// The background task shuts down if an [`ToEndpoint::Decoupled`] event was received and the -/// last active connection has drained. -#[derive(Debug)] -pub(crate) struct Driver { - // The actual QUIC state machine. - endpoint: quinn_proto::Endpoint, - // QuinnConfig for client connections. - client_config: quinn_proto::ClientConfig, - // Copy of the channel to the endpoint driver that is passed to each new connection. - channel: Channel, - // Channel to receive messages from the transport or connections. - rx: mpsc::Receiver, - - // Socket for sending and receiving datagrams. - provider_socket: P, - // Future for writing the next packet to the socket. - next_packet_out: Option, - - // List of all active connections, with a sender to notify them of events. 
- alive_connections: - HashMap>, - // Channel to forward new inbound connections to the transport. - // `None` if server capabilities are disabled, i.e. the endpoint is only used for dialing. - new_connection_tx: Option>, - // Whether the transport dropped its handle for this endpoint. - is_decoupled: bool, -} - -impl Driver

{ - fn new( - endpoint_config: Arc, - client_config: quinn_proto::ClientConfig, - new_connection_tx: Option>, - server_config: Option>, - channel: Channel, - socket: P, - rx: mpsc::Receiver, - ) -> Self { - Driver { - endpoint: quinn_proto::Endpoint::new(endpoint_config, server_config), - client_config, - channel, - rx, - provider_socket: socket, - next_packet_out: None, - alive_connections: HashMap::new(), - new_connection_tx, - is_decoupled: false, - } - } - - /// Handle a message sent from either the [`GenTransport`](super::GenTransport) - /// or a [`crate::Connection`]. - fn handle_message( - &mut self, - to_endpoint: ToEndpoint, - ) -> ControlFlow<(), Option> { - match to_endpoint { - ToEndpoint::Dial { - addr, - result, - version, - } => { - let mut config = self.client_config.clone(); - if version == ProtocolVersion::Draft29 { - config.version(0xff00_001d); - } - // This `"l"` seems necessary because an empty string is an invalid domain - // name. While we don't use domain names, the underlying rustls library - // is based upon the assumption that we do. - let (connection_id, connection) = match self.endpoint.connect(config, addr, "l") { - Ok(c) => c, - Err(err) => { - let _ = result.send(Err(ConnectError::from(err).into())); - return ControlFlow::Continue(None); - } - }; - - debug_assert_eq!(connection.side(), quinn_proto::Side::Client); - let (tx, rx) = mpsc::channel(CHANNEL_CAPACITY); - let connection = Connection::from_quinn_connection( - self.channel.clone(), - connection, - connection_id, - rx, - ); - self.alive_connections.insert(connection_id, tx); - let _ = result.send(Ok(connection)); - } - - // A connection wants to notify the endpoint of something. 
- ToEndpoint::ProcessConnectionEvent { - connection_id, - event, - } => { - let has_key = self.alive_connections.contains_key(&connection_id); - if !has_key { - return ControlFlow::Continue(None); - } - // We "drained" event indicates that the connection no longer exists and - // its ID can be reclaimed. - let is_drained_event = event.is_drained(); - if is_drained_event { - self.alive_connections.remove(&connection_id); - if self.is_decoupled && self.alive_connections.is_empty() { - log::debug!( - "Driver is decoupled and no active connections remain. Shutting down." - ); - return ControlFlow::Break(()); - } - } - - let event_back = self.endpoint.handle_event(connection_id, event); - - if let Some(event_back) = event_back { - debug_assert!(!is_drained_event); - if let Some(sender) = self.alive_connections.get_mut(&connection_id) { - // We clone the sender to guarantee that there will be at least one - // free slot to send the event. - // The channel can not grow out of bound because an `event_back` is - // only sent if we previously received an event from the same connection. - // If the connection is busy, it won't sent us any more events to handle. - let _ = sender.clone().start_send(event_back); - } else { - log::error!("State mismatch: event for closed connection"); - } - } - } - - // Data needs to be sent on the UDP socket. - ToEndpoint::SendUdpPacket(transmit) => return ControlFlow::Continue(Some(transmit)), - ToEndpoint::Decoupled => self.handle_decoupling()?, - } - ControlFlow::Continue(None) - } - - /// Handle an UDP datagram received on the socket. - /// The datagram content was written into the `socket_recv_buffer`. 
- fn handle_datagram(&mut self, packet: BytesMut, packet_src: SocketAddr) -> ControlFlow<()> { - let local_ip = self.channel.socket_addr.ip(); - // TODO: ECN bits aren't handled - let (connec_id, event) = - match self - .endpoint - .handle(Instant::now(), packet_src, Some(local_ip), None, packet) - { - Some(event) => event, - None => return ControlFlow::Continue(()), - }; - match event { - quinn_proto::DatagramEvent::ConnectionEvent(event) => { - // `event` has type `quinn_proto::ConnectionEvent`, which has multiple - // variants. `quinn_proto::Endpoint::handle` however only ever returns - // `ConnectionEvent::Datagram`. - debug_assert!(format!("{event:?}").contains("Datagram")); - - // Redirect the datagram to its connection. - if let Some(sender) = self.alive_connections.get_mut(&connec_id) { - match sender.try_send(event) { - Ok(()) => {} - Err(err) if err.is_disconnected() => { - // Connection was dropped by the user. - // Inform the endpoint that this connection is drained. - self.endpoint - .handle_event(connec_id, quinn_proto::EndpointEvent::drained()); - self.alive_connections.remove(&connec_id); - } - Err(err) if err.is_full() => { - // Connection is too busy. Drop the datagram to back-pressure the remote. - log::debug!( - "Dropping packet for connection {:?} because the connection's channel is full.", - connec_id - ); - } - Err(_) => unreachable!("Error is either `Full` or `Disconnected`."), - } - } else { - log::error!("State mismatch: event for closed connection"); - } - } - quinn_proto::DatagramEvent::NewConnection(connec) => { - // A new connection has been received. `connec_id` is a newly-allocated - // identifier. 
- debug_assert_eq!(connec.side(), quinn_proto::Side::Server); - let connection_tx = match self.new_connection_tx.as_mut() { - Some(tx) => tx, - None => { - debug_assert!(false, "Endpoint reported a new connection even though server capabilities are disabled."); - return ControlFlow::Continue(()); - } - }; - - let (tx, rx) = mpsc::channel(CHANNEL_CAPACITY); - let connection = - Connection::from_quinn_connection(self.channel.clone(), connec, connec_id, rx); - match connection_tx.start_send(connection) { - Ok(()) => { - self.alive_connections.insert(connec_id, tx); - } - Err(e) if e.is_disconnected() => self.handle_decoupling()?, - Err(e) if e.is_full() => log::warn!( - "Dropping new incoming connection {:?} because the channel to the listener is full", - connec_id - ), - Err(_) => unreachable!("Error is either `Full` or `Disconnected`."), - } - } - } - ControlFlow::Continue(()) - } - - /// The transport dropped the channel to this [`Driver`]. - fn handle_decoupling(&mut self) -> ControlFlow<()> { - if self.alive_connections.is_empty() { - return ControlFlow::Break(()); - } - // Listener was closed. - self.endpoint.reject_new_connections(); - self.new_connection_tx = None; - self.is_decoupled = true; - ControlFlow::Continue(()) - } -} - -/// Future that runs until the [`Driver`] is decoupled and not active connections -/// remain -impl Future for Driver

{ - type Output = (); - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - loop { - // Flush any pending pocket so that the socket is reading to write an next - // packet. - match self.provider_socket.poll_send_flush(cx) { - // The pending packet was send or no packet was pending. - Poll::Ready(Ok(_)) => { - // Start sending a packet on the socket. - if let Some(transmit) = self.next_packet_out.take() { - self.provider_socket - .start_send(transmit.contents, transmit.destination); - continue; - } - - // The endpoint might request packets to be sent out. This is handled in - // priority to avoid buffering up packets. - if let Some(transmit) = self.endpoint.poll_transmit() { - self.next_packet_out = Some(transmit); - continue; - } - - // Handle messages from transport and connections. - match self.rx.poll_next_unpin(cx) { - Poll::Ready(Some(to_endpoint)) => match self.handle_message(to_endpoint) { - ControlFlow::Continue(Some(transmit)) => { - self.next_packet_out = Some(transmit); - continue; - } - ControlFlow::Continue(None) => continue, - ControlFlow::Break(()) => break, - }, - Poll::Ready(None) => { - unreachable!("Sender side is never dropped or closed.") - } - Poll::Pending => {} - } - } - // Errors on the socket are expected to never happen, and we handle them by simply - // printing a log message. The packet gets discarded in case of error, but we are - // robust to packet losses and it is consequently not a logic error to proceed with - // normal operations. - Poll::Ready(Err(err)) => { - log::warn!("Error while sending on QUIC UDP socket: {:?}", err); - continue; - } - Poll::Pending => {} - } - - // Poll for new packets from the remote. 
- match self.provider_socket.poll_recv_from(cx) { - Poll::Ready(Ok((bytes, packet_src))) => { - let bytes_mut = bytes.as_slice().into(); - match self.handle_datagram(bytes_mut, packet_src) { - ControlFlow::Continue(()) => continue, - ControlFlow::Break(()) => break, - } - } - // Errors on the socket are expected to never happen, and we handle them by - // simply printing a log message. - Poll::Ready(Err(err)) => { - log::warn!("Error while receive on QUIC UDP socket: {:?}", err); - continue; - } - Poll::Pending => {} - } - - return Poll::Pending; - } - - Poll::Ready(()) - } -} diff --git a/transports/quic/src/hole_punching.rs b/transports/quic/src/hole_punching.rs new file mode 100644 index 00000000000..605799af5e1 --- /dev/null +++ b/transports/quic/src/hole_punching.rs @@ -0,0 +1,44 @@ +use crate::{provider::Provider, Error}; + +use futures::future::Either; + +use rand::{distributions, Rng}; + +use std::convert::Infallible; +use std::{ + net::{SocketAddr, UdpSocket}, + time::Duration, +}; + +pub(crate) async fn hole_puncher( + socket: UdpSocket, + remote_addr: SocketAddr, + timeout_duration: Duration, +) -> Error { + let punch_holes_future = punch_holes::

(socket, remote_addr); + futures::pin_mut!(punch_holes_future); + match futures::future::select(P::sleep(timeout_duration), punch_holes_future).await { + Either::Left(_) => Error::HandshakeTimedOut, + Either::Right((Err(hole_punch_err), _)) => hole_punch_err, + Either::Right((Ok(never), _)) => match never {}, + } +} + +async fn punch_holes( + socket: UdpSocket, + remote_addr: SocketAddr, +) -> Result { + loop { + let contents: Vec = rand::thread_rng() + .sample_iter(distributions::Standard) + .take(64) + .collect(); + + tracing::trace!("Sending random UDP packet to {remote_addr}"); + + P::send_to(&socket, &contents, remote_addr).await?; + + let sleep_duration = Duration::from_millis(rand::thread_rng().gen_range(10..=200)); + P::sleep(sleep_duration).await; + } +} diff --git a/transports/quic/src/lib.rs b/transports/quic/src/lib.rs index af6dd1871dd..7ae649b6914 100644 --- a/transports/quic/src/lib.rs +++ b/transports/quic/src/lib.rs @@ -32,7 +32,7 @@ //! # fn main() -> std::io::Result<()> { //! # //! use libp2p_quic as quic; -//! use libp2p_core::{Multiaddr, Transport}; +//! use libp2p_core::{Multiaddr, Transport, transport::ListenerId}; //! //! let keypair = libp2p_identity::Keypair::generate_ed25519(); //! let quic_config = quic::Config::new(&keypair); @@ -40,7 +40,7 @@ //! let mut quic_transport = quic::async_std::Transport::new(quic_config); //! //! let addr = "/ip4/127.0.0.1/udp/12345/quic-v1".parse().expect("address should be valid"); -//! quic_transport.listen_on(addr).expect("listen error."); +//! quic_transport.listen_on(ListenerId::next(), addr).expect("listen error."); //! # //! # Ok(()) //! 
# } @@ -57,13 +57,17 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +mod config; mod connection; -mod endpoint; +mod hole_punching; mod provider; mod transport; -pub use connection::{Connecting, Connection, Substream}; -pub use endpoint::Config; +use std::net::SocketAddr; + +pub use config::Config; +pub use connection::{Connecting, Connection, Stream}; + #[cfg(feature = "async-std")] pub use provider::async_std; #[cfg(feature = "tokio")] @@ -86,22 +90,25 @@ pub enum Error { #[error(transparent)] Io(#[from] std::io::Error), - /// The task spawned in [`Provider::spawn`] to drive - /// the quic endpoint has crashed. - #[error("Endpoint driver crashed")] - EndpointDriverCrashed, - /// The [`Connecting`] future timed out. #[error("Handshake with the remote timed out.")] HandshakeTimedOut, + + /// Error when `Transport::dial_as_listener` is called without an active listener. + #[error("Tried to dial as listener without an active listener.")] + NoActiveListenerForDialAsListener, + + /// Error when holepunching for a remote is already in progress + #[error("Already punching hole for {0}).")] + HolePunchInProgress(SocketAddr), } /// Dialing a remote peer failed. #[derive(Debug, thiserror::Error)] #[error(transparent)] -pub struct ConnectError(#[from] quinn_proto::ConnectError); +pub struct ConnectError(quinn::ConnectError); /// Error on an established [`Connection`]. #[derive(Debug, thiserror::Error)] #[error(transparent)] -pub struct ConnectionError(#[from] quinn_proto::ConnectionError); +pub struct ConnectionError(quinn::ConnectionError); diff --git a/transports/quic/src/provider.rs b/transports/quic/src/provider.rs index c38f77fd1b9..6f1122ee55f 100644 --- a/transports/quic/src/provider.rs +++ b/transports/quic/src/provider.rs @@ -18,12 +18,13 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use futures::Future; +use futures::future::BoxFuture; use if_watch::IfEvent; use std::{ io, - net::SocketAddr, + net::{SocketAddr, UdpSocket}, task::{Context, Poll}, + time::Duration, }; #[cfg(feature = "async-std")] @@ -31,40 +32,20 @@ pub mod async_std; #[cfg(feature = "tokio")] pub mod tokio; -/// Size of the buffer for reading data 0x10000. -#[cfg(any(feature = "async-std", feature = "tokio"))] -const RECEIVE_BUFFER_SIZE: usize = 65536; +pub enum Runtime { + #[cfg(feature = "tokio")] + Tokio, + #[cfg(feature = "async-std")] + AsyncStd, + Dummy, +} -/// Provider for non-blocking receiving and sending on a [`std::net::UdpSocket`] -/// and spawning tasks. +/// Provider for a corresponding quinn runtime and spawning tasks. pub trait Provider: Unpin + Send + Sized + 'static { type IfWatcher: Unpin + Send; - /// Create a new providing that is wrapping the socket. - /// - /// Note: The socket must be set to non-blocking. - fn from_socket(socket: std::net::UdpSocket) -> io::Result; - - /// Receive a single packet. - /// - /// Returns the message and the address the message came from. - fn poll_recv_from(&mut self, cx: &mut Context<'_>) -> Poll, SocketAddr)>>; - - /// Set sending a packet on the socket. - /// - /// Since only one packet can be sent at a time, this may only be called if a preceding - /// call to [`Provider::poll_send_flush`] returned [`Poll::Ready`]. - fn start_send(&mut self, data: Vec, addr: SocketAddr); - - /// Flush a packet send in [`Provider::start_send`]. - /// - /// If [`Poll::Ready`] is returned the socket is ready for sending a new packet. - fn poll_send_flush(&mut self, cx: &mut Context<'_>) -> Poll>; - - /// Run the given future in the background until it ends. - /// - /// This is used to spawn the task that is driving the endpoint. - fn spawn(future: impl Future + Send + 'static); + /// Run the corresponding runtime + fn runtime() -> Runtime; /// Create a new [`if_watch`] watcher that reports [`IfEvent`]s for network interface changes. 
fn new_if_watcher() -> io::Result; @@ -74,4 +55,14 @@ pub trait Provider: Unpin + Send + Sized + 'static { watcher: &mut Self::IfWatcher, cx: &mut Context<'_>, ) -> Poll>; + + /// Sleep for specified amount of time. + fn sleep(duration: Duration) -> BoxFuture<'static, ()>; + + /// Sends data on the socket to the given address. On success, returns the number of bytes written. + fn send_to<'a>( + udp_socket: &'a UdpSocket, + buf: &'a [u8], + target: SocketAddr, + ) -> BoxFuture<'a, io::Result>; } diff --git a/transports/quic/src/provider/async_std.rs b/transports/quic/src/provider/async_std.rs index 222c8e55e90..a110058108c 100644 --- a/transports/quic/src/provider/async_std.rs +++ b/transports/quic/src/provider/async_std.rs @@ -18,14 +18,12 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use async_std::{net::UdpSocket, task::spawn}; -use futures::{future::BoxFuture, ready, Future, FutureExt, Stream, StreamExt}; +use futures::{future::BoxFuture, FutureExt}; use std::{ io, - net::SocketAddr, - pin::Pin, - sync::Arc, + net::UdpSocket, task::{Context, Poll}, + time::Duration, }; use crate::GenTransport; @@ -33,65 +31,14 @@ use crate::GenTransport; /// Transport with [`async-std`] runtime. pub type Transport = GenTransport; -/// Provider for reading / writing to a sockets and spawning -/// tasks using [`async-std`]. -pub struct Provider { - socket: Arc, - // Future for sending a packet. - // This is needed since [`async_Std::net::UdpSocket`] does not - // provide a poll-style interface for sending a packet. - send_packet: Option>>, - recv_stream: ReceiveStream, -} +/// Provider for quinn runtime and spawning tasks using [`async-std`]. 
+pub struct Provider; impl super::Provider for Provider { type IfWatcher = if_watch::smol::IfWatcher; - fn from_socket(socket: std::net::UdpSocket) -> io::Result { - let socket = Arc::new(socket.into()); - let recv_stream = ReceiveStream::new(Arc::clone(&socket)); - Ok(Provider { - socket, - send_packet: None, - recv_stream, - }) - } - - fn poll_recv_from(&mut self, cx: &mut Context<'_>) -> Poll, SocketAddr)>> { - match self.recv_stream.poll_next_unpin(cx) { - Poll::Ready(ready) => { - Poll::Ready(ready.expect("ReceiveStream::poll_next never returns None.")) - } - Poll::Pending => Poll::Pending, - } - } - - fn start_send(&mut self, data: Vec, addr: SocketAddr) { - let socket = self.socket.clone(); - let send = async move { - socket.send_to(&data, addr).await?; - Ok(()) - } - .boxed(); - self.send_packet = Some(send) - } - - fn poll_send_flush(&mut self, cx: &mut Context<'_>) -> Poll> { - let pending = match self.send_packet.as_mut() { - Some(pending) => pending, - None => return Poll::Ready(Ok(())), - }; - match pending.poll_unpin(cx) { - Poll::Ready(result) => { - self.send_packet = None; - Poll::Ready(result) - } - Poll::Pending => Poll::Pending, - } - } - - fn spawn(future: impl Future + Send + 'static) { - spawn(future); + fn runtime() -> super::Runtime { + super::Runtime::AsyncStd } fn new_if_watcher() -> io::Result { @@ -104,48 +51,20 @@ impl super::Provider for Provider { ) -> Poll> { watcher.poll_if_event(cx) } -} -type ReceiveStreamItem = ( - Result<(usize, SocketAddr), io::Error>, - Arc, - Vec, -); - -/// Wrapper around the socket to implement `Stream` on it. -struct ReceiveStream { - /// Future for receiving a packet on the socket. - // This is needed since [`async_Std::net::UdpSocket`] does not - // provide a poll-style interface for receiving packets. 
- fut: BoxFuture<'static, ReceiveStreamItem>, -} - -impl ReceiveStream { - fn new(socket: Arc) -> Self { - let fut = ReceiveStream::next(socket, vec![0; super::RECEIVE_BUFFER_SIZE]).boxed(); - Self { fut: fut.boxed() } - } - - async fn next(socket: Arc, mut socket_recv_buffer: Vec) -> ReceiveStreamItem { - let recv = socket.recv_from(&mut socket_recv_buffer).await; - (recv, socket, socket_recv_buffer) + fn sleep(duration: Duration) -> BoxFuture<'static, ()> { + async_std::task::sleep(duration).boxed() } -} - -impl Stream for ReceiveStream { - type Item = Result<(Vec, SocketAddr), io::Error>; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let (result, socket, buffer) = ready!(self.fut.poll_unpin(cx)); - let result = result.map(|(packet_len, packet_src)| { - debug_assert!(packet_len <= buffer.len()); - // Copies the bytes from the `socket_recv_buffer` they were written into. - (buffer[..packet_len].into(), packet_src) - }); - // Set the future for receiving the next packet on the stream. - self.fut = ReceiveStream::next(socket, buffer).boxed(); - - Poll::Ready(Some(result)) + fn send_to<'a>( + udp_socket: &'a UdpSocket, + buf: &'a [u8], + target: std::net::SocketAddr, + ) -> BoxFuture<'a, io::Result> { + Box::pin(async move { + async_std::net::UdpSocket::from(udp_socket.try_clone()?) + .send_to(buf, target) + .await + }) } } diff --git a/transports/quic/src/provider/tokio.rs b/transports/quic/src/provider/tokio.rs index 07e23f8813c..9cb148d6ef2 100644 --- a/transports/quic/src/provider/tokio.rs +++ b/transports/quic/src/provider/tokio.rs @@ -18,71 +18,27 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use futures::{ready, Future}; +use futures::{future::BoxFuture, FutureExt}; use std::{ io, - net::SocketAddr, + net::{SocketAddr, UdpSocket}, task::{Context, Poll}, + time::Duration, }; -use tokio::{io::ReadBuf, net::UdpSocket}; use crate::GenTransport; /// Transport with [`tokio`] runtime. pub type Transport = GenTransport; -/// Provider for reading / writing to a sockets and spawning -/// tasks using [`tokio`]. -pub struct Provider { - socket: UdpSocket, - socket_recv_buffer: Vec, - next_packet_out: Option<(Vec, SocketAddr)>, -} +/// Provider for quinn runtime and spawning tasks using [`tokio`]. +pub struct Provider; impl super::Provider for Provider { type IfWatcher = if_watch::tokio::IfWatcher; - fn from_socket(socket: std::net::UdpSocket) -> std::io::Result { - let socket = UdpSocket::from_std(socket)?; - Ok(Provider { - socket, - socket_recv_buffer: vec![0; super::RECEIVE_BUFFER_SIZE], - next_packet_out: None, - }) - } - - fn poll_send_flush(&mut self, cx: &mut Context<'_>) -> Poll> { - let (data, addr) = match self.next_packet_out.as_ref() { - Some(pending) => pending, - None => return Poll::Ready(Ok(())), - }; - match self.socket.poll_send_to(cx, data.as_slice(), *addr) { - Poll::Ready(result) => { - self.next_packet_out = None; - Poll::Ready(result.map(|_| ())) - } - Poll::Pending => Poll::Pending, - } - } - - fn poll_recv_from(&mut self, cx: &mut Context<'_>) -> Poll, SocketAddr)>> { - let Self { - socket, - socket_recv_buffer, - .. 
- } = self; - let mut read_buf = ReadBuf::new(socket_recv_buffer.as_mut_slice()); - let packet_src = ready!(socket.poll_recv_from(cx, &mut read_buf)?); - let bytes = read_buf.filled().to_vec(); - Poll::Ready(Ok((bytes, packet_src))) - } - - fn start_send(&mut self, data: Vec, addr: SocketAddr) { - self.next_packet_out = Some((data, addr)); - } - - fn spawn(future: impl Future + Send + 'static) { - tokio::spawn(future); + fn runtime() -> super::Runtime { + super::Runtime::Tokio } fn new_if_watcher() -> io::Result { @@ -95,4 +51,20 @@ impl super::Provider for Provider { ) -> Poll> { watcher.poll_if_event(cx) } + + fn sleep(duration: Duration) -> BoxFuture<'static, ()> { + tokio::time::sleep(duration).boxed() + } + + fn send_to<'a>( + udp_socket: &'a UdpSocket, + buf: &'a [u8], + target: SocketAddr, + ) -> BoxFuture<'a, io::Result> { + Box::pin(async move { + tokio::net::UdpSocket::from_std(udp_socket.try_clone()?)? + .send_to(buf, target) + .await + }) + } } diff --git a/transports/quic/src/transport.rs b/transports/quic/src/transport.rs index d68eb7f1928..aea3c91093f 100644 --- a/transports/quic/src/transport.rs +++ b/transports/quic/src/transport.rs @@ -18,12 +18,13 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::endpoint::{Config, QuinnConfig, ToEndpoint}; +use crate::config::{Config, QuinnConfig}; +use crate::hole_punching::hole_puncher; use crate::provider::Provider; -use crate::{endpoint, Connecting, Connection, Error}; +use crate::{ConnectError, Connecting, Connection, Error}; -use futures::channel::{mpsc, oneshot}; -use futures::future::BoxFuture; +use futures::channel::oneshot; +use futures::future::{BoxFuture, Either}; use futures::ready; use futures::stream::StreamExt; use futures::{prelude::*, stream::SelectAll}; @@ -36,12 +37,13 @@ use libp2p_core::{ Transport, }; use libp2p_identity::PeerId; +use socket2::{Domain, Socket, Type}; use std::collections::hash_map::{DefaultHasher, Entry}; -use std::collections::{HashMap, VecDeque}; -use std::fmt; +use std::collections::{HashMap, HashSet}; use std::hash::{Hash, Hasher}; -use std::net::IpAddr; +use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, UdpSocket}; use std::time::Duration; +use std::{fmt, io}; use std::{ net::SocketAddr, pin::Pin, @@ -61,7 +63,7 @@ use std::{ /// See . #[derive(Debug)] pub struct GenTransport { - /// Config for the inner [`quinn_proto`] structs. + /// Config for the inner [`quinn`] structs. quinn_config: QuinnConfig, /// Timeout for the [`Connecting`] future. handshake_timeout: Duration, @@ -70,9 +72,11 @@ pub struct GenTransport { /// Streams of active [`Listener`]s. listeners: SelectAll>, /// Dialer for each socket family if no matching listener exists. - dialer: HashMap, + dialer: HashMap, /// Waker to poll the transport again when a new dialer or listener is added. waker: Option, + /// Holepunching attempts + hole_punch_attempts: HashMap>, } impl GenTransport

{ @@ -88,8 +92,109 @@ impl GenTransport

{ dialer: HashMap::new(), waker: None, support_draft_29, + hole_punch_attempts: Default::default(), } } + + /// Create a new [`quinn::Endpoint`] with the given configs. + fn new_endpoint( + endpoint_config: quinn::EndpointConfig, + server_config: Option, + socket: UdpSocket, + ) -> Result { + use crate::provider::Runtime; + match P::runtime() { + #[cfg(feature = "tokio")] + Runtime::Tokio => { + let runtime = std::sync::Arc::new(quinn::TokioRuntime); + let endpoint = + quinn::Endpoint::new(endpoint_config, server_config, socket, runtime)?; + Ok(endpoint) + } + #[cfg(feature = "async-std")] + Runtime::AsyncStd => { + let runtime = std::sync::Arc::new(quinn::AsyncStdRuntime); + let endpoint = + quinn::Endpoint::new(endpoint_config, server_config, socket, runtime)?; + Ok(endpoint) + } + Runtime::Dummy => { + let _ = endpoint_config; + let _ = server_config; + let _ = socket; + let err = std::io::Error::new(std::io::ErrorKind::Other, "no async runtime found"); + Err(Error::Io(err)) + } + } + } + + /// Extract the addr, quic version and peer id from the given [`Multiaddr`]. + fn remote_multiaddr_to_socketaddr( + &self, + addr: Multiaddr, + check_unspecified_addr: bool, + ) -> Result< + (SocketAddr, ProtocolVersion, Option), + TransportError<::Error>, + > { + let (socket_addr, version, peer_id) = multiaddr_to_socketaddr(&addr, self.support_draft_29) + .ok_or_else(|| TransportError::MultiaddrNotSupported(addr.clone()))?; + if check_unspecified_addr && (socket_addr.port() == 0 || socket_addr.ip().is_unspecified()) + { + return Err(TransportError::MultiaddrNotSupported(addr)); + } + Ok((socket_addr, version, peer_id)) + } + + /// Pick any listener to use for dialing. + fn eligible_listener(&mut self, socket_addr: &SocketAddr) -> Option<&mut Listener

> { + let mut listeners: Vec<_> = self + .listeners + .iter_mut() + .filter(|l| { + if l.is_closed { + return false; + } + SocketFamily::is_same(&l.socket_addr().ip(), &socket_addr.ip()) + }) + .filter(|l| { + if socket_addr.ip().is_loopback() { + l.listening_addresses + .iter() + .any(|ip_addr| ip_addr.is_loopback()) + } else { + true + } + }) + .collect(); + match listeners.len() { + 0 => None, + 1 => listeners.pop(), + _ => { + // Pick any listener to use for dialing. + // We hash the socket address to achieve determinism. + let mut hasher = DefaultHasher::new(); + socket_addr.hash(&mut hasher); + let index = hasher.finish() as usize % listeners.len(); + Some(listeners.swap_remove(index)) + } + } + } + + fn create_socket(&self, socket_addr: SocketAddr) -> io::Result { + let socket = Socket::new( + Domain::for_address(socket_addr), + Type::DGRAM, + Some(socket2::Protocol::UDP), + )?; + if socket_addr.is_ipv6() { + socket.set_only_v6(true)?; + } + + socket.bind(&socket_addr.into())?; + + Ok(socket.into()) + } } impl Transport for GenTransport

{ @@ -98,14 +203,22 @@ impl Transport for GenTransport

{ type ListenerUpgrade = Connecting; type Dial = BoxFuture<'static, Result>; - fn listen_on(&mut self, addr: Multiaddr) -> Result> { - let (socket_addr, version) = multiaddr_to_socketaddr(&addr, self.support_draft_29) - .ok_or(TransportError::MultiaddrNotSupported(addr))?; - let listener_id = ListenerId::new(); + fn listen_on( + &mut self, + listener_id: ListenerId, + addr: Multiaddr, + ) -> Result<(), TransportError> { + let (socket_addr, version, _peer_id) = self.remote_multiaddr_to_socketaddr(addr, false)?; + let endpoint_config = self.quinn_config.endpoint_config.clone(); + let server_config = self.quinn_config.server_config.clone(); + let socket = self.create_socket(socket_addr).map_err(Self::Error::from)?; + + let socket_c = socket.try_clone().map_err(Self::Error::from)?; + let endpoint = Self::new_endpoint(endpoint_config, Some(server_config), socket)?; let listener = Listener::new( listener_id, - socket_addr, - self.quinn_config.clone(), + socket_c, + endpoint, self.handshake_timeout, version, )?; @@ -120,7 +233,7 @@ impl Transport for GenTransport

{ // New outbound connections will use the bidirectional (listener) endpoint. self.dialer.remove(&socket_addr.ip().into()); - Ok(listener_id) + Ok(()) } fn remove_listener(&mut self, id: ListenerId) -> bool { @@ -144,83 +257,146 @@ impl Transport for GenTransport

{ } fn dial(&mut self, addr: Multiaddr) -> Result> { - let (socket_addr, version) = multiaddr_to_socketaddr(&addr, self.support_draft_29) - .ok_or_else(|| TransportError::MultiaddrNotSupported(addr.clone()))?; - if socket_addr.port() == 0 || socket_addr.ip().is_unspecified() { - return Err(TransportError::MultiaddrNotSupported(addr)); - } - - let mut listeners = self - .listeners - .iter_mut() - .filter(|l| { - if l.is_closed { - return false; - } - let listen_addr = l.endpoint_channel.socket_addr(); - SocketFamily::is_same(&listen_addr.ip(), &socket_addr.ip()) - && listen_addr.ip().is_loopback() == socket_addr.ip().is_loopback() - }) - .collect::>(); + let (socket_addr, version, _peer_id) = self.remote_multiaddr_to_socketaddr(addr, true)?; - let dialer_state = match listeners.len() { - 0 => { + let endpoint = match self.eligible_listener(&socket_addr) { + None => { // No listener. Get or create an explicit dialer. let socket_family = socket_addr.ip().into(); let dialer = match self.dialer.entry(socket_family) { - Entry::Occupied(occupied) => occupied.into_mut(), + Entry::Occupied(occupied) => occupied.get().clone(), Entry::Vacant(vacant) => { if let Some(waker) = self.waker.take() { waker.wake(); } - vacant.insert(Dialer::new::

(self.quinn_config.clone(), socket_family)?) + let listen_socket_addr = match socket_family { + SocketFamily::Ipv4 => SocketAddr::new(Ipv4Addr::UNSPECIFIED.into(), 0), + SocketFamily::Ipv6 => SocketAddr::new(Ipv6Addr::UNSPECIFIED.into(), 0), + }; + let socket = + UdpSocket::bind(listen_socket_addr).map_err(Self::Error::from)?; + let endpoint_config = self.quinn_config.endpoint_config.clone(); + let endpoint = Self::new_endpoint(endpoint_config, None, socket)?; + + vacant.insert(endpoint.clone()); + endpoint } }; - &mut dialer.state - } - 1 => &mut listeners[0].dialer_state, - _ => { - // Pick any listener to use for dialing. - // We hash the socket address to achieve determinism. - let mut hasher = DefaultHasher::new(); - socket_addr.hash(&mut hasher); - let index = hasher.finish() as usize % listeners.len(); - &mut listeners[index].dialer_state + dialer } + Some(listener) => listener.endpoint.clone(), }; - Ok(dialer_state.new_dial(socket_addr, self.handshake_timeout, version)) + let handshake_timeout = self.handshake_timeout; + let mut client_config = self.quinn_config.client_config.clone(); + if version == ProtocolVersion::Draft29 { + client_config.version(0xff00_001d); + } + Ok(Box::pin(async move { + // This `"l"` seems necessary because an empty string is an invalid domain + // name. While we don't use domain names, the underlying rustls library + // is based upon the assumption that we do. + let connecting = endpoint + .connect_with(client_config, socket_addr, "l") + .map_err(ConnectError)?; + Connecting::new(connecting, handshake_timeout).await + })) } fn dial_as_listener( &mut self, addr: Multiaddr, ) -> Result> { - // TODO: As the listener of a QUIC hole punch, we need to send a random UDP packet to the - // `addr`. See DCUtR specification below. 
- // - // https://github.com/libp2p/specs/blob/master/relay/DCUtR.md#the-protocol - Err(TransportError::MultiaddrNotSupported(addr)) + let (socket_addr, _version, peer_id) = + self.remote_multiaddr_to_socketaddr(addr.clone(), true)?; + let peer_id = peer_id.ok_or(TransportError::MultiaddrNotSupported(addr.clone()))?; + + let socket = self + .eligible_listener(&socket_addr) + .ok_or(TransportError::Other( + Error::NoActiveListenerForDialAsListener, + ))? + .try_clone_socket() + .map_err(Self::Error::from)?; + + tracing::debug!("Preparing for hole-punch from {addr}"); + + let hole_puncher = hole_puncher::

(socket, socket_addr, self.handshake_timeout); + + let (sender, receiver) = oneshot::channel(); + + match self.hole_punch_attempts.entry(socket_addr) { + Entry::Occupied(mut sender_entry) => { + // Stale senders, i.e. from failed hole punches are not removed. + // Thus, we can just overwrite a stale sender. + if !sender_entry.get().is_canceled() { + return Err(TransportError::Other(Error::HolePunchInProgress( + socket_addr, + ))); + } + sender_entry.insert(sender); + } + Entry::Vacant(entry) => { + entry.insert(sender); + } + }; + + Ok(Box::pin(async move { + futures::pin_mut!(hole_puncher); + match futures::future::select(receiver, hole_puncher).await { + Either::Left((message, _)) => { + let (inbound_peer_id, connection) = message + .expect("hole punch connection sender is never dropped before receiver") + .await?; + if inbound_peer_id != peer_id { + tracing::warn!( + peer=%peer_id, + inbound_peer=%inbound_peer_id, + socket_address=%socket_addr, + "expected inbound connection from socket_address to resolve to peer but got inbound peer" + ); + } + Ok((inbound_peer_id, connection)) + } + Either::Right((hole_punch_err, _)) => Err(hole_punch_err), + } + })) } fn poll( mut self: Pin<&mut Self>, cx: &mut Context<'_>, ) -> Poll> { - let mut errored = Vec::new(); - for (key, dialer) in &mut self.dialer { - if let Poll::Ready(_error) = dialer.poll(cx) { - errored.push(*key); - } - } - - for key in errored { - // Endpoint driver of dialer crashed. - // Drop dialer and all pending dials so that the connection receiver is notified. 
- self.dialer.remove(&key); - } + while let Poll::Ready(Some(ev)) = self.listeners.poll_next_unpin(cx) { + match ev { + TransportEvent::Incoming { + listener_id, + mut upgrade, + local_addr, + send_back_addr, + } => { + let socket_addr = + multiaddr_to_socketaddr(&send_back_addr, self.support_draft_29) + .unwrap() + .0; + + if let Some(sender) = self.hole_punch_attempts.remove(&socket_addr) { + match sender.send(upgrade) { + Ok(()) => continue, + Err(timed_out_holepunch) => { + upgrade = timed_out_holepunch; + } + } + } - if let Poll::Ready(Some(ev)) = self.listeners.poll_next_unpin(cx) { - return Poll::Ready(ev); + return Poll::Ready(TransportEvent::Incoming { + listener_id, + upgrade, + local_addr, + send_back_addr, + }); + } + _ => return Poll::Ready(ev), + } } self.waker = Some(cx.waker().clone()); @@ -234,112 +410,22 @@ impl From for TransportError { } } -/// Dialer for addresses if no matching listener exists. -#[derive(Debug)] -struct Dialer { - /// Channel to the [`crate::endpoint::Driver`] that - /// is driving the endpoint. - endpoint_channel: endpoint::Channel, - /// Queued dials for the endpoint. - state: DialerState, -} - -impl Dialer { - fn new( - config: QuinnConfig, - socket_family: SocketFamily, - ) -> Result> { - let endpoint_channel = endpoint::Channel::new_dialer::

(config, socket_family) - .map_err(TransportError::Other)?; - Ok(Dialer { - endpoint_channel, - state: DialerState::default(), - }) - } - - fn poll(&mut self, cx: &mut Context<'_>) -> Poll { - self.state.poll(&mut self.endpoint_channel, cx) - } -} - -impl Drop for Dialer { - fn drop(&mut self) { - self.endpoint_channel.send_on_drop(ToEndpoint::Decoupled); - } -} - -/// Pending dials to be sent to the endpoint was the [`endpoint::Channel`] -/// has capacity -#[derive(Default, Debug)] -struct DialerState { - pending_dials: VecDeque, - waker: Option, -} - -impl DialerState { - fn new_dial( - &mut self, - address: SocketAddr, - timeout: Duration, - version: ProtocolVersion, - ) -> BoxFuture<'static, Result<(PeerId, Connection), Error>> { - let (rx, tx) = oneshot::channel(); - - let message = ToEndpoint::Dial { - addr: address, - result: rx, - version, - }; - - self.pending_dials.push_back(message); - - if let Some(waker) = self.waker.take() { - waker.wake(); - } - - async move { - // Our oneshot getting dropped means the message didn't make it to the endpoint driver. - let connection = tx.await.map_err(|_| Error::EndpointDriverCrashed)??; - let (peer, connection) = Connecting::new(connection, timeout).await?; - - Ok((peer, connection)) - } - .boxed() - } - - /// Send all pending dials into the given [`endpoint::Channel`]. - /// - /// This only ever returns [`Poll::Pending`], or an error in case the channel is closed. - fn poll(&mut self, channel: &mut endpoint::Channel, cx: &mut Context<'_>) -> Poll { - while let Some(to_endpoint) = self.pending_dials.pop_front() { - match channel.try_send(to_endpoint, cx) { - Ok(Ok(())) => {} - Ok(Err(to_endpoint)) => { - self.pending_dials.push_front(to_endpoint); - break; - } - Err(endpoint::Disconnected {}) => return Poll::Ready(Error::EndpointDriverCrashed), - } - } - self.waker = Some(cx.waker().clone()); - Poll::Pending - } -} - /// Listener for incoming connections. struct Listener { /// Id of the listener. 
listener_id: ListenerId, + /// Version of the supported quic protocol. version: ProtocolVersion, - /// Channel to the endpoint to initiate dials. - endpoint_channel: endpoint::Channel, - /// Queued dials. - dialer_state: DialerState, + /// Endpoint + endpoint: quinn::Endpoint, + + /// An underlying copy of the socket to be able to hole punch with + socket: UdpSocket, - /// Channel where new connections are being sent. - new_connections_rx: mpsc::Receiver, + /// A future to poll new incoming connections. + accept: BoxFuture<'static, Option>, /// Timeout for connection establishment on inbound connections. handshake_timeout: Duration, @@ -356,44 +442,50 @@ struct Listener { /// The stream must be awaken after it has been closed to deliver the last event. close_listener_waker: Option, + + listening_addresses: HashSet, } impl Listener

{ fn new( listener_id: ListenerId, - socket_addr: SocketAddr, - config: QuinnConfig, + socket: UdpSocket, + endpoint: quinn::Endpoint, handshake_timeout: Duration, version: ProtocolVersion, ) -> Result { - let (endpoint_channel, new_connections_rx) = - endpoint::Channel::new_bidirectional::

(config, socket_addr)?; - let if_watcher; let pending_event; - if socket_addr.ip().is_unspecified() { + let mut listening_addresses = HashSet::new(); + let local_addr = socket.local_addr()?; + if local_addr.ip().is_unspecified() { if_watcher = Some(P::new_if_watcher()?); pending_event = None; } else { if_watcher = None; - let ma = socketaddr_to_multiaddr(endpoint_channel.socket_addr(), version); + listening_addresses.insert(local_addr.ip()); + let ma = socketaddr_to_multiaddr(&local_addr, version); pending_event = Some(TransportEvent::NewAddress { listener_id, listen_addr: ma, }) } + let endpoint_c = endpoint.clone(); + let accept = async move { endpoint_c.accept().await }.boxed(); + Ok(Listener { - endpoint_channel, + endpoint, + socket, + accept, listener_id, version, - new_connections_rx, handshake_timeout, if_watcher, is_closed: false, pending_event, - dialer_state: DialerState::default(), close_listener_waker: None, + listening_addresses, }) } @@ -403,6 +495,7 @@ impl Listener

{ if self.is_closed { return; } + self.endpoint.close(From::from(0u32), &[]); self.pending_event = Some(TransportEvent::ListenerClosed { listener_id: self.listener_id, reason, @@ -415,21 +508,34 @@ impl Listener

{ } } + /// Clone underlying socket (for hole punching). + fn try_clone_socket(&self) -> std::io::Result { + self.socket.try_clone() + } + + fn socket_addr(&self) -> SocketAddr { + self.socket + .local_addr() + .expect("Cannot fail because the socket is bound") + } + /// Poll for a next If Event. fn poll_if_addr(&mut self, cx: &mut Context<'_>) -> Poll<::Item> { - let if_watcher = match self.if_watcher.as_mut() { - Some(iw) => iw, - None => return Poll::Pending, + let endpoint_addr = self.socket_addr(); + let Some(if_watcher) = self.if_watcher.as_mut() else { + return Poll::Pending; }; loop { match ready!(P::poll_if_event(if_watcher, cx)) { Ok(IfEvent::Up(inet)) => { - if let Some(listen_addr) = ip_to_listenaddr( - self.endpoint_channel.socket_addr(), - inet.addr(), - self.version, - ) { - log::debug!("New listen address: {}", listen_addr); + if let Some(listen_addr) = + ip_to_listenaddr(&endpoint_addr, inet.addr(), self.version) + { + tracing::debug!( + address=%listen_addr, + "New listen address" + ); + self.listening_addresses.insert(inet.addr()); return Poll::Ready(TransportEvent::NewAddress { listener_id: self.listener_id, listen_addr, @@ -437,12 +543,14 @@ impl Listener

{ } } Ok(IfEvent::Down(inet)) => { - if let Some(listen_addr) = ip_to_listenaddr( - self.endpoint_channel.socket_addr(), - inet.addr(), - self.version, - ) { - log::debug!("Expired listen address: {}", listen_addr); + if let Some(listen_addr) = + ip_to_listenaddr(&endpoint_addr, inet.addr(), self.version) + { + tracing::debug!( + address=%listen_addr, + "Expired listen address" + ); + self.listening_addresses.remove(&inet.addr()); return Poll::Ready(TransportEvent::AddressExpired { listener_id: self.listener_id, listen_addr, @@ -458,21 +566,10 @@ impl Listener

{ } } } - - /// Poll [`DialerState`] to initiate requested dials. - fn poll_dialer(&mut self, cx: &mut Context<'_>) -> Poll { - let Self { - dialer_state, - endpoint_channel, - .. - } = self; - - dialer_state.poll(endpoint_channel, cx) - } } impl Stream for Listener

{ - type Item = TransportEvent; + type Item = TransportEvent< as Transport>::ListenerUpgrade, Error>; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { loop { if let Some(event) = self.pending_event.take() { @@ -484,17 +581,18 @@ impl Stream for Listener

{ if let Poll::Ready(event) = self.poll_if_addr(cx) { return Poll::Ready(Some(event)); } - if let Poll::Ready(error) = self.poll_dialer(cx) { - self.close(Err(error)); - continue; - } - match self.new_connections_rx.poll_next_unpin(cx) { - Poll::Ready(Some(connection)) => { - let local_addr = socketaddr_to_multiaddr(connection.local_addr(), self.version); - let send_back_addr = - socketaddr_to_multiaddr(&connection.remote_addr(), self.version); + + match self.accept.poll_unpin(cx) { + Poll::Ready(Some(connecting)) => { + let endpoint = self.endpoint.clone(); + self.accept = async move { endpoint.accept().await }.boxed(); + + let local_addr = socketaddr_to_multiaddr(&self.socket_addr(), self.version); + let remote_addr = connecting.remote_address(); + let send_back_addr = socketaddr_to_multiaddr(&remote_addr, self.version); + let event = TransportEvent::Incoming { - upgrade: Connecting::new(connection, self.handshake_timeout), + upgrade: Connecting::new(connecting, self.handshake_timeout), local_addr, send_back_addr, listener_id: self.listener_id, @@ -502,7 +600,7 @@ impl Stream for Listener

{ return Poll::Ready(Some(event)); } Poll::Ready(None) => { - self.close(Err(Error::EndpointDriverCrashed)); + self.close(Ok(())); continue; } Poll::Pending => {} @@ -519,9 +617,6 @@ impl fmt::Debug for Listener

{ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Listener") .field("listener_id", &self.listener_id) - .field("endpoint_channel", &self.endpoint_channel) - .field("dialer_state", &self.dialer_state) - .field("new_connections_rx", &self.new_connections_rx) .field("handshake_timeout", &self.handshake_timeout) .field("is_closed", &self.is_closed) .field("pending_event", &self.pending_event) @@ -529,12 +624,6 @@ impl fmt::Debug for Listener

{ } } -impl Drop for Listener

{ - fn drop(&mut self) { - self.endpoint_channel.send_on_drop(ToEndpoint::Decoupled); - } -} - #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub(crate) enum ProtocolVersion { V1, // i.e. RFC9000 @@ -591,15 +680,18 @@ fn ip_to_listenaddr( fn multiaddr_to_socketaddr( addr: &Multiaddr, support_draft_29: bool, -) -> Option<(SocketAddr, ProtocolVersion)> { +) -> Option<(SocketAddr, ProtocolVersion, Option)> { let mut iter = addr.iter(); let proto1 = iter.next()?; let proto2 = iter.next()?; let proto3 = iter.next()?; + let mut peer_id = None; for proto in iter { match proto { - Protocol::P2p(_) => {} // Ignore a `/p2p/...` prefix of possibly outer protocols, if present. + Protocol::P2p(id) => { + peer_id = Some(id); + } _ => return None, } } @@ -611,10 +703,10 @@ fn multiaddr_to_socketaddr( match (proto1, proto2) { (Protocol::Ip4(ip), Protocol::Udp(port)) => { - Some((SocketAddr::new(ip.into(), port), version)) + Some((SocketAddr::new(ip.into(), port), version, peer_id)) } (Protocol::Ip6(ip), Protocol::Udp(port)) => { - Some((SocketAddr::new(ip.into(), port), version)) + Some((SocketAddr::new(ip.into(), port), version, peer_id)) } _ => None, } @@ -624,17 +716,14 @@ fn multiaddr_to_socketaddr( fn is_quic_addr(addr: &Multiaddr, support_draft_29: bool) -> bool { use Protocol::*; let mut iter = addr.iter(); - let first = match iter.next() { - Some(p) => p, - None => return false, + let Some(first) = iter.next() else { + return false; }; - let second = match iter.next() { - Some(p) => p, - None => return false, + let Some(second) = iter.next() else { + return false; }; - let third = match iter.next() { - Some(p) => p, - None => return false, + let Some(third) = iter.next() else { + return false; }; let fourth = iter.next(); let fifth = iter.next(); @@ -647,7 +736,7 @@ fn is_quic_addr(addr: &Multiaddr, support_draft_29: bool) -> bool { matches!(third, QuicV1) } && matches!(fourth, Some(P2p(_)) | None) - && matches!(fifth, None) + && fifth.is_none() } /// Turns an IP address and 
port into the corresponding QUIC multiaddr. @@ -664,9 +753,8 @@ fn socketaddr_to_multiaddr(socket_addr: &SocketAddr, version: ProtocolVersion) - #[cfg(test)] #[cfg(any(feature = "async-std", feature = "tokio"))] -mod test { +mod tests { use futures::future::poll_fn; - use futures_timer::Delay; use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; use super::*; @@ -688,7 +776,8 @@ mod test { ), Some(( SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 12345,), - ProtocolVersion::V1 + ProtocolVersion::V1, + None )) ); assert_eq!( @@ -700,7 +789,8 @@ mod test { ), Some(( SocketAddr::new(IpAddr::V4(Ipv4Addr::new(255, 255, 255, 255)), 8080,), - ProtocolVersion::V1 + ProtocolVersion::V1, + None )) ); assert_eq!( @@ -712,7 +802,7 @@ mod test { Some((SocketAddr::new( IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 55148, - ), ProtocolVersion::V1)) + ), ProtocolVersion::V1, Some("12D3KooW9xk7Zp1gejwfwNpfm6L9zH5NL4Bx5rm94LRYJJHJuARZ".parse().unwrap()))) ); assert_eq!( multiaddr_to_socketaddr( @@ -721,7 +811,8 @@ mod test { ), Some(( SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)), 12345,), - ProtocolVersion::V1 + ProtocolVersion::V1, + None )) ); assert_eq!( @@ -738,7 +829,8 @@ mod test { )), 8080, ), - ProtocolVersion::V1 + ProtocolVersion::V1, + None )) ); @@ -754,7 +846,8 @@ mod test { ), Some(( SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 1234,), - ProtocolVersion::Draft29 + ProtocolVersion::Draft29, + None )) ); } @@ -772,19 +865,11 @@ mod test { // Run test twice to check that there is no unexpected behaviour if `Transport.listener` // is temporarily empty. for _ in 0..2 { - let id = transport - .listen_on("/ip4/0.0.0.0/udp/0/quic-v1".parse().unwrap()) + let id = ListenerId::next(); + transport + .listen_on(id, "/ip4/0.0.0.0/udp/0/quic-v1".parse().unwrap()) .unwrap(); - // Copy channel to use it later. 
- let mut channel = transport - .listeners - .iter() - .next() - .unwrap() - .endpoint_channel - .clone(); - match poll_fn(|cx| Pin::new(&mut transport).as_mut().poll(cx)).await { TransportEvent::NewAddress { listener_id, @@ -817,14 +902,6 @@ mod test { .now_or_never() .is_none()); assert!(transport.listeners.is_empty()); - - // Check that the [`Driver`] has shut down. - Delay::new(Duration::from_millis(10)).await; - poll_fn(|cx| { - assert!(channel.try_send(ToEndpoint::Decoupled, cx).is_err()); - Poll::Ready(()) - }) - .await; } } @@ -839,44 +916,41 @@ mod test { .dial("/ip4/123.45.67.8/udp/1234/quic-v1".parse().unwrap()) .unwrap(); - // Expect a dialer and its background task to exist. - let mut channel = transport - .dialer - .get(&SocketFamily::Ipv4) - .unwrap() - .endpoint_channel - .clone(); + assert!(transport.dialer.contains_key(&SocketFamily::Ipv4)); assert!(!transport.dialer.contains_key(&SocketFamily::Ipv6)); - // Send dummy dial to check that the endpoint driver is running. - poll_fn(|cx| { - let (tx, _) = oneshot::channel(); - let _ = channel - .try_send( - ToEndpoint::Dial { - addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0), - result: tx, - version: ProtocolVersion::V1, - }, - cx, - ) - .unwrap(); - Poll::Ready(()) - }) - .await; - // Start listening so that the dialer and driver are dropped. - let _ = transport - .listen_on("/ip4/0.0.0.0/udp/0/quic-v1".parse().unwrap()) + transport + .listen_on( + ListenerId::next(), + "/ip4/0.0.0.0/udp/0/quic-v1".parse().unwrap(), + ) .unwrap(); assert!(!transport.dialer.contains_key(&SocketFamily::Ipv4)); + } - // Check that the [`Driver`] has shut down. 
- Delay::new(Duration::from_millis(10)).await; - poll_fn(|cx| { - assert!(channel.try_send(ToEndpoint::Decoupled, cx).is_err()); - Poll::Ready(()) - }) - .await; + #[cfg(feature = "tokio")] + #[tokio::test] + async fn test_listens_ipv4_ipv6_separately() { + let keypair = libp2p_identity::Keypair::generate_ed25519(); + let config = Config::new(&keypair); + let mut transport = crate::tokio::Transport::new(config); + let port = { + let socket = UdpSocket::bind("127.0.0.1:0").unwrap(); + socket.local_addr().unwrap().port() + }; + + transport + .listen_on( + ListenerId::next(), + format!("/ip4/0.0.0.0/udp/{port}/quic-v1").parse().unwrap(), + ) + .unwrap(); + transport + .listen_on( + ListenerId::next(), + format!("/ip6/::/udp/{port}/quic-v1").parse().unwrap(), + ) + .unwrap(); } } diff --git a/transports/quic/tests/smoke.rs b/transports/quic/tests/smoke.rs index a576d3c9ef5..36fb72a5ee7 100644 --- a/transports/quic/tests/smoke.rs +++ b/transports/quic/tests/smoke.rs @@ -26,6 +26,7 @@ use std::{ pin::Pin, sync::{Arc, Mutex}, }; +use tracing_subscriber::EnvFilter; #[cfg(feature = "tokio")] #[tokio::test] @@ -39,28 +40,12 @@ async fn async_std_smoke() { smoke::().await } -#[cfg(feature = "async-std")] -#[async_std::test] -async fn dial_failure() { - let _ = env_logger::try_init(); - let mut a = create_default_transport::().1; - let mut b = create_default_transport::().1; - - let addr = start_listening(&mut a, "/ip4/127.0.0.1/udp/0/quic-v1").await; - drop(a); // stop a so b can never reach it - - match dial(&mut b, addr).await { - Ok(_) => panic!("Expected dial to fail"), - Err(error) => { - assert_eq!("Handshake with the remote timed out.", error.to_string()) - } - }; -} - #[cfg(feature = "tokio")] #[tokio::test] async fn endpoint_reuse() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let (_, mut a_transport) = create_default_transport::(); let (_, mut b_transport) = 
create_default_transport::(); @@ -85,7 +70,9 @@ async fn endpoint_reuse() { #[cfg(feature = "async-std")] #[async_std::test] async fn ipv4_dial_ipv6() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let (a_peer_id, mut a_transport) = create_default_transport::(); let (b_peer_id, mut b_transport) = create_default_transport::(); @@ -103,7 +90,9 @@ async fn ipv4_dial_ipv6() { #[cfg(feature = "async-std")] #[async_std::test] async fn wrapped_with_delay() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); struct DialDelay(Arc>>); @@ -115,9 +104,10 @@ async fn wrapped_with_delay() { fn listen_on( &mut self, + id: ListenerId, addr: Multiaddr, - ) -> Result> { - self.0.lock().unwrap().listen_on(addr) + ) -> Result<(), TransportError> { + self.0.lock().unwrap().listen_on(id, addr) } fn remove_listener(&mut self, id: ListenerId) -> bool { @@ -214,7 +204,7 @@ async fn wrong_peerid() { let (b_peer_id, mut b_transport) = create_default_transport::(); let a_addr = start_listening(&mut a_transport, "/ip6/::1/udp/0/quic-v1").await; - let a_addr_random_peer = a_addr.with(Protocol::P2p(PeerId::random().into())); + let a_addr_random_peer = a_addr.with(Protocol::P2p(PeerId::random())); let ((a_connected, _, _), (b_connected, _)) = connect(&mut a_transport, &mut b_transport, a_addr_random_peer).await; @@ -270,7 +260,9 @@ async fn tcp_and_quic() { #[cfg(feature = "async-std")] #[test] fn concurrent_connections_and_streams_async_std() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); quickcheck::QuickCheck::new() .min_tests_passed(1) @@ -281,7 +273,9 @@ fn concurrent_connections_and_streams_async_std() { #[cfg(feature = "tokio")] #[test] fn concurrent_connections_and_streams_tokio() { - let _ = env_logger::try_init(); + let _ = 
tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let rt = tokio::runtime::Runtime::new().unwrap(); let _guard = rt.enter(); @@ -298,7 +292,9 @@ async fn draft_29_support() { use futures::{future::poll_fn, select}; use libp2p_core::transport::TransportError; - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let (_, mut a_transport) = create_transport::(|cfg| cfg.support_draft_29 = true); @@ -327,7 +323,10 @@ async fn draft_29_support() { let (_, mut d_transport) = create_transport::(|cfg| cfg.support_draft_29 = false); assert!(matches!( - d_transport.listen_on("/ip4/127.0.0.1/udp/0/quic".parse().unwrap()), + d_transport.listen_on( + ListenerId::next(), + "/ip4/127.0.0.1/udp/0/quic".parse().unwrap() + ), Err(TransportError::MultiaddrNotSupported(_)) )); let d_quic_v1_addr = start_listening(&mut d_transport, "/ip4/127.0.0.1/udp/0/quic-v1").await; @@ -356,7 +355,9 @@ async fn draft_29_support() { #[cfg(feature = "async-std")] #[async_std::test] async fn backpressure() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let max_stream_data = quic::Config::new(&generate_tls_keypair()).max_stream_data; let (mut stream_a, mut stream_b) = build_streams::().await; @@ -380,7 +381,9 @@ async fn backpressure() { #[cfg(feature = "async-std")] #[async_std::test] async fn read_after_peer_dropped_stream() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let (mut stream_a, mut stream_b) = build_streams::().await; let data = vec![0; 10]; @@ -400,7 +403,9 @@ async fn read_after_peer_dropped_stream() { #[async_std::test] #[should_panic] async fn write_after_peer_dropped_stream() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + 
.with_env_filter(EnvFilter::from_default_env()) + .try_init(); let (stream_a, mut stream_b) = build_streams::().await; drop(stream_a); futures_timer::Delay::new(Duration::from_millis(1)).await; @@ -410,8 +415,53 @@ async fn write_after_peer_dropped_stream() { stream_b.close().await.expect("Close failed."); } +/// - A listens on 0.0.0.0:0 +/// - B listens on 127.0.0.1:0 +/// - A dials B +/// - Source port of A at B is the A's listen port +#[cfg(feature = "tokio")] +#[tokio::test] +async fn test_local_listener_reuse() { + let (_, mut a_transport) = create_default_transport::(); + let (_, mut b_transport) = create_default_transport::(); + + a_transport + .listen_on( + ListenerId::next(), + "/ip4/0.0.0.0/udp/0/quic-v1".parse().unwrap(), + ) + .unwrap(); + + // wait until a listener reports a loopback address + let a_listen_addr = 'outer: loop { + let ev = a_transport.next().await.unwrap(); + let listen_addr = ev.into_new_address().unwrap(); + for proto in listen_addr.iter() { + if let Protocol::Ip4(ip4) = proto { + if ip4.is_loopback() { + break 'outer listen_addr; + } + } + } + }; + // If we do not poll until the end, `NewAddress` events may be `Ready` and `connect` function + // below will panic due to an unexpected event. + poll_fn(|cx| { + let mut pinned = Pin::new(&mut a_transport); + while pinned.as_mut().poll(cx).is_ready() {} + Poll::Ready(()) + }) + .await; + + let b_addr = start_listening(&mut b_transport, "/ip4/127.0.0.1/udp/0/quic-v1").await; + let (_, send_back_addr, _) = connect(&mut b_transport, &mut a_transport, b_addr).await.0; + assert_eq!(send_back_addr, a_listen_addr); +} + async fn smoke() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let (a_peer_id, mut a_transport) = create_default_transport::

(); let (b_peer_id, mut b_transport) = create_default_transport::

(); @@ -424,7 +474,7 @@ async fn smoke() { assert_eq!(b_connected, a_peer_id); } -async fn build_streams() -> (SubstreamBox, SubstreamBox) { +async fn build_streams() -> (SubstreamBox, SubstreamBox) { let (_, mut a_transport) = create_default_transport::

(); let (_, mut b_transport) = create_default_transport::

(); @@ -509,14 +559,16 @@ fn create_transport( } async fn start_listening(transport: &mut Boxed<(PeerId, StreamMuxerBox)>, addr: &str) -> Multiaddr { - transport.listen_on(addr.parse().unwrap()).unwrap(); + transport + .listen_on(ListenerId::next(), addr.parse().unwrap()) + .unwrap(); match transport.next().await { Some(TransportEvent::NewAddress { listen_addr, .. }) => listen_addr, e => panic!("{e:?}"), } } -fn prop( +fn prop( number_listeners: NonZeroU8, number_streams: NonZeroU8, ) -> quickcheck::TestResult { @@ -531,7 +583,11 @@ fn prop( let (listeners_tx, mut listeners_rx) = mpsc::channel(number_listeners); - log::info!("Creating {number_streams} streams on {number_listeners} connections"); + tracing::info!( + stream_count=%number_streams, + connection_count=%number_listeners, + "Creating streams on connections" + ); // Spawn the listener nodes. for _ in 0..number_listeners { @@ -593,19 +649,17 @@ fn prop( quickcheck::TestResult::passed() } -async fn answer_inbound_streams( +async fn answer_inbound_streams( mut connection: StreamMuxerBox, ) { loop { - let mut inbound_stream = match future::poll_fn(|cx| { + let Ok(mut inbound_stream) = future::poll_fn(|cx| { let _ = connection.poll_unpin(cx)?; - connection.poll_inbound_unpin(cx) }) .await - { - Ok(s) => s, - Err(_) => return, + else { + return; }; P::spawn(async move { @@ -628,7 +682,7 @@ async fn answer_inbound_streams( } } -async fn open_outbound_streams( +async fn open_outbound_streams( mut connection: StreamMuxerBox, number_streams: usize, completed_streams_tx: mpsc::Sender<()>, @@ -672,7 +726,10 @@ async fn open_outbound_streams( }); } - log::info!("Created {number_streams} streams"); + tracing::info!( + stream_count=%number_streams, + "Created streams" + ); while future::poll_fn(|cx| connection.poll_unpin(cx)) .await @@ -681,6 +738,7 @@ async fn open_outbound_streams( } /// Helper function for driving two transports until they established a connection. 
+#[allow(unknown_lints, clippy::needless_pass_by_ref_mut)] // False positive. async fn connect( listener: &mut Boxed<(PeerId, StreamMuxerBox)>, dialer: &mut Boxed<(PeerId, StreamMuxerBox)>, @@ -734,3 +792,22 @@ impl BlockOn for libp2p_quic::tokio::Provider { .unwrap() } } + +trait Spawn { + /// Run the given future in the background until it ends. + fn spawn(future: impl Future + Send + 'static); +} + +#[cfg(feature = "async-std")] +impl Spawn for libp2p_quic::async_std::Provider { + fn spawn(future: impl Future + Send + 'static) { + async_std::task::spawn(future); + } +} + +#[cfg(feature = "tokio")] +impl Spawn for libp2p_quic::tokio::Provider { + fn spawn(future: impl Future + Send + 'static) { + tokio::spawn(future); + } +} diff --git a/transports/quic/tests/stream_compliance.rs b/transports/quic/tests/stream_compliance.rs index ec4c3121819..0eff0584588 100644 --- a/transports/quic/tests/stream_compliance.rs +++ b/transports/quic/tests/stream_compliance.rs @@ -1,5 +1,6 @@ use futures::channel::oneshot; use futures::StreamExt; +use libp2p_core::transport::ListenerId; use libp2p_core::Transport; use libp2p_quic as quic; use std::time::Duration; @@ -23,7 +24,10 @@ async fn connected_peers() -> (quic::Connection, quic::Connection) { let mut listener = new_transport().boxed(); listener - .listen_on("/ip4/127.0.0.1/udp/0/quic-v1".parse().unwrap()) + .listen_on( + ListenerId::next(), + "/ip4/127.0.0.1/udp/0/quic-v1".parse().unwrap(), + ) .unwrap(); let listen_address = listener.next().await.unwrap().into_new_address().unwrap(); diff --git a/transports/tcp/CHANGELOG.md b/transports/tcp/CHANGELOG.md index eeaedc0706d..2bde64056cb 100644 --- a/transports/tcp/CHANGELOG.md +++ b/transports/tcp/CHANGELOG.md @@ -1,9 +1,20 @@ -## 0.40.0 - unreleased +## 0.41.0 + + +## 0.40.1 + +- Expose `async_io::TcpStream`. + See [PR 4683](https://github.com/libp2p/rust-libp2p/pull/4683). + +## 0.40.0 - Raise MSRV to 1.65. See [PR 3715]. +- Remove deprecated items. See [PR 3978]. 
+ [PR 3715]: https://github.com/libp2p/rust-libp2p/pull/3715 +[PR 3978]: https://github.com/libp2p/rust-libp2p/pull/3978 ## 0.39.0 diff --git a/transports/tcp/Cargo.toml b/transports/tcp/Cargo.toml index b093560310c..52e38e24b6e 100644 --- a/transports/tcp/Cargo.toml +++ b/transports/tcp/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-tcp" edition = "2021" rust-version = { workspace = true } description = "TCP/IP transport protocol for libp2p" -version = "0.40.0" +version = "0.41.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -11,16 +11,16 @@ keywords = ["peer-to-peer", "libp2p", "networking"] categories = ["network-programming", "asynchronous"] [dependencies] -async-io = { version = "1.13.0", optional = true } -futures = "0.3.28" +async-io = { version = "2.2.2", optional = true } +futures = "0.3.30" futures-timer = "3.0" -if-watch = "3.0.1" -libc = "0.2.142" +if-watch = "3.2.0" +libc = "0.2.151" libp2p-core = { workspace = true } libp2p-identity = { workspace = true } -log = "0.4.11" -socket2 = { version = "0.5.2", features = ["all"] } -tokio = { version = "1.28.0", default-features = false, features = ["net"], optional = true } +socket2 = { version = "0.5.5", features = ["all"] } +tokio = { version = "1.35.1", default-features = false, features = ["net"], optional = true } +tracing = "0.1.37" [features] tokio = ["dep:tokio", "if-watch/tokio"] @@ -28,12 +28,17 @@ async-io = ["dep:async-io", "if-watch/smol"] [dev-dependencies] async-std = { version = "1.6.5", features = ["attributes"] } -tokio = { version = "1.28.0", default-features = false, features = ["full"] } -env_logger = "0.10.0" +libp2p-identity = { workspace = true, features = ["rand"] } +tokio = { version = "1.35.1", default-features = false, features = ["full"] } +tracing-subscriber = { version = "0.3", features = ["env-filter"] } # Passing arguments to the docsrs builder in order to properly document cfg's. 
# More information: https://docs.rs/about/builds#cross-compiling [package.metadata.docs.rs] all-features = true + rustdoc-args = ["--cfg", "docsrs"] rustc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true diff --git a/transports/tcp/src/lib.rs b/transports/tcp/src/lib.rs index 78f6c3f4656..fbb7008aa5b 100644 --- a/transports/tcp/src/lib.rs +++ b/transports/tcp/src/lib.rs @@ -98,7 +98,7 @@ impl PortReuse { /// Has no effect if port reuse is disabled. fn register(&mut self, ip: IpAddr, port: Port) { if let PortReuse::Enabled { listen_addrs } = self { - log::trace!("Registering for port reuse: {}:{}", ip, port); + tracing::trace!(%ip, %port, "Registering for port reuse"); listen_addrs .write() .expect("`register()` and `unregister()` never panic while holding the lock") @@ -111,7 +111,7 @@ impl PortReuse { /// Has no effect if port reuse is disabled. fn unregister(&mut self, ip: IpAddr, port: Port) { if let PortReuse::Enabled { listen_addrs } = self { - log::trace!("Unregistering for port reuse: {}:{}", ip, port); + tracing::trace!(%ip, %port, "Unregistering for port reuse"); listen_addrs .write() .expect("`register()` and `unregister()` never panic while holding the lock") @@ -248,7 +248,7 @@ impl Config { /// let listen_addr2: Multiaddr = "/ip4/127.0.0.1/tcp/9002".parse().unwrap(); /// /// let mut tcp1 = libp2p_tcp::async_io::Transport::new(libp2p_tcp::Config::new().port_reuse(true)).boxed(); - /// tcp1.listen_on( listen_addr1.clone()).expect("listener"); + /// tcp1.listen_on(ListenerId::next(), listen_addr1.clone()).expect("listener"); /// match tcp1.select_next_some().await { /// TransportEvent::NewAddress { listen_addr, .. 
} => { /// println!("Listening on {:?}", listen_addr); @@ -259,7 +259,7 @@ impl Config { /// } /// /// let mut tcp2 = libp2p_tcp::async_io::Transport::new(libp2p_tcp::Config::new().port_reuse(true)).boxed(); - /// tcp2.listen_on( listen_addr2).expect("listener"); + /// tcp2.listen_on(ListenerId::next(), listen_addr2).expect("listener"); /// match tcp2.select_next_some().await { /// TransportEvent::NewAddress { listen_addr, .. } => { /// println!("Listening on {:?}", listen_addr); @@ -346,13 +346,12 @@ where } } - fn create_socket(&self, socket_addr: &SocketAddr) -> io::Result { - let domain = if socket_addr.is_ipv4() { - Domain::IPV4 - } else { - Domain::IPV6 - }; - let socket = Socket::new(domain, Type::STREAM, Some(socket2::Protocol::TCP))?; + fn create_socket(&self, socket_addr: SocketAddr) -> io::Result { + let socket = Socket::new( + Domain::for_address(socket_addr), + Type::STREAM, + Some(socket2::Protocol::TCP), + )?; if socket_addr.is_ipv6() { socket.set_only_v6(true)?; } @@ -375,7 +374,7 @@ where id: ListenerId, socket_addr: SocketAddr, ) -> io::Result> { - let socket = self.create_socket(&socket_addr)?; + let socket = self.create_socket(socket_addr)?; socket.bind(&socket_addr.into())?; socket.listen(self.config.backlog as _)?; socket.set_nonblocking(true)?; @@ -437,19 +436,19 @@ where type Dial = Pin> + Send>>; type ListenerUpgrade = Ready>; - fn listen_on(&mut self, addr: Multiaddr) -> Result> { - let socket_addr = if let Ok(sa) = multiaddr_to_socketaddr(addr.clone()) { - sa - } else { - return Err(TransportError::MultiaddrNotSupported(addr)); - }; - let id = ListenerId::new(); - log::debug!("listening on {}", socket_addr); + fn listen_on( + &mut self, + id: ListenerId, + addr: Multiaddr, + ) -> Result<(), TransportError> { + let socket_addr = multiaddr_to_socketaddr(addr.clone()) + .map_err(|_| TransportError::MultiaddrNotSupported(addr))?; + tracing::debug!("listening on {}", socket_addr); let listener = self .do_listen(id, socket_addr) 
.map_err(TransportError::Other)?; self.listeners.push(listener); - Ok(id) + Ok(()) } fn remove_listener(&mut self, id: ListenerId) -> bool { @@ -470,14 +469,14 @@ where } else { return Err(TransportError::MultiaddrNotSupported(addr)); }; - log::debug!("dialing {}", socket_addr); + tracing::debug!(address=%socket_addr, "dialing address"); let socket = self - .create_socket(&socket_addr) + .create_socket(socket_addr) .map_err(TransportError::Other)?; if let Some(addr) = self.port_reuse.local_dial_addr(&socket_addr.ip()) { - log::trace!("Binding dial socket to listen socket {}", addr); + tracing::trace!(address=%addr, "Binding dial socket to listen socket address"); socket.bind(&addr.into()).map_err(TransportError::Other)?; } @@ -536,6 +535,7 @@ where } /// Poll all listeners. + #[tracing::instrument(level = "trace", name = "Transport::poll", skip(self, cx))] fn poll( mut self: Pin<&mut Self>, cx: &mut Context<'_>, @@ -662,9 +662,8 @@ where /// Poll for a next If Event. fn poll_if_addr(&mut self, cx: &mut Context<'_>) -> Poll<::Item> { - let if_watcher = match self.if_watcher.as_mut() { - Some(if_watcher) => if_watcher, - None => return Poll::Pending, + let Some(if_watcher) = self.if_watcher.as_mut() else { + return Poll::Pending; }; let my_listen_addr_port = self.listen_addr.port(); @@ -675,7 +674,7 @@ where let ip = inet.addr(); if self.listen_addr.is_ipv4() == ip.is_ipv4() { let ma = ip_to_multiaddr(ip, my_listen_addr_port); - log::debug!("New listen address: {}", ma); + tracing::debug!(address=%ma, "New listen address"); self.port_reuse.register(ip, my_listen_addr_port); return Poll::Ready(TransportEvent::NewAddress { listener_id: self.listener_id, @@ -687,7 +686,7 @@ where let ip = inet.addr(); if self.listen_addr.is_ipv4() == ip.is_ipv4() { let ma = ip_to_multiaddr(ip, my_listen_addr_port); - log::debug!("Expired listen address: {}", ma); + tracing::debug!(address=%ma, "Expired listen address"); self.port_reuse.unregister(ip, my_listen_addr_port); return 
Poll::Ready(TransportEvent::AddressExpired { listener_id: self.listener_id, @@ -760,7 +759,11 @@ where let local_addr = ip_to_multiaddr(local_addr.ip(), local_addr.port()); let remote_addr = ip_to_multiaddr(remote_addr.ip(), remote_addr.port()); - log::debug!("Incoming connection from {} at {}", remote_addr, local_addr); + tracing::debug!( + remote_address=%remote_addr, + local_address=%local_addr, + "Incoming connection from remote at local" + ); return Poll::Ready(Some(TransportEvent::Incoming { listener_id: self.listener_id, @@ -837,20 +840,6 @@ fn is_tcp_addr(addr: &Multiaddr) -> bool { matches!(first, Ip4(_) | Ip6(_) | Dns(_) | Dns4(_) | Dns6(_)) && matches!(second, Tcp(_)) } -/// The configuration for a TCP/IP transport capability for libp2p. -#[deprecated(since = "0.37.0", note = "Use `Config` instead.")] -pub type GenTcpConfig = Config; - -/// The type of a [`Transport`](libp2p_core::Transport) using the `async-io` implementation. -#[cfg(feature = "async-io")] -#[deprecated(since = "0.37.0", note = "Use `async_io::Transport` instead.")] -pub type TcpTransport = Transport; - -/// The type of a [`Transport`](libp2p_core::Transport) using the `tokio` implementation. -#[cfg(feature = "tokio")] -#[deprecated(since = "0.37.0", note = "Use `tokio::Transport` instead.")] -pub type TokioTcpTransport = Transport; - #[cfg(test)] mod tests { use super::*; @@ -912,11 +901,13 @@ mod tests { #[test] fn communicating_between_dialer_and_listener() { - env_logger::try_init().ok(); + let _ = tracing_subscriber::fmt() + .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) + .try_init(); async fn listener(addr: Multiaddr, mut ready_tx: mpsc::Sender) { let mut tcp = Transport::::default().boxed(); - tcp.listen_on(addr).unwrap(); + tcp.listen_on(ListenerId::next(), addr).unwrap(); loop { match tcp.select_next_some().await { TransportEvent::NewAddress { listen_addr, .. 
} => { @@ -981,11 +972,13 @@ mod tests { #[test] fn wildcard_expansion() { - env_logger::try_init().ok(); + let _ = tracing_subscriber::fmt() + .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) + .try_init(); async fn listener(addr: Multiaddr, mut ready_tx: mpsc::Sender) { let mut tcp = Transport::::default().boxed(); - tcp.listen_on(addr).unwrap(); + tcp.listen_on(ListenerId::next(), addr).unwrap(); loop { match tcp.select_next_some().await { @@ -1050,7 +1043,9 @@ mod tests { #[test] fn port_reuse_dialing() { - env_logger::try_init().ok(); + let _ = tracing_subscriber::fmt() + .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) + .try_init(); async fn listener( addr: Multiaddr, @@ -1058,7 +1053,7 @@ mod tests { port_reuse_rx: oneshot::Receiver>, ) { let mut tcp = Transport::::new(Config::new()).boxed(); - tcp.listen_on(addr).unwrap(); + tcp.listen_on(ListenerId::next(), addr).unwrap(); loop { match tcp.select_next_some().await { TransportEvent::NewAddress { listen_addr, .. } => { @@ -1093,7 +1088,7 @@ mod tests { ) { let dest_addr = ready_rx.next().await.unwrap(); let mut tcp = Transport::::new(Config::new().port_reuse(true)); - tcp.listen_on(addr).unwrap(); + tcp.listen_on(ListenerId::next(), addr).unwrap(); match poll_fn(|cx| Pin::new(&mut tcp).poll(cx)).await { TransportEvent::NewAddress { .. } => { // Check that tcp and listener share the same port reuse SocketAddr @@ -1157,11 +1152,13 @@ mod tests { #[test] fn port_reuse_listening() { - env_logger::try_init().ok(); + let _ = tracing_subscriber::fmt() + .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) + .try_init(); async fn listen_twice(addr: Multiaddr) { let mut tcp = Transport::::new(Config::new().port_reuse(true)); - tcp.listen_on(addr).unwrap(); + tcp.listen_on(ListenerId::next(), addr).unwrap(); match poll_fn(|cx| Pin::new(&mut tcp).poll(cx)).await { TransportEvent::NewAddress { listen_addr: addr1, .. 
@@ -1176,7 +1173,7 @@ mod tests { assert_eq!(port_reuse_tcp, port_reuse_listener1); // Listen on the same address a second time. - tcp.listen_on(addr1.clone()).unwrap(); + tcp.listen_on(ListenerId::next(), addr1.clone()).unwrap(); match poll_fn(|cx| Pin::new(&mut tcp).poll(cx)).await { TransportEvent::NewAddress { listen_addr: addr2, .. @@ -1211,11 +1208,13 @@ mod tests { #[test] fn listen_port_0() { - env_logger::try_init().ok(); + let _ = tracing_subscriber::fmt() + .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) + .try_init(); async fn listen(addr: Multiaddr) -> Multiaddr { let mut tcp = Transport::::default().boxed(); - tcp.listen_on(addr).unwrap(); + tcp.listen_on(ListenerId::next(), addr).unwrap(); tcp.select_next_some() .await .into_new_address() @@ -1246,19 +1245,21 @@ mod tests { #[test] fn listen_invalid_addr() { - env_logger::try_init().ok(); + let _ = tracing_subscriber::fmt() + .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) + .try_init(); fn test(addr: Multiaddr) { #[cfg(feature = "async-io")] { let mut tcp = async_io::Transport::default(); - assert!(tcp.listen_on(addr.clone()).is_err()); + assert!(tcp.listen_on(ListenerId::next(), addr.clone()).is_err()); } #[cfg(feature = "tokio")] { let mut tcp = tokio::Transport::default(); - assert!(tcp.listen_on(addr).is_err()); + assert!(tcp.listen_on(ListenerId::next(), addr).is_err()); } } @@ -1291,7 +1292,7 @@ mod tests { let tcp_observed_addr = Multiaddr::empty() .with(Protocol::Ip4(observed_ip)) .with(Protocol::Tcp(1)) - .with(Protocol::P2p(PeerId::random().into())); + .with(Protocol::P2p(PeerId::random())); let translated = transport .address_translation(&tcp_listen_addr, &tcp_observed_addr) @@ -1316,12 +1317,14 @@ mod tests { #[test] fn test_remove_listener() { - env_logger::try_init().ok(); + let _ = tracing_subscriber::fmt() + .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) + .try_init(); async fn cycle_listeners() -> bool { let mut tcp = 
Transport::::default().boxed(); - let listener_id = tcp - .listen_on("/ip4/127.0.0.1/tcp/0".parse().unwrap()) + let listener_id = ListenerId::next(); + tcp.listen_on(listener_id, "/ip4/127.0.0.1/tcp/0".parse().unwrap()) .unwrap(); tcp.remove_listener(listener_id) } @@ -1340,4 +1343,42 @@ mod tests { assert!(rt.block_on(cycle_listeners::())); } } + + #[test] + fn test_listens_ipv4_ipv6_separately() { + fn test() { + let port = { + let listener = TcpListener::bind("127.0.0.1:0").unwrap(); + listener.local_addr().unwrap().port() + }; + let mut tcp = Transport::::default().boxed(); + let listener_id = ListenerId::next(); + tcp.listen_on( + listener_id, + format!("/ip4/0.0.0.0/tcp/{port}").parse().unwrap(), + ) + .unwrap(); + tcp.listen_on( + ListenerId::next(), + format!("/ip6/::/tcp/{port}").parse().unwrap(), + ) + .unwrap(); + } + #[cfg(feature = "async-io")] + { + async_std::task::block_on(async { + test::(); + }) + } + #[cfg(feature = "tokio")] + { + let rt = ::tokio::runtime::Builder::new_current_thread() + .enable_io() + .build() + .unwrap(); + rt.block_on(async { + test::(); + }); + } + } } diff --git a/transports/tcp/src/provider/async_io.rs b/transports/tcp/src/provider/async_io.rs index 590f109d3c3..fe0abe42d54 100644 --- a/transports/tcp/src/provider/async_io.rs +++ b/transports/tcp/src/provider/async_io.rs @@ -32,14 +32,15 @@ use std::task::{Context, Poll}; /// /// ```rust /// # use libp2p_tcp as tcp; -/// # use libp2p_core::Transport; +/// # use libp2p_core::{Transport, transport::ListenerId}; /// # use futures::future; /// # use std::pin::Pin; /// # /// # #[async_std::main] /// # async fn main() { /// let mut transport = tcp::async_io::Transport::new(tcp::Config::default()); -/// let id = transport.listen_on("/ip4/127.0.0.1/tcp/0".parse().unwrap()).unwrap(); +/// let id = ListenerId::next(); +/// transport.listen_on(id, "/ip4/127.0.0.1/tcp/0".parse().unwrap()).unwrap(); /// /// let addr = future::poll_fn(|cx| Pin::new(&mut 
transport).poll(cx)).await.into_new_address().unwrap(); /// @@ -53,7 +54,7 @@ pub type Transport = crate::Transport; pub enum Tcp {} impl Provider for Tcp { - type Stream = Async; + type Stream = TcpStream; type Listener = Async; type IfWatcher = if_watch::smol::IfWatcher; @@ -115,3 +116,5 @@ impl Provider for Tcp { })) } } + +pub type TcpStream = Async; diff --git a/transports/tcp/src/provider/tokio.rs b/transports/tcp/src/provider/tokio.rs index e4b75c8d814..b991c6bdae1 100644 --- a/transports/tcp/src/provider/tokio.rs +++ b/transports/tcp/src/provider/tokio.rs @@ -36,14 +36,14 @@ use std::task::{Context, Poll}; /// /// ```rust /// # use libp2p_tcp as tcp; -/// # use libp2p_core::Transport; +/// # use libp2p_core::{Transport, transport::ListenerId}; /// # use futures::future; /// # use std::pin::Pin; /// # /// # #[tokio::main] /// # async fn main() { /// let mut transport = tcp::tokio::Transport::new(tcp::Config::default()); -/// let id = transport.listen_on("/ip4/127.0.0.1/tcp/0".parse().unwrap()).unwrap(); +/// let id = transport.listen_on(ListenerId::next(), "/ip4/127.0.0.1/tcp/0".parse().unwrap()).unwrap(); /// /// let addr = future::poll_fn(|cx| Pin::new(&mut transport).poll(cx)).await.into_new_address().unwrap(); /// diff --git a/transports/tls/CHANGELOG.md b/transports/tls/CHANGELOG.md index 6e55e7f0864..83f72286559 100644 --- a/transports/tls/CHANGELOG.md +++ b/transports/tls/CHANGELOG.md @@ -1,4 +1,18 @@ -## 0.2.0 - unreleased +## 0.3.0 + +- Migrate to `{In,Out}boundConnectionUpgrade` traits. + See [PR 4695](https://github.com/libp2p/rust-libp2p/pull/4695). + +## 0.2.1 + +- Switch from webpki to rustls-webpki. + This is a part of the resolution of the [RUSTSEC-2023-0052]. + See [PR 4381]. + +[PR 4381]: https://github.com/libp2p/rust-libp2p/pull/4381 +[RUSTSEC-2023-0052]: https://rustsec.org/advisories/RUSTSEC-2023-0052.html + +## 0.2.0 - Raise MSRV to 1.65. See [PR 3715]. 
diff --git a/transports/tls/Cargo.toml b/transports/tls/Cargo.toml index f7cfc01444c..2f8cc0e5880 100644 --- a/transports/tls/Cargo.toml +++ b/transports/tls/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "libp2p-tls" -version = "0.2.0" +version = "0.3.0" edition = "2021" rust-version = { workspace = true } description = "TLS configuration based on libp2p TLS specs." @@ -9,20 +9,20 @@ license = "MIT" exclude = ["src/test_assets"] [dependencies] -futures = { version = "0.3.28", default-features = false } -futures-rustls = "0.22.2" +futures = { version = "0.3.30", default-features = false } +futures-rustls = "0.24.0" libp2p-core = { workspace = true } libp2p-identity = { workspace = true } -rcgen = "0.10.0" +rcgen = "0.11.3" ring = "0.16.20" -thiserror = "1.0.40" -webpki = "0.22.0" -x509-parser = "0.15.0" +thiserror = "1.0.51" +webpki = { version = "0.101.4", package = "rustls-webpki", features = ["std"] } +x509-parser = "0.15.1" yasna = "0.5.2" # Exposed dependencies. Breaking changes to these are breaking changes to us. [dependencies.rustls] -version = "0.20.7" +version = "0.21.9" default-features = false features = ["dangerous_configuration"] # Must enable this to allow for custom verification code. @@ -30,10 +30,10 @@ features = ["dangerous_configuration"] # Must enable this to allow for custom ve hex = "0.4.3" hex-literal = "0.4.1" libp2p-core = { workspace = true } -libp2p-identity = { workspace = true, features = ["ed25519", "rsa", "secp256k1", "ecdsa"] } -libp2p-swarm = { workspace = true } +libp2p-identity = { workspace = true, features = ["ed25519", "rsa", "secp256k1", "ecdsa", "rand"] } +libp2p-swarm = { workspace = true, features = ["tokio"] } libp2p-yamux = { workspace = true } -tokio = { version = "1.28.0", features = ["full"] } +tokio = { version = "1.35.1", features = ["full"] } # Passing arguments to the docsrs builder in order to properly document cfg's. 
# More information: https://docs.rs/about/builds#cross-compiling @@ -41,3 +41,6 @@ tokio = { version = "1.28.0", features = ["full"] } all-features = true rustdoc-args = ["--cfg", "docsrs"] rustc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true diff --git a/transports/tls/src/certificate.rs b/transports/tls/src/certificate.rs index 8531ade72fa..801ba3fe3ce 100644 --- a/transports/tls/src/certificate.rs +++ b/transports/tls/src/certificate.rs @@ -27,7 +27,7 @@ use libp2p_identity::PeerId; use x509_parser::{prelude::*, signature_algorithm::SignatureAlgorithm}; /// The libp2p Public Key Extension is a X.509 extension -/// with the Object Identier 1.3.6.1.4.1.53594.1.1, +/// with the Object Identifier 1.3.6.1.4.1.53594.1.1, /// allocated by IANA to the libp2p project at Protocol Labs. const P2P_EXT_OID: [u64; 9] = [1, 3, 6, 1, 4, 1, 53594, 1, 1]; @@ -286,7 +286,7 @@ impl P2pCertificate<'_> { // In particular, MD5 and SHA1 MUST NOT be used. RSA_PKCS1_SHA1 => return Err(webpki::Error::UnsupportedSignatureAlgorithm), ECDSA_SHA1_Legacy => return Err(webpki::Error::UnsupportedSignatureAlgorithm), - Unknown(_) => return Err(webpki::Error::UnsupportedSignatureAlgorithm), + _ => return Err(webpki::Error::UnsupportedSignatureAlgorithm), }; let spki = &self.certificate.tbs_certificate.subject_pki; let key = signature::UnparsedPublicKey::new( @@ -374,7 +374,7 @@ impl P2pCertificate<'_> { } if signature_algorithm.algorithm == OID_PKCS1_RSASSAPSS { // According to https://datatracker.ietf.org/doc/html/rfc4055#section-3.1: - // Inside of params there shuld be a sequence of: + // Inside of params there should be a sequence of: // - Hash Algorithm // - Mask Algorithm // - Salt Length diff --git a/transports/tls/src/lib.rs b/transports/tls/src/lib.rs index 00a4878856d..1edd83e9807 100644 --- a/transports/tls/src/lib.rs +++ b/transports/tls/src/lib.rs @@ -54,7 +54,7 @@ pub fn make_client_config( .with_custom_certificate_verifier(Arc::new( 
verifier::Libp2pCertificateVerifier::with_remote_peer_id(remote_peer_id), )) - .with_single_cert(vec![certificate], private_key) + .with_client_auth_cert(vec![certificate], private_key) .expect("Client cert key DER is valid; qed"); crypto.alpn_protocols = vec![P2P_ALPN.to_vec()]; diff --git a/transports/tls/src/upgrade.rs b/transports/tls/src/upgrade.rs index bf64ce61505..84510b6bab0 100644 --- a/transports/tls/src/upgrade.rs +++ b/transports/tls/src/upgrade.rs @@ -24,7 +24,8 @@ use futures::future::BoxFuture; use futures::AsyncWrite; use futures::{AsyncRead, FutureExt}; use futures_rustls::TlsStream; -use libp2p_core::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}; +use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}; +use libp2p_core::UpgradeInfo; use libp2p_identity as identity; use libp2p_identity::PeerId; use rustls::{CommonState, ServerName}; @@ -67,7 +68,7 @@ impl UpgradeInfo for Config { } } -impl InboundUpgrade for Config +impl InboundConnectionUpgrade for Config where C: AsyncRead + AsyncWrite + Send + Unpin + 'static, { @@ -90,7 +91,7 @@ where } } -impl OutboundUpgrade for Config +impl OutboundConnectionUpgrade for Config where C: AsyncRead + AsyncWrite + Send + Unpin + 'static, { @@ -120,12 +121,8 @@ where fn extract_single_certificate( state: &CommonState, ) -> Result, certificate::ParseError> { - let cert = match state - .peer_certificates() - .expect("config enforces presence of certificates") - { - [single] => single, - _ => panic!("config enforces exactly one certificate"), + let Some([cert]) = state.peer_certificates() else { + panic!("config enforces exactly one certificate"); }; certificate::parse(cert) diff --git a/transports/tls/src/verifier.rs b/transports/tls/src/verifier.rs index a9d9aecaa65..01fdb8fdf11 100644 --- a/transports/tls/src/verifier.rs +++ b/transports/tls/src/verifier.rs @@ -30,11 +30,11 @@ use rustls::{ TLS13_AES_128_GCM_SHA256, TLS13_AES_256_GCM_SHA384, TLS13_CHACHA20_POLY1305_SHA256, }, 
client::{HandshakeSignatureValid, ServerCertVerified, ServerCertVerifier}, - internal::msgs::handshake::DigitallySignedStruct, server::{ClientCertVerified, ClientCertVerifier}, - Certificate, DistinguishedNames, SignatureScheme, SupportedCipherSuite, - SupportedProtocolVersion, + Certificate, CertificateError, DigitallySignedStruct, DistinguishedName, SignatureScheme, + SupportedCipherSuite, SupportedProtocolVersion, }; +use std::sync::Arc; /// The protocol versions supported by this verifier. /// @@ -118,8 +118,8 @@ impl ServerCertVerifier for Libp2pCertificateVerifier { // the certificate matches the peer ID they intended to connect to, // and MUST abort the connection if there is a mismatch. if remote_peer_id != peer_id { - return Err(rustls::Error::PeerMisbehavedError( - "Wrong peer ID in p2p extension".to_string(), + return Err(rustls::Error::InvalidCertificate( + CertificateError::ApplicationVerificationFailure, )); } } @@ -162,8 +162,8 @@ impl ClientCertVerifier for Libp2pCertificateVerifier { true } - fn client_auth_root_subjects(&self) -> Option { - Some(vec![]) + fn client_auth_root_subjects(&self) -> &[DistinguishedName] { + &[] } fn verify_client_cert( @@ -236,8 +236,8 @@ impl From for rustls::Error { fn from(certificate::ParseError(e): certificate::ParseError) -> Self { use webpki::Error::*; match e { - BadDer => rustls::Error::InvalidCertificateEncoding, - e => rustls::Error::InvalidCertificateData(format!("invalid peer certificate: {e}")), + BadDer => rustls::Error::InvalidCertificate(CertificateError::BadEncoding), + e => rustls::Error::InvalidCertificate(CertificateError::Other(Arc::new(e))), } } } @@ -245,11 +245,10 @@ impl From for rustls::Error { fn from(certificate::VerificationError(e): certificate::VerificationError) -> Self { use webpki::Error::*; match e { - InvalidSignatureForPublicKey => rustls::Error::InvalidCertificateSignature, - UnsupportedSignatureAlgorithm | UnsupportedSignatureAlgorithmForPublicKey => { - 
rustls::Error::InvalidCertificateSignatureType + InvalidSignatureForPublicKey => { + rustls::Error::InvalidCertificate(CertificateError::BadSignature) } - e => rustls::Error::InvalidCertificateData(format!("invalid peer certificate: {e}")), + other => rustls::Error::InvalidCertificate(CertificateError::Other(Arc::new(other))), } } } diff --git a/transports/tls/tests/smoke.rs b/transports/tls/tests/smoke.rs index 17aa959c4b2..d488ae7846a 100644 --- a/transports/tls/tests/smoke.rs +++ b/transports/tls/tests/smoke.rs @@ -3,7 +3,8 @@ use libp2p_core::multiaddr::Protocol; use libp2p_core::transport::MemoryTransport; use libp2p_core::upgrade::Version; use libp2p_core::Transport; -use libp2p_swarm::{keep_alive, Swarm, SwarmBuilder, SwarmEvent}; +use libp2p_swarm::{dummy, Config, Swarm, SwarmEvent}; +use std::time::Duration; #[tokio::test] async fn can_establish_connection() { @@ -55,7 +56,7 @@ async fn can_establish_connection() { assert_eq!(&outbound_peer_id, swarm1.local_peer_id()); } -fn make_swarm() -> Swarm { +fn make_swarm() -> Swarm { let identity = libp2p_identity::Keypair::generate_ed25519(); let transport = MemoryTransport::default() @@ -64,10 +65,10 @@ fn make_swarm() -> Swarm { .multiplex(libp2p_yamux::Config::default()) .boxed(); - SwarmBuilder::without_executor( + Swarm::new( transport, - keep_alive::Behaviour, + dummy::Behaviour, identity.public().to_peer_id(), + Config::with_tokio_executor().with_idle_connection_timeout(Duration::from_secs(60)), ) - .build() } diff --git a/transports/uds/CHANGELOG.md b/transports/uds/CHANGELOG.md index 72858840fd1..aad61d21547 100644 --- a/transports/uds/CHANGELOG.md +++ b/transports/uds/CHANGELOG.md @@ -1,4 +1,7 @@ -## 0.39.0 - unreleased +## 0.40.0 + + +## 0.39.0 - Raise MSRV to 1.65. See [PR 3715]. 
diff --git a/transports/uds/Cargo.toml b/transports/uds/Cargo.toml index ae5eb84e19e..4efb2e45be4 100644 --- a/transports/uds/Cargo.toml +++ b/transports/uds/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-uds" edition = "2021" rust-version = { workspace = true } description = "Unix domain sockets transport for libp2p" -version = "0.39.0" +version = "0.40.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -13,12 +13,12 @@ categories = ["network-programming", "asynchronous"] [dependencies] async-std = { version = "1.6.2", optional = true } libp2p-core = { workspace = true } -log = "0.4.1" -futures = "0.3.28" -tokio = { version = "1.28", default-features = false, features = ["net"], optional = true } +futures = "0.3.30" +tokio = { version = "1.35", default-features = false, features = ["net"], optional = true } +tracing = "0.1.37" [dev-dependencies] -tempfile = "3.5" +tempfile = "3.8" # Passing arguments to the docsrs builder in order to properly document cfg's. # More information: https://docs.rs/about/builds#cross-compiling @@ -26,3 +26,6 @@ tempfile = "3.5" all-features = true rustdoc-args = ["--cfg", "docsrs"] rustc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true diff --git a/transports/uds/src/lib.rs b/transports/uds/src/lib.rs index 5f3f9ab7265..075cbadb80a 100644 --- a/transports/uds/src/lib.rs +++ b/transports/uds/src/lib.rs @@ -49,7 +49,6 @@ use libp2p_core::{ transport::{TransportError, TransportEvent}, Transport, }; -use log::debug; use std::collections::VecDeque; use std::pin::Pin; use std::task::{Context, Poll}; @@ -93,17 +92,18 @@ macro_rules! 
codegen { fn listen_on( &mut self, + id: ListenerId, addr: Multiaddr, - ) -> Result> { + ) -> Result<(), TransportError> { if let Ok(path) = multiaddr_to_path(&addr) { - let id = ListenerId::new(); + #[allow(clippy::redundant_closure_call)] let listener = $build_listener(path) .map_err(Err) .map_ok(move |listener| { stream::once({ let addr = addr.clone(); async move { - debug!("Now listening on {}", addr); + tracing::debug!(address=%addr, "Now listening on address"); Ok(TransportEvent::NewAddress { listener_id: id, listen_addr: addr, @@ -117,7 +117,7 @@ macro_rules! codegen { async move { let event = match listener.accept().await { Ok((stream, _)) => { - debug!("incoming connection on {}", addr); + tracing::debug!(address=%addr, "incoming connection on address"); TransportEvent::Incoming { upgrade: future::ok(stream), local_addr: addr.clone(), @@ -138,7 +138,7 @@ macro_rules! codegen { .try_flatten_stream() .boxed(); self.listeners.push_back((id, listener)); - Ok(id) + Ok(()) } else { Err(TransportError::MultiaddrNotSupported(addr)) } @@ -162,7 +162,7 @@ macro_rules! codegen { fn dial(&mut self, addr: Multiaddr) -> Result> { // TODO: Should we dial at all? 
if let Ok(path) = multiaddr_to_path(&addr) { - debug!("Dialing {}", addr); + tracing::debug!(address=%addr, "Dialing address"); Ok(async move { <$unix_stream>::connect(&path).await }.boxed()) } else { Err(TransportError::MultiaddrNotSupported(addr)) @@ -260,6 +260,7 @@ mod tests { use futures::{channel::oneshot, prelude::*}; use libp2p_core::{ multiaddr::{Multiaddr, Protocol}, + transport::ListenerId, Transport, }; use std::{self, borrow::Cow, path::Path}; @@ -292,7 +293,7 @@ mod tests { async_std::task::spawn(async move { let mut transport = UdsConfig::new().boxed(); - transport.listen_on(addr).unwrap(); + transport.listen_on(ListenerId::next(), addr).unwrap(); let listen_addr = transport .select_next_some() @@ -328,7 +329,7 @@ mod tests { let mut uds = UdsConfig::new(); let addr = "/unix//foo/bar".parse::().unwrap(); - assert!(uds.listen_on(addr).is_err()); + assert!(uds.listen_on(ListenerId::next(), addr).is_err()); } #[test] diff --git a/transports/wasm-ext/CHANGELOG.md b/transports/wasm-ext/CHANGELOG.md deleted file mode 100644 index 63fa731975e..00000000000 --- a/transports/wasm-ext/CHANGELOG.md +++ /dev/null @@ -1,118 +0,0 @@ -## 0.40.0 - unreleased - -- Raise MSRV to 1.65. - See [PR 3715]. - -[PR 3715]: https://github.com/libp2p/rust-libp2p/pull/3715 - -## 0.39.0 - -- Update to `libp2p-core` `v0.39.0`. - -## 0.38.0 - -- Update to `libp2p-core` `v0.38.0`. - -- Update `rust-version` to reflect the actual MSRV: 1.60.0. See [PR 3090]. - -[PR 3090]: https://github.com/libp2p/rust-libp2p/pull/3090 - -## 0.37.0 - -- Update to `libp2p-core` `v0.37.0`. - -## 0.36.0 - -- Update to `libp2p-core` `v0.36.0`. - -## 0.35.0 - -- Update to `libp2p-core` `v0.35.0`. - -## 0.34.0 - -- Update to `libp2p-core` `v0.34.0`. -- Add `Transport::poll` and `Transport::remove_listener` and remove `Transport::Listener` - for `ExtTransport`. Drive the `Listen` streams within `ExtTransport`. See [PR 2652]. 
- -[PR 2652]: https://github.com/libp2p/rust-libp2p/pull/2652 - -## 0.33.0 - -- Update to `libp2p-core` `v0.33.0`. - -## 0.32.0 [2022-02-22] - -- Update to `libp2p-core` `v0.32.0`. - -## 0.31.0 [2022-01-27] - -- Update dependencies. - -- Migrate to Rust edition 2021 (see [PR 2339]). - -[PR 2339]: https://github.com/libp2p/rust-libp2p/pull/2339 - -## 0.30.0 [2021-11-01] - -- Make default features of `libp2p-core` optional. - [PR 2181](https://github.com/libp2p/rust-libp2p/pull/2181) - -- Update dependencies. - -## 0.29.0 [2021-07-12] - -- Update dependencies. - -## 0.28.2 [2021-04-27] - -- Support dialing `Multiaddr` with `/p2p` protocol [PR - 2058](https://github.com/libp2p/rust-libp2p/pull/2058). - -## 0.28.1 [2021-04-01] - -- Require at least js-sys v0.3.50 [PR - 2023](https://github.com/libp2p/rust-libp2p/pull/2023). - -## 0.28.0 [2021-03-17] - -- Update `libp2p-core`. - -## 0.27.0 [2021-01-12] - -- Update dependencies. - -## 0.26.0 [2020-12-17] - -- Update `libp2p-core`. - -## 0.25.0 [2020-11-25] - -- Update `libp2p-core`. - -## 0.24.0 [2020-11-09] - -- Fix the WebSocket implementation parsing `x-parity-ws` multiaddresses as `x-parity-wss`. -- Update dependencies. - -## 0.23.0 [2020-10-16] - -- Update `libp2p-core` dependency. - -## 0.22.0 [2020-09-09] - -- Update `libp2p-core` dependency. - -## 0.21.0 [2020-08-18] - -- Update `libp2p-core` dependency. - -## 0.20.1 [2020-07-06] - -- Improve the code quality of the `websockets.js` binding with the browser's `WebSocket` API. - -## 0.20.0 [2020-07-01] - -- Updated dependencies. -- Support `/dns` in the websocket implementation - ([PR 1626](https://github.com/libp2p/rust-libp2p/pull/1626)) diff --git a/transports/wasm-ext/src/lib.rs b/transports/wasm-ext/src/lib.rs deleted file mode 100644 index 164cbff45c3..00000000000 --- a/transports/wasm-ext/src/lib.rs +++ /dev/null @@ -1,652 +0,0 @@ -// Copyright 2019 Parity Technologies (UK) Ltd. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - -//! Implementation of the libp2p `Transport` trait for external transports. -//! -//! This `Transport` is used in the context of WASM to allow delegating the transport mechanism -//! to the code that uses rust-libp2p, as opposed to inside of rust-libp2p itself. -//! -//! > **Note**: This only allows transports that produce a raw stream with the remote. You -//! > couldn't, for example, pass an implementation QUIC. -//! -//! # Usage -//! -//! Call `new()` with a JavaScript object that implements the interface described in the `ffi` -//! module. -//! 
- -#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] - -use futures::{future::Ready, prelude::*, ready, stream::SelectAll}; -use libp2p_core::{ - connection::Endpoint, - transport::{ListenerId, TransportError, TransportEvent}, - Multiaddr, Transport, -}; -use parity_send_wrapper::SendWrapper; -use std::{collections::VecDeque, error, fmt, io, mem, pin::Pin, task::Context, task::Poll}; -use wasm_bindgen::{prelude::*, JsCast}; -use wasm_bindgen_futures::JsFuture; - -/// Contains the definition that one must match on the JavaScript side. -pub mod ffi { - use wasm_bindgen::prelude::*; - - #[wasm_bindgen] - extern "C" { - /// Type of the object that allows opening connections. - pub type Transport; - /// Type of the object that represents an open connection with a remote. - pub type Connection; - /// Type of the object that represents an event generated by listening. - pub type ListenEvent; - /// Type of the object that represents an event containing a new connection with a remote. - pub type ConnectionEvent; - - /// Start attempting to dial the given multiaddress. - /// - /// The returned `Promise` must yield a [`Connection`] on success. - /// - /// If the multiaddress is not supported, you should return an instance of `Error` whose - /// `name` property has been set to the string `"NotSupportedError"`. - #[wasm_bindgen(method, catch)] - pub fn dial( - this: &Transport, - multiaddr: &str, - _role_override: bool, - ) -> Result; - - /// Start listening on the given multiaddress. - /// - /// The returned `Iterator` must yield `Promise`s to [`ListenEvent`] events. - /// - /// If the multiaddress is not supported, you should return an instance of `Error` whose - /// `name` property has been set to the string `"NotSupportedError"`. - #[wasm_bindgen(method, catch)] - pub fn listen_on(this: &Transport, multiaddr: &str) -> Result; - - /// Returns an iterator of JavaScript `Promise`s that resolve to `ArrayBuffer` objects - /// (or resolve to null, see below). 
These `ArrayBuffer` objects contain the data that the - /// remote has sent to us. If the remote closes the connection, the iterator must produce - /// a `Promise` that resolves to `null`. - #[wasm_bindgen(method, getter)] - pub fn read(this: &Connection) -> js_sys::Iterator; - - /// Writes data to the connection. Returns a `Promise` that resolves when the connection is - /// ready for writing again. - /// - /// If the `Promise` produces an error, the writing side of the connection is considered - /// unrecoverable and the connection should be closed as soon as possible. - /// - /// Guaranteed to only be called after the previous write promise has resolved. - #[wasm_bindgen(method, catch)] - pub fn write(this: &Connection, data: &[u8]) -> Result; - - /// Shuts down the writing side of the connection. After this has been called, the `write` - /// method will no longer be called. - #[wasm_bindgen(method, catch)] - pub fn shutdown(this: &Connection) -> Result<(), JsValue>; - - /// Closes the connection. No other method will be called on this connection anymore. - #[wasm_bindgen(method)] - pub fn close(this: &Connection); - - /// List of addresses we have started listening on. Must be an array of strings of - /// multiaddrs. - #[wasm_bindgen(method, getter)] - pub fn new_addrs(this: &ListenEvent) -> Option>; - - /// List of addresses that have expired. Must be an array of strings of multiaddrs. - #[wasm_bindgen(method, getter)] - pub fn expired_addrs(this: &ListenEvent) -> Option>; - - /// List of [`ConnectionEvent`] object that has been received. - #[wasm_bindgen(method, getter)] - pub fn new_connections(this: &ListenEvent) -> Option>; - - /// Promise to the next event that the listener will generate. - #[wasm_bindgen(method, getter)] - pub fn next_event(this: &ListenEvent) -> JsValue; - - /// The [`Connection`] object for communication with the remote. 
- #[wasm_bindgen(method, getter)] - pub fn connection(this: &ConnectionEvent) -> Connection; - - /// The address we observe for the remote connection. - #[wasm_bindgen(method, getter)] - pub fn observed_addr(this: &ConnectionEvent) -> String; - - /// The address we are listening on, that received the remote connection. - #[wasm_bindgen(method, getter)] - pub fn local_addr(this: &ConnectionEvent) -> String; - } - - #[cfg(feature = "websocket")] - #[wasm_bindgen(module = "/src/websockets.js")] - extern "C" { - /// Returns a `Transport` implemented using websockets. - pub fn websocket_transport() -> Transport; - } -} - -/// Implementation of `Transport` whose implementation is handled by some FFI. -pub struct ExtTransport { - inner: SendWrapper, - listeners: SelectAll, -} - -impl ExtTransport { - /// Creates a new `ExtTransport` that uses the given external `Transport`. - pub fn new(transport: ffi::Transport) -> Self { - ExtTransport { - inner: SendWrapper::new(transport), - listeners: SelectAll::new(), - } - } - - fn do_dial( - &mut self, - addr: Multiaddr, - role_override: Endpoint, - ) -> Result<::Dial, TransportError<::Error>> { - let promise = self - .inner - .dial( - &addr.to_string(), - matches!(role_override, Endpoint::Listener), - ) - .map_err(|err| { - if is_not_supported_error(&err) { - TransportError::MultiaddrNotSupported(addr) - } else { - TransportError::Other(JsErr::from(err)) - } - })?; - - Ok(Dial { - inner: SendWrapper::new(promise.into()), - }) - } -} - -impl fmt::Debug for ExtTransport { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_tuple("ExtTransport").finish() - } -} - -impl Transport for ExtTransport { - type Output = Connection; - type Error = JsErr; - type ListenerUpgrade = Ready>; - type Dial = Dial; - - fn listen_on(&mut self, addr: Multiaddr) -> Result> { - let iter = self.inner.listen_on(&addr.to_string()).map_err(|err| { - if is_not_supported_error(&err) { - TransportError::MultiaddrNotSupported(addr) - } else { 
- TransportError::Other(JsErr::from(err)) - } - })?; - let listener_id = ListenerId::new(); - let listen = Listen { - listener_id, - iterator: SendWrapper::new(iter), - next_event: None, - pending_events: VecDeque::new(), - is_closed: false, - }; - self.listeners.push(listen); - Ok(listener_id) - } - - fn remove_listener(&mut self, id: ListenerId) -> bool { - match self.listeners.iter_mut().find(|l| l.listener_id == id) { - Some(listener) => { - listener.close(Ok(())); - true - } - None => false, - } - } - - fn dial(&mut self, addr: Multiaddr) -> Result> { - self.do_dial(addr, Endpoint::Dialer) - } - - fn dial_as_listener( - &mut self, - addr: Multiaddr, - ) -> Result> { - self.do_dial(addr, Endpoint::Listener) - } - - fn address_translation(&self, _server: &Multiaddr, _observed: &Multiaddr) -> Option { - None - } - - fn poll( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll> { - match ready!(self.listeners.poll_next_unpin(cx)) { - Some(event) => Poll::Ready(event), - None => Poll::Pending, - } - } -} - -/// Future that dial a remote through an external transport. -#[must_use = "futures do nothing unless polled"] -pub struct Dial { - /// A promise that will resolve to a `ffi::Connection` on success. - inner: SendWrapper, -} - -impl fmt::Debug for Dial { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_tuple("Dial").finish() - } -} - -impl Future for Dial { - type Output = Result; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - match Future::poll(Pin::new(&mut *self.inner), cx) { - Poll::Ready(Ok(connec)) => Poll::Ready(Ok(Connection::new(connec.into()))), - Poll::Pending => Poll::Pending, - Poll::Ready(Err(err)) => Poll::Ready(Err(JsErr::from(err))), - } - } -} - -/// Stream that listens for incoming connections through an external transport. -#[must_use = "futures do nothing unless polled"] -pub struct Listen { - listener_id: ListenerId, - /// Iterator of `ListenEvent`s. 
- iterator: SendWrapper, - /// Promise that will yield the next `ListenEvent`. - next_event: Option>, - /// List of events that we are waiting to propagate. - pending_events: VecDeque<::Item>, - /// If the iterator is done close the listener. - is_closed: bool, -} - -impl Listen { - /// Report the listener as closed and terminate its stream. - fn close(&mut self, reason: Result<(), JsErr>) { - self.pending_events - .push_back(TransportEvent::ListenerClosed { - listener_id: self.listener_id, - reason, - }); - self.is_closed = true; - } -} - -impl fmt::Debug for Listen { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_tuple("Listen").field(&self.listener_id).finish() - } -} - -impl Stream for Listen { - type Item = TransportEvent<::ListenerUpgrade, JsErr>; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - loop { - if let Some(ev) = self.pending_events.pop_front() { - return Poll::Ready(Some(ev)); - } - - if self.is_closed { - // Terminate the stream if the listener closed and all remaining events have been reported. - return Poll::Ready(None); - } - - // Try to fill `self.next_event` if necessary and possible. If we fail, then - // `Ready(None)` is returned below. 
- if self.next_event.is_none() { - if let Ok(ev) = self.iterator.next() { - if !ev.done() { - let promise: js_sys::Promise = ev.value().into(); - self.next_event = Some(SendWrapper::new(promise.into())); - } - } - } - - let event = if let Some(next_event) = self.next_event.as_mut() { - let e = match Future::poll(Pin::new(&mut **next_event), cx) { - Poll::Ready(Ok(ev)) => ffi::ListenEvent::from(ev), - Poll::Pending => return Poll::Pending, - Poll::Ready(Err(err)) => { - self.close(Err(err.into())); - continue; - } - }; - self.next_event = None; - e - } else { - self.close(Ok(())); - continue; - }; - - let listener_id = self.listener_id; - - if let Some(addrs) = event.new_addrs() { - for addr in addrs.iter() { - match js_value_to_addr(addr) { - Ok(addr) => self.pending_events.push_back(TransportEvent::NewAddress { - listener_id, - listen_addr: addr, - }), - Err(err) => self - .pending_events - .push_back(TransportEvent::ListenerError { - listener_id, - error: err, - }), - }; - } - } - - if let Some(upgrades) = event.new_connections() { - for upgrade in upgrades.iter().cloned() { - let upgrade: ffi::ConnectionEvent = upgrade.into(); - match upgrade.local_addr().parse().and_then(|local| { - let observed = upgrade.observed_addr().parse()?; - Ok((local, observed)) - }) { - Ok((local_addr, send_back_addr)) => { - self.pending_events.push_back(TransportEvent::Incoming { - listener_id, - local_addr, - send_back_addr, - upgrade: futures::future::ok(Connection::new(upgrade.connection())), - }) - } - Err(err) => self - .pending_events - .push_back(TransportEvent::ListenerError { - listener_id, - error: err.into(), - }), - } - } - } - - if let Some(addrs) = event.expired_addrs() { - for addr in addrs.iter() { - match js_value_to_addr(addr) { - Ok(addr) => self - .pending_events - .push_back(TransportEvent::AddressExpired { - listener_id, - listen_addr: addr, - }), - Err(err) => self - .pending_events - .push_back(TransportEvent::ListenerError { - listener_id, - error: err, - 
}), - } - } - } - } - } -} - -/// Active stream of data with a remote. -/// -/// It is guaranteed that each call to `io::Write::write` on this object maps to exactly one call -/// to `write` on the FFI. In other words, no internal buffering happens for writes, and data can't -/// be split. -pub struct Connection { - /// The FFI object. - inner: SendWrapper, - - /// The iterator that was returned by `read()`. - read_iterator: SendWrapper, - - /// Reading part of the connection. - read_state: ConnectionReadState, - - /// When we write data using the FFI, a promise is returned containing the moment when the - /// underlying transport is ready to accept data again. This promise is stored here. - /// If this is `Some`, we must wait until the contained promise is resolved to write again. - previous_write_promise: Option>, -} - -impl Connection { - /// Initializes a `Connection` object from the FFI connection. - fn new(inner: ffi::Connection) -> Self { - let read_iterator = inner.read(); - - Connection { - inner: SendWrapper::new(inner), - read_iterator: SendWrapper::new(read_iterator), - read_state: ConnectionReadState::PendingData(Vec::new()), - previous_write_promise: None, - } - } -} - -/// Reading side of the connection. -enum ConnectionReadState { - /// Some data have been read and are waiting to be transferred. Can be empty. - PendingData(Vec), - /// Waiting for a `Promise` containing the next data. - Waiting(SendWrapper), - /// An error occurred or an earlier read yielded EOF. 
- Finished, -} - -impl fmt::Debug for Connection { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_tuple("Connection").finish() - } -} - -impl AsyncRead for Connection { - fn poll_read( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut [u8], - ) -> Poll> { - loop { - match mem::replace(&mut self.read_state, ConnectionReadState::Finished) { - ConnectionReadState::Finished => { - break Poll::Ready(Err(io::ErrorKind::BrokenPipe.into())) - } - - ConnectionReadState::PendingData(ref data) if data.is_empty() => { - let iter_next = self.read_iterator.next().map_err(JsErr::from)?; - if iter_next.done() { - self.read_state = ConnectionReadState::Finished; - } else { - let promise: js_sys::Promise = iter_next.value().into(); - let promise = SendWrapper::new(promise.into()); - self.read_state = ConnectionReadState::Waiting(promise); - } - continue; - } - - ConnectionReadState::PendingData(mut data) => { - debug_assert!(!data.is_empty()); - if buf.len() <= data.len() { - buf.copy_from_slice(&data[..buf.len()]); - self.read_state = - ConnectionReadState::PendingData(data.split_off(buf.len())); - break Poll::Ready(Ok(buf.len())); - } else { - let len = data.len(); - buf[..len].copy_from_slice(&data); - self.read_state = ConnectionReadState::PendingData(Vec::new()); - break Poll::Ready(Ok(len)); - } - } - - ConnectionReadState::Waiting(mut promise) => { - let data = match Future::poll(Pin::new(&mut *promise), cx) { - Poll::Ready(Ok(ref data)) if data.is_null() => break Poll::Ready(Ok(0)), - Poll::Ready(Ok(data)) => data, - Poll::Ready(Err(err)) => { - break Poll::Ready(Err(io::Error::from(JsErr::from(err)))) - } - Poll::Pending => { - self.read_state = ConnectionReadState::Waiting(promise); - break Poll::Pending; - } - }; - - // Try to directly copy the data into `buf` if it is large enough, otherwise - // transition to `PendingData` and loop again. 
- let data = js_sys::Uint8Array::new(&data); - let data_len = data.length() as usize; - if data_len <= buf.len() { - data.copy_to(&mut buf[..data_len]); - self.read_state = ConnectionReadState::PendingData(Vec::new()); - break Poll::Ready(Ok(data_len)); - } else { - let mut tmp_buf = vec![0; data_len]; - data.copy_to(&mut tmp_buf[..]); - self.read_state = ConnectionReadState::PendingData(tmp_buf); - continue; - } - } - } - } - } -} - -impl AsyncWrite for Connection { - fn poll_write( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - // Note: as explained in the doc-comments of `Connection`, each call to this function must - // map to exactly one call to `self.inner.write()`. - - if let Some(mut promise) = self.previous_write_promise.take() { - match Future::poll(Pin::new(&mut *promise), cx) { - Poll::Ready(Ok(_)) => (), - Poll::Ready(Err(err)) => { - return Poll::Ready(Err(io::Error::from(JsErr::from(err)))) - } - Poll::Pending => { - self.previous_write_promise = Some(promise); - return Poll::Pending; - } - } - } - - debug_assert!(self.previous_write_promise.is_none()); - self.previous_write_promise = Some(SendWrapper::new( - self.inner.write(buf).map_err(JsErr::from)?.into(), - )); - Poll::Ready(Ok(buf.len())) - } - - fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - // There's no flushing mechanism. In the FFI we consider that writing implicitly flushes. - Poll::Ready(Ok(())) - } - - fn poll_close(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - // Shutting down is considered instantaneous. - match self.inner.shutdown() { - Ok(()) => Poll::Ready(Ok(())), - Err(err) => Poll::Ready(Err(io::Error::from(JsErr::from(err)))), - } - } -} - -impl Drop for Connection { - fn drop(&mut self) { - self.inner.close(); - } -} - -/// Returns true if `err` is an error about an address not being supported. 
-fn is_not_supported_error(err: &JsValue) -> bool { - if let Some(err) = err.dyn_ref::() { - err.name() == "NotSupportedError" - } else { - false - } -} - -/// Turns a `JsValue` containing a `String` into a `Multiaddr`, if possible. -fn js_value_to_addr(addr: &JsValue) -> Result { - if let Some(addr) = addr.as_string() { - Ok(addr.parse()?) - } else { - Err(JsValue::from_str("Element in new_addrs is not a string").into()) - } -} - -/// Error that can be generated by the `ExtTransport`. -pub struct JsErr(SendWrapper); - -impl From for JsErr { - fn from(val: JsValue) -> JsErr { - JsErr(SendWrapper::new(val)) - } -} - -impl From for JsErr { - fn from(err: libp2p_core::multiaddr::Error) -> JsErr { - JsValue::from_str(&err.to_string()).into() - } -} - -impl From for io::Error { - fn from(err: JsErr) -> io::Error { - io::Error::new(io::ErrorKind::Other, err.to_string()) - } -} - -impl fmt::Debug for JsErr { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{self}") - } -} - -impl fmt::Display for JsErr { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - if let Some(s) = self.0.as_string() { - write!(f, "{s}") - } else if let Some(err) = self.0.dyn_ref::() { - write!(f, "{}", String::from(err.message())) - } else if let Some(obj) = self.0.dyn_ref::() { - write!(f, "{}", String::from(obj.to_string())) - } else { - write!(f, "{:?}", &*self.0) - } - } -} - -impl error::Error for JsErr {} diff --git a/transports/wasm-ext/src/websockets.js b/transports/wasm-ext/src/websockets.js deleted file mode 100644 index 1ef2faf6ded..00000000000 --- a/transports/wasm-ext/src/websockets.js +++ /dev/null @@ -1,174 +0,0 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - -export const websocket_transport = () => { - return { - dial: dial, - listen_on: (addr) => { - let err = new Error("Listening on WebSockets is not possible from within a browser"); - err.name = "NotSupportedError"; - throw err; - }, - }; -} - -/// Turns a string multiaddress into a WebSockets string URL. 
-const multiaddr_to_ws = (addr) => { - let parsed = addr.match(/^\/(ip4|ip6|dns4|dns6|dns)\/(.*?)\/tcp\/(.*?)\/(ws|wss|x-parity-ws\/(.*)|x-parity-wss\/(.*))(|\/p2p\/[a-zA-Z0-9]+)$/); - if (parsed != null) { - let proto = 'wss'; - if (parsed[4] == 'ws' || parsed[4].startsWith('x-parity-ws/')) { - proto = 'ws'; - } - let url = decodeURIComponent(parsed[5] || parsed[6] || ''); - if (parsed[1] == 'ip6') { - return proto + "://[" + parsed[2] + "]:" + parsed[3] + url; - } else { - return proto + "://" + parsed[2] + ":" + parsed[3] + url; - } - } - - let err = new Error("Address not supported: " + addr); - err.name = "NotSupportedError"; - throw err; -} - -// Attempt to dial a multiaddress. -const dial = (addr) => { - let ws = new WebSocket(multiaddr_to_ws(addr)); - ws.binaryType = "arraybuffer"; - let reader = read_queue(); - - return new Promise((open_resolve, open_reject) => { - ws.onerror = (ev) => { - // If `open_resolve` has been called earlier, calling `open_reject` seems to be - // silently ignored. It is easier to unconditionally call `open_reject` rather than - // check in which state the connection is, which would be error-prone. - open_reject(ev); - // Injecting an EOF is how we report to the reading side that the connection has been - // closed. Injecting multiple EOFs is harmless. - reader.inject_eof(); - }; - ws.onclose = (ev) => { - // Same remarks as above. - open_reject(ev); - reader.inject_eof(); - }; - - // We inject all incoming messages into the queue unconditionally. The caller isn't - // supposed to access this queue unless the connection is open. - ws.onmessage = (ev) => reader.inject_array_buffer(ev.data); - - ws.onopen = () => open_resolve({ - read: (function*() { while(ws.readyState == 1) { yield reader.next(); } })(), - write: (data) => { - if (ws.readyState == 1) { - // The passed in `data` is an `ArrayBufferView` [0]. 
If the - // underlying typed array is a `SharedArrayBuffer` (when - // using WASM threads, so multiple web workers sharing - // memory) the WebSocket's `send` method errors [1][2][3]. - // This limitation will probably be lifted in the future, - // but for now we have to make a copy here .. - // - // [0]: https://developer.mozilla.org/en-US/docs/Web/API/ArrayBufferView - // [1]: https://chromium.googlesource.com/chromium/src/+/1438f63f369fed3766fa5031e7a252c986c69be6%5E%21/ - // [2]: https://bugreports.qt.io/browse/QTBUG-78078 - // [3]: https://chromium.googlesource.com/chromium/src/+/HEAD/third_party/blink/renderer/bindings/IDLExtendedAttributes.md#AllowShared_p - ws.send(data.slice(0)); - return promise_when_send_finished(ws); - } else { - return Promise.reject("WebSocket is closed"); - } - }, - shutdown: () => ws.close(), - close: () => {} - }); - }); -} - -// Takes a WebSocket object and returns a Promise that resolves when bufferedAmount is low enough -// to allow more data to be sent. -const promise_when_send_finished = (ws) => { - return new Promise((resolve, reject) => { - function check() { - if (ws.readyState != 1) { - reject("WebSocket is closed"); - return; - } - - // We put an arbitrary threshold of 8 kiB of buffered data. - if (ws.bufferedAmount < 8 * 1024) { - resolve(); - } else { - setTimeout(check, 100); - } - } - - check(); - }) -} - -// Creates a queue reading system. -const read_queue = () => { - // State of the queue. - let state = { - // Array of promises resolving to `ArrayBuffer`s, that haven't been transmitted back with - // `next` yet. - queue: new Array(), - // If `resolve` isn't null, it is a "resolve" function of a promise that has already been - // returned by `next`. It should be called with some data. - resolve: null, - }; - - return { - // Inserts a new Blob in the queue. 
- inject_array_buffer: (buffer) => { - if (state.resolve != null) { - state.resolve(buffer); - state.resolve = null; - } else { - state.queue.push(Promise.resolve(buffer)); - } - }, - - // Inserts an EOF message in the queue. - inject_eof: () => { - if (state.resolve != null) { - state.resolve(null); - state.resolve = null; - } else { - state.queue.push(Promise.resolve(null)); - } - }, - - // Returns a Promise that yields the next entry as an ArrayBuffer. - next: () => { - if (state.queue.length != 0) { - return state.queue.shift(0); - } else { - if (state.resolve !== null) - throw "Internal error: already have a pending promise"; - return new Promise((resolve, reject) => { - state.resolve = resolve; - }); - } - } - }; -}; diff --git a/transports/webrtc-websys/CHANGELOG.md b/transports/webrtc-websys/CHANGELOG.md new file mode 100644 index 00000000000..634120c53c3 --- /dev/null +++ b/transports/webrtc-websys/CHANGELOG.md @@ -0,0 +1,18 @@ +## 0.3.0-alpha + +- Bump version in order to publish a new version dependent on latest `libp2p-core`. + See [PR 4959](https://github.com/libp2p/rust-libp2p/pull/4959). +- Remove `libp2p_noise` from the public API. + See [PR 4969](https://github.com/libp2p/rust-libp2p/pull/4969). + +## 0.2.0-alpha + +- Rename `Error::JsError` to `Error::Js`. + See [PR 4653](https://github.com/libp2p/rust-libp2p/pull/4653) + +## 0.1.0-alpha + +- Initial alpha release. + See [PR 4248]. 
+ +[PR 4248]: https://github.com/libp2p/rust-libp2p/pull/4248 diff --git a/transports/webrtc-websys/Cargo.toml b/transports/webrtc-websys/Cargo.toml new file mode 100644 index 00000000000..34da24a3f5f --- /dev/null +++ b/transports/webrtc-websys/Cargo.toml @@ -0,0 +1,31 @@ +[package] +authors = ["Doug Anderson "] +categories = ["asynchronous", "network-programming", "wasm", "web-programming"] +description = "WebRTC for libp2p under WASM environment" +edition = "2021" +keywords = ["libp2p", "networking", "peer-to-peer"] +license = "MIT" +name = "libp2p-webrtc-websys" +repository = "https://github.com/libp2p/rust-libp2p" +rust-version = { workspace = true } +version = "0.3.0-alpha" +publish = true + +[dependencies] +bytes = "1" +futures = "0.3" +getrandom = { version = "0.2.11", features = ["js"] } +hex = "0.4.3" +js-sys = { version = "0.3" } +libp2p-core = { workspace = true } +libp2p-identity = { workspace = true } +libp2p-webrtc-utils = { workspace = true } +send_wrapper = { version = "0.6.0", features = ["futures"] } +thiserror = "1" +tracing = "0.1.37" +wasm-bindgen = { version = "0.2.89" } +wasm-bindgen-futures = { version = "0.4.39" } +web-sys = { version = "0.3.66", features = ["Document", "Location", "MessageEvent", "Navigator", "RtcCertificate", "RtcConfiguration", "RtcDataChannel", "RtcDataChannelEvent", "RtcDataChannelInit", "RtcDataChannelState", "RtcDataChannelType", "RtcPeerConnection", "RtcSdpType", "RtcSessionDescription", "RtcSessionDescriptionInit", "Window"] } + +[lints] +workspace = true diff --git a/transports/webrtc-websys/README.md b/transports/webrtc-websys/README.md new file mode 100644 index 00000000000..b522f31ba65 --- /dev/null +++ b/transports/webrtc-websys/README.md @@ -0,0 +1,9 @@ +# Rust `libp2p-webrtc-websys` + +Browser Transport made available through `web-sys` bindings. + +## Usage + +Use with `Swarm::with_wasm_executor` to enable the `wasm-bindgen` executor for the `Swarm`. 
+ +See the [browser-webrtc](../../examples/browser-webrtc) example for a full example. diff --git a/transports/webrtc-websys/src/connection.rs b/transports/webrtc-websys/src/connection.rs new file mode 100644 index 00000000000..b858237da63 --- /dev/null +++ b/transports/webrtc-websys/src/connection.rs @@ -0,0 +1,307 @@ +//! A libp2p connection backed by an [RtcPeerConnection](https://developer.mozilla.org/en-US/docs/Web/API/RTCPeerConnection). + +use super::{Error, Stream}; +use crate::stream::DropListener; +use futures::channel::mpsc; +use futures::stream::FuturesUnordered; +use futures::StreamExt; +use js_sys::{Object, Reflect}; +use libp2p_core::muxing::{StreamMuxer, StreamMuxerEvent}; +use libp2p_webrtc_utils::Fingerprint; +use send_wrapper::SendWrapper; +use std::pin::Pin; +use std::task::Waker; +use std::task::{ready, Context, Poll}; +use wasm_bindgen::prelude::*; +use wasm_bindgen_futures::JsFuture; +use web_sys::{ + RtcConfiguration, RtcDataChannel, RtcDataChannelEvent, RtcDataChannelInit, RtcDataChannelType, + RtcSessionDescriptionInit, +}; + +/// A WebRTC Connection. +/// +/// All connections need to be [`Send`] which is why some fields are wrapped in [`SendWrapper`]. +/// This is safe because WASM is single-threaded. +pub struct Connection { + /// The [RtcPeerConnection] that is used for the WebRTC Connection + inner: SendWrapper, + + /// Whether the connection is closed + closed: bool, + /// An [`mpsc::channel`] for all inbound data channels. + /// + /// Because the browser's WebRTC API is event-based, we need to use a channel to obtain all inbound data channels. + inbound_data_channels: SendWrapper>, + /// A list of futures, which, once completed, signal that a [`Stream`] has been dropped. 
+ drop_listeners: FuturesUnordered, + no_drop_listeners_waker: Option, + + _ondatachannel_closure: SendWrapper>, +} + +impl Connection { + /// Create a new inner WebRTC Connection + pub(crate) fn new(peer_connection: RtcPeerConnection) -> Self { + // An ondatachannel Future enables us to poll for incoming data channel events in poll_incoming + let (mut tx_ondatachannel, rx_ondatachannel) = mpsc::channel(4); // we may get more than one data channel opened on a single peer connection + + let ondatachannel_closure = Closure::new(move |ev: RtcDataChannelEvent| { + tracing::trace!("New data channel"); + + if let Err(e) = tx_ondatachannel.try_send(ev.channel()) { + if e.is_full() { + tracing::warn!("Remote is opening too many data channels, we can't keep up!"); + return; + } + + if e.is_disconnected() { + tracing::warn!("Receiver is gone, are we shutting down?"); + } + } + }); + peer_connection + .inner + .set_ondatachannel(Some(ondatachannel_closure.as_ref().unchecked_ref())); + + Self { + inner: SendWrapper::new(peer_connection), + closed: false, + drop_listeners: FuturesUnordered::default(), + no_drop_listeners_waker: None, + inbound_data_channels: SendWrapper::new(rx_ondatachannel), + _ondatachannel_closure: SendWrapper::new(ondatachannel_closure), + } + } + + fn new_stream_from_data_channel(&mut self, data_channel: RtcDataChannel) -> Stream { + let (stream, drop_listener) = Stream::new(data_channel); + + self.drop_listeners.push(drop_listener); + if let Some(waker) = self.no_drop_listeners_waker.take() { + waker.wake() + } + stream + } + + /// Closes the Peer Connection. + /// + /// This closes the data channels also and they will return an error + /// if they are used. 
+ fn close_connection(&mut self) { + if !self.closed { + tracing::trace!("connection::close_connection"); + self.inner.inner.close(); + self.closed = true; + } + } +} + +impl Drop for Connection { + fn drop(&mut self) { + self.close_connection(); + } +} + +/// WebRTC native multiplexing +/// Allows users to open substreams +impl StreamMuxer for Connection { + type Substream = Stream; + type Error = Error; + + fn poll_inbound( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll> { + match ready!(self.inbound_data_channels.poll_next_unpin(cx)) { + Some(data_channel) => { + let stream = self.new_stream_from_data_channel(data_channel); + + Poll::Ready(Ok(stream)) + } + None => { + // This only happens if the [`RtcPeerConnection::ondatachannel`] closure gets freed which means we are most likely shutting down the connection. + tracing::debug!("`Sender` for inbound data channels has been dropped"); + Poll::Ready(Err(Error::Connection("connection closed".to_owned()))) + } + } + } + + fn poll_outbound( + mut self: Pin<&mut Self>, + _: &mut Context<'_>, + ) -> Poll> { + tracing::trace!("Creating outbound data channel"); + + let data_channel = self.inner.new_regular_data_channel(); + let stream = self.new_stream_from_data_channel(data_channel); + + Poll::Ready(Ok(stream)) + } + + /// Closes the Peer Connection. 
+ fn poll_close( + mut self: Pin<&mut Self>, + _cx: &mut Context<'_>, + ) -> Poll> { + tracing::trace!("connection::poll_close"); + + self.close_connection(); + Poll::Ready(Ok(())) + } + + fn poll( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll> { + loop { + match ready!(self.drop_listeners.poll_next_unpin(cx)) { + Some(Ok(())) => {} + Some(Err(e)) => { + tracing::debug!("a DropListener failed: {e}") + } + None => { + self.no_drop_listeners_waker = Some(cx.waker().clone()); + return Poll::Pending; + } + } + } + } +} + +pub(crate) struct RtcPeerConnection { + inner: web_sys::RtcPeerConnection, +} + +impl RtcPeerConnection { + pub(crate) async fn new(algorithm: String) -> Result { + let algo: Object = Object::new(); + Reflect::set(&algo, &"name".into(), &"ECDSA".into()).unwrap(); + Reflect::set(&algo, &"namedCurve".into(), &"P-256".into()).unwrap(); + Reflect::set(&algo, &"hash".into(), &algorithm.into()).unwrap(); + + let certificate_promise = + web_sys::RtcPeerConnection::generate_certificate_with_object(&algo) + .expect("certificate to be valid"); + + let certificate = JsFuture::from(certificate_promise).await?; + + let mut config = RtcConfiguration::default(); + // wrap certificate in a js Array first before adding it to the config object + let certificate_arr = js_sys::Array::new(); + certificate_arr.push(&certificate); + config.certificates(&certificate_arr); + + let inner = web_sys::RtcPeerConnection::new_with_configuration(&config)?; + + Ok(Self { inner }) + } + + /// Creates the stream for the initial noise handshake. + /// + /// The underlying data channel MUST have `negotiated` set to `true` and carry the ID 0. + pub(crate) fn new_handshake_stream(&self) -> (Stream, DropListener) { + Stream::new(self.new_data_channel(true)) + } + + /// Creates a regular data channel for when the connection is already established. 
+ pub(crate) fn new_regular_data_channel(&self) -> RtcDataChannel { + self.new_data_channel(false) + } + + fn new_data_channel(&self, negotiated: bool) -> RtcDataChannel { + const LABEL: &str = ""; + + let dc = match negotiated { + true => { + let mut options = RtcDataChannelInit::new(); + options.negotiated(true).id(0); // id is only ever set to zero when negotiated is true + + self.inner + .create_data_channel_with_data_channel_dict(LABEL, &options) + } + false => self.inner.create_data_channel(LABEL), + }; + dc.set_binary_type(RtcDataChannelType::Arraybuffer); // Hardcoded here, it's the only type we use + + dc + } + + pub(crate) async fn create_offer(&self) -> Result { + let offer = JsFuture::from(self.inner.create_offer()).await?; + + let offer = Reflect::get(&offer, &JsValue::from_str("sdp")) + .expect("sdp should be valid") + .as_string() + .expect("sdp string should be valid string"); + + Ok(offer) + } + + pub(crate) async fn set_local_description( + &self, + sdp: RtcSessionDescriptionInit, + ) -> Result<(), Error> { + let promise = self.inner.set_local_description(&sdp); + JsFuture::from(promise).await?; + + Ok(()) + } + + pub(crate) fn local_fingerprint(&self) -> Result { + let sdp = &self + .inner + .local_description() + .ok_or_else(|| Error::Js("No local description".to_string()))? + .sdp(); + + let fingerprint = + parse_fingerprint(sdp).ok_or_else(|| Error::Js("No fingerprint in SDP".to_string()))?; + + Ok(fingerprint) + } + + pub(crate) async fn set_remote_description( + &self, + sdp: RtcSessionDescriptionInit, + ) -> Result<(), Error> { + let promise = self.inner.set_remote_description(&sdp); + JsFuture::from(promise).await?; + + Ok(()) + } +} + +/// Parse Fingerprint from a SDP. 
+fn parse_fingerprint(sdp: &str) -> Option { + // split the sdp by new lines / carriage returns + let lines = sdp.split("\r\n"); + + // iterate through the lines to find the one starting with a=fingerprint: + // get the value after the first space + // return the value as a Fingerprint + for line in lines { + if line.starts_with("a=fingerprint:") { + let fingerprint = line.split(' ').nth(1).unwrap(); + let bytes = hex::decode(fingerprint.replace(':', "")).unwrap(); + let arr: [u8; 32] = bytes.as_slice().try_into().unwrap(); + return Some(Fingerprint::raw(arr)); + } + } + None +} + +#[cfg(test)] +mod sdp_tests { + use super::*; + + #[test] + fn test_fingerprint() { + let sdp = "v=0\r\no=- 0 0 IN IP6 ::1\r\ns=-\r\nc=IN IP6 ::1\r\nt=0 0\r\na=ice-lite\r\nm=application 61885 UDP/DTLS/SCTP webrtc-datachannel\r\na=mid:0\r\na=setup:passive\r\na=ice-ufrag:libp2p+webrtc+v1/YwapWySn6fE6L9i47PhlB6X4gzNXcgFs\r\na=ice-pwd:libp2p+webrtc+v1/YwapWySn6fE6L9i47PhlB6X4gzNXcgFs\r\na=fingerprint:sha-256 A8:17:77:1E:02:7E:D1:2B:53:92:70:A6:8E:F9:02:CC:21:72:3A:92:5D:F4:97:5F:27:C4:5E:75:D4:F4:31:89\r\na=sctp-port:5000\r\na=max-message-size:16384\r\na=candidate:1467250027 1 UDP 1467250027 ::1 61885 typ host\r\n"; + + let fingerprint = parse_fingerprint(sdp).unwrap(); + + assert_eq!(fingerprint.algorithm(), "sha-256"); + assert_eq!(fingerprint.to_sdp_format(), "A8:17:77:1E:02:7E:D1:2B:53:92:70:A6:8E:F9:02:CC:21:72:3A:92:5D:F4:97:5F:27:C4:5E:75:D4:F4:31:89"); + } +} diff --git a/transports/webrtc-websys/src/error.rs b/transports/webrtc-websys/src/error.rs new file mode 100644 index 00000000000..a2df1a182ea --- /dev/null +++ b/transports/webrtc-websys/src/error.rs @@ -0,0 +1,62 @@ +use wasm_bindgen::{JsCast, JsValue}; + +/// Errors that may happen on the [`Transport`](crate::Transport) or the +/// [`Connection`](crate::Connection). 
+#[derive(thiserror::Error, Debug)] +pub enum Error { + #[error("Invalid multiaddr: {0}")] + InvalidMultiaddr(&'static str), + + #[error("JavaScript error: {0}")] + Js(String), + + #[error("JavaScript typecasting failed")] + JsCastFailed, + + #[error("Unknown remote peer ID")] + UnknownRemotePeerId, + + #[error("Connection error: {0}")] + Connection(String), + + #[error("Authentication error")] + Authentication(#[from] AuthenticationError), +} + +/// New-type wrapper to hide `libp2p_noise` from the public API. +#[derive(thiserror::Error, Debug)] +#[error(transparent)] +pub struct AuthenticationError(pub(crate) libp2p_webrtc_utils::noise::Error); + +impl Error { + pub(crate) fn from_js_value(value: JsValue) -> Self { + let s = if value.is_instance_of::() { + js_sys::Error::from(value) + .to_string() + .as_string() + .unwrap_or_else(|| "Unknown error".to_string()) + } else { + "Unknown error".to_string() + }; + + Error::Js(s) + } +} + +impl From for Error { + fn from(value: JsValue) -> Self { + Error::from_js_value(value) + } +} + +impl From for Error { + fn from(value: String) -> Self { + Error::Js(value) + } +} + +impl From for Error { + fn from(value: std::io::Error) -> Self { + Error::Js(value.to_string()) + } +} diff --git a/transports/webrtc-websys/src/lib.rs b/transports/webrtc-websys/src/lib.rs new file mode 100644 index 00000000000..04fced4111b --- /dev/null +++ b/transports/webrtc-websys/src/lib.rs @@ -0,0 +1,13 @@ +#![doc = include_str!("../README.md")] + +mod connection; +mod error; +mod sdp; +mod stream; +mod transport; +mod upgrade; + +pub use self::connection::Connection; +pub use self::error::Error; +pub use self::stream::Stream; +pub use self::transport::{Config, Transport}; diff --git a/transports/webrtc-websys/src/sdp.rs b/transports/webrtc-websys/src/sdp.rs new file mode 100644 index 00000000000..439182ea4db --- /dev/null +++ b/transports/webrtc-websys/src/sdp.rs @@ -0,0 +1,55 @@ +use libp2p_webrtc_utils::Fingerprint; +use std::net::SocketAddr; 
+use web_sys::{RtcSdpType, RtcSessionDescriptionInit}; + +/// Creates the SDP answer used by the client. +pub(crate) fn answer( + addr: SocketAddr, + server_fingerprint: Fingerprint, + client_ufrag: &str, +) -> RtcSessionDescriptionInit { + let mut answer_obj = RtcSessionDescriptionInit::new(RtcSdpType::Answer); + answer_obj.sdp(&libp2p_webrtc_utils::sdp::answer( + addr, + server_fingerprint, + client_ufrag, + )); + answer_obj +} + +/// Creates the munged SDP offer from the Browser's given SDP offer +/// +/// Certificate verification is disabled which is why we hardcode a dummy fingerprint here. +pub(crate) fn offer(offer: String, client_ufrag: &str) -> RtcSessionDescriptionInit { + // find line and replace a=ice-ufrag: with "\r\na=ice-ufrag:{client_ufrag}\r\n" + // find line and replace a=ice-pwd: with "\r\na=ice-ufrag:{client_ufrag}\r\n" + + let mut munged_sdp_offer = String::new(); + + for line in offer.split("\r\n") { + if line.starts_with("a=ice-ufrag:") { + munged_sdp_offer.push_str(&format!("a=ice-ufrag:{client_ufrag}\r\n")); + continue; + } + + if line.starts_with("a=ice-pwd:") { + munged_sdp_offer.push_str(&format!("a=ice-pwd:{client_ufrag}\r\n")); + continue; + } + + if !line.is_empty() { + munged_sdp_offer.push_str(&format!("{}\r\n", line)); + continue; + } + } + + // remove any double \r\n + let munged_sdp_offer = munged_sdp_offer.replace("\r\n\r\n", "\r\n"); + + tracing::trace!(offer=%munged_sdp_offer, "Created SDP offer"); + + let mut offer_obj = RtcSessionDescriptionInit::new(RtcSdpType::Offer); + offer_obj.sdp(&munged_sdp_offer); + + offer_obj +} diff --git a/transports/webrtc-websys/src/stream.rs b/transports/webrtc-websys/src/stream.rs new file mode 100644 index 00000000000..812aa5afbbf --- /dev/null +++ b/transports/webrtc-websys/src/stream.rs @@ -0,0 +1,61 @@ +//! 
The WebRTC [Stream] over the Connection +use self::poll_data_channel::PollDataChannel; +use futures::{AsyncRead, AsyncWrite}; +use send_wrapper::SendWrapper; +use std::pin::Pin; +use std::task::{Context, Poll}; +use web_sys::RtcDataChannel; + +mod poll_data_channel; + +/// A stream over a WebRTC connection. +/// +/// Backed by a WebRTC data channel. +pub struct Stream { + /// Wrapper for the inner stream to make it Send + inner: SendWrapper>, +} + +pub(crate) type DropListener = SendWrapper>; + +impl Stream { + pub(crate) fn new(data_channel: RtcDataChannel) -> (Self, DropListener) { + let (inner, drop_listener) = + libp2p_webrtc_utils::Stream::new(PollDataChannel::new(data_channel)); + + ( + Self { + inner: SendWrapper::new(inner), + }, + SendWrapper::new(drop_listener), + ) + } +} + +impl AsyncRead for Stream { + fn poll_read( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &mut [u8], + ) -> Poll> { + Pin::new(&mut *self.get_mut().inner).poll_read(cx, buf) + } +} + +impl AsyncWrite for Stream { + fn poll_write( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &[u8], + ) -> Poll> { + Pin::new(&mut *self.get_mut().inner).poll_write(cx, buf) + } + + fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + Pin::new(&mut *self.get_mut().inner).poll_flush(cx) + } + + fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + Pin::new(&mut *self.get_mut().inner).poll_close(cx) + } +} diff --git a/transports/webrtc-websys/src/stream/poll_data_channel.rs b/transports/webrtc-websys/src/stream/poll_data_channel.rs new file mode 100644 index 00000000000..0ee4f7920c9 --- /dev/null +++ b/transports/webrtc-websys/src/stream/poll_data_channel.rs @@ -0,0 +1,242 @@ +use std::cmp::min; +use std::io; +use std::pin::Pin; +use std::rc::Rc; +use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::Mutex; +use std::task::{Context, Poll}; + +use bytes::BytesMut; +use futures::task::AtomicWaker; +use futures::{AsyncRead, AsyncWrite}; +use 
libp2p_webrtc_utils::MAX_MSG_LEN; +use wasm_bindgen::{prelude::*, JsCast}; +use web_sys::{Event, MessageEvent, RtcDataChannel, RtcDataChannelEvent, RtcDataChannelState}; + +/// [`PollDataChannel`] is a wrapper around around [`RtcDataChannel`] which implements [`AsyncRead`] and [`AsyncWrite`]. +#[derive(Debug, Clone)] +pub(crate) struct PollDataChannel { + /// The [`RtcDataChannel`] being wrapped. + inner: RtcDataChannel, + + new_data_waker: Rc, + read_buffer: Rc>, + + /// Waker for when we are waiting for the DC to be opened. + open_waker: Rc, + + /// Waker for when we are waiting to write (again) to the DC because we previously exceeded the [`MAX_MSG_LEN`] threshold. + write_waker: Rc, + + /// Waker for when we are waiting for the DC to be closed. + close_waker: Rc, + + /// Whether we've been overloaded with data by the remote. + /// + /// This is set to `true` in case `read_buffer` overflows, i.e. the remote is sending us messages faster than we can read them. + /// In that case, we return an [`std::io::Error`] from [`AsyncRead`] or [`AsyncWrite`], depending which one gets called earlier. + /// Failing these will (very likely), cause the application developer to drop the stream which resets it. + overloaded: Rc, + + // Store the closures for proper garbage collection. + // These are wrapped in an [`Rc`] so we can implement [`Clone`]. 
+ _on_open_closure: Rc>, + _on_write_closure: Rc>, + _on_close_closure: Rc>, + _on_message_closure: Rc>, +} + +impl PollDataChannel { + pub(crate) fn new(inner: RtcDataChannel) -> Self { + let open_waker = Rc::new(AtomicWaker::new()); + let on_open_closure = Closure::new({ + let open_waker = open_waker.clone(); + + move |_: RtcDataChannelEvent| { + tracing::trace!("DataChannel opened"); + open_waker.wake(); + } + }); + inner.set_onopen(Some(on_open_closure.as_ref().unchecked_ref())); + + let write_waker = Rc::new(AtomicWaker::new()); + inner.set_buffered_amount_low_threshold(0); + let on_write_closure = Closure::new({ + let write_waker = write_waker.clone(); + + move |_: Event| { + tracing::trace!("DataChannel available for writing (again)"); + write_waker.wake(); + } + }); + inner.set_onbufferedamountlow(Some(on_write_closure.as_ref().unchecked_ref())); + + let close_waker = Rc::new(AtomicWaker::new()); + let on_close_closure = Closure::new({ + let close_waker = close_waker.clone(); + + move |_: Event| { + tracing::trace!("DataChannel closed"); + close_waker.wake(); + } + }); + inner.set_onclose(Some(on_close_closure.as_ref().unchecked_ref())); + + let new_data_waker = Rc::new(AtomicWaker::new()); + let read_buffer = Rc::new(Mutex::new(BytesMut::new())); // We purposely don't use `with_capacity` so we don't eagerly allocate `MAX_READ_BUFFER` per stream. 
+ let overloaded = Rc::new(AtomicBool::new(false)); + + let on_message_closure = Closure::::new({ + let new_data_waker = new_data_waker.clone(); + let read_buffer = read_buffer.clone(); + let overloaded = overloaded.clone(); + + move |ev: MessageEvent| { + let data = js_sys::Uint8Array::new(&ev.data()); + + let mut read_buffer = read_buffer.lock().unwrap(); + + if read_buffer.len() + data.length() as usize > MAX_MSG_LEN { + overloaded.store(true, Ordering::SeqCst); + tracing::warn!("Remote is overloading us with messages, resetting stream",); + return; + } + + read_buffer.extend_from_slice(&data.to_vec()); + new_data_waker.wake(); + } + }); + inner.set_onmessage(Some(on_message_closure.as_ref().unchecked_ref())); + + Self { + inner, + new_data_waker, + read_buffer, + open_waker, + write_waker, + close_waker, + overloaded, + _on_open_closure: Rc::new(on_open_closure), + _on_write_closure: Rc::new(on_write_closure), + _on_close_closure: Rc::new(on_close_closure), + _on_message_closure: Rc::new(on_message_closure), + } + } + + /// Returns the [RtcDataChannelState] of the [RtcDataChannel] + fn ready_state(&self) -> RtcDataChannelState { + self.inner.ready_state() + } + + /// Returns the current [RtcDataChannel] BufferedAmount + fn buffered_amount(&self) -> usize { + self.inner.buffered_amount() as usize + } + + /// Whether the data channel is ready for reading or writing. 
+ fn poll_ready(&mut self, cx: &mut Context) -> Poll> { + match self.ready_state() { + RtcDataChannelState::Connecting => { + self.open_waker.register(cx.waker()); + return Poll::Pending; + } + RtcDataChannelState::Closing | RtcDataChannelState::Closed => { + return Poll::Ready(Err(io::ErrorKind::BrokenPipe.into())) + } + RtcDataChannelState::Open | RtcDataChannelState::__Nonexhaustive => {} + } + + if self.overloaded.load(Ordering::SeqCst) { + return Poll::Ready(Err(io::Error::new( + io::ErrorKind::BrokenPipe, + "remote overloaded us with messages", + ))); + } + + Poll::Ready(Ok(())) + } +} + +impl AsyncRead for PollDataChannel { + fn poll_read( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &mut [u8], + ) -> Poll> { + let this = self.get_mut(); + + futures::ready!(this.poll_ready(cx))?; + + let mut read_buffer = this.read_buffer.lock().unwrap(); + + if read_buffer.is_empty() { + this.new_data_waker.register(cx.waker()); + return Poll::Pending; + } + + // Ensure that we: + // - at most return what the caller can read (`buf.len()`) + // - at most what we have (`read_buffer.len()`) + let split_index = min(buf.len(), read_buffer.len()); + + let bytes_to_return = read_buffer.split_to(split_index); + let len = bytes_to_return.len(); + buf[..len].copy_from_slice(&bytes_to_return); + + Poll::Ready(Ok(len)) + } +} + +impl AsyncWrite for PollDataChannel { + fn poll_write( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &[u8], + ) -> Poll> { + let this = self.get_mut(); + + futures::ready!(this.poll_ready(cx))?; + + debug_assert!(this.buffered_amount() <= MAX_MSG_LEN); + let remaining_space = MAX_MSG_LEN - this.buffered_amount(); + + if remaining_space == 0 { + this.write_waker.register(cx.waker()); + return Poll::Pending; + } + + let bytes_to_send = min(buf.len(), remaining_space); + + if this + .inner + .send_with_u8_array(&buf[..bytes_to_send]) + .is_err() + { + return Poll::Ready(Err(io::ErrorKind::BrokenPipe.into())); + } + + 
Poll::Ready(Ok(bytes_to_send)) + } + + fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + if self.buffered_amount() == 0 { + return Poll::Ready(Ok(())); + } + + self.write_waker.register(cx.waker()); + Poll::Pending + } + + fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + if self.ready_state() == RtcDataChannelState::Closed { + return Poll::Ready(Ok(())); + } + + if self.ready_state() != RtcDataChannelState::Closing { + self.inner.close(); + } + + self.close_waker.register(cx.waker()); + Poll::Pending + } +} diff --git a/transports/webrtc-websys/src/transport.rs b/transports/webrtc-websys/src/transport.rs new file mode 100644 index 00000000000..ecf137eab8a --- /dev/null +++ b/transports/webrtc-websys/src/transport.rs @@ -0,0 +1,140 @@ +use super::upgrade; +use super::Connection; +use super::Error; +use futures::future::FutureExt; +use libp2p_core::multiaddr::Multiaddr; +use libp2p_core::muxing::StreamMuxerBox; +use libp2p_core::transport::{Boxed, ListenerId, Transport as _, TransportError, TransportEvent}; +use libp2p_identity::{Keypair, PeerId}; +use std::future::Future; +use std::pin::Pin; +use std::task::{Context, Poll}; + +/// Config for the [`Transport`]. +#[derive(Clone)] +pub struct Config { + keypair: Keypair, +} + +/// A WebTransport [`Transport`](libp2p_core::Transport) that works with `web-sys`. +pub struct Transport { + config: Config, +} + +impl Config { + /// Constructs a new configuration for the [`Transport`]. + pub fn new(keypair: &Keypair) -> Self { + Config { + keypair: keypair.to_owned(), + } + } +} + +impl Transport { + /// Constructs a new `Transport` with the given [`Config`]. + pub fn new(config: Config) -> Transport { + Transport { config } + } + + /// Wraps `Transport` in [`Boxed`] and makes it ready to be consumed by + /// SwarmBuilder. 
+ pub fn boxed(self) -> Boxed<(PeerId, StreamMuxerBox)> { + self.map(|(peer_id, muxer), _| (peer_id, StreamMuxerBox::new(muxer))) + .boxed() + } +} + +impl libp2p_core::Transport for Transport { + type Output = (PeerId, Connection); + type Error = Error; + type ListenerUpgrade = Pin> + Send>>; + type Dial = Pin> + Send>>; + + fn listen_on( + &mut self, + _id: ListenerId, + addr: Multiaddr, + ) -> Result<(), TransportError> { + Err(TransportError::MultiaddrNotSupported(addr)) + } + + fn remove_listener(&mut self, _id: ListenerId) -> bool { + false + } + + fn dial(&mut self, addr: Multiaddr) -> Result> { + if maybe_local_firefox() { + return Err(TransportError::Other( + "Firefox does not support WebRTC over localhost or 127.0.0.1" + .to_string() + .into(), + )); + } + + let (sock_addr, server_fingerprint) = libp2p_webrtc_utils::parse_webrtc_dial_addr(&addr) + .ok_or_else(|| TransportError::MultiaddrNotSupported(addr.clone()))?; + + if sock_addr.port() == 0 || sock_addr.ip().is_unspecified() { + return Err(TransportError::MultiaddrNotSupported(addr)); + } + + let config = self.config.clone(); + + Ok(async move { + let (peer_id, connection) = + upgrade::outbound(sock_addr, server_fingerprint, config.keypair.clone()).await?; + + Ok((peer_id, connection)) + } + .boxed()) + } + + fn dial_as_listener( + &mut self, + addr: Multiaddr, + ) -> Result> { + Err(TransportError::MultiaddrNotSupported(addr)) + } + + fn poll( + self: Pin<&mut Self>, + _cx: &mut Context<'_>, + ) -> Poll> { + Poll::Pending + } + + fn address_translation(&self, _listen: &Multiaddr, _observed: &Multiaddr) -> Option { + None + } +} + +/// Checks if local Firefox. 
+/// +/// See: `` for more details +fn maybe_local_firefox() -> bool { + let window = &web_sys::window().expect("window should be available"); + let ua = match window.navigator().user_agent() { + Ok(agent) => agent.to_lowercase(), + Err(_) => return false, + }; + + let hostname = match window + .document() + .expect("should be valid document") + .location() + { + Some(location) => match location.hostname() { + Ok(hostname) => hostname, + Err(_) => return false, + }, + None => return false, + }; + + // check if web_sys::Navigator::user_agent() matches any of the following: + // - firefox + // - seamonkey + // - iceape + // AND hostname is either localhost or "127.0.0.1" + (ua.contains("firefox") || ua.contains("seamonkey") || ua.contains("iceape")) + && (hostname == "localhost" || hostname == "127.0.0.1" || hostname == "[::1]") +} diff --git a/transports/webrtc-websys/src/upgrade.rs b/transports/webrtc-websys/src/upgrade.rs new file mode 100644 index 00000000000..d42f2e3ae18 --- /dev/null +++ b/transports/webrtc-websys/src/upgrade.rs @@ -0,0 +1,59 @@ +use super::Error; +use crate::connection::RtcPeerConnection; +use crate::error::AuthenticationError; +use crate::sdp; +use crate::Connection; +use libp2p_identity::{Keypair, PeerId}; +use libp2p_webrtc_utils::noise; +use libp2p_webrtc_utils::Fingerprint; +use send_wrapper::SendWrapper; +use std::net::SocketAddr; + +/// Upgrades an outbound WebRTC connection by creating the data channel +/// and conducting a Noise handshake +pub(crate) async fn outbound( + sock_addr: SocketAddr, + remote_fingerprint: Fingerprint, + id_keys: Keypair, +) -> Result<(PeerId, Connection), Error> { + let fut = SendWrapper::new(outbound_inner(sock_addr, remote_fingerprint, id_keys)); + fut.await +} + +/// Inner outbound function that is wrapped in [SendWrapper] +async fn outbound_inner( + sock_addr: SocketAddr, + remote_fingerprint: Fingerprint, + id_keys: Keypair, +) -> Result<(PeerId, Connection), Error> { + let rtc_peer_connection = 
RtcPeerConnection::new(remote_fingerprint.algorithm()).await?; + + // Create stream for Noise handshake + // Must create data channel before Offer is created for it to be included in the SDP + let (channel, listener) = rtc_peer_connection.new_handshake_stream(); + drop(listener); + + let ufrag = libp2p_webrtc_utils::sdp::random_ufrag(); + + let offer = rtc_peer_connection.create_offer().await?; + let munged_offer = sdp::offer(offer, &ufrag); + rtc_peer_connection + .set_local_description(munged_offer) + .await?; + + let answer = sdp::answer(sock_addr, remote_fingerprint, &ufrag); + rtc_peer_connection.set_remote_description(answer).await?; + + let local_fingerprint = rtc_peer_connection.local_fingerprint()?; + + tracing::trace!(?local_fingerprint); + tracing::trace!(?remote_fingerprint); + + let peer_id = noise::outbound(id_keys, channel, remote_fingerprint, local_fingerprint) + .await + .map_err(AuthenticationError)?; + + tracing::debug!(peer=%peer_id, "Remote peer identified"); + + Ok((peer_id, Connection::new(rtc_peer_connection))) +} diff --git a/transports/webrtc/CHANGELOG.md b/transports/webrtc/CHANGELOG.md index c90bcc7446c..7cc2b2b63bc 100644 --- a/transports/webrtc/CHANGELOG.md +++ b/transports/webrtc/CHANGELOG.md @@ -1,4 +1,23 @@ -## 0.5.0-alpha - unreleased +## 0.7.0-alpha + +- Bump version in order to publish a new version dependent on latest `libp2p-core`. + See [PR 4959](https://github.com/libp2p/rust-libp2p/pull/4959). + +## 0.6.1-alpha + +- Move common dependencies to `libp2p-webrtc-utils` crate. + See [PR 4248]. + +[PR 4248]: https://github.com/libp2p/rust-libp2p/pull/4248 + +## 0.6.0-alpha + +- Update `webrtc` dependency to `v0.8.0`. + See [PR 4099]. + +[PR 4099]: https://github.com/libp2p/rust-libp2p/pull/4099 + +## 0.5.0-alpha - Raise MSRV to 1.65. See [PR 3715]. 
diff --git a/transports/webrtc/Cargo.toml b/transports/webrtc/Cargo.toml index 1019a49678f..de805840683 100644 --- a/transports/webrtc/Cargo.toml +++ b/transports/webrtc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "libp2p-webrtc" -version = "0.5.0-alpha" +version = "0.7.0-alpha" authors = ["Parity Technologies "] description = "WebRTC transport for libp2p" repository = "https://github.com/libp2p/rust-libp2p" @@ -12,49 +12,48 @@ categories = ["network-programming", "asynchronous"] [dependencies] async-trait = "0.1" -asynchronous-codec = "0.6.1" bytes = "1" futures = "0.3" futures-timer = "3" hex = "0.4" -if-watch = "3.0" +if-watch = "3.2" libp2p-core = { workspace = true } libp2p-noise = { workspace = true } libp2p-identity = { workspace = true } -log = "0.4" -sha2 = "0.10.6" -multihash = { version = "0.17.0", default-features = false } -quick-protobuf = "0.8" -quick-protobuf-codec = { workspace = true } +libp2p-webrtc-utils = { workspace = true } +multihash = { workspace = true } rand = "0.8" -rcgen = "0.9.3" +rcgen = "0.11.3" serde = { version = "1.0", features = ["derive"] } -stun = "0.4" +stun = "0.5" thiserror = "1" tinytemplate = "1.2" -tokio = { version = "1.28", features = ["net"], optional = true} +tokio = { version = "1.35", features = ["net"], optional = true } tokio-util = { version = "0.7", features = ["compat"], optional = true } -webrtc = { version = "0.6.0", optional = true } +tracing = "0.1.37" +webrtc = { version = "0.9.0", optional = true } [features] tokio = ["dep:tokio", "dep:tokio-util", "dep:webrtc", "if-watch/tokio"] pem = ["webrtc?/pem"] [dev-dependencies] -anyhow = "1.0" -env_logger = "0.10" -hex-literal = "0.4" -libp2p-swarm = { workspace = true, features = ["macros", "tokio"] } -libp2p-ping = { workspace = true } -tokio = { version = "1.28", features = ["full"] } -unsigned-varint = { version = "0.7", features = ["asynchronous_codec"] } -void = "1" +libp2p-identity = { workspace = true, features = ["rand"] } +tokio = { version = "1.35", 
features = ["full"] } quickcheck = "1.0.3" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } + [[test]] name = "smoke" required-features = ["tokio"] -[[example]] -name = "listen_ping" -required-features = ["tokio"] +[lints] +workspace = true + +# Passing arguments to the docsrs builder in order to properly document cfg's. +# More information: https://docs.rs/about/builds#cross-compiling +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] +rustc-args = ["--cfg", "docsrs"] diff --git a/transports/webrtc/examples/listen_ping.rs b/transports/webrtc/examples/listen_ping.rs deleted file mode 100644 index 219acf0d08b..00000000000 --- a/transports/webrtc/examples/listen_ping.rs +++ /dev/null @@ -1,66 +0,0 @@ -use anyhow::Result; -use futures::StreamExt; -use libp2p_core::muxing::StreamMuxerBox; -use libp2p_core::Transport; -use libp2p_identity as identity; -use libp2p_ping as ping; -use libp2p_swarm::{keep_alive, NetworkBehaviour, Swarm, SwarmBuilder}; -use rand::thread_rng; -use void::Void; - -/// An example WebRTC server that will accept connections and run the ping protocol on them. 
-#[tokio::main] -async fn main() -> Result<()> { - let mut swarm = create_swarm()?; - - swarm.listen_on("/ip4/127.0.0.1/udp/0/webrtc-direct".parse()?)?; - - loop { - let event = swarm.next().await.unwrap(); - eprintln!("New event: {event:?}") - } -} - -fn create_swarm() -> Result> { - let id_keys = identity::Keypair::generate_ed25519(); - let peer_id = id_keys.public().to_peer_id(); - let transport = libp2p_webrtc::tokio::Transport::new( - id_keys, - libp2p_webrtc::tokio::Certificate::generate(&mut thread_rng())?, - ); - - let transport = transport - .map(|(peer_id, conn), _| (peer_id, StreamMuxerBox::new(conn))) - .boxed(); - - Ok(SwarmBuilder::with_tokio_executor(transport, Behaviour::default(), peer_id).build()) -} - -#[derive(NetworkBehaviour, Default)] -#[behaviour( - out_event = "Event", - event_process = false, - prelude = "libp2p_swarm::derive_prelude" -)] -struct Behaviour { - ping: ping::Behaviour, - keep_alive: keep_alive::Behaviour, -} - -#[derive(Debug)] -#[allow(clippy::large_enum_variant)] -enum Event { - Ping(ping::Event), -} - -impl From for Event { - fn from(e: ping::Event) -> Self { - Event::Ping(e) - } -} - -impl From for Event { - fn from(event: Void) -> Self { - void::unreachable(event) - } -} diff --git a/transports/webrtc/src/lib.rs b/transports/webrtc/src/lib.rs index 012796a6b69..ea1e6a4d646 100644 --- a/transports/webrtc/src/lib.rs +++ b/transports/webrtc/src/lib.rs @@ -80,11 +80,7 @@ //! is to make the hash a part of the remote's multiaddr. On the server side, we turn //! certificate verification off. 
-mod proto { - #![allow(unreachable_pub)] - include!("generated/mod.rs"); - pub(crate) use self::webrtc::pb::{mod_Message::Flag, Message}; -} +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] #[cfg(feature = "tokio")] pub mod tokio; diff --git a/transports/webrtc/src/tokio/certificate.rs b/transports/webrtc/src/tokio/certificate.rs index 748cfdb6ffd..7c7c65f0447 100644 --- a/transports/webrtc/src/tokio/certificate.rs +++ b/transports/webrtc/src/tokio/certificate.rs @@ -97,24 +97,18 @@ enum Kind { InvalidPEM(#[from] webrtc::Error), } -#[cfg(test)] +#[cfg(all(test, feature = "pem"))] mod test { - #[cfg(feature = "pem")] - use anyhow::Result; + use super::*; + use rand::thread_rng; - #[cfg(feature = "pem")] #[test] - fn test_certificate_serialize_pem_and_from_pem() -> Result<()> { - use super::*; - use rand::thread_rng; - + fn test_certificate_serialize_pem_and_from_pem() { let cert = Certificate::generate(&mut thread_rng()).unwrap(); let pem = cert.serialize_pem(); - let loaded_cert = Certificate::from_pem(&pem)?; - - assert_eq!(loaded_cert, cert); + let loaded_cert = Certificate::from_pem(&pem).unwrap(); - Ok(()) + assert_eq!(loaded_cert, cert) } } diff --git a/transports/webrtc/src/tokio/connection.rs b/transports/webrtc/src/tokio/connection.rs index 72e39ce525f..3bcc4c3193e 100644 --- a/transports/webrtc/src/tokio/connection.rs +++ b/transports/webrtc/src/tokio/connection.rs @@ -40,7 +40,7 @@ use std::{ task::{Context, Poll}, }; -use crate::tokio::{error::Error, substream, substream::Substream}; +use crate::tokio::{error::Error, stream, stream::Stream}; /// Maximum number of unprocessed data channels. /// See [`Connection::poll_inbound`]. @@ -56,14 +56,14 @@ pub struct Connection { /// Channel onto which incoming data channels are put. incoming_data_channels_rx: mpsc::Receiver>, - /// Future, which, once polled, will result in an outbound substream. + /// Future, which, once polled, will result in an outbound stream. 
outbound_fut: Option, Error>>>, /// Future, which, once polled, will result in closing the entire connection. close_fut: Option>>, - /// A list of futures, which, once completed, signal that a [`Substream`] has been dropped. - drop_listeners: FuturesUnordered, + /// A list of futures, which, once completed, signal that a [`Stream`] has been dropped. + drop_listeners: FuturesUnordered, no_drop_listeners_waker: Option, } @@ -101,7 +101,7 @@ impl Connection { tx: Arc>>>, ) { rtc_conn.on_data_channel(Box::new(move |data_channel: Arc| { - log::debug!("Incoming data channel {}", data_channel.id()); + tracing::debug!(channel=%data_channel.id(), "Incoming data channel"); let tx = tx.clone(); @@ -109,7 +109,7 @@ impl Connection { data_channel.on_open({ let data_channel = data_channel.clone(); Box::new(move || { - log::debug!("Data channel {} open", data_channel.id()); + tracing::debug!(channel=%data_channel.id(), "Data channel open"); Box::pin(async move { let data_channel = data_channel.clone(); @@ -118,7 +118,7 @@ impl Connection { Ok(detached) => { let mut tx = tx.lock().await; if let Err(e) = tx.try_send(detached.clone()) { - log::error!("Can't send data channel {}: {}", id, e); + tracing::error!(channel=%id, "Can't send data channel: {}", e); // We're not accepting data channels fast enough => // close this channel. // @@ -126,16 +126,16 @@ impl Connection { // during the negotiation process, but it's not // possible with the current API. 
if let Err(e) = detached.close().await { - log::error!( - "Failed to close data channel {}: {}", - id, + tracing::error!( + channel=%id, + "Failed to close data channel: {}", e ); } } } Err(e) => { - log::error!("Can't detach data channel {}: {}", id, e); + tracing::error!(channel=%id, "Can't detach data channel: {}", e); } }; }) @@ -147,7 +147,7 @@ impl Connection { } impl StreamMuxer for Connection { - type Substream = Substream; + type Substream = Stream; type Error = Error; fn poll_inbound( @@ -156,15 +156,15 @@ impl StreamMuxer for Connection { ) -> Poll> { match ready!(self.incoming_data_channels_rx.poll_next_unpin(cx)) { Some(detached) => { - log::trace!("Incoming substream {}", detached.stream_identifier()); + tracing::trace!(stream=%detached.stream_identifier(), "Incoming stream"); - let (substream, drop_listener) = Substream::new(detached); + let (stream, drop_listener) = Stream::new(detached); self.drop_listeners.push(drop_listener); if let Some(waker) = self.no_drop_listeners_waker.take() { waker.wake() } - Poll::Ready(Ok(substream)) + Poll::Ready(Ok(stream)) } None => { debug_assert!( @@ -185,7 +185,7 @@ impl StreamMuxer for Connection { match ready!(self.drop_listeners.poll_next_unpin(cx)) { Some(Ok(())) => {} Some(Err(e)) => { - log::debug!("a DropListener failed: {e}") + tracing::debug!("a DropListener failed: {e}") } None => { self.no_drop_listeners_waker = Some(cx.waker().clone()); @@ -208,7 +208,7 @@ impl StreamMuxer for Connection { // No need to hold the lock during the DTLS handshake. 
drop(peer_conn); - log::trace!("Opening data channel {}", data_channel.id()); + tracing::trace!(channel=%data_channel.id(), "Opening data channel"); let (tx, rx) = oneshot::channel::>(); @@ -226,15 +226,15 @@ impl StreamMuxer for Connection { Ok(detached) => { self.outbound_fut = None; - log::trace!("Outbound substream {}", detached.stream_identifier()); + tracing::trace!(stream=%detached.stream_identifier(), "Outbound stream"); - let (substream, drop_listener) = Substream::new(detached); + let (stream, drop_listener) = Stream::new(detached); self.drop_listeners.push(drop_listener); if let Some(waker) = self.no_drop_listeners_waker.take() { waker.wake() } - Poll::Ready(Ok(substream)) + Poll::Ready(Ok(stream)) } Err(e) => { self.outbound_fut = None; @@ -244,7 +244,7 @@ impl StreamMuxer for Connection { } fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - log::debug!("Closing connection"); + tracing::debug!("Closing connection"); let peer_conn = self.peer_conn.clone(); let fut = self.close_fut.get_or_insert(Box::pin(async move { @@ -275,7 +275,7 @@ pub(crate) async fn register_data_channel_open_handler( data_channel.on_open({ let data_channel = data_channel.clone(); Box::new(move || { - log::debug!("Data channel {} open", data_channel.id()); + tracing::debug!(channel=%data_channel.id(), "Data channel open"); Box::pin(async move { let data_channel = data_channel.clone(); @@ -283,14 +283,14 @@ pub(crate) async fn register_data_channel_open_handler( match data_channel.detach().await { Ok(detached) => { if let Err(e) = data_channel_tx.send(detached.clone()) { - log::error!("Can't send data channel {}: {:?}", id, e); + tracing::error!(channel=%id, "Can't send data channel: {:?}", e); if let Err(e) = detached.close().await { - log::error!("Failed to close data channel {}: {}", id, e); + tracing::error!(channel=%id, "Failed to close data channel: {}", e); } } } Err(e) => { - log::error!("Can't detach data channel {}: {}", id, e); + 
tracing::error!(channel=%id, "Can't detach data channel: {}", e); } }; }) diff --git a/transports/webrtc/src/tokio/fingerprint.rs b/transports/webrtc/src/tokio/fingerprint.rs index 3776f0dc24e..c075e486232 100644 --- a/transports/webrtc/src/tokio/fingerprint.rs +++ b/transports/webrtc/src/tokio/fingerprint.rs @@ -18,31 +18,25 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use multihash::MultihashGeneric; -use sha2::Digest as _; -use std::fmt; use webrtc::dtls_transport::dtls_fingerprint::RTCDtlsFingerprint; const SHA256: &str = "sha-256"; -const MULTIHASH_SHA256_CODE: u64 = 0x12; -type Multihash = MultihashGeneric<64>; +type Multihash = multihash::Multihash<64>; /// A certificate fingerprint that is assumed to be created using the SHA256 hash algorithm. -#[derive(Eq, PartialEq, Copy, Clone)] -pub struct Fingerprint([u8; 32]); +#[derive(Debug, Eq, PartialEq, Copy, Clone)] +pub struct Fingerprint(libp2p_webrtc_utils::Fingerprint); impl Fingerprint { - pub(crate) const FF: Fingerprint = Fingerprint([0xFF; 32]); - #[cfg(test)] pub fn raw(bytes: [u8; 32]) -> Self { - Self(bytes) + Self(libp2p_webrtc_utils::Fingerprint::raw(bytes)) } /// Creates a fingerprint from a raw certificate. pub fn from_certificate(bytes: &[u8]) -> Self { - Fingerprint(sha2::Sha256::digest(bytes).into()) + Fingerprint(libp2p_webrtc_utils::Fingerprint::from_certificate(bytes)) } /// Converts [`RTCDtlsFingerprint`] to [`Fingerprint`]. @@ -54,58 +48,35 @@ impl Fingerprint { let mut buf = [0; 32]; hex::decode_to_slice(fp.value.replace(':', ""), &mut buf).ok()?; - Some(Self(buf)) + Some(Self(libp2p_webrtc_utils::Fingerprint::raw(buf))) } - /// Converts [`Multihash`](MultihashGeneric) to [`Fingerprint`]. + /// Converts [`Multihash`](multihash::Multihash) to [`Fingerprint`]. pub fn try_from_multihash(hash: Multihash) -> Option { - if hash.code() != MULTIHASH_SHA256_CODE { - // Only support SHA256 for now. 
- return None; - } - - let bytes = hash.digest().try_into().ok()?; - - Some(Self(bytes)) + Some(Self(libp2p_webrtc_utils::Fingerprint::try_from_multihash( + hash, + )?)) } - /// Converts this fingerprint to [`Multihash`](MultihashGeneric). + /// Converts this fingerprint to [`Multihash`](multihash::Multihash). pub fn to_multihash(self) -> Multihash { - Multihash::wrap(MULTIHASH_SHA256_CODE, &self.0).expect("fingerprint's len to be 32 bytes") + self.0.to_multihash() } /// Formats this fingerprint as uppercase hex, separated by colons (`:`). /// /// This is the format described in . pub fn to_sdp_format(self) -> String { - self.0.map(|byte| format!("{byte:02X}")).join(":") + self.0.to_sdp_format() } /// Returns the algorithm used (e.g. "sha-256"). /// See pub fn algorithm(&self) -> String { - SHA256.to_owned() - } -} - -impl fmt::Debug for Fingerprint { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str(&hex::encode(self.0)) + self.0.algorithm() } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn sdp_format() { - let fp = Fingerprint::raw(hex_literal::hex!( - "7DE3D83F81A680592A471E6B6ABB0747ABD35385A8093FDFE112C1EEBB6CC6AC" - )); - - let sdp_format = fp.to_sdp_format(); - assert_eq!(sdp_format, "7D:E3:D8:3F:81:A6:80:59:2A:47:1E:6B:6A:BB:07:47:AB:D3:53:85:A8:09:3F:DF:E1:12:C1:EE:BB:6C:C6:AC") + pub(crate) fn into_inner(self) -> libp2p_webrtc_utils::Fingerprint { + self.0 } } diff --git a/transports/webrtc/src/tokio/mod.rs b/transports/webrtc/src/tokio/mod.rs index 85e041bf98f..4f2c0dd9116 100644 --- a/transports/webrtc/src/tokio/mod.rs +++ b/transports/webrtc/src/tokio/mod.rs @@ -24,7 +24,7 @@ mod error; mod fingerprint; mod req_res_chan; mod sdp; -mod substream; +mod stream; mod transport; mod udp_mux; mod upgrade; diff --git a/transports/webrtc/src/tokio/sdp.rs b/transports/webrtc/src/tokio/sdp.rs index d2f424e5d4e..8549a864dcc 100644 --- a/transports/webrtc/src/tokio/sdp.rs +++ b/transports/webrtc/src/tokio/sdp.rs @@ -18,22 
+18,19 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use serde::Serialize; -use tinytemplate::TinyTemplate; +pub(crate) use libp2p_webrtc_utils::sdp::random_ufrag; +use libp2p_webrtc_utils::sdp::render_description; +use libp2p_webrtc_utils::Fingerprint; +use std::net::SocketAddr; use webrtc::peer_connection::sdp::session_description::RTCSessionDescription; -use std::net::{IpAddr, SocketAddr}; - -use crate::tokio::fingerprint::Fingerprint; - /// Creates the SDP answer used by the client. pub(crate) fn answer( addr: SocketAddr, - server_fingerprint: &Fingerprint, + server_fingerprint: Fingerprint, client_ufrag: &str, ) -> RTCSessionDescription { - RTCSessionDescription::answer(render_description( - SERVER_SESSION_DESCRIPTION, + RTCSessionDescription::answer(libp2p_webrtc_utils::sdp::answer( addr, server_fingerprint, client_ufrag, @@ -45,13 +42,16 @@ pub(crate) fn answer( /// /// Certificate verification is disabled which is why we hardcode a dummy fingerprint here. pub(crate) fn offer(addr: SocketAddr, client_ufrag: &str) -> RTCSessionDescription { - RTCSessionDescription::offer(render_description( + let offer = render_description( CLIENT_SESSION_DESCRIPTION, addr, - &Fingerprint::FF, + Fingerprint::FF, client_ufrag, - )) - .unwrap() + ); + + tracing::trace!(offer=%offer, "Created SDP offer"); + + RTCSessionDescription::offer(offer).unwrap() } // An SDP message that constitutes the offer. @@ -142,111 +142,3 @@ a=setup:actpass a=sctp-port:5000 a=max-message-size:16384 "; - -// See [`CLIENT_SESSION_DESCRIPTION`]. -// -// a=ice-lite -// -// A lite implementation is only appropriate for devices that will *always* be connected to -// the public Internet and have a public IP address at which it can receive packets from any -// correspondent. ICE will not function when a lite implementation is placed behind a NAT -// (RFC8445). -// -// a=tls-id: -// -// "TLS ID" uniquely identifies a TLS association. 
-// The ICE protocol uses a "TLS ID" system to indicate whether a fresh DTLS connection -// must be reopened in case of ICE renegotiation. Considering that ICE renegotiations -// never happen in our use case, we can simply put a random value and not care about -// it. Note however that the TLS ID in the answer must be present if and only if the -// offer contains one. (RFC8842) -// TODO: is it true that renegotiations never happen? what about a connection closing? -// "tls-id" attribute MUST be present in the initial offer and respective answer (RFC8839). -// XXX: but right now browsers don't send it. -// -// a=setup:passive -// -// "passive" indicates that the remote DTLS server will only listen for incoming -// connections. (RFC5763) -// The answerer (server) MUST not be located behind a NAT (RFC6135). -// -// The answerer MUST use either a setup attribute value of setup:active or setup:passive. -// Note that if the answerer uses setup:passive, then the DTLS handshake will not begin until -// the answerer is received, which adds additional latency. setup:active allows the answer and -// the DTLS handshake to occur in parallel. Thus, setup:active is RECOMMENDED. -// -// a=candidate: -// -// A transport address for a candidate that can be used for connectivity checks (RFC8839). -// -// a=end-of-candidates -// -// Indicate that no more candidates will ever be sent (RFC8838). -const SERVER_SESSION_DESCRIPTION: &str = "v=0 -o=- 0 0 IN {ip_version} {target_ip} -s=- -t=0 0 -a=ice-lite -m=application {target_port} UDP/DTLS/SCTP webrtc-datachannel -c=IN {ip_version} {target_ip} -a=mid:0 -a=ice-options:ice2 -a=ice-ufrag:{ufrag} -a=ice-pwd:{pwd} -a=fingerprint:{fingerprint_algorithm} {fingerprint_value} - -a=setup:passive -a=sctp-port:5000 -a=max-message-size:16384 -a=candidate:1 1 UDP 1 {target_ip} {target_port} typ host -a=end-of-candidates -"; - -/// Indicates the IP version used in WebRTC: `IP4` or `IP6`. 
-#[derive(Serialize)] -enum IpVersion { - IP4, - IP6, -} - -/// Context passed to the templating engine, which replaces the above placeholders (e.g. -/// `{IP_VERSION}`) with real values. -#[derive(Serialize)] -struct DescriptionContext { - pub(crate) ip_version: IpVersion, - pub(crate) target_ip: IpAddr, - pub(crate) target_port: u16, - pub(crate) fingerprint_algorithm: String, - pub(crate) fingerprint_value: String, - pub(crate) ufrag: String, - pub(crate) pwd: String, -} - -/// Renders a [`TinyTemplate`] description using the provided arguments. -fn render_description( - description: &str, - addr: SocketAddr, - fingerprint: &Fingerprint, - ufrag: &str, -) -> String { - let mut tt = TinyTemplate::new(); - tt.add_template("description", description).unwrap(); - - let context = DescriptionContext { - ip_version: { - if addr.is_ipv4() { - IpVersion::IP4 - } else { - IpVersion::IP6 - } - }, - target_ip: addr.ip(), - target_port: addr.port(), - fingerprint_algorithm: fingerprint.algorithm(), - fingerprint_value: fingerprint.to_sdp_format(), - // NOTE: ufrag is equal to pwd. - ufrag: ufrag.to_owned(), - pwd: ufrag.to_owned(), - }; - tt.render("description", &context).unwrap() -} diff --git a/transports/webrtc/src/tokio/stream.rs b/transports/webrtc/src/tokio/stream.rs new file mode 100644 index 00000000000..4278a751e27 --- /dev/null +++ b/transports/webrtc/src/tokio/stream.rs @@ -0,0 +1,80 @@ +// Copyright 2023 Protocol Labs. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +use std::{ + pin::Pin, + sync::Arc, + task::{Context, Poll}, +}; + +use futures::prelude::*; +use libp2p_webrtc_utils::MAX_MSG_LEN; +use tokio_util::compat::{Compat, TokioAsyncReadCompatExt}; +use webrtc::data::data_channel::{DataChannel, PollDataChannel}; + +/// A substream on top of a WebRTC data channel. +/// +/// To be a proper libp2p substream, we need to implement [`AsyncRead`] and [`AsyncWrite`] as well +/// as support a half-closed state which we do by framing messages in a protobuf envelope. +pub struct Stream { + inner: libp2p_webrtc_utils::Stream>, +} + +pub(crate) type DropListener = libp2p_webrtc_utils::DropListener>; + +impl Stream { + /// Returns a new `Substream` and a listener, which will notify the receiver when/if the substream + /// is dropped. 
+ pub(crate) fn new(data_channel: Arc) -> (Self, DropListener) { + let mut data_channel = PollDataChannel::new(data_channel).compat(); + data_channel.get_mut().set_read_buf_capacity(MAX_MSG_LEN); + + let (inner, drop_listener) = libp2p_webrtc_utils::Stream::new(data_channel); + + (Self { inner }, drop_listener) + } +} +impl AsyncRead for Stream { + fn poll_read( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &mut [u8], + ) -> Poll> { + Pin::new(&mut self.get_mut().inner).poll_read(cx, buf) + } +} + +impl AsyncWrite for Stream { + fn poll_write( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &[u8], + ) -> Poll> { + Pin::new(&mut self.get_mut().inner).poll_write(cx, buf) + } + + fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + Pin::new(&mut self.get_mut().inner).poll_flush(cx) + } + + fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + Pin::new(&mut self.get_mut().inner).poll_close(cx) + } +} diff --git a/transports/webrtc/src/tokio/transport.rs b/transports/webrtc/src/tokio/transport.rs index 904da61c44a..02cfa6f7296 100644 --- a/transports/webrtc/src/tokio/transport.rs +++ b/transports/webrtc/src/tokio/transport.rs @@ -59,7 +59,7 @@ impl Transport { /// # Example /// /// ``` - /// use libp2p_core::identity; + /// use libp2p_identity as identity; /// use rand::thread_rng; /// use libp2p_webrtc::tokio::{Transport, Certificate}; /// @@ -80,9 +80,11 @@ impl libp2p_core::Transport for Transport { type ListenerUpgrade = BoxFuture<'static, Result>; type Dial = BoxFuture<'static, Result>; - fn listen_on(&mut self, addr: Multiaddr) -> Result> { - let id = ListenerId::new(); - + fn listen_on( + &mut self, + id: ListenerId, + addr: Multiaddr, + ) -> Result<(), TransportError> { let socket_addr = parse_webrtc_listen_addr(&addr).ok_or(TransportError::MultiaddrNotSupported(addr))?; let udp_mux = UDPMuxNewAddr::listen_on(socket_addr) @@ -93,7 +95,7 @@ impl libp2p_core::Transport for Transport { .map_err(|e| 
TransportError::Other(Error::Io(e)))?, ); - Ok(id) + Ok(()) } fn remove_listener(&mut self, id: ListenerId) -> bool { @@ -117,7 +119,7 @@ impl libp2p_core::Transport for Transport { } fn dial(&mut self, addr: Multiaddr) -> Result> { - let (sock_addr, server_fingerprint) = parse_webrtc_dial_addr(&addr) + let (sock_addr, server_fingerprint) = libp2p_webrtc_utils::parse_webrtc_dial_addr(&addr) .ok_or_else(|| TransportError::MultiaddrNotSupported(addr.clone()))?; if sock_addr.port() == 0 || sock_addr.ip().is_unspecified() { return Err(TransportError::MultiaddrNotSupported(addr)); @@ -138,7 +140,7 @@ impl libp2p_core::Transport for Transport { sock_addr, config.inner, udp_mux, - client_fingerprint, + client_fingerprint.into_inner(), server_fingerprint, config.id_keys, ) @@ -236,7 +238,7 @@ impl ListenStream { /// terminate the stream. fn close(&mut self, reason: Result<(), Error>) { match self.report_closed { - Some(_) => log::debug!("Listener was already closed."), + Some(_) => tracing::debug!("Listener was already closed"), None => { // Report the listener event as closed. 
let _ = self @@ -255,9 +257,8 @@ impl ListenStream { } fn poll_if_watcher(&mut self, cx: &mut Context<'_>) -> Poll<::Item> { - let if_watcher = match self.if_watcher.as_mut() { - Some(w) => w, - None => return Poll::Pending, + let Some(if_watcher) = self.if_watcher.as_mut() else { + return Poll::Pending; }; while let Poll::Ready(event) = if_watcher.poll_if_event(cx) { @@ -335,7 +336,7 @@ impl Stream for ListenStream { new_addr.addr, self.config.inner.clone(), self.udp_mux.udp_mux_handle(), - self.config.fingerprint, + self.config.fingerprint.into_inner(), new_addr.ufrag, self.config.id_keys.clone(), ) @@ -391,7 +392,7 @@ fn socketaddr_to_multiaddr(socket_addr: &SocketAddr, certhash: Option Option { _ => return None, }; - let port = iter.next()?; - let webrtc = iter.next()?; - - let port = match (port, webrtc) { - (Protocol::Udp(port), Protocol::WebRTC) => port, - _ => return None, + let Protocol::Udp(port) = iter.next()? else { + return None; + }; + let Protocol::WebRTCDirect = iter.next()? else { + return None; }; if iter.next().is_some() { @@ -425,40 +425,6 @@ fn parse_webrtc_listen_addr(addr: &Multiaddr) -> Option { Some(SocketAddr::new(ip, port)) } -/// Parse the given [`Multiaddr`] into a [`SocketAddr`] and a [`Fingerprint`] for dialing. -fn parse_webrtc_dial_addr(addr: &Multiaddr) -> Option<(SocketAddr, Fingerprint)> { - let mut iter = addr.iter(); - - let ip = match iter.next()? 
{ - Protocol::Ip4(ip) => IpAddr::from(ip), - Protocol::Ip6(ip) => IpAddr::from(ip), - _ => return None, - }; - - let port = iter.next()?; - let webrtc = iter.next()?; - let certhash = iter.next()?; - - let (port, fingerprint) = match (port, webrtc, certhash) { - (Protocol::Udp(port), Protocol::WebRTC, Protocol::Certhash(cert_hash)) => { - let fingerprint = Fingerprint::try_from_multihash(cert_hash)?; - - (port, fingerprint) - } - _ => return None, - }; - - match iter.next() { - Some(Protocol::P2p(_)) => {} - // peer ID is optional - None => {} - // unexpected protocol - Some(_) => return None, - } - - Some((SocketAddr::new(ip, port), fingerprint)) -} - // Tests ////////////////////////////////////////////////////////////////////////////////////////// #[cfg(test)] @@ -467,7 +433,7 @@ mod tests { use futures::future::poll_fn; use libp2p_core::{multiaddr::Protocol, Transport as _}; use rand::thread_rng; - use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; + use std::net::{IpAddr, Ipv6Addr}; #[test] fn missing_webrtc_protocol() { @@ -478,44 +444,6 @@ mod tests { assert!(maybe_parsed.is_none()); } - #[test] - fn parse_valid_address_with_certhash_and_p2p() { - let addr = "/ip4/127.0.0.1/udp/39901/webrtc-direct/certhash/uEiDikp5KVUgkLta1EjUN-IKbHk-dUBg8VzKgf5nXxLK46w/p2p/12D3KooWNpDk9w6WrEEcdsEH1y47W71S36yFjw4sd3j7omzgCSMS" - .parse() - .unwrap(); - - let maybe_parsed = parse_webrtc_dial_addr(&addr); - - assert_eq!( - maybe_parsed, - Some(( - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 39901), - Fingerprint::raw(hex_literal::hex!( - "e2929e4a5548242ed6b512350df8829b1e4f9d50183c5732a07f99d7c4b2b8eb" - )) - )) - ); - } - - #[test] - fn peer_id_is_not_required() { - let addr = "/ip4/127.0.0.1/udp/39901/webrtc-direct/certhash/uEiDikp5KVUgkLta1EjUN-IKbHk-dUBg8VzKgf5nXxLK46w" - .parse() - .unwrap(); - - let maybe_parsed = parse_webrtc_dial_addr(&addr); - - assert_eq!( - maybe_parsed, - Some(( - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 39901), - 
Fingerprint::raw(hex_literal::hex!( - "e2929e4a5548242ed6b512350df8829b1e4f9d50183c5732a07f99d7c4b2b8eb" - )) - )) - ); - } - #[test] fn tcp_is_invalid_protocol() { let addr = "/ip4/127.0.0.1/tcp/12345/webrtc-direct/certhash/uEiDikp5KVUgkLta1EjUN-IKbHk-dUBg8VzKgf5nXxLK46w" @@ -538,26 +466,6 @@ mod tests { assert!(maybe_parsed.is_none()); } - #[test] - fn parse_ipv6() { - let addr = - "/ip6/::1/udp/12345/webrtc-direct/certhash/uEiDikp5KVUgkLta1EjUN-IKbHk-dUBg8VzKgf5nXxLK46w/p2p/12D3KooWNpDk9w6WrEEcdsEH1y47W71S36yFjw4sd3j7omzgCSMS" - .parse() - .unwrap(); - - let maybe_parsed = parse_webrtc_dial_addr(&addr); - - assert_eq!( - maybe_parsed, - Some(( - SocketAddr::new(IpAddr::V6(Ipv6Addr::LOCALHOST), 12345), - Fingerprint::raw(hex_literal::hex!( - "e2929e4a5548242ed6b512350df8829b1e4f9d50183c5732a07f99d7c4b2b8eb" - )) - )) - ); - } - #[test] fn can_parse_valid_addr_without_certhash() { let addr = "/ip6/::1/udp/12345/webrtc-direct".parse().unwrap(); @@ -596,8 +504,12 @@ mod tests { // Run test twice to check that there is no unexpected behaviour if `QuicTransport.listener` // is temporarily empty. 
for _ in 0..2 { - let listener = transport - .listen_on("/ip4/0.0.0.0/udp/0/webrtc-direct".parse().unwrap()) + let listener = ListenerId::next(); + transport + .listen_on( + listener, + "/ip4/0.0.0.0/udp/0/webrtc-direct".parse().unwrap(), + ) .unwrap(); match poll_fn(|cx| Pin::new(&mut transport).as_mut().poll(cx)).await { TransportEvent::NewAddress { @@ -611,7 +523,10 @@ mod tests { assert!( matches!(listen_addr.iter().nth(1), Some(Protocol::Udp(port)) if port != 0) ); - assert!(matches!(listen_addr.iter().nth(2), Some(Protocol::WebRTC))); + assert!(matches!( + listen_addr.iter().nth(2), + Some(Protocol::WebRTCDirect) + )); } e => panic!("Unexpected event: {e:?}"), } diff --git a/transports/webrtc/src/tokio/udp_mux.rs b/transports/webrtc/src/tokio/udp_mux.rs index f978121d01c..7a8d960826d 100644 --- a/transports/webrtc/src/tokio/udp_mux.rs +++ b/transports/webrtc/src/tokio/udp_mux.rs @@ -175,7 +175,7 @@ impl UDPMuxNewAddr { None } Err(e) => { - log::debug!("{} (addr={})", e, addr); + tracing::debug!(address=%addr, "{}", e); None } } @@ -337,12 +337,12 @@ impl UDPMuxNewAddr { let conn = match conn { // If we couldn't find the connection based on source address, see if - // this is a STUN mesage and if so if we can find the connection based on ufrag. + // this is a STUN message and if so if we can find the connection based on ufrag. 
None if is_stun_message(read.filled()) => { match self.conn_from_stun_message(read.filled(), &addr) { Some(Ok(s)) => Some(s), Some(Err(e)) => { - log::debug!("addr={}: Error when querying existing connections: {}", &addr, e); + tracing::debug!(address=%&addr, "Error when querying existing connections: {}", e); continue; } None => None, @@ -357,20 +357,20 @@ impl UDPMuxNewAddr { if !self.new_addrs.contains(&addr) { match ufrag_from_stun_message(read.filled(), false) { Ok(ufrag) => { - log::trace!( - "Notifying about new address addr={} from ufrag={}", - &addr, - ufrag - ); + tracing::trace!( + address=%&addr, + %ufrag, + "Notifying about new address from ufrag", + ); self.new_addrs.insert(addr); return Poll::Ready(UDPMuxEvent::NewAddr( NewAddr { addr, ufrag }, )); } Err(e) => { - log::debug!( - "Unknown address addr={} (non STUN packet: {})", - &addr, + tracing::debug!( + address=%&addr, + "Unknown address (non STUN packet: {})", e ); } @@ -384,10 +384,10 @@ impl UDPMuxNewAddr { async move { if let Err(err) = conn.write_packet(&packet, addr).await { - log::error!( - "Failed to write packet: {} (addr={})", + tracing::error!( + address=%addr, + "Failed to write packet: {}", err, - addr ); } } @@ -401,10 +401,10 @@ impl UDPMuxNewAddr { Poll::Pending => {} Poll::Ready(Err(err)) if err.kind() == ErrorKind::TimedOut => {} Poll::Ready(Err(err)) if err.kind() == ErrorKind::ConnectionReset => { - log::debug!("ConnectionReset by remote client {err:?}") + tracing::debug!("ConnectionReset by remote client {err:?}") } Poll::Ready(Err(err)) => { - log::error!("Could not read udp packet: {}", err); + tracing::error!("Could not read udp packet: {}", err); return Poll::Ready(UDPMuxEvent::Error(err)); } } @@ -470,7 +470,7 @@ impl UDPMux for UdpMuxHandle { async fn remove_conn_by_ufrag(&self, ufrag: &str) { if let Err(e) = self.remove_sender.send(ufrag.to_owned()).await { - log::debug!("Failed to send message through channel: {:?}", e); + tracing::debug!("Failed to send message 
through channel: {:?}", e); } } } @@ -511,12 +511,12 @@ impl UDPMuxWriter for UdpMuxWriterHandle { { Ok(()) => {} Err(e) => { - log::debug!("Failed to send message through channel: {:?}", e); + tracing::debug!("Failed to send message through channel: {:?}", e); return; } } - log::debug!("Registered {} for {}", addr, conn.key()); + tracing::debug!(address=%addr, connection=%conn.key(), "Registered address for connection"); } async fn send_to(&self, buf: &[u8], target: &SocketAddr) -> Result { diff --git a/transports/webrtc/src/tokio/upgrade.rs b/transports/webrtc/src/tokio/upgrade.rs index 2d5e3fe2c10..4145a5e7510 100644 --- a/transports/webrtc/src/tokio/upgrade.rs +++ b/transports/webrtc/src/tokio/upgrade.rs @@ -18,15 +18,14 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -mod noise; +use libp2p_webrtc_utils::{noise, Fingerprint}; use futures::channel::oneshot; use futures::future::Either; use futures_timer::Delay; use libp2p_identity as identity; use libp2p_identity::PeerId; -use rand::distributions::Alphanumeric; -use rand::{thread_rng, Rng}; +use std::{net::SocketAddr, sync::Arc, time::Duration}; use webrtc::api::setting_engine::SettingEngine; use webrtc::api::APIBuilder; use webrtc::data::data_channel::DataChannel; @@ -38,9 +37,8 @@ use webrtc::ice::udp_network::UDPNetwork; use webrtc::peer_connection::configuration::RTCConfiguration; use webrtc::peer_connection::RTCPeerConnection; -use std::{net::SocketAddr, sync::Arc, time::Duration}; - -use crate::tokio::{error::Error, fingerprint::Fingerprint, sdp, substream::Substream, Connection}; +use crate::tokio::sdp::random_ufrag; +use crate::tokio::{error::Error, sdp, stream::Stream, Connection}; /// Creates a new outbound WebRTC connection. 
pub(crate) async fn outbound( @@ -51,19 +49,16 @@ pub(crate) async fn outbound( server_fingerprint: Fingerprint, id_keys: identity::Keypair, ) -> Result<(PeerId, Connection), Error> { - log::debug!("new outbound connection to {addr})"); + tracing::debug!(address=%addr, "new outbound connection to address"); let (peer_connection, ufrag) = new_outbound_connection(addr, config, udp_mux).await?; let offer = peer_connection.create_offer(None).await?; - log::debug!("created SDP offer for outbound connection: {:?}", offer.sdp); + tracing::debug!(offer=%offer.sdp, "created SDP offer for outbound connection"); peer_connection.set_local_description(offer).await?; - let answer = sdp::answer(addr, &server_fingerprint, &ufrag); - log::debug!( - "calculated SDP answer for outbound connection: {:?}", - answer - ); + let answer = sdp::answer(addr, server_fingerprint, &ufrag); + tracing::debug!(?answer, "calculated SDP answer for outbound connection"); peer_connection.set_remote_description(answer).await?; // This will start the gathering of ICE candidates. 
let data_channel = create_substream_for_noise_handshake(&peer_connection).await?; @@ -87,16 +82,16 @@ pub(crate) async fn inbound( remote_ufrag: String, id_keys: identity::Keypair, ) -> Result<(PeerId, Connection), Error> { - log::debug!("new inbound connection from {addr} (ufrag: {remote_ufrag})"); + tracing::debug!(address=%addr, ufrag=%remote_ufrag, "new inbound connection from address"); let peer_connection = new_inbound_connection(addr, config, udp_mux, &remote_ufrag).await?; let offer = sdp::offer(addr, &remote_ufrag); - log::debug!("calculated SDP offer for inbound connection: {:?}", offer); + tracing::debug!(?offer, "calculated SDP offer for inbound connection"); peer_connection.set_remote_description(offer).await?; let answer = peer_connection.create_answer(None).await?; - log::debug!("created SDP answer for inbound connection: {:?}", answer); + tracing::debug!(?answer, "created SDP answer for inbound connection"); peer_connection.set_local_description(answer).await?; // This will start the gathering of ICE candidates. let data_channel = create_substream_for_noise_handshake(&peer_connection).await?; @@ -155,18 +150,6 @@ async fn new_inbound_connection( Ok(connection) } -/// Generates a random ufrag and adds a prefix according to the spec. -fn random_ufrag() -> String { - format!( - "libp2p+webrtc+v1/{}", - thread_rng() - .sample_iter(&Alphanumeric) - .take(64) - .map(char::from) - .collect::() - ) -} - fn setting_engine( udp_mux: Arc, ufrag: &str, @@ -203,9 +186,7 @@ async fn get_remote_fingerprint(conn: &RTCPeerConnection) -> Fingerprint { Fingerprint::from_certificate(&cert_bytes) } -async fn create_substream_for_noise_handshake( - conn: &RTCPeerConnection, -) -> Result { +async fn create_substream_for_noise_handshake(conn: &RTCPeerConnection) -> Result { // NOTE: the data channel w/ `negotiated` flag set to `true` MUST be created on both ends. 
let data_channel = conn .create_data_channel( @@ -234,7 +215,7 @@ async fn create_substream_for_noise_handshake( } }; - let (substream, drop_listener) = Substream::new(channel); + let (substream, drop_listener) = Stream::new(channel); drop(drop_listener); // Don't care about cancelled substreams during initial handshake. Ok(substream) diff --git a/transports/webrtc/tests/smoke.rs b/transports/webrtc/tests/smoke.rs index bca159d785b..76e168edfd6 100644 --- a/transports/webrtc/tests/smoke.rs +++ b/transports/webrtc/tests/smoke.rs @@ -23,7 +23,7 @@ use futures::future::{BoxFuture, Either}; use futures::stream::StreamExt; use futures::{future, ready, AsyncReadExt, AsyncWriteExt, FutureExt, SinkExt}; use libp2p_core::muxing::{StreamMuxerBox, StreamMuxerExt}; -use libp2p_core::transport::{Boxed, TransportEvent}; +use libp2p_core::transport::{Boxed, ListenerId, TransportEvent}; use libp2p_core::{Multiaddr, Transport}; use libp2p_identity::PeerId; use libp2p_webrtc as webrtc; @@ -33,10 +33,13 @@ use std::num::NonZeroU8; use std::pin::Pin; use std::task::{Context, Poll}; use std::time::Duration; +use tracing_subscriber::EnvFilter; #[tokio::test] async fn smoke() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let (a_peer_id, mut a_transport) = create_transport(); let (b_peer_id, mut b_transport) = create_transport(); @@ -53,7 +56,9 @@ async fn smoke() { // Note: This test should likely be ported to the muxer compliance test suite. 
#[test] fn concurrent_connections_and_streams_tokio() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let rt = tokio::runtime::Runtime::new().unwrap(); let _guard = rt.enter(); @@ -81,7 +86,9 @@ fn create_transport() -> (PeerId, Boxed<(PeerId, StreamMuxerBox)>) { } async fn start_listening(transport: &mut Boxed<(PeerId, StreamMuxerBox)>, addr: &str) -> Multiaddr { - transport.listen_on(addr.parse().unwrap()).unwrap(); + transport + .listen_on(ListenerId::next(), addr.parse().unwrap()) + .unwrap(); match transport.next().await { Some(TransportEvent::NewAddress { listen_addr, .. }) => listen_addr, e => panic!("{e:?}"), @@ -100,7 +107,11 @@ fn prop(number_listeners: NonZeroU8, number_streams: NonZeroU8) -> quickcheck::T let (listeners_tx, mut listeners_rx) = mpsc::channel(number_listeners); - log::info!("Creating {number_streams} streams on {number_listeners} connections"); + tracing::info!( + stream_count=%number_streams, + connection_count=%number_listeners, + "Creating streams on connections" + ); // Spawn the listener nodes. 
for _ in 0..number_listeners { @@ -167,15 +178,13 @@ fn prop(number_listeners: NonZeroU8, number_streams: NonZeroU8) -> quickcheck::T async fn answer_inbound_streams(mut connection: StreamMuxerBox) { loop { - let mut inbound_stream = match future::poll_fn(|cx| { + let Ok(mut inbound_stream) = future::poll_fn(|cx| { let _ = connection.poll_unpin(cx)?; - connection.poll_inbound_unpin(cx) }) .await - { - Ok(s) => s, - Err(_) => return, + else { + return; }; tokio::spawn(async move { @@ -242,7 +251,7 @@ async fn open_outbound_streams( }); } - log::info!("Created {number_streams} streams"); + tracing::info!(stream_count=%number_streams, "Created streams"); while future::poll_fn(|cx| connection.poll_unpin(cx)) .await @@ -342,7 +351,7 @@ impl Future for ListenUpgrade<'_> { fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { loop { - match dbg!(self.listener.poll_next_unpin(cx)) { + match self.listener.poll_next_unpin(cx) { Poll::Ready(Some(TransportEvent::Incoming { upgrade, send_back_addr, diff --git a/transports/websocket-websys/CHANGELOG.md b/transports/websocket-websys/CHANGELOG.md new file mode 100644 index 00000000000..3cfb1b2fbf9 --- /dev/null +++ b/transports/websocket-websys/CHANGELOG.md @@ -0,0 +1,16 @@ +## 0.3.1 + +- Add support for different WASM environments by introducing a `WebContext` that + detects and abstracts the `Window` vs the `WorkerGlobalScope` API. + See [PR 4889](https://github.com/libp2p/rust-libp2p/pull/4889). + +## 0.3.0 + + +## 0.2.0 + +- Add Websys Websocket transport. + +## 0.1.0 + +- Crate claimed. 
diff --git a/transports/websocket-websys/Cargo.toml b/transports/websocket-websys/Cargo.toml new file mode 100644 index 00000000000..1a822f77f1e --- /dev/null +++ b/transports/websocket-websys/Cargo.toml @@ -0,0 +1,35 @@ +[package] +name = "libp2p-websocket-websys" +edition = "2021" +rust-version = "1.60.0" +description = "WebSocket for libp2p under WASM environment" +version = "0.3.1" +authors = ["Vince Vasta "] +license = "MIT" +repository = "https://github.com/libp2p/rust-libp2p" +keywords = ["peer-to-peer", "libp2p", "networking"] +categories = ["network-programming", "asynchronous"] + +[dependencies] +bytes = "1.4.0" +futures = "0.3.30" +js-sys = "0.3.66" +libp2p-core = { workspace = true } +tracing = "0.1.37" +parking_lot = "0.12.1" +send_wrapper = "0.6.0" +thiserror = "1.0.51" +wasm-bindgen = "0.2.89" +web-sys = { version = "0.3.66", features = ["BinaryType", "CloseEvent", "MessageEvent", "WebSocket", "Window", "WorkerGlobalScope"] } + +# Passing arguments to the docsrs builder in order to properly document cfg's. 
+# More information: https://docs.rs/about/builds#cross-compiling +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] +rustc-args = ["--cfg", "docsrs"] + +[dev-dependencies] +libp2p-yamux = { workspace = true } +libp2p-noise = { workspace = true } +libp2p-identity = { workspace = true, features = ["rand"] } diff --git a/transports/websocket-websys/src/lib.rs b/transports/websocket-websys/src/lib.rs new file mode 100644 index 00000000000..5c1a6ebf1c4 --- /dev/null +++ b/transports/websocket-websys/src/lib.rs @@ -0,0 +1,450 @@ +// Copyright (C) 2023 Vince Vasta +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +//! Libp2p websocket transports built on [web-sys](https://rustwasm.github.io/wasm-bindgen/web-sys/index.html). 
+ +mod web_context; + +use bytes::BytesMut; +use futures::task::AtomicWaker; +use futures::{future::Ready, io, prelude::*}; +use js_sys::Array; +use libp2p_core::{ + multiaddr::{Multiaddr, Protocol}, + transport::{ListenerId, TransportError, TransportEvent}, +}; +use send_wrapper::SendWrapper; +use std::cmp::min; +use std::rc::Rc; +use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::Mutex; +use std::{pin::Pin, task::Context, task::Poll}; +use wasm_bindgen::{prelude::*, JsCast}; +use web_sys::{CloseEvent, Event, MessageEvent, WebSocket}; + +use crate::web_context::WebContext; + +/// A Websocket transport that can be used in a wasm environment. +/// +/// ## Example +/// +/// To create an authenticated transport instance with Noise protocol and Yamux: +/// +/// ``` +/// # use libp2p_core::{upgrade::Version, Transport}; +/// # use libp2p_identity::Keypair; +/// # use libp2p_yamux as yamux; +/// # use libp2p_noise as noise; +/// let local_key = Keypair::generate_ed25519(); +/// let transport = libp2p_websocket_websys::Transport::default() +/// .upgrade(Version::V1) +/// .authenticate(noise::Config::new(&local_key).unwrap()) +/// .multiplex(yamux::Config::default()) +/// .boxed(); +/// ``` +/// +#[derive(Default)] +pub struct Transport { + _private: (), +} + +/// Arbitrary, maximum amount we are willing to buffer before we throttle our user. 
+const MAX_BUFFER: usize = 1024 * 1024; + +impl libp2p_core::Transport for Transport { + type Output = Connection; + type Error = Error; + type ListenerUpgrade = Ready>; + type Dial = Pin> + Send>>; + + fn listen_on( + &mut self, + _: ListenerId, + addr: Multiaddr, + ) -> Result<(), TransportError> { + Err(TransportError::MultiaddrNotSupported(addr)) + } + + fn remove_listener(&mut self, _id: ListenerId) -> bool { + false + } + + fn dial(&mut self, addr: Multiaddr) -> Result> { + let url = extract_websocket_url(&addr) + .ok_or_else(|| TransportError::MultiaddrNotSupported(addr))?; + + Ok(async move { + let socket = match WebSocket::new(&url) { + Ok(ws) => ws, + Err(_) => return Err(Error::invalid_websocket_url(&url)), + }; + + Ok(Connection::new(socket)) + } + .boxed()) + } + + fn dial_as_listener( + &mut self, + addr: Multiaddr, + ) -> Result> { + Err(TransportError::MultiaddrNotSupported(addr)) + } + + fn poll( + self: Pin<&mut Self>, + _cx: &mut Context<'_>, + ) -> std::task::Poll> { + Poll::Pending + } + + fn address_translation(&self, _listen: &Multiaddr, _observed: &Multiaddr) -> Option { + None + } +} + +// Try to convert Multiaddr to a Websocket url. 
+fn extract_websocket_url(addr: &Multiaddr) -> Option { + let mut protocols = addr.iter(); + let host_port = match (protocols.next(), protocols.next()) { + (Some(Protocol::Ip4(ip)), Some(Protocol::Tcp(port))) => { + format!("{ip}:{port}") + } + (Some(Protocol::Ip6(ip)), Some(Protocol::Tcp(port))) => { + format!("[{ip}]:{port}") + } + (Some(Protocol::Dns(h)), Some(Protocol::Tcp(port))) + | (Some(Protocol::Dns4(h)), Some(Protocol::Tcp(port))) + | (Some(Protocol::Dns6(h)), Some(Protocol::Tcp(port))) + | (Some(Protocol::Dnsaddr(h)), Some(Protocol::Tcp(port))) => { + format!("{}:{}", &h, port) + } + _ => return None, + }; + + let (scheme, wspath) = match protocols.next() { + Some(Protocol::Ws(path)) => ("ws", path.into_owned()), + Some(Protocol::Wss(path)) => ("wss", path.into_owned()), + _ => return None, + }; + + Some(format!("{scheme}://{host_port}{wspath}")) +} + +#[derive(thiserror::Error, Debug)] +#[error("{msg}")] +pub struct Error { + msg: String, +} + +impl Error { + fn invalid_websocket_url(url: &str) -> Self { + Self { + msg: format!("Invalid websocket url: {url}"), + } + } +} + +/// A Websocket connection created by the [`Transport`]. +pub struct Connection { + inner: SendWrapper, +} + +struct Inner { + socket: WebSocket, + + new_data_waker: Rc, + read_buffer: Rc>, + + /// Waker for when we are waiting for the WebSocket to be opened. + open_waker: Rc, + + /// Waker for when we are waiting to write (again) to the WebSocket because we previously exceeded the [`MAX_BUFFER`] threshold. + write_waker: Rc, + + /// Waker for when we are waiting for the WebSocket to be closed. + close_waker: Rc, + + /// Whether the connection errored. + errored: Rc, + + // Store the closures for proper garbage collection. + // These are wrapped in an [`Rc`] so we can implement [`Clone`]. 
+ _on_open_closure: Rc>, + _on_buffered_amount_low_closure: Rc>, + _on_close_closure: Rc>, + _on_error_closure: Rc>, + _on_message_closure: Rc>, + buffered_amount_low_interval: i32, +} + +impl Inner { + fn ready_state(&self) -> ReadyState { + match self.socket.ready_state() { + 0 => ReadyState::Connecting, + 1 => ReadyState::Open, + 2 => ReadyState::Closing, + 3 => ReadyState::Closed, + unknown => unreachable!("invalid `ReadyState` value: {unknown}"), + } + } + + fn poll_open(&mut self, cx: &Context<'_>) -> Poll> { + match self.ready_state() { + ReadyState::Connecting => { + self.open_waker.register(cx.waker()); + Poll::Pending + } + ReadyState::Open => Poll::Ready(Ok(())), + ReadyState::Closed | ReadyState::Closing => { + Poll::Ready(Err(io::ErrorKind::BrokenPipe.into())) + } + } + } + + fn error_barrier(&self) -> io::Result<()> { + if self.errored.load(Ordering::SeqCst) { + return Err(io::ErrorKind::BrokenPipe.into()); + } + + Ok(()) + } +} + +/// The state of the WebSocket. +/// +/// See . 
+#[derive(PartialEq)] +enum ReadyState { + Connecting, + Open, + Closing, + Closed, +} + +impl Connection { + fn new(socket: WebSocket) -> Self { + socket.set_binary_type(web_sys::BinaryType::Arraybuffer); + + let open_waker = Rc::new(AtomicWaker::new()); + let onopen_closure = Closure::::new({ + let open_waker = open_waker.clone(); + move |_| { + open_waker.wake(); + } + }); + socket.set_onopen(Some(onopen_closure.as_ref().unchecked_ref())); + + let close_waker = Rc::new(AtomicWaker::new()); + let onclose_closure = Closure::::new({ + let close_waker = close_waker.clone(); + move |_| { + close_waker.wake(); + } + }); + socket.set_onclose(Some(onclose_closure.as_ref().unchecked_ref())); + + let errored = Rc::new(AtomicBool::new(false)); + let onerror_closure = Closure::::new({ + let errored = errored.clone(); + move |_| { + errored.store(true, Ordering::SeqCst); + } + }); + socket.set_onerror(Some(onerror_closure.as_ref().unchecked_ref())); + + let read_buffer = Rc::new(Mutex::new(BytesMut::new())); + let new_data_waker = Rc::new(AtomicWaker::new()); + let onmessage_closure = Closure::::new({ + let read_buffer = read_buffer.clone(); + let new_data_waker = new_data_waker.clone(); + let errored = errored.clone(); + move |e: MessageEvent| { + let data = js_sys::Uint8Array::new(&e.data()); + + let mut read_buffer = read_buffer.lock().unwrap(); + + if read_buffer.len() + data.length() as usize > MAX_BUFFER { + tracing::warn!("Remote is overloading us with messages, closing connection"); + errored.store(true, Ordering::SeqCst); + + return; + } + + read_buffer.extend_from_slice(&data.to_vec()); + new_data_waker.wake(); + } + }); + socket.set_onmessage(Some(onmessage_closure.as_ref().unchecked_ref())); + + let write_waker = Rc::new(AtomicWaker::new()); + let on_buffered_amount_low_closure = Closure::::new({ + let write_waker = write_waker.clone(); + let socket = socket.clone(); + move |_| { + if socket.buffered_amount() == 0 { + write_waker.wake(); + } + } + }); + let 
buffered_amount_low_interval = WebContext::new() + .expect("to have a window or worker context") + .set_interval_with_callback_and_timeout_and_arguments( + on_buffered_amount_low_closure.as_ref().unchecked_ref(), + 100, // Chosen arbitrarily and likely worth tuning. Due to low impact of the /ws transport, no further effort was invested at the time. + &Array::new(), + ) + .expect("to be able to set an interval"); + + Self { + inner: SendWrapper::new(Inner { + socket, + new_data_waker, + read_buffer, + open_waker, + write_waker, + close_waker, + errored, + _on_open_closure: Rc::new(onopen_closure), + _on_buffered_amount_low_closure: Rc::new(on_buffered_amount_low_closure), + _on_close_closure: Rc::new(onclose_closure), + _on_error_closure: Rc::new(onerror_closure), + _on_message_closure: Rc::new(onmessage_closure), + buffered_amount_low_interval, + }), + } + } + + fn buffered_amount(&self) -> usize { + self.inner.socket.buffered_amount() as usize + } +} + +impl AsyncRead for Connection { + fn poll_read( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &mut [u8], + ) -> Poll> { + let this = self.get_mut(); + this.inner.error_barrier()?; + futures::ready!(this.inner.poll_open(cx))?; + + let mut read_buffer = this.inner.read_buffer.lock().unwrap(); + + if read_buffer.is_empty() { + this.inner.new_data_waker.register(cx.waker()); + return Poll::Pending; + } + + // Ensure that we: + // - at most return what the caller can read (`buf.len()`) + // - at most what we have (`read_buffer.len()`) + let split_index = min(buf.len(), read_buffer.len()); + + let bytes_to_return = read_buffer.split_to(split_index); + let len = bytes_to_return.len(); + buf[..len].copy_from_slice(&bytes_to_return); + + Poll::Ready(Ok(len)) + } +} + +impl AsyncWrite for Connection { + fn poll_write( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &[u8], + ) -> Poll> { + let this = self.get_mut(); + + this.inner.error_barrier()?; + futures::ready!(this.inner.poll_open(cx))?; + + 
debug_assert!(this.buffered_amount() <= MAX_BUFFER); + let remaining_space = MAX_BUFFER - this.buffered_amount(); + + if remaining_space == 0 { + this.inner.write_waker.register(cx.waker()); + return Poll::Pending; + } + + let bytes_to_send = min(buf.len(), remaining_space); + + if this + .inner + .socket + .send_with_u8_array(&buf[..bytes_to_send]) + .is_err() + { + return Poll::Ready(Err(io::ErrorKind::BrokenPipe.into())); + } + + Poll::Ready(Ok(bytes_to_send)) + } + + fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + if self.buffered_amount() == 0 { + return Poll::Ready(Ok(())); + } + + self.inner.error_barrier()?; + + self.inner.write_waker.register(cx.waker()); + Poll::Pending + } + + fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + const REGULAR_CLOSE: u16 = 1000; // See https://www.rfc-editor.org/rfc/rfc6455.html#section-7.4.1. + + if self.inner.ready_state() == ReadyState::Closed { + return Poll::Ready(Ok(())); + } + + self.inner.error_barrier()?; + + if self.inner.ready_state() != ReadyState::Closing { + let _ = self + .inner + .socket + .close_with_code_and_reason(REGULAR_CLOSE, "user initiated"); + } + + self.inner.close_waker.register(cx.waker()); + Poll::Pending + } +} + +impl Drop for Connection { + fn drop(&mut self) { + const GO_AWAY_STATUS_CODE: u16 = 1001; // See https://www.rfc-editor.org/rfc/rfc6455.html#section-7.4.1. 
+ + if let ReadyState::Connecting | ReadyState::Open = self.inner.ready_state() { + let _ = self + .inner + .socket + .close_with_code_and_reason(GO_AWAY_STATUS_CODE, "connection dropped"); + } + + WebContext::new() + .expect("to have a window or worker context") + .clear_interval_with_handle(self.inner.buffered_amount_low_interval); + } +} diff --git a/transports/websocket-websys/src/web_context.rs b/transports/websocket-websys/src/web_context.rs new file mode 100644 index 00000000000..c514435d2bb --- /dev/null +++ b/transports/websocket-websys/src/web_context.rs @@ -0,0 +1,57 @@ +use wasm_bindgen::{prelude::*, JsCast}; +use web_sys::window; + +/// Web context that abstract the window vs web worker API +#[derive(Debug)] +pub(crate) enum WebContext { + Window(web_sys::Window), + Worker(web_sys::WorkerGlobalScope), +} + +impl WebContext { + pub(crate) fn new() -> Option { + match window() { + Some(window) => Some(Self::Window(window)), + None => { + #[wasm_bindgen] + extern "C" { + type Global; + + #[wasm_bindgen(method, getter, js_name = WorkerGlobalScope)] + fn worker(this: &Global) -> JsValue; + } + let global: Global = js_sys::global().unchecked_into(); + if !global.worker().is_undefined() { + Some(Self::Worker(global.unchecked_into())) + } else { + None + } + } + } + } + + /// The `setInterval()` method. + pub(crate) fn set_interval_with_callback_and_timeout_and_arguments( + &self, + handler: &::js_sys::Function, + timeout: i32, + arguments: &::js_sys::Array, + ) -> Result { + match self { + WebContext::Window(w) => { + w.set_interval_with_callback_and_timeout_and_arguments(handler, timeout, arguments) + } + WebContext::Worker(w) => { + w.set_interval_with_callback_and_timeout_and_arguments(handler, timeout, arguments) + } + } + } + + /// The `clearInterval()` method. 
+ pub(crate) fn clear_interval_with_handle(&self, handle: i32) { + match self { + WebContext::Window(w) => w.clear_interval_with_handle(handle), + WebContext::Worker(w) => w.clear_interval_with_handle(handle), + } + } +} diff --git a/transports/websocket/CHANGELOG.md b/transports/websocket/CHANGELOG.md index b8e795fd8d3..192b1fa094e 100644 --- a/transports/websocket/CHANGELOG.md +++ b/transports/websocket/CHANGELOG.md @@ -1,9 +1,26 @@ -## 0.42.0 - unreleased +## 0.43.0 + + +## 0.42.1 + +- Bump `futures-rustls` to `0.24.0`. + This is a part of the resolution of the [RUSTSEC-2023-0052]. + See [PR 4378]. + +[PR 4378]: https://github.com/libp2p/rust-libp2p/pull/4378 +[RUSTSEC-2023-0052]: https://rustsec.org/advisories/RUSTSEC-2023-0052.html + +## 0.42.0 - Raise MSRV to 1.65. See [PR 3715]. +- Remove `WsConfig::use_deflate` option. + This allows us to remove the dependency on the `zlib` shared library. + See [PR 3949]. + [PR 3715]: https://github.com/libp2p/rust-libp2p/pull/3715 +[PR 3949]: https://github.com/libp2p/rust-libp2p/pull/3949 ## 0.41.0 diff --git a/transports/websocket/Cargo.toml b/transports/websocket/Cargo.toml index 643bf15b15f..385e292103c 100644 --- a/transports/websocket/Cargo.toml +++ b/transports/websocket/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-websocket" edition = "2021" rust-version = { workspace = true } description = "WebSocket transport for libp2p" -version = "0.42.0" +version = "0.43.0" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -11,24 +11,25 @@ keywords = ["peer-to-peer", "libp2p", "networking"] categories = ["network-programming", "asynchronous"] [dependencies] -futures-rustls = "0.22" -either = "1.5.3" -futures = "0.3.28" +futures-rustls = "0.24.0" +either = "1.9.0" +futures = "0.3.30" libp2p-core = { workspace = true } libp2p-identity = { workspace = true } -log = "0.4.8" parking_lot = "0.12.0" -quicksink = "0.1" +pin-project-lite = "0.2.13" rw-stream-sink = { workspace = 
true } -soketto = { version = "0.7.0", features = ["deflate"] } -url = "2.1" -webpki-roots = "0.23" +soketto = "0.7.0" +tracing = "0.1.37" +url = "2.5" +webpki-roots = "0.25" [dev-dependencies] libp2p-tcp = { workspace = true, features = ["async-io"] } libp2p-dns = { workspace = true, features = ["async-std"] } +libp2p-identity = { workspace = true, features = ["rand"] } async-std = { version = "1.6.5", features = ["attributes"] } -rcgen = "0.9.3" +rcgen = "0.11.3" # Passing arguments to the docsrs builder in order to properly document cfg's. # More information: https://docs.rs/about/builds#cross-compiling @@ -36,3 +37,6 @@ rcgen = "0.9.3" all-features = true rustdoc-args = ["--cfg", "docsrs"] rustc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true diff --git a/transports/websocket/src/framed.rs b/transports/websocket/src/framed.rs index 318090d10d2..3593e1eaff2 100644 --- a/transports/websocket/src/framed.rs +++ b/transports/websocket/src/framed.rs @@ -18,7 +18,7 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::{error::Error, tls}; +use crate::{error::Error, quicksink, tls}; use either::Either; use futures::{future::BoxFuture, prelude::*, ready, stream::BoxStream}; use futures_rustls::{client, rustls, server}; @@ -28,11 +28,9 @@ use libp2p_core::{ transport::{ListenerId, TransportError, TransportEvent}, Transport, }; -use log::{debug, trace}; use parking_lot::Mutex; use soketto::{ connection::{self, CloseReason}, - extension::deflate::Deflate, handshake, }; use std::{collections::HashMap, ops::DerefMut, sync::Arc}; @@ -51,7 +49,6 @@ pub struct WsConfig { max_data_size: usize, tls_config: tls::Config, max_redirects: u8, - use_deflate: bool, /// Websocket protocol of the inner listener. /// /// This is the suffix of the address provided in `listen_on`. 
@@ -59,7 +56,10 @@ pub struct WsConfig { listener_protos: HashMap>, } -impl WsConfig { +impl WsConfig +where + T: Send, +{ /// Create a new websocket transport based on another transport. pub fn new(transport: T) -> Self { WsConfig { @@ -67,7 +67,6 @@ impl WsConfig { max_data_size: MAX_DATA_SIZE, tls_config: tls::Config::client(), max_redirects: 0, - use_deflate: false, listener_protos: HashMap::new(), } } @@ -99,12 +98,6 @@ impl WsConfig { self.tls_config = c; self } - - /// Should the deflate extension (RFC 7692) be used if supported? - pub fn use_deflate(&mut self, flag: bool) -> &mut Self { - self.use_deflate = flag; - self - } } type TlsOrPlain = future::Either, server::TlsStream>, T>; @@ -122,27 +115,31 @@ where type ListenerUpgrade = BoxFuture<'static, Result>; type Dial = BoxFuture<'static, Result>; - fn listen_on(&mut self, addr: Multiaddr) -> Result> { + fn listen_on( + &mut self, + id: ListenerId, + addr: Multiaddr, + ) -> Result<(), TransportError> { let mut inner_addr = addr.clone(); let proto = match inner_addr.pop() { Some(p @ Protocol::Wss(_)) => { if self.tls_config.server.is_some() { p } else { - debug!("/wss address but TLS server support is not configured"); + tracing::debug!("/wss address but TLS server support is not configured"); return Err(TransportError::MultiaddrNotSupported(addr)); } } Some(p @ Protocol::Ws(_)) => p, _ => { - debug!("{} is not a websocket multiaddr", addr); + tracing::debug!(address=%addr, "Address is not a websocket multiaddr"); return Err(TransportError::MultiaddrNotSupported(addr)); } }; - match self.transport.lock().listen_on(inner_addr) { - Ok(id) => { + match self.transport.lock().listen_on(id, inner_addr) { + Ok(()) => { self.listener_protos.insert(id, proto); - Ok(id) + Ok(()) } Err(e) => Err(e.map(Error::Transport)), } @@ -189,7 +186,7 @@ where .get(&listener_id) .expect("Protocol was inserted in Transport::listen_on."); listen_addr.push(proto.clone()); - debug!("Listening on {}", listen_addr); + 
tracing::debug!(address=%listen_addr, "Listening on address"); TransportEvent::NewAddress { listener_id, listen_addr, @@ -281,23 +278,16 @@ where let transport = self.transport.clone(); let tls_config = self.tls_config.clone(); - let use_deflate = self.use_deflate; let max_redirects = self.max_redirects; let future = async move { loop { - match Self::dial_once( - transport.clone(), - addr, - tls_config.clone(), - use_deflate, - role_override, - ) - .await + match Self::dial_once(transport.clone(), addr, tls_config.clone(), role_override) + .await { Ok(Either::Left(redirect)) => { if remaining_redirects == 0 { - debug!("Too many redirects (> {})", max_redirects); + tracing::debug!(%max_redirects, "Too many redirects"); return Err(Error::TooManyRedirects); } remaining_redirects -= 1; @@ -317,10 +307,9 @@ where transport: Arc>, addr: WsAddress, tls_config: tls::Config, - use_deflate: bool, role_override: Endpoint, ) -> Result>, Error> { - trace!("Dialing websocket address: {:?}", addr); + tracing::trace!(address=?addr, "Dialing websocket address"); let dial = match role_override { Endpoint::Dialer => transport.lock().dial(addr.tcp_addr), @@ -332,19 +321,19 @@ where })?; let stream = dial.map_err(Error::Transport).await?; - trace!("TCP connection to {} established.", addr.host_port); + tracing::trace!(port=%addr.host_port, "TCP connection established"); let stream = if addr.use_tls { // begin TLS session let dns_name = addr .dns_name .expect("for use_tls we have checked that dns_name is some"); - trace!("Starting TLS handshake with {:?}", dns_name); + tracing::trace!(?dns_name, "Starting TLS handshake"); let stream = tls_config .client .connect(dns_name.clone(), stream) .map_err(|e| { - debug!("TLS handshake with {:?} failed: {}", dns_name, e); + tracing::debug!(?dns_name, "TLS handshake failed: {}", e); Error::Tls(tls::Error::from(e)) }) .await?; @@ -356,14 +345,10 @@ where future::Either::Right(stream) }; - trace!("Sending websocket handshake to {}", addr.host_port); 
+ tracing::trace!(port=%addr.host_port, "Sending websocket handshake"); let mut client = handshake::Client::new(stream, &addr.host_port, addr.path.as_ref()); - if use_deflate { - client.add_extension(Box::new(Deflate::new(connection::Mode::Client))); - } - match client .handshake() .map_err(|e| Error::Handshake(Box::new(e))) @@ -373,9 +358,10 @@ where status_code, location, } => { - debug!( - "received redirect ({}); location: {}", - status_code, location + tracing::debug!( + %status_code, + %location, + "received redirect" ); Ok(Either::Left(location)) } @@ -384,7 +370,7 @@ where Err(Error::Handshake(msg.into())) } handshake::ServerResponse::Accepted { .. } => { - trace!("websocket handshake with {} successful", addr.host_port); + tracing::trace!(port=%addr.host_port, "websocket handshake successful"); Ok(Either::Right(Connection::new(client.into_builder()))) } } @@ -399,11 +385,10 @@ where let remote_addr2 = remote_addr.clone(); // used for logging let tls_config = self.tls_config.clone(); let max_size = self.max_data_size; - let use_deflate = self.use_deflate; async move { let stream = upgrade.map_err(Error::Transport).await?; - trace!("incoming connection from {}", remote_addr); + tracing::trace!(address=%remote_addr, "incoming connection from address"); let stream = if use_tls { // begin TLS session @@ -411,12 +396,12 @@ where .server .expect("for use_tls we checked server is not none"); - trace!("awaiting TLS handshake with {}", remote_addr); + tracing::trace!(address=%remote_addr, "awaiting TLS handshake with address"); let stream = server .accept(stream) .map_err(move |e| { - debug!("TLS handshake with {} failed: {}", remote_addr, e); + tracing::debug!(address=%remote_addr, "TLS handshake with address failed: {}", e); Error::Tls(tls::Error::from(e)) }) .await?; @@ -429,17 +414,13 @@ where future::Either::Right(stream) }; - trace!( - "receiving websocket handshake request from {}", - remote_addr2 + tracing::trace!( + address=%remote_addr2, + "receiving 
websocket handshake request from address" ); let mut server = handshake::Server::new(stream); - if use_deflate { - server.add_extension(Box::new(Deflate::new(connection::Mode::Server))); - } - let ws_key = { let request = server .receive_request() @@ -448,9 +429,9 @@ where request.key() }; - trace!( - "accepting websocket handshake request from {}", - remote_addr2 + tracing::trace!( + address=%remote_addr2, + "accepting websocket handshake request from address" ); let response = handshake::server::Response::Accept { @@ -530,7 +511,7 @@ fn parse_ws_dial_addr(addr: Multiaddr) -> Result> { Some(Protocol::Ws(path)) => break (false, path.into_owned()), Some(Protocol::Wss(path)) => { if dns_name.is_none() { - debug!("Missing DNS name in WSS address: {}", addr); + tracing::debug!(addrress=%addr, "Missing DNS name in WSS address"); return Err(Error::InvalidMultiaddr(addr)); } break (true, path.into_owned()); @@ -575,13 +556,13 @@ fn location_to_multiaddr(location: &str) -> Result> { } else if s.eq_ignore_ascii_case("http") | s.eq_ignore_ascii_case("ws") { a.push(Protocol::Ws(url.path().into())) } else { - debug!("unsupported scheme: {}", s); + tracing::debug!(scheme=%s, "unsupported scheme"); return Err(Error::InvalidRedirectLocation); } Ok(a) } Err(e) => { - debug!("failed to parse url as multi-address: {:?}", e); + tracing::debug!("failed to parse url as multi-address: {:?}", e); Err(Error::InvalidRedirectLocation) } } diff --git a/transports/websocket/src/lib.rs b/transports/websocket/src/lib.rs index f9b289422fd..e0b3d09ca25 100644 --- a/transports/websocket/src/lib.rs +++ b/transports/websocket/src/lib.rs @@ -24,6 +24,7 @@ pub mod error; pub mod framed; +mod quicksink; pub mod tls; use error::Error; @@ -52,13 +53,19 @@ use std::{ /// /// If you don't need Secure Websocket's support, use a plain TCP transport as an inner transport. /// +/// # Dependencies +/// +/// This transport requires the `zlib` shared library to be installed on the system. 
+/// +/// Future releases might lift this requirement, see . +/// /// # Examples /// /// Secure Websocket transport: /// /// ``` /// # use futures::future; -/// # use libp2p_core::Transport; +/// # use libp2p_core::{transport::ListenerId, Transport}; /// # use libp2p_dns as dns; /// # use libp2p_tcp as tcp; /// # use libp2p_websocket as websocket; @@ -68,7 +75,7 @@ use std::{ /// # #[async_std::main] /// # async fn main() { /// -/// let mut transport = websocket::WsConfig::new(dns::DnsConfig::system( +/// let mut transport = websocket::WsConfig::new(dns::async_std::Transport::system( /// tcp::async_io::Transport::new(tcp::Config::default()), /// ).await.unwrap()); /// @@ -77,7 +84,7 @@ use std::{ /// let cert = websocket::tls::Certificate::new(rcgen_cert.serialize_der().unwrap()); /// transport.set_tls_config(websocket::tls::Config::new(priv_key, vec![cert]).unwrap()); /// -/// let id = transport.listen_on("/ip4/127.0.0.1/tcp/0/wss".parse().unwrap()).unwrap(); +/// let id = transport.listen_on(ListenerId::next(), "/ip4/127.0.0.1/tcp/0/wss".parse().unwrap()).unwrap(); /// /// let addr = future::poll_fn(|cx| Pin::new(&mut transport).poll(cx)).await.into_new_address().unwrap(); /// println!("Listening on {addr}"); @@ -89,7 +96,7 @@ use std::{ /// /// ``` /// # use futures::future; -/// # use libp2p_core::Transport; +/// # use libp2p_core::{transport::ListenerId, Transport}; /// # use libp2p_dns as dns; /// # use libp2p_tcp as tcp; /// # use libp2p_websocket as websocket; @@ -102,7 +109,7 @@ use std::{ /// tcp::async_io::Transport::new(tcp::Config::default()), /// ); /// -/// let id = transport.listen_on("/ip4/127.0.0.1/tcp/0/ws".parse().unwrap()).unwrap(); +/// let id = transport.listen_on(ListenerId::next(), "/ip4/127.0.0.1/tcp/0/ws".parse().unwrap()).unwrap(); /// /// let addr = future::poll_fn(|cx| Pin::new(&mut transport).poll(cx)).await.into_new_address().unwrap(); /// println!("Listening on {addr}"); @@ -168,12 +175,6 @@ where 
self.transport.inner_mut().set_tls_config(c); self } - - /// Should the deflate extension (RFC 7692) be used if supported? - pub fn use_deflate(&mut self, flag: bool) -> &mut Self { - self.transport.inner_mut().use_deflate(flag); - self - } } impl Transport for WsConfig @@ -189,8 +190,12 @@ where type ListenerUpgrade = MapFuture, WrapperFn>; type Dial = MapFuture, WrapperFn>; - fn listen_on(&mut self, addr: Multiaddr) -> Result> { - self.transport.listen_on(addr) + fn listen_on( + &mut self, + id: ListenerId, + addr: Multiaddr, + ) -> Result<(), TransportError> { + self.transport.listen_on(id, addr) } fn remove_listener(&mut self, id: ListenerId) -> bool { @@ -287,7 +292,7 @@ where mod tests { use super::WsConfig; use futures::prelude::*; - use libp2p_core::{multiaddr::Protocol, Multiaddr, Transport}; + use libp2p_core::{multiaddr::Protocol, transport::ListenerId, Multiaddr, Transport}; use libp2p_identity::PeerId; use libp2p_tcp as tcp; @@ -309,7 +314,9 @@ mod tests { async fn connect(listen_addr: Multiaddr) { let mut ws_config = new_ws_config().boxed(); - ws_config.listen_on(listen_addr).expect("listener"); + ws_config + .listen_on(ListenerId::next(), listen_addr) + .expect("listener"); let addr = ws_config .next() @@ -332,7 +339,7 @@ mod tests { let outbound = new_ws_config() .boxed() - .dial(addr.with(Protocol::P2p(PeerId::random().into()))) + .dial(addr.with(Protocol::P2p(PeerId::random()))) .unwrap(); let (a, b) = futures::join!(inbound, outbound); diff --git a/transports/websocket/src/quicksink.rs b/transports/websocket/src/quicksink.rs new file mode 100644 index 00000000000..d9edb4dfe0d --- /dev/null +++ b/transports/websocket/src/quicksink.rs @@ -0,0 +1,350 @@ +// Copyright (c) 2019-2020 Parity Technologies (UK) Ltd. +// +// Licensed under the Apache License, Version 2.0 +// or the MIT +// license , at your +// option. All files in the project carrying such notice may not be copied, +// modified, or distributed except according to those terms. 
+// +// Forked into rust-libp2p and further distributed under the MIT license. + +// Create a [`Sink`] implementation from an initial value and a closure +// returning a [`Future`]. +// +// This is very similar to how `futures::stream::unfold` creates a `Stream` +// implementation from a seed value and a future-returning closure. +// +// # Examples +// +// ```no_run +// use async_std::io; +// use futures::prelude::*; +// use crate::quicksink::Action; +// +// crate::quicksink::make_sink(io::stdout(), |mut stdout, action| async move { +// match action { +// Action::Send(x) => stdout.write_all(x).await?, +// Action::Flush => stdout.flush().await?, +// Action::Close => stdout.close().await? +// } +// Ok::<_, io::Error>(stdout) +// }); +// ``` +// +// # Panics +// +// - If any of the [`Sink`] methods produce an error, the sink transitions +// to a failure state and none of its methods must be called afterwards or +// else a panic will occur. +// - If [`Sink::poll_close`] has been called, no other sink method must be +// called afterwards or else a panic will be caused. + +use futures::{ready, sink::Sink}; +use pin_project_lite::pin_project; +use std::{ + future::Future, + pin::Pin, + task::{Context, Poll}, +}; + +/// Returns a `Sink` impl based on the initial value and the given closure. +/// +/// The closure will be applied to the initial value and an [`Action`] that +/// informs it about the action it should perform. The returned [`Future`] +/// will resolve to another value and the process starts over using this +/// output. +pub(crate) fn make_sink(init: S, f: F) -> SinkImpl +where + F: FnMut(S, Action) -> T, + T: Future>, +{ + SinkImpl { + lambda: f, + future: None, + param: Some(init), + state: State::Empty, + _mark: std::marker::PhantomData, + } +} + +/// The command given to the closure so that it can perform appropriate action. +/// +/// Presumably the closure encapsulates a resource to perform I/O. 
The commands +/// correspond to methods of the [`Sink`] trait and provide the closure with +/// sufficient information to know what kind of action to perform with it. +#[derive(Clone, Debug, PartialEq, Eq)] +pub(crate) enum Action { + /// Send the given value. + /// Corresponds to [`Sink::start_send`]. + Send(A), + /// Flush the resource. + /// Corresponds to [`Sink::poll_flush`]. + Flush, + /// Close the resource. + /// Corresponds to [`Sink::poll_close`]. + Close, +} + +/// The various states the `Sink` may be in. +#[derive(Debug, PartialEq, Eq)] +enum State { + /// The `Sink` is idle. + Empty, + /// The `Sink` is sending a value. + Sending, + /// The `Sink` is flushing its resource. + Flushing, + /// The `Sink` is closing its resource. + Closing, + /// The `Sink` is closed (terminal state). + Closed, + /// The `Sink` experienced an error (terminal state). + Failed, +} + +pin_project! { + /// `SinkImpl` implements the `Sink` trait. + #[derive(Debug)] + pub(crate) struct SinkImpl { + lambda: F, + #[pin] future: Option, + param: Option, + state: State, + _mark: std::marker::PhantomData<(A, E)> + } +} + +impl Sink for SinkImpl +where + F: FnMut(S, Action) -> T, + T: Future>, +{ + type Error = E; + + fn poll_ready(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + let mut this = self.project(); + match this.state { + State::Sending | State::Flushing => { + match ready!(this.future.as_mut().as_pin_mut().unwrap().poll(cx)) { + Ok(p) => { + this.future.set(None); + *this.param = Some(p); + *this.state = State::Empty; + Poll::Ready(Ok(())) + } + Err(e) => { + this.future.set(None); + *this.state = State::Failed; + Poll::Ready(Err(e)) + } + } + } + State::Closing => match ready!(this.future.as_mut().as_pin_mut().unwrap().poll(cx)) { + Ok(_) => { + this.future.set(None); + *this.state = State::Closed; + panic!("SinkImpl::poll_ready called on a closing sink.") + } + Err(e) => { + this.future.set(None); + *this.state = State::Failed; + Poll::Ready(Err(e)) + } + }, + 
State::Empty => { + assert!(this.param.is_some()); + Poll::Ready(Ok(())) + } + State::Closed => panic!("SinkImpl::poll_ready called on a closed sink."), + State::Failed => panic!("SinkImpl::poll_ready called after error."), + } + } + + fn start_send(self: Pin<&mut Self>, item: A) -> Result<(), Self::Error> { + assert_eq!(State::Empty, self.state); + let mut this = self.project(); + let param = this.param.take().unwrap(); + let future = (this.lambda)(param, Action::Send(item)); + this.future.set(Some(future)); + *this.state = State::Sending; + Ok(()) + } + + fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + loop { + let mut this = self.as_mut().project(); + match this.state { + State::Empty => { + if let Some(p) = this.param.take() { + let future = (this.lambda)(p, Action::Flush); + this.future.set(Some(future)); + *this.state = State::Flushing + } else { + return Poll::Ready(Ok(())); + } + } + State::Sending => match ready!(this.future.as_mut().as_pin_mut().unwrap().poll(cx)) + { + Ok(p) => { + this.future.set(None); + *this.param = Some(p); + *this.state = State::Empty + } + Err(e) => { + this.future.set(None); + *this.state = State::Failed; + return Poll::Ready(Err(e)); + } + }, + State::Flushing => { + match ready!(this.future.as_mut().as_pin_mut().unwrap().poll(cx)) { + Ok(p) => { + this.future.set(None); + *this.param = Some(p); + *this.state = State::Empty; + return Poll::Ready(Ok(())); + } + Err(e) => { + this.future.set(None); + *this.state = State::Failed; + return Poll::Ready(Err(e)); + } + } + } + State::Closing => match ready!(this.future.as_mut().as_pin_mut().unwrap().poll(cx)) + { + Ok(_) => { + this.future.set(None); + *this.state = State::Closed; + return Poll::Ready(Ok(())); + } + Err(e) => { + this.future.set(None); + *this.state = State::Failed; + return Poll::Ready(Err(e)); + } + }, + State::Closed => return Poll::Ready(Ok(())), + State::Failed => panic!("SinkImpl::poll_flush called after error."), + } + } + } + + fn 
poll_close(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + loop { + let mut this = self.as_mut().project(); + match this.state { + State::Empty => { + if let Some(p) = this.param.take() { + let future = (this.lambda)(p, Action::Close); + this.future.set(Some(future)); + *this.state = State::Closing; + } else { + return Poll::Ready(Ok(())); + } + } + State::Sending => match ready!(this.future.as_mut().as_pin_mut().unwrap().poll(cx)) + { + Ok(p) => { + this.future.set(None); + *this.param = Some(p); + *this.state = State::Empty + } + Err(e) => { + this.future.set(None); + *this.state = State::Failed; + return Poll::Ready(Err(e)); + } + }, + State::Flushing => { + match ready!(this.future.as_mut().as_pin_mut().unwrap().poll(cx)) { + Ok(p) => { + this.future.set(None); + *this.param = Some(p); + *this.state = State::Empty + } + Err(e) => { + this.future.set(None); + *this.state = State::Failed; + return Poll::Ready(Err(e)); + } + } + } + State::Closing => match ready!(this.future.as_mut().as_pin_mut().unwrap().poll(cx)) + { + Ok(_) => { + this.future.set(None); + *this.state = State::Closed; + return Poll::Ready(Ok(())); + } + Err(e) => { + this.future.set(None); + *this.state = State::Failed; + return Poll::Ready(Err(e)); + } + }, + State::Closed => return Poll::Ready(Ok(())), + State::Failed => panic!("SinkImpl::poll_closed called after error."), + } + } + } +} + +#[cfg(test)] +mod tests { + use crate::quicksink::{make_sink, Action}; + use async_std::{io, task}; + use futures::{channel::mpsc, prelude::*, stream}; + + #[test] + fn smoke_test() { + task::block_on(async { + let sink = make_sink(io::stdout(), |mut stdout, action| async move { + match action { + Action::Send(x) => stdout.write_all(x).await?, + Action::Flush => stdout.flush().await?, + Action::Close => stdout.close().await?, + } + Ok::<_, io::Error>(stdout) + }); + + let values = vec![Ok(&b"hello\n"[..]), Ok(&b"world\n"[..])]; + assert!(stream::iter(values).forward(sink).await.is_ok()) + }) + } + 
+ + #[test] + fn replay() { + task::block_on(async { + let (tx, rx) = mpsc::channel(5); + + let sink = make_sink(tx, |mut tx, action| async move { + tx.send(action.clone()).await?; + if action == Action::Close { + tx.close().await? + } + Ok::<_, mpsc::SendError>(tx) + }); + + futures::pin_mut!(sink); + + let expected = [ + Action::Send("hello\n"), + Action::Flush, + Action::Send("world\n"), + Action::Flush, + Action::Close, + ]; + + for &item in &["hello\n", "world\n"] { + sink.send(item).await.unwrap() + } + + sink.close().await.unwrap(); + + let actual = rx.collect::>().await; + + assert_eq!(&expected[..], &actual[..]) + }); + } +} diff --git a/transports/websocket/src/tls.rs b/transports/websocket/src/tls.rs index 260031ea850..5bff818f34c 100644 --- a/transports/websocket/src/tls.rs +++ b/transports/websocket/src/tls.rs @@ -92,7 +92,7 @@ impl Config { /// Setup the rustls client configuration. fn client_root_store() -> rustls::RootCertStore { let mut client_root_store = rustls::RootCertStore::empty(); - client_root_store.add_server_trust_anchors(webpki_roots::TLS_SERVER_ROOTS.0.iter().map(|ta| { + client_root_store.add_trust_anchors(webpki_roots::TLS_SERVER_ROOTS.iter().map(|ta| { rustls::OwnedTrustAnchor::from_subject_spki_name_constraints( ta.subject, ta.spki, diff --git a/transports/webtransport-websys/CHANGELOG.md b/transports/webtransport-websys/CHANGELOG.md new file mode 100644 index 00000000000..b368a943395 --- /dev/null +++ b/transports/webtransport-websys/CHANGELOG.md @@ -0,0 +1,8 @@ +## 0.2.0 + + +## 0.1.0 + +* Initial implementation of WebTransport transport using web-sys bindings. See [PR 4015]. 
+ +[PR 4015]: https://github.com/libp2p/rust-libp2p/pull/4015 diff --git a/transports/webtransport-websys/Cargo.toml b/transports/webtransport-websys/Cargo.toml new file mode 100644 index 00000000000..25ece175a3f --- /dev/null +++ b/transports/webtransport-websys/Cargo.toml @@ -0,0 +1,51 @@ +[package] +name = "libp2p-webtransport-websys" +edition = "2021" +rust-version = { workspace = true } +description = "WebTransport for libp2p under WASM environment" +version = "0.2.0" +authors = [ + "Yiannis Marangos ", + "oblique ", +] +license = "MIT" +repository = "https://github.com/libp2p/rust-libp2p" +keywords = ["peer-to-peer", "libp2p", "networking"] +categories = ["network-programming", "asynchronous"] + +[dependencies] +futures = "0.3.30" +js-sys = "0.3.66" +libp2p-core = { workspace = true } +libp2p-identity = { workspace = true } +libp2p-noise = { workspace = true } +multiaddr = { workspace = true } +multihash = { workspace = true } +send_wrapper = { version = "0.6.0", features = ["futures"] } +thiserror = "1.0.51" +tracing = "0.1.37" +wasm-bindgen = "0.2.89" +wasm-bindgen-futures = "0.4.39" +web-sys = { version = "0.3.66", features = [ + "ReadableStreamDefaultReader", + "WebTransport", + "WebTransportBidirectionalStream", + "WebTransportHash", + "WebTransportOptions", + "WebTransportReceiveStream", + "WebTransportSendStream", + "WritableStreamDefaultWriter", +] } + +[dev-dependencies] +multibase = "0.9.1" + +# Passing arguments to the docsrs builder in order to properly document cfg's. +# More information: https://docs.rs/about/builds#cross-compiling +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] +rustc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true diff --git a/transports/webtransport-websys/src/bindings.rs b/transports/webtransport-websys/src/bindings.rs new file mode 100644 index 00000000000..a8a1469f8ad --- /dev/null +++ b/transports/webtransport-websys/src/bindings.rs @@ -0,0 +1,141 @@ +//! 
This file is an extract from `web-sys` crate. It is a temporary +//! solution until `web_sys::WebTransport` and related structs get stabilized. +//! +//! Only the methods that are used by this crate are extracted. + +#![allow(clippy::all)] +use js_sys::{Object, Promise, Reflect}; +use wasm_bindgen::prelude::*; +use web_sys::{ReadableStream, WritableStream}; + +// WebTransport bindings +#[wasm_bindgen] +extern "C" { + #[wasm_bindgen(extends = Object, js_name = WebTransport, typescript_type = "WebTransport")] + #[derive(Debug, Clone, PartialEq, Eq)] + pub type WebTransport; + + #[wasm_bindgen(structural, method, getter, js_class = "WebTransport", js_name = ready)] + pub fn ready(this: &WebTransport) -> Promise; + + #[wasm_bindgen(structural, method, getter, js_class = "WebTransport", js_name = closed)] + pub fn closed(this: &WebTransport) -> Promise; + + #[wasm_bindgen(structural, method, getter, js_class = "WebTransport" , js_name = incomingBidirectionalStreams)] + pub fn incoming_bidirectional_streams(this: &WebTransport) -> ReadableStream; + + #[wasm_bindgen(catch, constructor, js_class = "WebTransport")] + pub fn new(url: &str) -> Result; + + #[wasm_bindgen(catch, constructor, js_class = "WebTransport")] + pub fn new_with_options( + url: &str, + options: &WebTransportOptions, + ) -> Result; + + #[wasm_bindgen(method, structural, js_class = "WebTransport", js_name = close)] + pub fn close(this: &WebTransport); + + #[wasm_bindgen (method, structural, js_class = "WebTransport", js_name = createBidirectionalStream)] + pub fn create_bidirectional_stream(this: &WebTransport) -> Promise; +} + +// WebTransportBidirectionalStream bindings +#[wasm_bindgen] +extern "C" { + #[wasm_bindgen(extends = Object, js_name = WebTransportBidirectionalStream, typescript_type = "WebTransportBidirectionalStream")] + #[derive(Debug, Clone, PartialEq, Eq)] + pub type WebTransportBidirectionalStream; + + #[wasm_bindgen(structural, method, getter, js_class = 
"WebTransportBidirectionalStream", js_name = readable)] + pub fn readable(this: &WebTransportBidirectionalStream) -> WebTransportReceiveStream; + + #[wasm_bindgen(structural, method, getter, js_class = "WebTransportBidirectionalStream", js_name = writable)] + pub fn writable(this: &WebTransportBidirectionalStream) -> WebTransportSendStream; +} + +// WebTransportReceiveStream bindings +#[wasm_bindgen] +extern "C" { + #[wasm_bindgen(extends = ReadableStream, extends = Object, js_name = WebTransportReceiveStream, typescript_type = "WebTransportReceiveStream")] + #[derive(Debug, Clone, PartialEq, Eq)] + pub type WebTransportReceiveStream; +} + +// WebTransportSendStream bindings +#[wasm_bindgen] +extern "C" { + #[wasm_bindgen(extends = WritableStream, extends = Object, js_name = WebTransportSendStream, typescript_type = "WebTransportSendStream")] + #[derive(Debug, Clone, PartialEq, Eq)] + pub type WebTransportSendStream; +} + +// WebTransportOptions bindings +#[wasm_bindgen] +extern "C" { + #[wasm_bindgen(extends = Object, js_name = WebTransportOptions)] + #[derive(Debug, Clone, PartialEq, Eq)] + pub type WebTransportOptions; +} + +impl WebTransportOptions { + pub fn new() -> Self { + #[allow(unused_mut)] + let mut ret: Self = JsCast::unchecked_into(Object::new()); + ret + } + + pub fn server_certificate_hashes(&mut self, val: &JsValue) -> &mut Self { + let r = ::js_sys::Reflect::set( + self.as_ref(), + &JsValue::from("serverCertificateHashes"), + &JsValue::from(val), + ); + debug_assert!( + r.is_ok(), + "setting properties should never fail on our dictionary objects" + ); + let _ = r; + self + } +} + +// WebTransportHash bindings +#[wasm_bindgen] +extern "C" { + #[wasm_bindgen(extends = Object, js_name = WebTransportHash)] + #[derive(Debug, Clone, PartialEq, Eq)] + pub type WebTransportHash; +} + +impl WebTransportHash { + pub fn new() -> Self { + #[allow(unused_mut)] + let mut ret: Self = JsCast::unchecked_into(Object::new()); + ret + } + + pub fn algorithm(&mut 
self, val: &str) -> &mut Self { + let r = Reflect::set( + self.as_ref(), + &JsValue::from("algorithm"), + &JsValue::from(val), + ); + debug_assert!( + r.is_ok(), + "setting properties should never fail on our dictionary objects" + ); + let _ = r; + self + } + + pub fn value(&mut self, val: &::js_sys::Object) -> &mut Self { + let r = Reflect::set(self.as_ref(), &JsValue::from("value"), &JsValue::from(val)); + debug_assert!( + r.is_ok(), + "setting properties should never fail on our dictionary objects" + ); + let _ = r; + self + } +} diff --git a/transports/webtransport-websys/src/connection.rs b/transports/webtransport-websys/src/connection.rs new file mode 100644 index 00000000000..982f9e5a32c --- /dev/null +++ b/transports/webtransport-websys/src/connection.rs @@ -0,0 +1,209 @@ +use futures::FutureExt; +use libp2p_core::muxing::{StreamMuxer, StreamMuxerEvent}; +use libp2p_core::upgrade::OutboundConnectionUpgrade; +use libp2p_core::UpgradeInfo; +use libp2p_identity::{Keypair, PeerId}; +use multihash::Multihash; +use send_wrapper::SendWrapper; +use std::collections::HashSet; +use std::future::poll_fn; +use std::pin::Pin; +use std::task::{ready, Context, Poll}; +use wasm_bindgen_futures::JsFuture; +use web_sys::ReadableStreamDefaultReader; + +use crate::bindings::{WebTransport, WebTransportBidirectionalStream}; +use crate::endpoint::Endpoint; +use crate::fused_js_promise::FusedJsPromise; +use crate::utils::{detach_promise, parse_reader_response, to_js_type}; +use crate::{Error, Stream}; + +/// An opened WebTransport connection. +#[derive(Debug)] +pub struct Connection { + // Swarm needs all types to be Send. WASM is single-threaded + // and it is safe to use SendWrapper. 
+ inner: SendWrapper, +} + +#[derive(Debug)] +struct ConnectionInner { + session: WebTransport, + create_stream_promise: FusedJsPromise, + incoming_stream_promise: FusedJsPromise, + incoming_streams_reader: ReadableStreamDefaultReader, + closed: bool, +} + +impl Connection { + pub(crate) fn new(endpoint: &Endpoint) -> Result { + let url = endpoint.url(); + + let session = if endpoint.certhashes.is_empty() { + // Endpoint has CA-signed TLS certificate. + WebTransport::new(&url).map_err(Error::from_js_value)? + } else { + // Endpoint has self-signed TLS certificates. + let opts = endpoint.webtransport_opts(); + WebTransport::new_with_options(&url, &opts).map_err(Error::from_js_value)? + }; + + let incoming_streams = session.incoming_bidirectional_streams(); + let incoming_streams_reader = + to_js_type::(incoming_streams.get_reader())?; + + Ok(Connection { + inner: SendWrapper::new(ConnectionInner { + session, + create_stream_promise: FusedJsPromise::new(), + incoming_stream_promise: FusedJsPromise::new(), + incoming_streams_reader, + closed: false, + }), + }) + } + + pub(crate) async fn authenticate( + &mut self, + keypair: &Keypair, + remote_peer: Option, + certhashes: HashSet>, + ) -> Result { + let fut = SendWrapper::new(self.inner.authenticate(keypair, remote_peer, certhashes)); + fut.await + } +} + +impl ConnectionInner { + /// Authenticates with the server + /// + /// This method runs the security handshake as described + /// in the [spec][1]. It validates the certhashes and peer ID + /// of the server. 
+ /// + /// [1]: https://github.com/libp2p/specs/tree/master/webtransport#security-handshake + async fn authenticate( + &mut self, + keypair: &Keypair, + remote_peer: Option, + certhashes: HashSet>, + ) -> Result { + JsFuture::from(self.session.ready()) + .await + .map_err(Error::from_js_value)?; + + let stream = poll_fn(|cx| self.poll_create_bidirectional_stream(cx)).await?; + let mut noise = libp2p_noise::Config::new(keypair)?; + + if !certhashes.is_empty() { + noise = noise.with_webtransport_certhashes(certhashes); + } + + // We do not use `upgrade::apply_outbound` function because it uses + // `multistream_select` protocol, which is not used by WebTransport spec. + let info = noise.protocol_info().next().unwrap_or_default(); + let (peer_id, _io) = noise.upgrade_outbound(stream, info).await?; + + // TODO: This should be part libp2p-noise + if let Some(expected_peer_id) = remote_peer { + if peer_id != expected_peer_id { + return Err(Error::UnknownRemotePeerId); + } + } + + Ok(peer_id) + } + + /// Initiates and polls a promise from `create_bidirectional_stream`. + fn poll_create_bidirectional_stream( + &mut self, + cx: &mut Context, + ) -> Poll> { + // Create bidirectional stream + let val = ready!(self + .create_stream_promise + .maybe_init(|| self.session.create_bidirectional_stream()) + .poll_unpin(cx)) + .map_err(Error::from_js_value)?; + + let bidi_stream = to_js_type::(val)?; + let stream = Stream::new(bidi_stream)?; + + Poll::Ready(Ok(stream)) + } + + /// Polls for incoming stream from `incoming_bidirectional_streams` reader. + fn poll_incoming_bidirectional_streams( + &mut self, + cx: &mut Context, + ) -> Poll> { + // Read the next incoming stream from the JS channel + let val = ready!(self + .incoming_stream_promise + .maybe_init(|| self.incoming_streams_reader.read()) + .poll_unpin(cx)) + .map_err(Error::from_js_value)?; + + let val = parse_reader_response(&val) + .map_err(Error::from_js_value)? 
+ .ok_or_else(|| Error::JsError("incoming_bidirectional_streams closed".to_string()))?; + + let bidi_stream = to_js_type::(val)?; + let stream = Stream::new(bidi_stream)?; + + Poll::Ready(Ok(stream)) + } + + /// Closes the session. + /// + /// This closes the streams also and they will return an error + /// when they will be used. + fn close_session(&mut self) { + if !self.closed { + detach_promise(self.incoming_streams_reader.cancel()); + self.session.close(); + self.closed = true; + } + } +} + +impl Drop for ConnectionInner { + fn drop(&mut self) { + self.close_session(); + } +} + +/// WebTransport native multiplexing +impl StreamMuxer for Connection { + type Substream = Stream; + type Error = Error; + + fn poll_inbound( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll> { + self.inner.poll_incoming_bidirectional_streams(cx) + } + + fn poll_outbound( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll> { + self.inner.poll_create_bidirectional_stream(cx) + } + + fn poll_close( + mut self: Pin<&mut Self>, + _cx: &mut Context<'_>, + ) -> Poll> { + self.inner.close_session(); + Poll::Ready(Ok(())) + } + + fn poll( + self: Pin<&mut Self>, + _cx: &mut Context<'_>, + ) -> Poll> { + Poll::Pending + } +} diff --git a/transports/webtransport-websys/src/endpoint.rs b/transports/webtransport-websys/src/endpoint.rs new file mode 100644 index 00000000000..0bff1ed6186 --- /dev/null +++ b/transports/webtransport-websys/src/endpoint.rs @@ -0,0 +1,227 @@ +use js_sys::{Array, Uint8Array}; +use libp2p_identity::PeerId; +use multiaddr::{Multiaddr, Protocol}; +use multihash::Multihash; +use std::collections::HashSet; + +use crate::bindings::{WebTransportHash, WebTransportOptions}; +use crate::Error; + +pub(crate) struct Endpoint { + pub(crate) host: String, + pub(crate) port: u16, + pub(crate) is_ipv6: bool, + pub(crate) certhashes: HashSet>, + pub(crate) remote_peer: Option, +} + +impl Endpoint { + pub(crate) fn from_multiaddr(addr: &Multiaddr) -> Result { + 
let mut host = None; + let mut port = None; + let mut found_quic = false; + let mut found_webtransport = false; + let mut certhashes = HashSet::new(); + let mut remote_peer = None; + let mut is_ipv6 = false; + + for proto in addr.iter() { + match proto { + Protocol::Ip4(addr) => { + if host.is_some() { + return Err(Error::InvalidMultiaddr("More than one host definitions")); + } + + host = Some(addr.to_string()); + } + Protocol::Ip6(addr) => { + if host.is_some() { + return Err(Error::InvalidMultiaddr("More than one host definitions")); + } + + is_ipv6 = true; + host = Some(addr.to_string()); + } + Protocol::Dns(domain) | Protocol::Dns4(domain) | Protocol::Dns6(domain) => { + if port.is_some() { + return Err(Error::InvalidMultiaddr("More than one host definitions")); + } + + host = Some(domain.to_string()) + } + Protocol::Dnsaddr(_) => { + return Err(Error::InvalidMultiaddr( + "/dnsaddr not supported from within a browser", + )); + } + Protocol::Udp(p) => { + if port.is_some() { + return Err(Error::InvalidMultiaddr("More than one port definitions")); + } + + port = Some(p); + } + Protocol::Quic | Protocol::QuicV1 => { + if host.is_none() || port.is_none() { + return Err(Error::InvalidMultiaddr( + "No host and port definition before /quic/webtransport", + )); + } + + found_quic = true; + } + Protocol::WebTransport => { + if !found_quic { + return Err(Error::InvalidMultiaddr( + "/quic is not found before /webtransport", + )); + } + + found_webtransport = true; + } + Protocol::Certhash(hash) => { + if !found_webtransport { + return Err(Error::InvalidMultiaddr( + "/certhashes must be after /quic/found_webtransport", + )); + } + + certhashes.insert(hash); + } + Protocol::P2p(peer) => { + if remote_peer.is_some() { + return Err(Error::InvalidMultiaddr("More than one peer definitions")); + } + + remote_peer = Some(peer); + } + _ => {} + } + } + + if !found_quic || !found_webtransport { + return Err(Error::InvalidMultiaddr( + "Not a /quic/webtransport multiaddr", + )); + } 
+ + let host = host.ok_or_else(|| Error::InvalidMultiaddr("Host is not defined"))?; + let port = port.ok_or_else(|| Error::InvalidMultiaddr("Port is not defined"))?; + + Ok(Endpoint { + host, + port, + is_ipv6, + certhashes, + remote_peer, + }) + } + + pub(crate) fn url(&self) -> String { + let host = &self.host; + let port = self.port; + + if self.is_ipv6 { + format!("https://[{host}]:{port}/.well-known/libp2p-webtransport?type=noise") + } else { + format!("https://{host}:{port}/.well-known/libp2p-webtransport?type=noise") + } + } + + pub(crate) fn webtransport_opts(&self) -> WebTransportOptions { + let mut opts = WebTransportOptions::new(); + let hashes = Array::new(); + + for hash in &self.certhashes { + let digest = Uint8Array::from(hash.digest()); + + let mut jshash = WebTransportHash::new(); + jshash.algorithm("sha-256").value(&digest); + + hashes.push(&jshash); + } + + opts.server_certificate_hashes(&hashes); + + opts + } +} + +#[cfg(test)] +mod tests { + use super::*; + use std::str::FromStr; + + fn multihash_from_str(s: &str) -> Multihash<64> { + let (_base, bytes) = multibase::decode(s).unwrap(); + Multihash::from_bytes(&bytes).unwrap() + } + + #[test] + fn valid_webtransport_multiaddr() { + let addr = Multiaddr::from_str("/ip4/127.0.0.1/udp/44874/quic-v1/webtransport/certhash/uEiCaDd1Ca1A8IVJ3hsIxIyi11cwxaDKqzVrBkGJbKZU5ng/certhash/uEiDv-VGW8oXxui_G_Kqp-87YjvET-Hr2qYAMYPePJDcsjQ/p2p/12D3KooWR7EfNv5SLtgjMRjUwR8AvNu3hP4fLrtSa9fmHHXKYWNG").unwrap(); + let endpoint = Endpoint::from_multiaddr(&addr).unwrap(); + + assert_eq!(endpoint.host, "127.0.0.1"); + assert_eq!(endpoint.port, 44874); + assert_eq!(endpoint.certhashes.len(), 2); + + assert!(endpoint.certhashes.contains(&multihash_from_str( + "uEiCaDd1Ca1A8IVJ3hsIxIyi11cwxaDKqzVrBkGJbKZU5ng" + ))); + + assert!(endpoint.certhashes.contains(&multihash_from_str( + "uEiDv-VGW8oXxui_G_Kqp-87YjvET-Hr2qYAMYPePJDcsjQ" + ))); + + assert_eq!( + endpoint.remote_peer.unwrap(), + 
PeerId::from_str("12D3KooWR7EfNv5SLtgjMRjUwR8AvNu3hP4fLrtSa9fmHHXKYWNG").unwrap() + ); + + assert_eq!( + endpoint.url(), + "https://127.0.0.1:44874/.well-known/libp2p-webtransport?type=noise" + ); + } + + #[test] + fn valid_webtransport_multiaddr_without_certhashes() { + let addr = Multiaddr::from_str("/ip4/127.0.0.1/udp/44874/quic-v1/webtransport/p2p/12D3KooWR7EfNv5SLtgjMRjUwR8AvNu3hP4fLrtSa9fmHHXKYWNG").unwrap(); + let endpoint = Endpoint::from_multiaddr(&addr).unwrap(); + + assert_eq!(endpoint.host, "127.0.0.1"); + assert_eq!(endpoint.port, 44874); + assert_eq!(endpoint.certhashes.len(), 0); + assert_eq!( + endpoint.remote_peer.unwrap(), + PeerId::from_str("12D3KooWR7EfNv5SLtgjMRjUwR8AvNu3hP4fLrtSa9fmHHXKYWNG").unwrap() + ); + } + + #[test] + fn ipv6_webtransport() { + let addr = Multiaddr::from_str("/ip6/::1/udp/44874/quic-v1/webtransport/certhash/uEiCaDd1Ca1A8IVJ3hsIxIyi11cwxaDKqzVrBkGJbKZU5ng/certhash/uEiDv-VGW8oXxui_G_Kqp-87YjvET-Hr2qYAMYPePJDcsjQ/p2p/12D3KooWR7EfNv5SLtgjMRjUwR8AvNu3hP4fLrtSa9fmHHXKYWNG").unwrap(); + let endpoint = Endpoint::from_multiaddr(&addr).unwrap(); + + assert_eq!(endpoint.host, "::1"); + assert_eq!(endpoint.port, 44874); + assert_eq!( + endpoint.url(), + "https://[::1]:44874/.well-known/libp2p-webtransport?type=noise" + ); + } + + #[test] + fn dns_webtransport() { + let addr = Multiaddr::from_str("/dns/libp2p.io/udp/44874/quic-v1/webtransport/certhash/uEiCaDd1Ca1A8IVJ3hsIxIyi11cwxaDKqzVrBkGJbKZU5ng/certhash/uEiDv-VGW8oXxui_G_Kqp-87YjvET-Hr2qYAMYPePJDcsjQ/p2p/12D3KooWR7EfNv5SLtgjMRjUwR8AvNu3hP4fLrtSa9fmHHXKYWNG").unwrap(); + let endpoint = Endpoint::from_multiaddr(&addr).unwrap(); + + assert_eq!(endpoint.host, "libp2p.io"); + assert_eq!(endpoint.port, 44874); + assert_eq!( + endpoint.url(), + "https://libp2p.io:44874/.well-known/libp2p-webtransport?type=noise" + ); + } +} diff --git a/transports/webtransport-websys/src/error.rs b/transports/webtransport-websys/src/error.rs new file mode 100644 index 00000000000..ad85cab7537 --- 
/dev/null +++ b/transports/webtransport-websys/src/error.rs @@ -0,0 +1,36 @@ +use wasm_bindgen::{JsCast, JsValue}; + +/// Errors that may happen on the [`Transport`](crate::Transport) or the +/// [`Connection`](crate::Connection). +#[derive(thiserror::Error, Debug)] +pub enum Error { + #[error("Invalid multiaddr: {0}")] + InvalidMultiaddr(&'static str), + + #[error("Noise authentication failed")] + Noise(#[from] libp2p_noise::Error), + + #[error("JavaScript error: {0}")] + JsError(String), + + #[error("JavaScript typecasting failed")] + JsCastFailed, + + #[error("Unknown remote peer ID")] + UnknownRemotePeerId, +} + +impl Error { + pub(crate) fn from_js_value(value: JsValue) -> Self { + let s = if value.is_instance_of::() { + js_sys::Error::from(value) + .to_string() + .as_string() + .unwrap_or_else(|| "Unknown error".to_string()) + } else { + "Unknown error".to_string() + }; + + Error::JsError(s) + } +} diff --git a/transports/webtransport-websys/src/fused_js_promise.rs b/transports/webtransport-websys/src/fused_js_promise.rs new file mode 100644 index 00000000000..0ba846501c2 --- /dev/null +++ b/transports/webtransport-websys/src/fused_js_promise.rs @@ -0,0 +1,58 @@ +use futures::FutureExt; +use js_sys::Promise; +use std::future::Future; +use std::pin::Pin; +use std::task::{ready, Context, Poll}; +use wasm_bindgen::JsValue; +use wasm_bindgen_futures::JsFuture; + +/// Convenient wrapper to poll a promise to completion. +/// +/// # Panics +/// +/// Panics if polled and promise is not initialized. Use `maybe_init` if unsure. +#[derive(Debug)] +pub(crate) struct FusedJsPromise { + promise: Option, +} + +impl FusedJsPromise { + /// Creates new uninitialized promise. 
+ pub(crate) fn new() -> Self { + FusedJsPromise { promise: None } + } + + /// Initialize promise if needed + pub(crate) fn maybe_init(&mut self, init: F) -> &mut Self + where + F: FnOnce() -> Promise, + { + if self.promise.is_none() { + self.promise = Some(JsFuture::from(init())); + } + + self + } + + /// Checks if promise is already running + pub(crate) fn is_active(&self) -> bool { + self.promise.is_some() + } +} + +impl Future for FusedJsPromise { + type Output = Result; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll { + let val = ready!(self + .promise + .as_mut() + .expect("FusedJsPromise not initialized") + .poll_unpin(cx)); + + // Future finished, drop it + self.promise.take(); + + Poll::Ready(val) + } +} diff --git a/transports/webtransport-websys/src/lib.rs b/transports/webtransport-websys/src/lib.rs new file mode 100644 index 00000000000..f9c59694fa3 --- /dev/null +++ b/transports/webtransport-websys/src/lib.rs @@ -0,0 +1,15 @@ +//! Libp2p WebTransport built on [web-sys](https://rustwasm.github.io/wasm-bindgen/web-sys/index.html) + +mod bindings; +mod connection; +mod endpoint; +mod error; +mod fused_js_promise; +mod stream; +mod transport; +mod utils; + +pub use self::connection::Connection; +pub use self::error::Error; +pub use self::stream::Stream; +pub use self::transport::{Config, Transport}; diff --git a/transports/webtransport-websys/src/stream.rs b/transports/webtransport-websys/src/stream.rs new file mode 100644 index 00000000000..ba4238ac814 --- /dev/null +++ b/transports/webtransport-websys/src/stream.rs @@ -0,0 +1,228 @@ +use futures::{AsyncRead, AsyncWrite, FutureExt}; +use js_sys::Uint8Array; +use send_wrapper::SendWrapper; +use std::io; +use std::pin::Pin; +use std::task::ready; +use std::task::{Context, Poll}; +use web_sys::{ReadableStreamDefaultReader, WritableStreamDefaultWriter}; + +use crate::bindings::WebTransportBidirectionalStream; +use crate::fused_js_promise::FusedJsPromise; +use crate::utils::{detach_promise, 
parse_reader_response, to_io_error, to_js_type}; +use crate::Error; + +/// A stream on a connection. +#[derive(Debug)] +pub struct Stream { + // Swarm needs all types to be Send. WASM is single-threaded + // and it is safe to use SendWrapper. + inner: SendWrapper, +} + +#[derive(Debug)] +struct StreamInner { + reader: ReadableStreamDefaultReader, + reader_read_promise: FusedJsPromise, + read_leftovers: Option, + writer: WritableStreamDefaultWriter, + writer_state: StreamState, + writer_ready_promise: FusedJsPromise, + writer_closed_promise: FusedJsPromise, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +enum StreamState { + Open, + Closing, + Closed, +} + +impl Stream { + pub(crate) fn new(bidi_stream: WebTransportBidirectionalStream) -> Result { + let recv_stream = bidi_stream.readable(); + let send_stream = bidi_stream.writable(); + + let reader = to_js_type::(recv_stream.get_reader())?; + let writer = send_stream.get_writer().map_err(Error::from_js_value)?; + + Ok(Stream { + inner: SendWrapper::new(StreamInner { + reader, + reader_read_promise: FusedJsPromise::new(), + read_leftovers: None, + writer, + writer_state: StreamState::Open, + writer_ready_promise: FusedJsPromise::new(), + writer_closed_promise: FusedJsPromise::new(), + }), + }) + } +} + +impl StreamInner { + fn poll_reader_read(&mut self, cx: &mut Context) -> Poll>> { + let val = ready!(self + .reader_read_promise + .maybe_init(|| self.reader.read()) + .poll_unpin(cx)) + .map_err(to_io_error)?; + + let val = parse_reader_response(&val) + .map_err(to_io_error)? + .map(Uint8Array::from); + + Poll::Ready(Ok(val)) + } + + fn poll_read(&mut self, cx: &mut Context<'_>, buf: &mut [u8]) -> Poll> { + // If we have leftovers from a previous read, then use them. + // Otherwise read new data. + let data = match self.read_leftovers.take() { + Some(data) => data, + None => { + match ready!(self.poll_reader_read(cx))? 
{ + Some(data) => data, + // EOF + None => return Poll::Ready(Ok(0)), + } + } + }; + + if data.byte_length() == 0 { + return Poll::Ready(Ok(0)); + } + + let out_len = data.byte_length().min(buf.len() as u32); + data.slice(0, out_len).copy_to(&mut buf[..out_len as usize]); + + let leftovers = data.slice(out_len, data.byte_length()); + + if leftovers.byte_length() > 0 { + self.read_leftovers = Some(leftovers); + } + + Poll::Ready(Ok(out_len as usize)) + } + + fn poll_writer_ready(&mut self, cx: &mut Context) -> Poll> { + if self.writer_state != StreamState::Open { + return Poll::Ready(Err(io::ErrorKind::BrokenPipe.into())); + } + + let desired_size = self + .writer + .desired_size() + .map_err(to_io_error)? + .map(|n| n.trunc() as i64) + .unwrap_or(0); + + // We need to poll if the queue is full or if the promise was already activated. + // + // NOTE: `desired_size` can be negative if we overcommit messages to the queue. + if desired_size <= 0 || self.writer_ready_promise.is_active() { + ready!(self + .writer_ready_promise + .maybe_init(|| self.writer.ready()) + .poll_unpin(cx)) + .map_err(to_io_error)?; + } + + Poll::Ready(Ok(())) + } + + fn poll_write(&mut self, cx: &mut Context, buf: &[u8]) -> Poll> { + ready!(self.poll_writer_ready(cx))?; + + let len = buf.len() as u32; + let data = Uint8Array::new_with_length(len); + data.copy_from(buf); + + detach_promise(self.writer.write_with_chunk(&data)); + + Poll::Ready(Ok(len as usize)) + } + + fn poll_flush(&mut self, cx: &mut Context) -> Poll> { + if self.writer_state == StreamState::Open { + // Writer has queue size of 1, so as soon it is ready, self means the + // messages were flushed. 
+ self.poll_writer_ready(cx) + } else { + debug_assert!( + false, + "libp2p_webtransport_websys::Stream: poll_flush called after poll_close" + ); + Poll::Ready(Ok(())) + } + } + + fn poll_writer_close(&mut self, cx: &mut Context) -> Poll> { + match self.writer_state { + StreamState::Open => { + self.writer_state = StreamState::Closing; + + // Initiate close + detach_promise(self.writer.close()); + + // Assume closed on error + let _ = ready!(self + .writer_closed_promise + .maybe_init(|| self.writer.closed()) + .poll_unpin(cx)); + + self.writer_state = StreamState::Closed; + } + StreamState::Closing => { + // Assume closed on error + let _ = ready!(self.writer_closed_promise.poll_unpin(cx)); + self.writer_state = StreamState::Closed; + } + StreamState::Closed => {} + } + + Poll::Ready(Ok(())) + } +} + +impl Drop for StreamInner { + fn drop(&mut self) { + // Close writer. + // + // We choose to use `close()` instead of `abort()`, because + // abort was causing some side effects on the WebTransport + // layer and connection was lost. + detach_promise(self.writer.close()); + + // Cancel any ongoing reads. 
+ detach_promise(self.reader.cancel()); + } +} + +impl AsyncRead for Stream { + fn poll_read( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &mut [u8], + ) -> Poll> { + self.inner.poll_read(cx, buf) + } +} + +impl AsyncWrite for Stream { + fn poll_write( + mut self: Pin<&mut Self>, + cx: &mut Context, + buf: &[u8], + ) -> Poll> { + self.inner.poll_write(cx, buf) + } + + fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + self.inner.poll_flush(cx) + } + + fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + self.inner.poll_writer_close(cx) + } +} diff --git a/transports/webtransport-websys/src/transport.rs b/transports/webtransport-websys/src/transport.rs new file mode 100644 index 00000000000..cb556ffef99 --- /dev/null +++ b/transports/webtransport-websys/src/transport.rs @@ -0,0 +1,103 @@ +use futures::future::FutureExt; +use libp2p_core::muxing::StreamMuxerBox; +use libp2p_core::transport::{Boxed, ListenerId, Transport as _, TransportError, TransportEvent}; +use libp2p_identity::{Keypair, PeerId}; +use multiaddr::Multiaddr; +use std::future::Future; +use std::pin::Pin; +use std::task::{Context, Poll}; + +use crate::endpoint::Endpoint; +use crate::Connection; +use crate::Error; + +/// Config for the [`Transport`]. +pub struct Config { + keypair: Keypair, +} + +/// A WebTransport [`Transport`](libp2p_core::Transport) that works with `web-sys`. +pub struct Transport { + config: Config, +} + +impl Config { + /// Constructs a new configuration for the [`Transport`]. + pub fn new(keypair: &Keypair) -> Self { + Config { + keypair: keypair.to_owned(), + } + } +} + +impl Transport { + /// Constructs a new `Transport` with the given [`Config`]. + pub fn new(config: Config) -> Transport { + Transport { config } + } + + /// Wraps `Transport` in [`Boxed`] and makes it ready to be consumed by + /// SwarmBuilder. 
+ pub fn boxed(self) -> Boxed<(PeerId, StreamMuxerBox)> { + self.map(|(peer_id, muxer), _| (peer_id, StreamMuxerBox::new(muxer))) + .boxed() + } +} + +impl libp2p_core::Transport for Transport { + type Output = (PeerId, Connection); + type Error = Error; + type ListenerUpgrade = Pin> + Send>>; + type Dial = Pin> + Send>>; + + fn listen_on( + &mut self, + _id: ListenerId, + addr: Multiaddr, + ) -> Result<(), TransportError> { + Err(TransportError::MultiaddrNotSupported(addr)) + } + + fn remove_listener(&mut self, _id: ListenerId) -> bool { + false + } + + fn dial(&mut self, addr: Multiaddr) -> Result> { + let endpoint = Endpoint::from_multiaddr(&addr).map_err(|e| match e { + e @ Error::InvalidMultiaddr(_) => { + tracing::warn!("{}", e); + TransportError::MultiaddrNotSupported(addr) + } + e => TransportError::Other(e), + })?; + + let mut session = Connection::new(&endpoint).map_err(TransportError::Other)?; + let keypair = self.config.keypair.clone(); + + Ok(async move { + let peer_id = session + .authenticate(&keypair, endpoint.remote_peer, endpoint.certhashes) + .await?; + Ok((peer_id, session)) + } + .boxed()) + } + + fn dial_as_listener( + &mut self, + addr: Multiaddr, + ) -> Result> { + Err(TransportError::MultiaddrNotSupported(addr)) + } + + fn poll( + self: Pin<&mut Self>, + _cx: &mut Context<'_>, + ) -> Poll> { + Poll::Pending + } + + fn address_translation(&self, _listen: &Multiaddr, _observed: &Multiaddr) -> Option { + None + } +} diff --git a/transports/webtransport-websys/src/utils.rs b/transports/webtransport-websys/src/utils.rs new file mode 100644 index 00000000000..fcde226be87 --- /dev/null +++ b/transports/webtransport-websys/src/utils.rs @@ -0,0 +1,76 @@ +use js_sys::{Promise, Reflect}; +use send_wrapper::SendWrapper; +use std::io; +use wasm_bindgen::{JsCast, JsValue}; + +use crate::Error; + +/// Properly detach a promise. 
+/// +/// A promise always runs in the background, however if you don't await it, +/// or specify a `catch` handler before you drop it, it might cause some side +/// effects. This function avoids any side effects. +// +// Ref: https://github.com/typescript-eslint/typescript-eslint/blob/391a6702c0a9b5b3874a7a27047f2a721f090fb6/packages/eslint-plugin/docs/rules/no-floating-promises.md +pub(crate) fn detach_promise(promise: Promise) { + type Closure = wasm_bindgen::closure::Closure; + static mut DO_NOTHING: Option> = None; + + // Allocate Closure only once and reuse it + let do_nothing = unsafe { + if DO_NOTHING.is_none() { + let cb = Closure::new(|_| {}); + DO_NOTHING = Some(SendWrapper::new(cb)); + } + + DO_NOTHING.as_deref().unwrap() + }; + + // Avoid having "floating" promise and ignore any errors. + // After `catch` promise is allowed to be dropped. + let _ = promise.catch(do_nothing); +} + +/// Typecasts a JavaScript type. +/// +/// Returns a `Ok(value)` casted to the requested type. +/// +/// If the underlying value is an error and the requested +/// type is not, then `Err(Error::JsError)` is returned. +/// +/// If the underlying value can not be casted to the requested type and +/// is not an error, then `Err(Error::JsCastFailed)` is returned. +pub(crate) fn to_js_type(value: impl Into) -> Result +where + T: JsCast + From, +{ + let value = value.into(); + + if value.has_type::() { + Ok(value.unchecked_into()) + } else if value.has_type::() { + Err(Error::from_js_value(value)) + } else { + Err(Error::JsCastFailed) + } +} + +/// Parse reponse from `ReadableStreamDefaultReader::read`. +// +// Ref: https://streams.spec.whatwg.org/#default-reader-prototype +pub(crate) fn parse_reader_response(resp: &JsValue) -> Result, JsValue> { + let value = Reflect::get(resp, &JsValue::from_str("value"))?; + let done = Reflect::get(resp, &JsValue::from_str("done"))? 
+ .as_bool() + .unwrap_or_default(); + + if value.is_undefined() || done { + Ok(None) + } else { + Ok(Some(value)) + } +} + +pub(crate) fn to_io_error(value: JsValue) -> io::Error { + io::Error::new(io::ErrorKind::Other, Error::from_js_value(value)) +} diff --git a/wasm-tests/README.md b/wasm-tests/README.md new file mode 100644 index 00000000000..1d0902b106c --- /dev/null +++ b/wasm-tests/README.md @@ -0,0 +1,11 @@ +# Dependencies + +Before you run the tests you need to install the following: + +* Chrome or Chromium +* chromedriver (major version must be the same as Chrome's) +* wasm-pack + +# Run tests + +Just call `run-all.sh` or `run.sh` in the test directory you are interested. diff --git a/wasm-tests/run-all.sh b/wasm-tests/run-all.sh new file mode 100755 index 00000000000..77b896a167d --- /dev/null +++ b/wasm-tests/run-all.sh @@ -0,0 +1,7 @@ +#!/bin/bash +set -e + +# cd to this script directory +cd "$(dirname "${BASH_SOURCE[0]}")" || exit 1 + +./webtransport-tests/run.sh diff --git a/wasm-tests/webtransport-tests/Cargo.toml b/wasm-tests/webtransport-tests/Cargo.toml new file mode 100644 index 00000000000..5422fe20fe1 --- /dev/null +++ b/wasm-tests/webtransport-tests/Cargo.toml @@ -0,0 +1,26 @@ +[package] +name = "webtransport-tests" +version = "0.1.0" +edition = "2021" +license = "MIT" +publish = false + +[package.metadata.release] +release = false + +[dependencies] +futures = "0.3.30" +getrandom = { version = "0.2.11", features = ["js"] } +libp2p-core = { workspace = true } +libp2p-identity = { workspace = true, features = ["rand"] } +libp2p-noise = { workspace = true } +libp2p-webtransport-websys = { workspace = true } +multiaddr = { workspace = true } +multihash = { workspace = true } +wasm-bindgen = "0.2.89" +wasm-bindgen-futures = "0.4.39" +wasm-bindgen-test = "0.3.39" +web-sys = { version = "0.3.66", features = ["Response", "Window"] } + +[lints] +workspace = true diff --git a/wasm-tests/webtransport-tests/README.md 
b/wasm-tests/webtransport-tests/README.md new file mode 100644 index 00000000000..b57a159176b --- /dev/null +++ b/wasm-tests/webtransport-tests/README.md @@ -0,0 +1,27 @@ +# Manually run tests + +First you need to build and start the echo-server: + +``` +docker build -t webtransport-echo-server echo-server +docker run -it --rm --network=host webtransport-echo-server +``` + +On another terminal run: + +``` +wasm-pack test --chrome +``` + +Navigate with your browser at http://127.0.0.1:8000. + +You can also run the tests on a headless browser: + +``` +wasm-pack test --chrome --headless +``` + +> **Note:** For headless tests your Chrome browser needs to be compatible +> with chromedriver (i.e. they must have the same major version). +> +> You may need to define the path of chromedriver with `--chromedriver=/path/to/chromedriver`. diff --git a/wasm-tests/webtransport-tests/echo-server/.gitignore b/wasm-tests/webtransport-tests/echo-server/.gitignore new file mode 100644 index 00000000000..e831a850ae0 --- /dev/null +++ b/wasm-tests/webtransport-tests/echo-server/.gitignore @@ -0,0 +1 @@ +/echo-server diff --git a/wasm-tests/webtransport-tests/echo-server/Dockerfile b/wasm-tests/webtransport-tests/echo-server/Dockerfile new file mode 100644 index 00000000000..f498e2baa1b --- /dev/null +++ b/wasm-tests/webtransport-tests/echo-server/Dockerfile @@ -0,0 +1,9 @@ +# syntax=docker/dockerfile:1.5-labs +FROM docker.io/library/golang:1.20 AS builder +WORKDIR /workspace +ADD . . +RUN CGO_ENABLED=0 go build . 
+ +FROM scratch +COPY --from=builder /workspace/echo-server / +ENTRYPOINT ["/echo-server"] diff --git a/wasm-tests/webtransport-tests/echo-server/go.mod b/wasm-tests/webtransport-tests/echo-server/go.mod new file mode 100644 index 00000000000..9dde12fdcfe --- /dev/null +++ b/wasm-tests/webtransport-tests/echo-server/go.mod @@ -0,0 +1,64 @@ +module echo-server + +go 1.20 + +require ( + github.com/libp2p/go-libp2p v0.27.8 + github.com/multiformats/go-multiaddr v0.9.0 +) + +require ( + github.com/benbjohnson/clock v1.3.0 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 // indirect + github.com/flynn/noise v1.0.0 // indirect + github.com/francoispqt/gojay v1.2.13 // indirect + github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect + github.com/golang/mock v1.6.0 // indirect + github.com/golang/protobuf v1.5.3 // indirect + github.com/google/gopacket v1.1.19 // indirect + github.com/google/pprof v0.0.0-20230405160723-4a4c7d95572b // indirect + github.com/ipfs/go-cid v0.4.1 // indirect + github.com/ipfs/go-log/v2 v2.5.1 // indirect + github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect + github.com/klauspost/compress v1.16.4 // indirect + github.com/klauspost/cpuid/v2 v2.2.4 // indirect + github.com/libp2p/go-buffer-pool v0.1.0 // indirect + github.com/libp2p/go-netroute v0.2.1 // indirect + github.com/mattn/go-isatty v0.0.18 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/minio/sha256-simd v1.0.0 // indirect + github.com/mr-tron/base58 v1.2.0 // indirect + github.com/multiformats/go-base32 v0.1.0 // indirect + github.com/multiformats/go-base36 v0.2.0 // indirect + github.com/multiformats/go-multibase v0.2.0 // indirect + github.com/multiformats/go-multicodec v0.8.1 // indirect + 
github.com/multiformats/go-multihash v0.2.1 // indirect + github.com/multiformats/go-multistream v0.4.1 // indirect + github.com/multiformats/go-varint v0.0.7 // indirect + github.com/onsi/ginkgo/v2 v2.9.2 // indirect + github.com/prometheus/client_golang v1.14.0 // indirect + github.com/prometheus/client_model v0.3.0 // indirect + github.com/prometheus/common v0.42.0 // indirect + github.com/prometheus/procfs v0.9.0 // indirect + github.com/quic-go/qpack v0.4.0 // indirect + github.com/quic-go/qtls-go1-19 v0.3.3 // indirect + github.com/quic-go/qtls-go1-20 v0.2.3 // indirect + github.com/quic-go/quic-go v0.33.0 // indirect + github.com/quic-go/webtransport-go v0.5.2 // indirect + github.com/spaolacci/murmur3 v1.1.0 // indirect + go.uber.org/atomic v1.10.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + go.uber.org/zap v1.24.0 // indirect + golang.org/x/crypto v0.17.0 // indirect + golang.org/x/exp v0.0.0-20230321023759-10a507213a29 // indirect + golang.org/x/mod v0.10.0 // indirect + golang.org/x/net v0.10.0 // indirect + golang.org/x/sys v0.15.0 // indirect + golang.org/x/text v0.14.0 // indirect + golang.org/x/tools v0.7.0 // indirect + google.golang.org/protobuf v1.30.0 // indirect + lukechampine.com/blake3 v1.1.7 // indirect +) diff --git a/wasm-tests/webtransport-tests/echo-server/go.sum b/wasm-tests/webtransport-tests/echo-server/go.sum new file mode 100644 index 00000000000..95c1618a0cb --- /dev/null +++ b/wasm-tests/webtransport-tests/echo-server/go.sum @@ -0,0 +1,344 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.37.0/go.mod h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgoo= +dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= 
+dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU= +dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4= +dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU= +git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= +github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= +github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 
h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU= +github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U= +github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 h1:HbphB4TFFXpv7MNrT52FGrrgVXF1owhMVTHFZIlnvd4= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0/go.mod h1:DZGJHZMqrU4JJqFAWUS2UO1+lbSKsdiOoYi9Zzey7Fc= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= +github.com/flynn/noise v1.0.0 h1:DlTHqmzmvcEiKj+4RYo/imoswx/4r6iBlCMfVtrMXpQ= +github.com/flynn/noise v1.0.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= +github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk= +github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= +github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= +github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= 
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= +github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= +github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof 
v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20230405160723-4a4c7d95572b h1:Qcx5LM0fSiks9uCyFZwDBUasd3lxd1RM0GYpL+Li5o4= +github.com/google/pprof v0.0.0-20230405160723-4a4c7d95572b/go.mod h1:79YE0hCXdHag9sBkw2o+N/YnZtTkXi0UT9Nnixa5eYk= +github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= +github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= +github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s= +github.com/ipfs/go-cid v0.4.1/go.mod h1:uQHwDeX4c6CtyrFwdqyhpNcxVewur1M7l7fNU7LKwZk= +github.com/ipfs/go-log/v2 v2.5.1 h1:1XdUzF7048prq4aBjDQQ4SL5RxftpRGdXhNRwKSAlcY= +github.com/ipfs/go-log/v2 v2.5.1/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI= +github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk= +github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk= +github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.16.4 h1:91KN02FnsOYhuunwU4ssRe8lc2JosWmizWa91B5v1PU= +github.com/klauspost/compress v1.16.4/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= 
+github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/klauspost/cpuid/v2 v2.2.4 h1:acbojRNwl3o09bUq+yDCtZFc1aiwaAAxtcn8YkZXnvk= +github.com/klauspost/cpuid/v2 v2.2.4/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8= +github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= +github.com/libp2p/go-libp2p v0.27.8 h1:IX5x/4yKwyPQeVS2AXHZ3J4YATM9oHBGH1gBc23jBAI= +github.com/libp2p/go-libp2p v0.27.8/go.mod h1:eCFFtd0s5i/EVKR7+5Ki8bM7qwkNW3TPTTSSW9sz8NE= +github.com/libp2p/go-msgio v0.3.0 h1:mf3Z8B1xcFN314sWX+2vOTShIE0Mmn2TXn3YCUQGNj0= +github.com/libp2p/go-netroute v0.2.1 h1:V8kVrpD8GK0Riv15/7VN6RbUQ3URNZVosw7H2v9tksU= +github.com/libp2p/go-netroute v0.2.1/go.mod h1:hraioZr0fhBjG0ZRXJJ6Zj2IVEVNx6tDTFQfSmcq7mQ= +github.com/libp2p/go-yamux/v4 v4.0.0 h1:+Y80dV2Yx/kv7Y7JKu0LECyVdMXm1VUoko+VQ9rBfZQ= +github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= +github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.18 
h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp98= +github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= +github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= +github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g= +github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= +github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE= +github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI= +github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0= +github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4= +github.com/multiformats/go-multiaddr v0.9.0 h1:3h4V1LHIk5w4hJHekMKWALPXErDfz/sggzwC/NcqbDQ= +github.com/multiformats/go-multiaddr v0.9.0/go.mod h1:mI67Lb1EeTOYb8GQfL/7wpIZwc46ElrvzhYnoJOmTT0= +github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g= +github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk= +github.com/multiformats/go-multicodec v0.8.1 h1:ycepHwavHafh3grIbR1jIXnKCsFm0fqsfEOsJ8NtKE8= 
+github.com/multiformats/go-multicodec v0.8.1/go.mod h1:L3QTQvMIaVBkXOXXtVmYE+LI16i14xuaojr/H7Ai54k= +github.com/multiformats/go-multihash v0.2.1 h1:aem8ZT0VA2nCHHk7bPJ1BjUbHNciqZC/d16Vve9l108= +github.com/multiformats/go-multihash v0.2.1/go.mod h1:WxoMcYG85AZVQUyRyo9s4wULvW5qrI9vb2Lt6evduFc= +github.com/multiformats/go-multistream v0.4.1 h1:rFy0Iiyn3YT0asivDUIR05leAdwZq3de4741sbiSdfo= +github.com/multiformats/go-multistream v0.4.1/go.mod h1:Mz5eykRVAjJWckE2U78c6xqdtyNUEhKSM0Lwar2p77Q= +github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8= +github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= +github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= +github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= +github.com/onsi/ginkgo/v2 v2.9.2 h1:BA2GMJOtfGAfagzYtrAlufIP0lq6QERkFmHLMLPwFSU= +github.com/onsi/ginkgo/v2 v2.9.2/go.mod h1:WHcJJG2dIlcCqVfBAwUCrJxSPFb6v4azBwgxeMeDuts= +github.com/onsi/gomega v1.27.4 h1:Z2AnStgsdSayCMDiCU42qIz+HLqEPcgiOCXjAU/w+8E= +github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= +github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod 
h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= +github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= +github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM= +github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc= +github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI= +github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= +github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo= +github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A= +github.com/quic-go/qtls-go1-19 v0.3.3 h1:wznEHvJwd+2X3PqftRha0SUKmGsnb6dfArMhy9PeJVE= +github.com/quic-go/qtls-go1-19 v0.3.3/go.mod h1:ySOI96ew8lnoKPtSqx2BlI5wCpUVPT05RMAlajtnyOI= +github.com/quic-go/qtls-go1-20 v0.2.3 h1:m575dovXn1y2ATOb1XrRFcrv0F+EQmlowTkoraNkDPI= +github.com/quic-go/qtls-go1-20 v0.2.3/go.mod h1:JKtK6mjbAVcUTN/9jZpvLbGxvdWIKS8uT7EiStoU1SM= +github.com/quic-go/quic-go v0.33.0 h1:ItNoTDN/Fm/zBlq769lLJc8ECe9gYaW40veHCCco7y0= +github.com/quic-go/quic-go v0.33.0/go.mod h1:YMuhaAV9/jIu0XclDXwZPAsP/2Kgr5yMYhe9oxhhOFA= +github.com/quic-go/webtransport-go v0.5.2 h1:GA6Bl6oZY+g/flt00Pnu0XtivSD8vukOu3lYhJjnGEk= +github.com/quic-go/webtransport-go v0.5.2/go.mod h1:OhmmgJIzTTqXK5xvtuX0oBpLV2GkLWNDA+UeTGJXErU= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod 
h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= +github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM= +github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0= +github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= +github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= +github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw= +github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI= +github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU= +github.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag= +github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg= +github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw= +github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y= +github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= +github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q= +github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ= +github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I= +github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0= 
+github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ= +github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk= +github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4= +github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw= +github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= +github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= +github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= +github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= +github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +go.opencensus.io v0.18.0/go.mod 
h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= +go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= +go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= +go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= +go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= +golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= +golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= +golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= 
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20230321023759-10a507213a29 h1:ooxPy7fPvB4kwsA2h+iBNHkAbp/4JxTSwCmvdjEYmug= +golang.org/x/exp v0.0.0-20230321023759-10a507213a29/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= +golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk= +golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net 
v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= 
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4= +golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api 
v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= +google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= +google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/check.v1 
v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +lukechampine.com/blake3 v1.1.7 h1:GgRMhmdsuK8+ii6UZFDL8Nb+VyMwadAgcJyfYHxG6n0= +lukechampine.com/blake3 v1.1.7/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA= +sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= +sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= diff --git a/wasm-tests/webtransport-tests/echo-server/main.go b/wasm-tests/webtransport-tests/echo-server/main.go new file mode 100644 index 
00000000000..def4151bd1b --- /dev/null +++ b/wasm-tests/webtransport-tests/echo-server/main.go @@ -0,0 +1,99 @@ +package main + +import ( + "context" + "crypto/rand" + "fmt" + "io" + "net/http" + + "github.com/libp2p/go-libp2p/core/crypto" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/transport" + "github.com/libp2p/go-libp2p/p2p/transport/quicreuse" + webtransport "github.com/libp2p/go-libp2p/p2p/transport/webtransport" + "github.com/multiformats/go-multiaddr" +) + +// This provides a way for test cases to discover the WebTransport address +func addrReporter(ma multiaddr.Multiaddr) { + http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + h := w.Header() + h.Add("Access-Control-Allow-Origin", "*") + h.Add("Cross-Origin-Resource-Policy", "cross-origin") + h.Add("Content-Type", "text/plain; charset=utf-8") + + fmt.Fprint(w, ma.String()) + }) + + http.ListenAndServe(":4455", nil) +} + +func serveConn(conn transport.CapableConn) { + go func() { + for { + stream, err := conn.OpenStream(context.Background()) + if err != nil { + break; + } + + // Stream is a local operation until data is send + // on the stream. We send a single byte to fully + // initiate the stream. 
+ // + // Ref: https://github.com/libp2p/go-libp2p/issues/2343 + stream.Write([]byte("1")) + + go io.Copy(stream, stream) + } + }() + + for { + stream, err := conn.AcceptStream() + if err != nil { + break + } + + go io.Copy(stream, stream) + } +} + +func main() { + priv, pub, err := crypto.GenerateEd25519Key(rand.Reader) + if err != nil { + panic(err) + } + + peerId, err := peer.IDFromPublicKey(pub) + if err != nil { + panic(err) + } + + connManager, err := quicreuse.NewConnManager([32]byte{}) + if err != nil { + panic(err) + } + + transport, err := webtransport.New(priv, nil, connManager, nil, nil); + if err != nil { + panic(err) + } + + listener, err := transport.Listen(multiaddr.StringCast("/ip4/127.0.0.1/udp/0/quic-v1/webtransport")) + if err != nil { + panic(err) + } + + addr := listener.Multiaddr().Encapsulate(multiaddr.StringCast("/p2p/" + peerId.String())) + + go addrReporter(addr) + + for { + conn, err := listener.Accept() + if err != nil { + panic(nil) + } + + go serveConn(conn) + } +} diff --git a/wasm-tests/webtransport-tests/run.sh b/wasm-tests/webtransport-tests/run.sh new file mode 100755 index 00000000000..1819fc97770 --- /dev/null +++ b/wasm-tests/webtransport-tests/run.sh @@ -0,0 +1,28 @@ +#!/bin/bash + +# Prefer podman over docker since it doesn't require root privileges +if command -v podman > /dev/null; then + docker=podman +else + docker=docker +fi + +# cd to this script directory +cd "$(dirname "${BASH_SOURCE[0]}")" || exit 1 + +# Print the directory for debugging +echo "Tests: $PWD" + +# Build and run echo-server +$docker build -t webtransport-echo-server echo-server || exit 1 +id="$($docker run -d --network=host webtransport-echo-server)" || exit 1 + +# Run tests +wasm-pack test --chrome --headless +exit_code=$? 
+ +# Remove echo-server container +$docker rm -f "$id" + +# Propagate wasm-pack's exit code +exit $exit_code diff --git a/wasm-tests/webtransport-tests/src/lib.rs b/wasm-tests/webtransport-tests/src/lib.rs new file mode 100644 index 00000000000..1f420cd6671 --- /dev/null +++ b/wasm-tests/webtransport-tests/src/lib.rs @@ -0,0 +1,384 @@ +use futures::channel::oneshot; +use futures::{AsyncReadExt, AsyncWriteExt}; +use getrandom::getrandom; +use libp2p_core::{StreamMuxer, Transport as _}; +use libp2p_identity::{Keypair, PeerId}; +use libp2p_noise as noise; +use libp2p_webtransport_websys::{Config, Connection, Error, Stream, Transport}; +use multiaddr::{Multiaddr, Protocol}; +use multihash::Multihash; +use std::future::poll_fn; +use std::pin::Pin; +use wasm_bindgen::JsCast; +use wasm_bindgen_futures::{spawn_local, JsFuture}; +use wasm_bindgen_test::{wasm_bindgen_test, wasm_bindgen_test_configure}; +use web_sys::{window, Response}; + +wasm_bindgen_test_configure!(run_in_browser); + +#[wasm_bindgen_test] +async fn single_conn_single_stream() { + let mut conn = new_connection_to_echo_server().await; + let mut stream = create_stream(&mut conn).await; + + send_recv(&mut stream).await; +} + +#[wasm_bindgen_test] +async fn single_conn_single_stream_incoming() { + let mut conn = new_connection_to_echo_server().await; + let mut stream = incoming_stream(&mut conn).await; + + send_recv(&mut stream).await; +} + +#[wasm_bindgen_test] +async fn single_conn_multiple_streams() { + let mut conn = new_connection_to_echo_server().await; + let mut tasks = Vec::new(); + let mut streams = Vec::new(); + + for i in 0..30 { + let stream = if i % 2 == 0 { + create_stream(&mut conn).await + } else { + incoming_stream(&mut conn).await + }; + + streams.push(stream); + } + + for stream in streams { + tasks.push(send_recv_task(stream)); + } + + futures::future::try_join_all(tasks).await.unwrap(); +} + +#[wasm_bindgen_test] +async fn multiple_conn_multiple_streams() { + let mut tasks = Vec::new(); + 
let mut conns = Vec::new(); + + for _ in 0..10 { + let mut conn = new_connection_to_echo_server().await; + let mut streams = Vec::new(); + + for i in 0..10 { + let stream = if i % 2 == 0 { + create_stream(&mut conn).await + } else { + incoming_stream(&mut conn).await + }; + + streams.push(stream); + } + + // If `conn` gets drop then its streams will close. + // Keep it alive by moving it to the outer scope. + conns.push(conn); + + for stream in streams { + tasks.push(send_recv_task(stream)); + } + } + + futures::future::try_join_all(tasks).await.unwrap(); +} + +#[wasm_bindgen_test] +async fn multiple_conn_multiple_streams_sequential() { + for _ in 0..10 { + let mut conn = new_connection_to_echo_server().await; + + for i in 0..10 { + let mut stream = if i % 2 == 0 { + create_stream(&mut conn).await + } else { + incoming_stream(&mut conn).await + }; + + send_recv(&mut stream).await; + } + } +} + +#[wasm_bindgen_test] +async fn read_leftovers() { + let mut conn = new_connection_to_echo_server().await; + let mut stream = create_stream(&mut conn).await; + + // Test that stream works + send_recv(&mut stream).await; + + stream.write_all(b"hello").await.unwrap(); + + let mut buf = [0u8; 3]; + + // Read first half + let len = stream.read(&mut buf[..]).await.unwrap(); + assert_eq!(len, 3); + assert_eq!(&buf[..len], b"hel"); + + // Read second half + let len = stream.read(&mut buf[..]).await.unwrap(); + assert_eq!(len, 2); + assert_eq!(&buf[..len], b"lo"); +} + +#[wasm_bindgen_test] +async fn allow_read_after_closing_writer() { + let mut conn = new_connection_to_echo_server().await; + let mut stream = create_stream(&mut conn).await; + + // Test that stream works + send_recv(&mut stream).await; + + // Write random data + let mut send_buf = [0u8; 1024]; + getrandom(&mut send_buf).unwrap(); + stream.write_all(&send_buf).await.unwrap(); + + // Close writer by calling AsyncWrite::poll_close + stream.close().await.unwrap(); + + // Make sure writer is closed + 
stream.write_all(b"1").await.unwrap_err(); + + // We should be able to read + let mut recv_buf = [0u8; 1024]; + stream.read_exact(&mut recv_buf).await.unwrap(); + + assert_eq!(send_buf, recv_buf); +} + +#[wasm_bindgen_test] +async fn poll_outbound_error_after_connection_close() { + let mut conn = new_connection_to_echo_server().await; + + // Make sure that poll_outbound works well before closing the connection + let mut stream = create_stream(&mut conn).await; + send_recv(&mut stream).await; + drop(stream); + + poll_fn(|cx| Pin::new(&mut conn).poll_close(cx)) + .await + .unwrap(); + + poll_fn(|cx| Pin::new(&mut conn).poll_outbound(cx)) + .await + .expect_err("poll_outbound error after conn closed"); +} + +#[wasm_bindgen_test] +async fn poll_inbound_error_after_connection_close() { + let mut conn = new_connection_to_echo_server().await; + + // Make sure that poll_inbound works well before closing the connection + let mut stream = incoming_stream(&mut conn).await; + send_recv(&mut stream).await; + drop(stream); + + poll_fn(|cx| Pin::new(&mut conn).poll_close(cx)) + .await + .unwrap(); + + poll_fn(|cx| Pin::new(&mut conn).poll_inbound(cx)) + .await + .expect_err("poll_inbound error after conn closed"); +} + +#[wasm_bindgen_test] +async fn read_error_after_connection_drop() { + let mut conn = new_connection_to_echo_server().await; + let mut stream = create_stream(&mut conn).await; + + send_recv(&mut stream).await; + drop(conn); + + let mut buf = [0u8; 16]; + stream + .read(&mut buf) + .await + .expect_err("read error after conn drop"); +} + +#[wasm_bindgen_test] +async fn read_error_after_connection_close() { + let mut conn = new_connection_to_echo_server().await; + let mut stream = create_stream(&mut conn).await; + + send_recv(&mut stream).await; + + poll_fn(|cx| Pin::new(&mut conn).poll_close(cx)) + .await + .unwrap(); + + let mut buf = [0u8; 16]; + stream + .read(&mut buf) + .await + .expect_err("read error after conn drop"); +} + +#[wasm_bindgen_test] +async fn 
write_error_after_connection_drop() { + let mut conn = new_connection_to_echo_server().await; + let mut stream = create_stream(&mut conn).await; + + send_recv(&mut stream).await; + drop(conn); + + let buf = [0u8; 16]; + stream + .write(&buf) + .await + .expect_err("write error after conn drop"); +} + +#[wasm_bindgen_test] +async fn write_error_after_connection_close() { + let mut conn = new_connection_to_echo_server().await; + let mut stream = create_stream(&mut conn).await; + + send_recv(&mut stream).await; + + poll_fn(|cx| Pin::new(&mut conn).poll_close(cx)) + .await + .unwrap(); + + let buf = [0u8; 16]; + stream + .write(&buf) + .await + .expect_err("write error after conn drop"); +} + +#[wasm_bindgen_test] +async fn connect_without_peer_id() { + let mut addr = fetch_server_addr().await; + let keypair = Keypair::generate_ed25519(); + + // Remove peer id + addr.pop(); + + let mut transport = Transport::new(Config::new(&keypair)); + transport.dial(addr).unwrap().await.unwrap(); +} + +#[wasm_bindgen_test] +async fn error_on_unknown_peer_id() { + let mut addr = fetch_server_addr().await; + let keypair = Keypair::generate_ed25519(); + + // Remove peer id + addr.pop(); + + // Add an unknown one + addr.push(Protocol::P2p(PeerId::random())); + + let mut transport = Transport::new(Config::new(&keypair)); + let e = transport.dial(addr.clone()).unwrap().await.unwrap_err(); + assert!(matches!(e, Error::UnknownRemotePeerId)); +} + +#[wasm_bindgen_test] +async fn error_on_unknown_certhash() { + let mut addr = fetch_server_addr().await; + let keypair = Keypair::generate_ed25519(); + + // Remove peer id + let peer_id = addr.pop().unwrap(); + + // Add unknown certhash + addr.push(Protocol::Certhash(Multihash::wrap(1, b"1").unwrap())); + + // Add peer id back + addr.push(peer_id); + + let mut transport = Transport::new(Config::new(&keypair)); + let e = transport.dial(addr.clone()).unwrap().await.unwrap_err(); + assert!(matches!( + e, + 
Error::Noise(noise::Error::UnknownWebTransportCerthashes(..)) + )); +} + +async fn new_connection_to_echo_server() -> Connection { + let addr = fetch_server_addr().await; + let keypair = Keypair::generate_ed25519(); + + let mut transport = Transport::new(Config::new(&keypair)); + + let (_peer_id, conn) = transport.dial(addr).unwrap().await.unwrap(); + + conn +} + +/// Helper that returns the multiaddress of echo-server +/// +/// It fetches the multiaddress via HTTP request to +/// 127.0.0.1:4455. +async fn fetch_server_addr() -> Multiaddr { + let url = "http://127.0.0.1:4455/"; + let window = window().expect("failed to get browser window"); + + let value = JsFuture::from(window.fetch_with_str(url)) + .await + .expect("fetch failed"); + let resp = value.dyn_into::().expect("cast failed"); + + let text = resp.text().expect("text failed"); + let text = JsFuture::from(text).await.expect("text promise failed"); + + text.as_string() + .filter(|s| !s.is_empty()) + .expect("response not a text") + .parse() + .unwrap() +} + +#[allow(unknown_lints, clippy::needless_pass_by_ref_mut)] // False positive. +async fn create_stream(conn: &mut Connection) -> Stream { + poll_fn(|cx| Pin::new(&mut *conn).poll_outbound(cx)) + .await + .unwrap() +} + +#[allow(unknown_lints, clippy::needless_pass_by_ref_mut)] // False positive. 
+async fn incoming_stream(conn: &mut Connection) -> Stream { + let mut stream = poll_fn(|cx| Pin::new(&mut *conn).poll_inbound(cx)) + .await + .unwrap(); + + // For the stream to be initiated `echo-server` sends a single byte + let mut buf = [0u8; 1]; + stream.read_exact(&mut buf).await.unwrap(); + + stream +} + +fn send_recv_task(mut steam: Stream) -> oneshot::Receiver<()> { + let (tx, rx) = oneshot::channel(); + + spawn_local(async move { + send_recv(&mut steam).await; + tx.send(()).unwrap(); + }); + + rx +} + +async fn send_recv(stream: &mut Stream) { + let mut send_buf = [0u8; 1024]; + let mut recv_buf = [0u8; 1024]; + + for _ in 0..30 { + getrandom(&mut send_buf).unwrap(); + + stream.write_all(&send_buf).await.unwrap(); + stream.read_exact(&mut recv_buf).await.unwrap(); + + assert_eq!(send_buf, recv_buf); + } +} From 07df6dc785e3c8f3b728d3896ab8ebcf8b6413cf Mon Sep 17 00:00:00 2001 From: drHuangMHT Date: Wed, 27 Dec 2023 20:27:09 +0800 Subject: [PATCH 83/83] fix errors introduced by merging manually --- identity/src/keypair.rs | 89 +---------------------------------------- 1 file changed, 1 insertion(+), 88 deletions(-) diff --git a/identity/src/keypair.rs b/identity/src/keypair.rs index c38ce8f1476..e4fedf8e9a1 100644 --- a/identity/src/keypair.rs +++ b/identity/src/keypair.rs @@ -232,10 +232,9 @@ impl Keypair { note = "This method name does not follow Rust naming conventions, use `Keypair::try_into_ed25519` instead." )] pub fn to_protobuf_encoding(&self) -> Result, DecodingError> { - self.encode_protobuf() } - + /// Encode a private key as protobuf structure. /// /// See for details on the encoding. @@ -350,91 +349,7 @@ impl Keypair { Err(DecodingError::missing_feature("ecdsa")) } } - - #[cfg(not(any( - feature = "ecdsa", - feature = "secp256k1", - feature = "ed25519", - feature = "rsa" - )))] - unreachable!() - } - - /// Return a [`KeyType`] of the [`Keypair`]. 
- pub fn key_type(&self) -> KeyType { - match self.keypair { - #[cfg(feature = "ed25519")] - KeyPairInner::Ed25519(_) => KeyType::Ed25519, - #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] - KeyPairInner::Rsa(_) => KeyType::RSA, - #[cfg(feature = "secp256k1")] - KeyPairInner::Secp256k1(_) => KeyType::Secp256k1, - #[cfg(feature = "ecdsa")] - KeyPairInner::Ecdsa(_) => KeyType::Ecdsa, } - } - - /// Deterministically derive a new secret from this [`Keypair`], taking into account the provided domain. - /// - /// This works for all key types except RSA where it returns `None`. - /// - /// # Example - /// - /// ``` - /// # fn main() { - /// # use libp2p_identity as identity; - /// let key = identity::Keypair::generate_ed25519(); - /// - /// let new_key = key.derive_secret(b"my encryption key").expect("can derive secret for ed25519"); - /// # } - /// ``` - /// - #[cfg(any( - feature = "ecdsa", - feature = "secp256k1", - feature = "ed25519", - feature = "rsa" - ))] - pub fn derive_secret(&self, domain: &[u8]) -> Option<[u8; 32]> { - let mut okm = [0u8; 32]; - hkdf::Hkdf::::new(None, &self.secret()?) - .expand(domain, &mut okm) - .expect("okm.len() == 32"); - - Some(okm) - } - - // We build docs with all features so this doesn't need to have any docs. - #[cfg(not(any( - feature = "ecdsa", - feature = "secp256k1", - feature = "ed25519", - feature = "rsa" - )))] - pub fn derive_secret(&self, _: &[u8]) -> Option<[u8; 32]> { - None - } - - /// Return the secret key of the [`Keypair`]. 
- #[allow(dead_code)] - pub(crate) fn secret(&self) -> Option<[u8; 32]> { - match self.keypair { - #[cfg(feature = "ed25519")] - KeyPairInner::Ed25519(ref inner) => Some(inner.secret().to_bytes()), - #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] - KeyPairInner::Rsa(_) => None, - #[cfg(feature = "secp256k1")] - KeyPairInner::Secp256k1(ref inner) => Some(inner.secret().to_bytes()), - #[cfg(feature = "ecdsa")] - KeyPairInner::Ecdsa(ref inner) => Some( - inner - .secret() - .to_bytes() - .try_into() - .expect("Ecdsa's private key should be 32 bytes"), - ), - } - #[cfg(not(any( feature = "ecdsa", feature = "secp256k1", @@ -939,7 +854,6 @@ mod tests { #[cfg(feature = "ed25519")] #[cfg(feature = "peerid")] fn keypair_protobuf_roundtrip_ed25519() { - let priv_key = Keypair::try_decode_protobuf(&hex_literal::hex!( "080112407e0830617c4a7de83925dfb2694556b12936c477a0e1feb2e148ec9da60fee7d1ed1e8fae2c4a144b8be8fd4b47bf3d3b34b871c3cacf6010f0e42d474fce27e" )) @@ -1018,7 +932,6 @@ mod tests { #[test] #[cfg(feature = "peerid")] fn keypair_from_protobuf_encoding() { - let priv_key = Keypair::try_decode_protobuf(&hex_literal::hex!( 
"080012ae123082092a0201000282020100e1beab071d08200bde24eef00d049449b07770ff9910257b2d7d5dda242ce8f0e2f12e1af4b32d9efd2c090f66b0f29986dbb645dae9880089704a94e5066d594162ae6ee8892e6ec70701db0a6c445c04778eb3de1293aa1a23c3825b85c6620a2bc3f82f9b0c309bc0ab3aeb1873282bebd3da03c33e76c21e9beb172fd44c9e43be32e2c99827033cf8d0f0c606f4579326c930eb4e854395ad941256542c793902185153c474bed109d6ff5141ebf9cd256cf58893a37f83729f97e7cb435ec679d2e33901d27bb35aa0d7e20561da08885ef0abbf8e2fb48d6a5487047a9ecb1ad41fa7ed84f6e3e8ecd5d98b3982d2a901b4454991766da295ab78822add5612a2df83bcee814cf50973e80d7ef38111b1bd87da2ae92438a2c8cbcc70b31ee319939a3b9c761dbc13b5c086d6b64bf7ae7dacc14622375d92a8ff9af7eb962162bbddebf90acb32adb5e4e4029f1c96019949ecfbfeffd7ac1e3fbcc6b6168c34be3d5a2e5999fcbb39bba7adbca78eab09b9bc39f7fa4b93411f4cc175e70c0a083e96bfaefb04a9580b4753c1738a6a760ae1afd851a1a4bdad231cf56e9284d832483df215a46c1c21bdf0c6cfe951c18f1ee4078c79c13d63edb6e14feaeffabc90ad317e4875fe648101b0864097e998f0ca3025ef9638cd2b0caecd3770ab54a1d9c6ca959b0f5dcbc90caeefc4135baca6fd475224269bbe1b02030100010282020100a472ffa858efd8588ce59ee264b957452f3673acdf5631d7bfd5ba0ef59779c231b0bc838a8b14cae367b6d9ef572c03c7883b0a3c652f5c24c316b1ccfd979f13d0cd7da20c7d34d9ec32dfdc81ee7292167e706d705efde5b8f3edfcba41409e642f8897357df5d320d21c43b33600a7ae4e505db957c1afbc189d73f0b5d972d9aaaeeb232ca20eebd5de6fe7f29d01470354413cc9a0af1154b7af7c1029adcd67c74b4798afeb69e09f2cb387305e73a1b5f450202d54f0ef096fe1bde340219a1194d1ac9026e90b366cce0c59b239d10e4888f52ca1780824d39ae01a6b9f4dd6059191a7f12b2a3d8db3c2868cd4e5a5862b8b625a4197d52c6ac77710116ebd3ced81c4d91ad5fdfbed68312ebce7eea45c1833ca3acf7da2052820eacf5c6b07d086dabeb893391c71417fd8a4b1829ae2cf60d1749d0e25da19530d889461c21da3492a8dc6ccac7de83ac1c2185262c7473c8cc42f547cc9864b02a8073b6aa54a037d8c0de3914784e6205e83d97918b944f11b877b12084c0dd1d36592f8a4f8b8da5bb404c3d2c079b22b6ceabfbcb637c0dbe0201f0909d533f8bf308ada47aee641a012a494d31b54c974e58b87f140258258bb82f31692659db7aa07e17a5b2a0832c24
e122d3a8babcc9ee74cbb07d3058bb85b15f6f6b2674aba9fd34367be9782d444335fbed31e3c4086c652597c27104938b47fa10282010100e9fdf843c1550070ca711cb8ff28411466198f0e212511c3186623890c0071bf6561219682fe7dbdfd81176eba7c4faba21614a20721e0fcd63768e6d925688ecc90992059ac89256e0524de90bf3d8a052ce6a9f6adafa712f3107a016e20c80255c9e37d8206d1bc327e06e66eb24288da866b55904fd8b59e6b2ab31bc5eab47e597093c63fab7872102d57b4c589c66077f534a61f5f65127459a33c91f6db61fc431b1ae90be92b4149a3255291baf94304e3efb77b1107b5a3bda911359c40a53c347ff9100baf8f36dc5cd991066b5bdc28b39ed644f404afe9213f4d31c9d4e40f3a5f5e3c39bebeb244e84137544e1a1839c1c8aaebf0c78a7fad590282010100f6fa1f1e6b803742d5490b7441152f500970f46feb0b73a6e4baba2aaf3c0e245ed852fc31d86a8e46eb48e90fac409989dfee45238f97e8f1f8e83a136488c1b04b8a7fb695f37b8616307ff8a8d63e8cfa0b4fb9b9167ffaebabf111aa5a4344afbabd002ae8961c38c02da76a9149abdde93eb389eb32595c29ba30d8283a7885218a5a9d33f7f01dbdf85f3aad016c071395491338ec318d39220e1c7bd69d3d6b520a13a30d745c102b827ad9984b0dd6aed73916ffa82a06c1c111e7047dcd2668f988a0570a71474992eecf416e068f029ec323d5d635fd24694fc9bf96973c255d26c772a95bf8b7f876547a5beabf86f06cd21b67994f944e7a5493028201010095b02fd30069e547426a8bea58e8a2816f33688dac6c6f6974415af8402244a22133baedf34ce499d7036f3f19b38eb00897c18949b0c5a25953c71aeeccfc8f6594173157cc854bd98f16dffe8f28ca13b77eb43a2730585c49fc3f608cd811bb54b03b84bddaa8ef910988567f783012266199667a546a18fd88271fbf63a45ae4fd4884706da8befb9117c0a4d73de5172f8640b1091ed8a4aea3ed4641463f5ff6a5e3401ad7d0c92811f87956d1fd5f9a1d15c7f3839a08698d9f35f9d966e5000f7cb2655d7b6c4adcd8a9d950ea5f61bb7c9a33c17508f9baa313eecfee4ae493249ebe05a5d7770bbd3551b2eeb752e3649e0636de08e3d672e66cb90282010100ad93e4c31072b063fc5ab5fe22afacece775c795d0efdf7c704cfc027bde0d626a7646fc905bb5a80117e3ca49059af14e0160089f9190065be9bfecf12c3b2145b211c8e89e42dd91c38e9aa23ca73697063564f6f6aa6590088a738722df056004d18d7bccac62b3bafef6172fc2a4b071ea37f31eff7a076bcab7dd144e51a9da8754219352aef2c73478971539fa41de4759285ea626fa3c72e7085be47d5
54d915bbb5149cb6ef835351f231043049cd941506a034bf2f8767f3e1e42ead92f91cb3d75549b57ef7d56ac39c2d80d67f6a2b4ca192974bfc5060e2dd171217971002193dba12e7e4133ab201f07500a90495a38610279b13a48d54f0c99028201003e3a1ac0c2b67d54ed5c4bbe04a7db99103659d33a4f9d35809e1f60c282e5988dddc964527f3b05e6cc890eab3dcb571d66debf3a5527704c87264b3954d7265f4e8d2c637dd89b491b9cf23f264801f804b90454d65af0c4c830d1aef76f597ef61b26ca857ecce9cb78d4f6c2218c00d2975d46c2b013fbf59b750c3b92d8d3ed9e6d1fd0ef1ec091a5c286a3fe2dead292f40f380065731e2079ebb9f2a7ef2c415ecbb488da98f3a12609ca1b6ec8c734032c8bd513292ff842c375d4acd1b02dfb206b24cd815f8e2f9d4af8e7dea0370b19c1b23cc531d78b40e06e1119ee2e08f6f31c6e2e8444c568d13c5d451a291ae0c9f1d4f27d23b3a00d60ad" ))

where - TCodec: Codec, + P: AsRef + Clone, { - pub(crate) codec: TCodec, - pub(crate) protocols: SmallVec<[TCodec::Protocol; 2]>, - pub(crate) request_id: RequestId, - pub(crate) request: TCodec::Request, -} + type Output = (Stream, P); + type Error = void::Void; + type Future = Ready>; -impl fmt::Debug for RequestProtocol -where - TCodec: Codec, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("RequestProtocol") - .field("request_id", &self.request_id) - .finish() - } -} - -impl UpgradeInfo for RequestProtocol -where - TCodec: Codec, -{ - type Info = TCodec::Protocol; - type InfoIter = smallvec::IntoIter<[Self::Info; 2]>; - - fn protocol_info(&self) -> Self::InfoIter { - self.protocols.clone().into_iter() - } -} - -impl OutboundUpgrade for RequestProtocol -where - TCodec: Codec + Send + 'static, -{ - type Output = TCodec::Response; - type Error = io::Error; - type Future = BoxFuture<'static, Result>; - - fn upgrade_outbound( - mut self, - mut io: NegotiatedSubstream, - protocol: Self::Info, - ) -> Self::Future { - async move { - let write = self.codec.write_request(&protocol, &mut io, self.request); - write.await?; - io.close().await?; - let read = self.codec.read_response(&protocol, &mut io); - let response = read.await?; - Ok(response) - } - .boxed() + fn upgrade_outbound(self, io: Stream, protocol: Self::Info) -> Self::Future { + ready(Ok((io, protocol))) } } diff --git a/protocols/request-response/src/json.rs b/protocols/request-response/src/json.rs new file mode 100644 index 00000000000..0b3d634573b --- /dev/null +++ b/protocols/request-response/src/json.rs @@ -0,0 +1,202 @@ +// Copyright 2023 Protocol Labs +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or 
sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +/// A request-response behaviour using [`serde_json`] for serializing and deserializing the messages. +/// +/// # Example +/// +/// ``` +/// # use libp2p_request_response::{json, ProtocolSupport, self as request_response}; +/// # use libp2p_swarm::{StreamProtocol}; +/// #[derive(Debug, serde::Serialize, serde::Deserialize)] +/// struct GreetRequest { +/// name: String, +/// } +/// +/// #[derive(Debug, serde::Serialize, serde::Deserialize)] +/// struct GreetResponse { +/// message: String, +/// } +/// +/// let behaviour = json::Behaviour::::new( +/// [(StreamProtocol::new("/my-json-protocol"), ProtocolSupport::Full)], +/// request_response::Config::default() +/// ); +/// ``` +pub type Behaviour = crate::Behaviour>; + +mod codec { + use async_trait::async_trait; + use futures::prelude::*; + use futures::{AsyncRead, AsyncWrite}; + use libp2p_swarm::StreamProtocol; + use serde::{de::DeserializeOwned, Serialize}; + use std::{io, marker::PhantomData}; + + /// Max request size in bytes + const REQUEST_SIZE_MAXIMUM: u64 = 1024 * 1024; + /// Max response size in bytes + const RESPONSE_SIZE_MAXIMUM: u64 = 10 * 1024 * 1024; + + pub struct Codec { + phantom: PhantomData<(Req, Resp)>, + } + + impl Default for Codec { + fn 
default() -> Self { + Codec { + phantom: PhantomData, + } + } + } + + impl Clone for Codec { + fn clone(&self) -> Self { + Self::default() + } + } + + #[async_trait] + impl crate::Codec for Codec + where + Req: Send + Serialize + DeserializeOwned, + Resp: Send + Serialize + DeserializeOwned, + { + type Protocol = StreamProtocol; + type Request = Req; + type Response = Resp; + + async fn read_request(&mut self, _: &Self::Protocol, io: &mut T) -> io::Result + where + T: AsyncRead + Unpin + Send, + { + let mut vec = Vec::new(); + + io.take(REQUEST_SIZE_MAXIMUM).read_to_end(&mut vec).await?; + + Ok(serde_json::from_slice(vec.as_slice())?) + } + + async fn read_response(&mut self, _: &Self::Protocol, io: &mut T) -> io::Result + where + T: AsyncRead + Unpin + Send, + { + let mut vec = Vec::new(); + + io.take(RESPONSE_SIZE_MAXIMUM).read_to_end(&mut vec).await?; + + Ok(serde_json::from_slice(vec.as_slice())?) + } + + async fn write_request( + &mut self, + _: &Self::Protocol, + io: &mut T, + req: Self::Request, + ) -> io::Result<()> + where + T: AsyncWrite + Unpin + Send, + { + let data = serde_json::to_vec(&req)?; + + io.write_all(data.as_ref()).await?; + + Ok(()) + } + + async fn write_response( + &mut self, + _: &Self::Protocol, + io: &mut T, + resp: Self::Response, + ) -> io::Result<()> + where + T: AsyncWrite + Unpin + Send, + { + let data = serde_json::to_vec(&resp)?; + + io.write_all(data.as_ref()).await?; + + Ok(()) + } + } +} + +#[cfg(test)] +mod tests { + use crate::Codec; + use futures::AsyncWriteExt; + use futures_ringbuf::Endpoint; + use libp2p_swarm::StreamProtocol; + use serde::{Deserialize, Serialize}; + + #[async_std::test] + async fn test_codec() { + let expected_request = TestRequest { + payload: "test_payload".to_string(), + }; + let expected_response = TestResponse { + payload: "test_payload".to_string(), + }; + let protocol = StreamProtocol::new("/test_json/1"); + let mut codec: super::codec::Codec = + super::codec::Codec::default(); + + let (mut a, 
mut b) = Endpoint::pair(124, 124); + codec + .write_request(&protocol, &mut a, expected_request.clone()) + .await + .expect("Should write request"); + a.close().await.unwrap(); + + let actual_request = codec + .read_request(&protocol, &mut b) + .await + .expect("Should read request"); + b.close().await.unwrap(); + + assert_eq!(actual_request, expected_request); + + let (mut a, mut b) = Endpoint::pair(124, 124); + codec + .write_response(&protocol, &mut a, expected_response.clone()) + .await + .expect("Should write response"); + a.close().await.unwrap(); + + let actual_response = codec + .read_response(&protocol, &mut b) + .await + .expect("Should read response"); + b.close().await.unwrap(); + + assert_eq!(actual_response, expected_response); + } + + #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] + struct TestRequest { + payload: String, + } + + #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] + struct TestResponse { + payload: String, + } +} diff --git a/protocols/request-response/src/lib.rs b/protocols/request-response/src/lib.rs index f187ab8c48b..fc68bd6cf1f 100644 --- a/protocols/request-response/src/lib.rs +++ b/protocols/request-response/src/lib.rs @@ -28,7 +28,7 @@ //! over the actual messages being sent, which are defined in terms of a //! [`Codec`]. Creating a request/response protocol thus amounts //! to providing an implementation of this trait which can then be -//! given to [`Behaviour::new`]. Further configuration options are +//! given to [`Behaviour::with_codec`]. Further configuration options are //! available via the [`Config`]. //! //! Requests are sent using [`Behaviour::send_request`] and the @@ -39,6 +39,14 @@ //! receiving a [`Message::Request`] via //! [`Event::Message`]. //! +//! ## Predefined codecs +//! +//! In case your message types implement [`serde::Serialize`] and [`serde::Deserialize`], +//! you can use two predefined behaviours: +//! +//! - [`cbor::Behaviour`] for CBOR-encoded messages +//! 
- [`json::Behaviour`] for JSON-encoded messages +//! //! ## Protocol Families //! //! A single [`Behaviour`] instance can be used with an entire @@ -58,27 +66,31 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +#[cfg(feature = "cbor")] +pub mod cbor; mod codec; mod handler; +#[cfg(feature = "json")] +pub mod json; pub use codec::Codec; pub use handler::ProtocolSupport; -use crate::handler::protocol::RequestProtocol; +use crate::handler::OutboundMessage; use futures::channel::oneshot; use handler::Handler; use libp2p_core::{ConnectedPoint, Endpoint, Multiaddr}; use libp2p_identity::PeerId; use libp2p_swarm::{ - behaviour::{AddressChange, ConnectionClosed, ConnectionEstablished, DialFailure, FromSwarm}, + behaviour::{AddressChange, ConnectionClosed, DialFailure, FromSwarm}, dial_opts::DialOpts, - ConnectionDenied, ConnectionId, NetworkBehaviour, NotifyHandler, PollParameters, THandler, + ConnectionDenied, ConnectionHandler, ConnectionId, NetworkBehaviour, NotifyHandler, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }; use smallvec::SmallVec; use std::{ collections::{HashMap, HashSet, VecDeque}, - fmt, + fmt, io, sync::{atomic::AtomicU64, Arc}, task::{Context, Poll}, time::Duration, @@ -90,7 +102,7 @@ pub enum Message { /// A request message. Request { /// The ID of this request. - request_id: RequestId, + request_id: InboundRequestId, /// The request message. request: TRequest, /// The channel waiting for the response. @@ -105,7 +117,7 @@ pub enum Message { /// The ID of the request that produced this response. /// /// See [`Behaviour::send_request`]. - request_id: RequestId, + request_id: OutboundRequestId, /// The response message. response: TResponse, }, @@ -126,7 +138,7 @@ pub enum Event { /// The peer to whom the request was sent. peer: PeerId, /// The (local) ID of the failed request. - request_id: RequestId, + request_id: OutboundRequestId, /// The error that occurred. 
error: OutboundFailure, }, @@ -135,7 +147,7 @@ pub enum Event { /// The peer from whom the request was received. peer: PeerId, /// The ID of the failed inbound request. - request_id: RequestId, + request_id: InboundRequestId, /// The error that occurred. error: InboundFailure, }, @@ -147,13 +159,13 @@ pub enum Event { /// The peer to whom the response was sent. peer: PeerId, /// The ID of the inbound request whose response was sent. - request_id: RequestId, + request_id: InboundRequestId, }, } /// Possible failures occurring in the context of sending /// an outbound request and receiving the response. -#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Debug)] pub enum OutboundFailure { /// The request could not be sent because a dialing attempt failed. DialFailure, @@ -169,6 +181,8 @@ pub enum OutboundFailure { ConnectionClosed, /// The remote supports none of the requested protocols. UnsupportedProtocols, + /// An IO failure happened on an outbound stream. + Io(io::Error), } impl fmt::Display for OutboundFailure { @@ -182,6 +196,7 @@ impl fmt::Display for OutboundFailure { OutboundFailure::UnsupportedProtocols => { write!(f, "The remote supports none of the requested protocols") } + OutboundFailure::Io(e) => write!(f, "IO error on outbound stream: {e}"), } } } @@ -190,7 +205,7 @@ impl std::error::Error for OutboundFailure {} /// Possible failures occurring in the context of receiving an /// inbound request and sending a response. -#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Debug)] pub enum InboundFailure { /// The inbound request timed out, either while reading the /// incoming request or before a response is sent, e.g. if @@ -206,6 +221,8 @@ pub enum InboundFailure { /// due to the [`ResponseChannel`] being dropped instead of /// being passed to [`Behaviour::send_response`]. ResponseOmission, + /// An IO failure happened on an inbound stream. 
+ Io(io::Error), } impl fmt::Display for InboundFailure { @@ -225,6 +242,7 @@ impl fmt::Display for InboundFailure { f, "The response channel was dropped without sending a response to the remote" ), + InboundFailure::Io(e) => write!(f, "IO error on inbound stream: {e}"), } } } @@ -252,17 +270,27 @@ impl ResponseChannel { } } -/// The ID of an inbound or outbound request. +/// The ID of an inbound request. /// -/// Note: [`RequestId`]'s uniqueness is only guaranteed between two -/// inbound and likewise between two outbound requests. There is no -/// uniqueness guarantee in a set of both inbound and outbound -/// [`RequestId`]s nor in a set of inbound or outbound requests -/// originating from different [`Behaviour`]'s. -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub struct RequestId(u64); - -impl fmt::Display for RequestId { +/// Note: [`InboundRequestId`]'s uniqueness is only guaranteed between +/// inbound requests of the same originating [`Behaviour`]. +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] +pub struct InboundRequestId(u64); + +impl fmt::Display for InboundRequestId { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.0) + } +} + +/// The ID of an outbound request. +/// +/// Note: [`OutboundRequestId`]'s uniqueness is only guaranteed between +/// outbound requests of the same originating [`Behaviour`]. 
+#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] +pub struct OutboundRequestId(u64); + +impl fmt::Display for OutboundRequestId { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.0) } @@ -272,30 +300,37 @@ impl fmt::Display for RequestId { #[derive(Debug, Clone)] pub struct Config { request_timeout: Duration, - connection_keep_alive: Duration, + max_concurrent_streams: usize, } impl Default for Config { fn default() -> Self { Self { - connection_keep_alive: Duration::from_secs(10), request_timeout: Duration::from_secs(10), + max_concurrent_streams: 100, } } } impl Config { - /// Sets the keep-alive timeout of idle connections. - pub fn set_connection_keep_alive(&mut self, v: Duration) -> &mut Self { - self.connection_keep_alive = v; + /// Sets the timeout for inbound and outbound requests. + #[deprecated(note = "Use `Config::with_request_timeout` for one-liner constructions.")] + pub fn set_request_timeout(&mut self, v: Duration) -> &mut Self { + self.request_timeout = v; self } /// Sets the timeout for inbound and outbound requests. - pub fn set_request_timeout(&mut self, v: Duration) -> &mut Self { + pub fn with_request_timeout(mut self, v: Duration) -> Self { self.request_timeout = v; self } + + /// Sets the upper bound for the number of concurrent inbound + outbound streams. + pub fn with_max_concurrent_streams(mut self, num_streams: usize) -> Self { + self.max_concurrent_streams = num_streams; + self + } } /// A request/response protocol for some message codec. @@ -308,24 +343,37 @@ where /// The supported outbound protocols. outbound_protocols: SmallVec<[TCodec::Protocol; 2]>, /// The next (local) request ID. - next_request_id: RequestId, + next_outbound_request_id: OutboundRequestId, /// The next (inbound) request ID. - next_inbound_id: Arc, + next_inbound_request_id: Arc, /// The protocol configuration. config: Config, /// The protocol codec for reading and writing requests and responses. 
codec: TCodec, /// Pending events to return from `poll`. pending_events: - VecDeque, RequestProtocol>>, + VecDeque, OutboundMessage>>, /// The currently connected peers, their pending outbound and inbound responses and their known, /// reachable addresses, if any. connected: HashMap>, /// Externally managed addresses via `add_address` and `remove_address`. - addresses: HashMap>, + addresses: HashMap>, /// Requests that have not yet been sent and are waiting for a connection /// to be established. - pending_outbound_requests: HashMap; 10]>>, + pending_outbound_requests: HashMap; 10]>>, +} + +impl Behaviour +where + TCodec: Codec + Default + Clone + Send + 'static, +{ + /// Creates a new `Behaviour` for the given protocols and configuration, using [`Default`] to construct the codec. + pub fn new(protocols: I, cfg: Config) -> Self + where + I: IntoIterator, + { + Self::with_codec(TCodec::default(), protocols, cfg) + } } impl Behaviour @@ -334,7 +382,7 @@ where { /// Creates a new `Behaviour` for the given /// protocols, codec and configuration. - pub fn new(codec: TCodec, protocols: I, cfg: Config) -> Self + pub fn with_codec(codec: TCodec, protocols: I, cfg: Config) -> Self where I: IntoIterator, { @@ -351,8 +399,8 @@ where Behaviour { inbound_protocols, outbound_protocols, - next_request_id: RequestId(1), - next_inbound_id: Arc::new(AtomicU64::new(1)), + next_outbound_request_id: OutboundRequestId(1), + next_inbound_request_id: Arc::new(AtomicU64::new(1)), config: cfg, codec, pending_events: VecDeque::new(), @@ -374,13 +422,12 @@ where /// > address discovery, or known addresses of peers must be /// > managed via [`Behaviour::add_address`] and /// > [`Behaviour::remove_address`]. 
- pub fn send_request(&mut self, peer: &PeerId, request: TCodec::Request) -> RequestId { - let request_id = self.next_request_id(); - let request = RequestProtocol { + pub fn send_request(&mut self, peer: &PeerId, request: TCodec::Request) -> OutboundRequestId { + let request_id = self.next_outbound_request_id(); + let request = OutboundMessage { request_id, - codec: self.codec.clone(), - protocols: self.outbound_protocols.clone(), request, + protocols: self.outbound_protocols.clone(), }; if let Some(request) = self.try_send_request(peer, request) { @@ -417,11 +464,14 @@ where /// Adds a known address for a peer that can be used for /// dialing attempts by the `Swarm`, i.e. is returned - /// by [`NetworkBehaviour::addresses_of_peer`]. + /// by [`NetworkBehaviour::handle_pending_outbound_connection`]. /// /// Addresses added in this way are only removed by `remove_address`. - pub fn add_address(&mut self, peer: &PeerId, address: Multiaddr) { - self.addresses.entry(*peer).or_default().push(address); + /// + /// Returns true if the address was added, false otherwise (i.e. if the + /// address is already in the list). + pub fn add_address(&mut self, peer: &PeerId, address: Multiaddr) -> bool { + self.addresses.entry(*peer).or_default().insert(address) } /// Removes an address of a peer previously added via `add_address`. @@ -448,14 +498,14 @@ where /// Checks whether an outbound request to the peer with the provided /// [`PeerId`] initiated by [`Behaviour::send_request`] is still /// pending, i.e. waiting for a response. - pub fn is_pending_outbound(&self, peer: &PeerId, request_id: &RequestId) -> bool { + pub fn is_pending_outbound(&self, peer: &PeerId, request_id: &OutboundRequestId) -> bool { // Check if request is already sent on established connection. 
let est_conn = self .connected .get(peer) .map(|cs| { cs.iter() - .any(|c| c.pending_inbound_responses.contains(request_id)) + .any(|c| c.pending_outbound_responses.contains(request_id)) }) .unwrap_or(false); // Check if request is still pending to be sent. @@ -471,20 +521,20 @@ where /// Checks whether an inbound request from the peer with the provided /// [`PeerId`] is still pending, i.e. waiting for a response by the local /// node through [`Behaviour::send_response`]. - pub fn is_pending_inbound(&self, peer: &PeerId, request_id: &RequestId) -> bool { + pub fn is_pending_inbound(&self, peer: &PeerId, request_id: &InboundRequestId) -> bool { self.connected .get(peer) .map(|cs| { cs.iter() - .any(|c| c.pending_outbound_responses.contains(request_id)) + .any(|c| c.pending_inbound_responses.contains(request_id)) }) .unwrap_or(false) } - /// Returns the next request ID. - fn next_request_id(&mut self) -> RequestId { - let request_id = self.next_request_id; - self.next_request_id.0 += 1; + /// Returns the next outbound request ID. + fn next_outbound_request_id(&mut self) -> OutboundRequestId { + let request_id = self.next_outbound_request_id; + self.next_outbound_request_id.0 += 1; request_id } @@ -494,15 +544,15 @@ where fn try_send_request( &mut self, peer: &PeerId, - request: RequestProtocol, - ) -> Option> { + request: OutboundMessage, + ) -> Option> { if let Some(connections) = self.connected.get_mut(peer) { if connections.is_empty() { return Some(request); } let ix = (request.request_id.0 as usize) % connections.len(); let conn = &mut connections[ix]; - conn.pending_inbound_responses.insert(request.request_id); + conn.pending_outbound_responses.insert(request.request_id); self.pending_events.push_back(ToSwarm::NotifyHandler { peer_id: *peer, handler: NotifyHandler::One(conn.id), @@ -517,13 +567,13 @@ where /// Remove pending outbound response for the given peer and connection. 
/// /// Returns `true` if the provided connection to the given peer is still - /// alive and the [`RequestId`] was previously present and is now removed. + /// alive and the [`OutboundRequestId`] was previously present and is now removed. /// Returns `false` otherwise. fn remove_pending_outbound_response( &mut self, peer: &PeerId, connection: ConnectionId, - request: RequestId, + request: OutboundRequestId, ) -> bool { self.get_connection_mut(peer, connection) .map(|c| c.pending_outbound_responses.remove(&request)) @@ -533,16 +583,16 @@ where /// Remove pending inbound response for the given peer and connection. /// /// Returns `true` if the provided connection to the given peer is still - /// alive and the [`RequestId`] was previously present and is now removed. + /// alive and the [`InboundRequestId`] was previously present and is now removed. /// Returns `false` otherwise. fn remove_pending_inbound_response( &mut self, peer: &PeerId, connection: ConnectionId, - request: &RequestId, + request: InboundRequestId, ) -> bool { self.get_connection_mut(peer, connection) - .map(|c| c.pending_inbound_responses.remove(request)) + .map(|c| c.pending_inbound_responses.remove(&request)) .unwrap_or(false) } @@ -580,36 +630,7 @@ where .iter_mut() .find(|c| c.id == connection_id) .expect("Address change can only happen on an established connection."); - connection.address = new_address; - } - - fn on_connection_established( - &mut self, - ConnectionEstablished { - peer_id, - connection_id, - endpoint, - other_established, - .. - }: ConnectionEstablished, - ) { - let address = match endpoint { - ConnectedPoint::Dialer { address, .. } => Some(address.clone()), - ConnectedPoint::Listener { .. 
} => None, - }; - self.connected - .entry(peer_id) - .or_default() - .push(Connection::new(connection_id, address)); - - if other_established == 0 { - if let Some(pending) = self.pending_outbound_requests.remove(&peer_id) { - for request in pending { - let request = self.try_send_request(&peer_id, request); - assert!(request.is_none()); - } - } - } + connection.remote_address = new_address; } fn on_connection_closed( @@ -619,7 +640,7 @@ where connection_id, remaining_established, .. - }: ConnectionClosed<::ConnectionHandler>, + }: ConnectionClosed, ) { let connections = self .connected @@ -637,7 +658,7 @@ where self.connected.remove(&peer_id); } - for request_id in connection.pending_outbound_responses { + for request_id in connection.pending_inbound_responses { self.pending_events .push_back(ToSwarm::GenerateEvent(Event::InboundFailure { peer: peer_id, @@ -646,7 +667,7 @@ where })); } - for request_id in connection.pending_inbound_responses { + for request_id in connection.pending_outbound_responses { self.pending_events .push_back(ToSwarm::GenerateEvent(Event::OutboundFailure { peer: peer_id, @@ -676,6 +697,28 @@ where } } } + + /// Preloads a new [`Handler`] with requests that are waiting to be sent to the newly connected peer. 
+ fn preload_new_handler( + &mut self, + handler: &mut Handler, + peer: PeerId, + connection_id: ConnectionId, + remote_address: Option, + ) { + let mut connection = Connection::new(connection_id, remote_address); + + if let Some(pending_requests) = self.pending_outbound_requests.remove(&peer) { + for request in pending_requests { + connection + .pending_outbound_responses + .insert(request.request_id); + handler.on_behaviour_event(request); + } + } + + self.connected.entry(peer).or_default().push(connection); + } } impl NetworkBehaviour for Behaviour @@ -683,22 +726,26 @@ where TCodec: Codec + Send + Clone + 'static, { type ConnectionHandler = Handler; - type OutEvent = Event; + type ToSwarm = Event; fn handle_established_inbound_connection( &mut self, - _: ConnectionId, - _: PeerId, + connection_id: ConnectionId, + peer: PeerId, _: &Multiaddr, _: &Multiaddr, ) -> Result, ConnectionDenied> { - Ok(Handler::new( + let mut handler = Handler::new( self.inbound_protocols.clone(), self.codec.clone(), - self.config.connection_keep_alive, self.config.request_timeout, - self.next_inbound_id.clone(), - )) + self.next_inbound_request_id.clone(), + self.config.max_concurrent_streams, + ); + + self.preload_new_handler(&mut handler, peer, connection_id, None); + + Ok(handler) } fn handle_pending_outbound_connection( @@ -715,10 +762,10 @@ where let mut addresses = Vec::new(); if let Some(connections) = self.connected.get(&peer) { - addresses.extend(connections.iter().filter_map(|c| c.address.clone())) + addresses.extend(connections.iter().filter_map(|c| c.remote_address.clone())) } if let Some(more) = self.addresses.get(&peer) { - addresses.extend(more.into_iter().cloned()); + addresses.extend(more.iter().cloned()); } Ok(addresses) @@ -726,38 +773,38 @@ where fn handle_established_outbound_connection( &mut self, - _: ConnectionId, - _: PeerId, - _: &Multiaddr, + connection_id: ConnectionId, + peer: PeerId, + remote_address: &Multiaddr, _: Endpoint, ) -> Result, ConnectionDenied> 
{ - Ok(Handler::new( + let mut handler = Handler::new( self.inbound_protocols.clone(), self.codec.clone(), - self.config.connection_keep_alive, self.config.request_timeout, - self.next_inbound_id.clone(), - )) + self.next_inbound_request_id.clone(), + self.config.max_concurrent_streams, + ); + + self.preload_new_handler( + &mut handler, + peer, + connection_id, + Some(remote_address.clone()), + ); + + Ok(handler) } - fn on_swarm_event(&mut self, event: FromSwarm) { + fn on_swarm_event(&mut self, event: FromSwarm) { match event { - FromSwarm::ConnectionEstablished(connection_established) => { - self.on_connection_established(connection_established) - } + FromSwarm::ConnectionEstablished(_) => {} FromSwarm::ConnectionClosed(connection_closed) => { self.on_connection_closed(connection_closed) } FromSwarm::AddressChange(address_change) => self.on_address_change(address_change), FromSwarm::DialFailure(dial_failure) => self.on_dial_failure(dial_failure), - FromSwarm::ListenFailure(_) => {} - FromSwarm::NewListener(_) => {} - FromSwarm::NewListenAddr(_) => {} - FromSwarm::ExpiredListenAddr(_) => {} - FromSwarm::ListenerError(_) => {} - FromSwarm::ListenerClosed(_) => {} - FromSwarm::NewExternalAddr(_) => {} - FromSwarm::ExpiredExternalAddr(_) => {} + _ => {} } } @@ -772,7 +819,7 @@ where request_id, response, } => { - let removed = self.remove_pending_inbound_response(&peer, connection, &request_id); + let removed = self.remove_pending_outbound_response(&peer, connection, request_id); debug_assert!( removed, "Expect request_id to be pending before receiving response.", @@ -789,35 +836,26 @@ where request_id, request, sender, - } => { - let channel = ResponseChannel { sender }; - let message = Message::Request { - request_id, - request, - channel, - }; - self.pending_events - .push_back(ToSwarm::GenerateEvent(Event::Message { peer, message })); + } => match self.get_connection_mut(&peer, connection) { + Some(connection) => { + let inserted = 
connection.pending_inbound_responses.insert(request_id); + debug_assert!(inserted, "Expect id of new request to be unknown."); - match self.get_connection_mut(&peer, connection) { - Some(connection) => { - let inserted = connection.pending_outbound_responses.insert(request_id); - debug_assert!(inserted, "Expect id of new request to be unknown."); - } - // Connection closed after `Event::Request` has been emitted. - None => { - self.pending_events.push_back(ToSwarm::GenerateEvent( - Event::InboundFailure { - peer, - request_id, - error: InboundFailure::ConnectionClosed, - }, - )); - } + let channel = ResponseChannel { sender }; + let message = Message::Request { + request_id, + request, + channel, + }; + self.pending_events + .push_back(ToSwarm::GenerateEvent(Event::Message { peer, message })); } - } + None => { + tracing::debug!("Connection ({connection}) closed after `Event::Request` ({request_id}) has been emitted."); + } + }, handler::Event::ResponseSent(request_id) => { - let removed = self.remove_pending_outbound_response(&peer, connection, request_id); + let removed = self.remove_pending_inbound_response(&peer, connection, request_id); debug_assert!( removed, "Expect request_id to be pending before response is sent." @@ -830,7 +868,7 @@ where })); } handler::Event::ResponseOmission(request_id) => { - let removed = self.remove_pending_outbound_response(&peer, connection, request_id); + let removed = self.remove_pending_inbound_response(&peer, connection, request_id); debug_assert!( removed, "Expect request_id to be pending before response is omitted.", @@ -844,7 +882,7 @@ where })); } handler::Event::OutboundTimeout(request_id) => { - let removed = self.remove_pending_inbound_response(&peer, connection, &request_id); + let removed = self.remove_pending_outbound_response(&peer, connection, request_id); debug_assert!( removed, "Expect request_id to be pending before request times out." 
@@ -857,22 +895,8 @@ where error: OutboundFailure::Timeout, })); } - handler::Event::InboundTimeout(request_id) => { - // Note: `Event::InboundTimeout` is emitted both for timing - // out to receive the request and for timing out sending the response. In the former - // case the request is never added to `pending_outbound_responses` and thus one can - // not assert the request_id to be present before removing it. - self.remove_pending_outbound_response(&peer, connection, request_id); - - self.pending_events - .push_back(ToSwarm::GenerateEvent(Event::InboundFailure { - peer, - request_id, - error: InboundFailure::Timeout, - })); - } handler::Event::OutboundUnsupportedProtocols(request_id) => { - let removed = self.remove_pending_inbound_response(&peer, connection, &request_id); + let removed = self.remove_pending_outbound_response(&peer, connection, request_id); debug_assert!( removed, "Expect request_id to be pending before failing to connect.", @@ -885,25 +909,54 @@ where error: OutboundFailure::UnsupportedProtocols, })); } - handler::Event::InboundUnsupportedProtocols(request_id) => { - // Note: No need to call `self.remove_pending_outbound_response`, - // `Event::Request` was never emitted for this request and - // thus request was never added to `pending_outbound_responses`. 
+ handler::Event::OutboundStreamFailed { request_id, error } => { + let removed = self.remove_pending_outbound_response(&peer, connection, request_id); + debug_assert!(removed, "Expect request_id to be pending upon failure"); + self.pending_events - .push_back(ToSwarm::GenerateEvent(Event::InboundFailure { + .push_back(ToSwarm::GenerateEvent(Event::OutboundFailure { peer, request_id, - error: InboundFailure::UnsupportedProtocols, - })); + error: OutboundFailure::Io(error), + })) + } + handler::Event::InboundTimeout(request_id) => { + let removed = self.remove_pending_inbound_response(&peer, connection, request_id); + + if removed { + self.pending_events + .push_back(ToSwarm::GenerateEvent(Event::InboundFailure { + peer, + request_id, + error: InboundFailure::Timeout, + })); + } else { + // This happens when timeout is emitted before `read_request` finishes. + tracing::debug!( + "Inbound request timeout for an unknown request_id ({request_id})" + ); + } + } + handler::Event::InboundStreamFailed { request_id, error } => { + let removed = self.remove_pending_inbound_response(&peer, connection, request_id); + + if removed { + self.pending_events + .push_back(ToSwarm::GenerateEvent(Event::InboundFailure { + peer, + request_id, + error: InboundFailure::Io(error), + })); + } else { + // This happens when `read_request` fails. + tracing::debug!("Inbound failure is reported for an unknown request_id ({request_id}): {error}"); + } } } } - fn poll( - &mut self, - _: &mut Context<'_>, - _: &mut impl PollParameters, - ) -> Poll>> { + #[tracing::instrument(level = "trace", name = "NetworkBehaviour::poll", skip(self))] + fn poll(&mut self, _: &mut Context<'_>) -> Poll>> { if let Some(ev) = self.pending_events.pop_front() { return Poll::Ready(ev); } else if self.pending_events.capacity() > EMPTY_QUEUE_SHRINK_THRESHOLD { @@ -923,21 +976,21 @@ const EMPTY_QUEUE_SHRINK_THRESHOLD: usize = 100; /// Internal information tracked for an established connection. 
struct Connection { id: ConnectionId, - address: Option, + remote_address: Option, /// Pending outbound responses where corresponding inbound requests have /// been received on this connection and emitted via `poll` but have not yet /// been answered. - pending_outbound_responses: HashSet, + pending_outbound_responses: HashSet, /// Pending inbound responses for previously sent requests on this /// connection. - pending_inbound_responses: HashSet, + pending_inbound_responses: HashSet, } impl Connection { - fn new(id: ConnectionId, address: Option) -> Self { + fn new(id: ConnectionId, remote_address: Option) -> Self { Self { id, - address, + remote_address, pending_outbound_responses: Default::default(), pending_inbound_responses: Default::default(), } diff --git a/protocols/request-response/tests/error_reporting.rs b/protocols/request-response/tests/error_reporting.rs new file mode 100644 index 00000000000..2dc82b2e0c5 --- /dev/null +++ b/protocols/request-response/tests/error_reporting.rs @@ -0,0 +1,568 @@ +use anyhow::{bail, Result}; +use async_std::task::sleep; +use async_trait::async_trait; +use futures::prelude::*; +use libp2p_identity::PeerId; +use libp2p_request_response as request_response; +use libp2p_request_response::ProtocolSupport; +use libp2p_swarm::{StreamProtocol, Swarm}; +use libp2p_swarm_test::SwarmExt; +use request_response::{ + Codec, InboundFailure, InboundRequestId, OutboundFailure, OutboundRequestId, ResponseChannel, +}; +use std::pin::pin; +use std::time::Duration; +use std::{io, iter}; +use tracing_subscriber::EnvFilter; + +#[async_std::test] +async fn report_outbound_failure_on_read_response() { + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); + + let (peer1_id, mut swarm1) = new_swarm(); + let (peer2_id, mut swarm2) = new_swarm(); + + swarm1.listen().with_memory_addr_external().await; + swarm2.connect(&mut swarm1).await; + + let server_task = async move { + let (peer, req_id, action, 
resp_channel) = wait_request(&mut swarm1).await.unwrap(); + assert_eq!(peer, peer2_id); + assert_eq!(action, Action::FailOnReadResponse); + swarm1 + .behaviour_mut() + .send_response(resp_channel, Action::FailOnReadResponse) + .unwrap(); + + let (peer, req_id_done) = wait_response_sent(&mut swarm1).await.unwrap(); + assert_eq!(peer, peer2_id); + assert_eq!(req_id_done, req_id); + + // Keep the connection alive, otherwise swarm2 may receive `ConnectionClosed` instead + wait_no_events(&mut swarm1).await; + }; + + // Expects OutboundFailure::Io failure with `FailOnReadResponse` error + let client_task = async move { + let req_id = swarm2 + .behaviour_mut() + .send_request(&peer1_id, Action::FailOnReadResponse); + + let (peer, req_id_done, error) = wait_outbound_failure(&mut swarm2).await.unwrap(); + assert_eq!(peer, peer1_id); + assert_eq!(req_id_done, req_id); + + let error = match error { + OutboundFailure::Io(e) => e, + e => panic!("Unexpected error: {e:?}"), + }; + + assert_eq!(error.kind(), io::ErrorKind::Other); + assert_eq!( + error.into_inner().unwrap().to_string(), + "FailOnReadResponse" + ); + }; + + let server_task = pin!(server_task); + let client_task = pin!(client_task); + futures::future::select(server_task, client_task).await; +} + +#[async_std::test] +async fn report_outbound_failure_on_write_request() { + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); + + let (peer1_id, mut swarm1) = new_swarm(); + let (_peer2_id, mut swarm2) = new_swarm(); + + swarm1.listen().with_memory_addr_external().await; + swarm2.connect(&mut swarm1).await; + + // Expects no events because `Event::Request` is produced after `read_request`. + // Keep the connection alive, otherwise swarm2 may receive `ConnectionClosed` instead. + let server_task = wait_no_events(&mut swarm1); + + // Expects OutboundFailure::Io failure with `FailOnWriteRequest` error. 
+ let client_task = async move { + let req_id = swarm2 + .behaviour_mut() + .send_request(&peer1_id, Action::FailOnWriteRequest); + + let (peer, req_id_done, error) = wait_outbound_failure(&mut swarm2).await.unwrap(); + assert_eq!(peer, peer1_id); + assert_eq!(req_id_done, req_id); + + let error = match error { + OutboundFailure::Io(e) => e, + e => panic!("Unexpected error: {e:?}"), + }; + + assert_eq!(error.kind(), io::ErrorKind::Other); + assert_eq!( + error.into_inner().unwrap().to_string(), + "FailOnWriteRequest" + ); + }; + + let server_task = pin!(server_task); + let client_task = pin!(client_task); + futures::future::select(server_task, client_task).await; +} + +#[async_std::test] +async fn report_outbound_timeout_on_read_response() { + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); + + // `swarm1` needs to have a bigger timeout to avoid racing + let (peer1_id, mut swarm1) = new_swarm_with_timeout(Duration::from_millis(200)); + let (peer2_id, mut swarm2) = new_swarm_with_timeout(Duration::from_millis(100)); + + swarm1.listen().with_memory_addr_external().await; + swarm2.connect(&mut swarm1).await; + + let server_task = async move { + let (peer, req_id, action, resp_channel) = wait_request(&mut swarm1).await.unwrap(); + assert_eq!(peer, peer2_id); + assert_eq!(action, Action::TimeoutOnReadResponse); + swarm1 + .behaviour_mut() + .send_response(resp_channel, Action::TimeoutOnReadResponse) + .unwrap(); + + let (peer, req_id_done) = wait_response_sent(&mut swarm1).await.unwrap(); + assert_eq!(peer, peer2_id); + assert_eq!(req_id_done, req_id); + + // Keep the connection alive, otherwise swarm2 may receive `ConnectionClosed` instead + wait_no_events(&mut swarm1).await; + }; + + // Expects OutboundFailure::Timeout + let client_task = async move { + let req_id = swarm2 + .behaviour_mut() + .send_request(&peer1_id, Action::TimeoutOnReadResponse); + + let (peer, req_id_done, error) = wait_outbound_failure(&mut 
swarm2).await.unwrap(); + assert_eq!(peer, peer1_id); + assert_eq!(req_id_done, req_id); + assert!(matches!(error, OutboundFailure::Timeout)); + }; + + let server_task = pin!(server_task); + let client_task = pin!(client_task); + futures::future::select(server_task, client_task).await; +} + +#[async_std::test] +async fn report_inbound_failure_on_read_request() { + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); + + let (peer1_id, mut swarm1) = new_swarm(); + let (_peer2_id, mut swarm2) = new_swarm(); + + swarm1.listen().with_memory_addr_external().await; + swarm2.connect(&mut swarm1).await; + + // Expects no events because `Event::Request` is produced after `read_request`. + // Keep the connection alive, otherwise swarm2 may receive `ConnectionClosed` instead. + let server_task = wait_no_events(&mut swarm1); + + // Expects io::ErrorKind::UnexpectedEof + let client_task = async move { + let req_id = swarm2 + .behaviour_mut() + .send_request(&peer1_id, Action::FailOnReadRequest); + + let (peer, req_id_done, error) = wait_outbound_failure(&mut swarm2).await.unwrap(); + assert_eq!(peer, peer1_id); + assert_eq!(req_id_done, req_id); + + match error { + OutboundFailure::Io(e) if e.kind() == io::ErrorKind::UnexpectedEof => {} + e => panic!("Unexpected error: {e:?}"), + }; + }; + + let server_task = pin!(server_task); + let client_task = pin!(client_task); + futures::future::select(server_task, client_task).await; +} + +#[async_std::test] +async fn report_inbound_failure_on_write_response() { + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); + + let (peer1_id, mut swarm1) = new_swarm(); + let (peer2_id, mut swarm2) = new_swarm(); + + swarm1.listen().with_memory_addr_external().await; + swarm2.connect(&mut swarm1).await; + + // Expects OutboundFailure::Io failure with `FailOnWriteResponse` error + let server_task = async move { + let (peer, req_id, action, resp_channel) = 
wait_request(&mut swarm1).await.unwrap(); + assert_eq!(peer, peer2_id); + assert_eq!(action, Action::FailOnWriteResponse); + swarm1 + .behaviour_mut() + .send_response(resp_channel, Action::FailOnWriteResponse) + .unwrap(); + + let (peer, req_id_done, error) = wait_inbound_failure(&mut swarm1).await.unwrap(); + assert_eq!(peer, peer2_id); + assert_eq!(req_id_done, req_id); + + let error = match error { + InboundFailure::Io(e) => e, + e => panic!("Unexpected error: {e:?}"), + }; + + assert_eq!(error.kind(), io::ErrorKind::Other); + assert_eq!( + error.into_inner().unwrap().to_string(), + "FailOnWriteResponse" + ); + }; + + // Expects OutboundFailure::ConnectionClosed or io::ErrorKind::UnexpectedEof + let client_task = async move { + let req_id = swarm2 + .behaviour_mut() + .send_request(&peer1_id, Action::FailOnWriteResponse); + + let (peer, req_id_done, error) = wait_outbound_failure(&mut swarm2).await.unwrap(); + assert_eq!(peer, peer1_id); + assert_eq!(req_id_done, req_id); + + match error { + OutboundFailure::ConnectionClosed => { + // ConnectionClosed is allowed here because we mainly test the behavior + // of `server_task`. 
+ } + OutboundFailure::Io(e) if e.kind() == io::ErrorKind::UnexpectedEof => {} + e => panic!("Unexpected error: {e:?}"), + }; + + // Keep alive the task, so only `server_task` can finish + wait_no_events(&mut swarm2).await; + }; + + let server_task = pin!(server_task); + let client_task = pin!(client_task); + futures::future::select(server_task, client_task).await; +} + +#[async_std::test] +async fn report_inbound_timeout_on_write_response() { + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); + + // `swarm2` needs to have a bigger timeout to avoid racing + let (peer1_id, mut swarm1) = new_swarm_with_timeout(Duration::from_millis(100)); + let (peer2_id, mut swarm2) = new_swarm_with_timeout(Duration::from_millis(200)); + + swarm1.listen().with_memory_addr_external().await; + swarm2.connect(&mut swarm1).await; + + // Expects InboundFailure::Timeout + let server_task = async move { + let (peer, req_id, action, resp_channel) = wait_request(&mut swarm1).await.unwrap(); + assert_eq!(peer, peer2_id); + assert_eq!(action, Action::TimeoutOnWriteResponse); + swarm1 + .behaviour_mut() + .send_response(resp_channel, Action::TimeoutOnWriteResponse) + .unwrap(); + + let (peer, req_id_done, error) = wait_inbound_failure(&mut swarm1).await.unwrap(); + assert_eq!(peer, peer2_id); + assert_eq!(req_id_done, req_id); + assert!(matches!(error, InboundFailure::Timeout)); + }; + + // Expects OutboundFailure::ConnectionClosed or io::ErrorKind::UnexpectedEof + let client_task = async move { + let req_id = swarm2 + .behaviour_mut() + .send_request(&peer1_id, Action::TimeoutOnWriteResponse); + + let (peer, req_id_done, error) = wait_outbound_failure(&mut swarm2).await.unwrap(); + assert_eq!(peer, peer1_id); + assert_eq!(req_id_done, req_id); + + match error { + OutboundFailure::ConnectionClosed => { + // ConnectionClosed is allowed here because we mainly test the behavior + // of `server_task`. 
+ } + OutboundFailure::Io(e) if e.kind() == io::ErrorKind::UnexpectedEof => {} + e => panic!("Unexpected error: {e:?}"), + } + + // Keep alive the task, so only `server_task` can finish + wait_no_events(&mut swarm2).await; + }; + + let server_task = pin!(server_task); + let client_task = pin!(client_task); + futures::future::select(server_task, client_task).await; +} + +#[derive(Clone, Default)] +struct TestCodec; + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +enum Action { + FailOnReadRequest, + FailOnReadResponse, + TimeoutOnReadResponse, + FailOnWriteRequest, + FailOnWriteResponse, + TimeoutOnWriteResponse, +} + +impl From for u8 { + fn from(value: Action) -> Self { + match value { + Action::FailOnReadRequest => 0, + Action::FailOnReadResponse => 1, + Action::TimeoutOnReadResponse => 2, + Action::FailOnWriteRequest => 3, + Action::FailOnWriteResponse => 4, + Action::TimeoutOnWriteResponse => 5, + } + } +} + +impl TryFrom for Action { + type Error = io::Error; + + fn try_from(value: u8) -> Result { + match value { + 0 => Ok(Action::FailOnReadRequest), + 1 => Ok(Action::FailOnReadResponse), + 2 => Ok(Action::TimeoutOnReadResponse), + 3 => Ok(Action::FailOnWriteRequest), + 4 => Ok(Action::FailOnWriteResponse), + 5 => Ok(Action::TimeoutOnWriteResponse), + _ => Err(io::Error::new(io::ErrorKind::Other, "invalid action")), + } + } +} + +#[async_trait] +impl Codec for TestCodec { + type Protocol = StreamProtocol; + type Request = Action; + type Response = Action; + + async fn read_request( + &mut self, + _protocol: &Self::Protocol, + io: &mut T, + ) -> io::Result + where + T: AsyncRead + Unpin + Send, + { + let mut buf = Vec::new(); + io.read_to_end(&mut buf).await?; + + if buf.is_empty() { + return Err(io::ErrorKind::UnexpectedEof.into()); + } + + assert_eq!(buf.len(), 1); + + match buf[0].try_into()? 
{ + Action::FailOnReadRequest => { + Err(io::Error::new(io::ErrorKind::Other, "FailOnReadRequest")) + } + action => Ok(action), + } + } + + async fn read_response( + &mut self, + _protocol: &Self::Protocol, + io: &mut T, + ) -> io::Result + where + T: AsyncRead + Unpin + Send, + { + let mut buf = Vec::new(); + io.read_to_end(&mut buf).await?; + + if buf.is_empty() { + return Err(io::ErrorKind::UnexpectedEof.into()); + } + + assert_eq!(buf.len(), 1); + + match buf[0].try_into()? { + Action::FailOnReadResponse => { + Err(io::Error::new(io::ErrorKind::Other, "FailOnReadResponse")) + } + Action::TimeoutOnReadResponse => loop { + sleep(Duration::MAX).await; + }, + action => Ok(action), + } + } + + async fn write_request( + &mut self, + _protocol: &Self::Protocol, + io: &mut T, + req: Self::Request, + ) -> io::Result<()> + where + T: AsyncWrite + Unpin + Send, + { + match req { + Action::FailOnWriteRequest => { + Err(io::Error::new(io::ErrorKind::Other, "FailOnWriteRequest")) + } + action => { + let bytes = [action.into()]; + io.write_all(&bytes).await?; + Ok(()) + } + } + } + + async fn write_response( + &mut self, + _protocol: &Self::Protocol, + io: &mut T, + res: Self::Response, + ) -> io::Result<()> + where + T: AsyncWrite + Unpin + Send, + { + match res { + Action::FailOnWriteResponse => { + Err(io::Error::new(io::ErrorKind::Other, "FailOnWriteResponse")) + } + Action::TimeoutOnWriteResponse => loop { + sleep(Duration::MAX).await; + }, + action => { + let bytes = [action.into()]; + io.write_all(&bytes).await?; + Ok(()) + } + } + } +} + +fn new_swarm_with_timeout( + timeout: Duration, +) -> (PeerId, Swarm>) { + let protocols = iter::once((StreamProtocol::new("/test/1"), ProtocolSupport::Full)); + let cfg = request_response::Config::default().with_request_timeout(timeout); + + let swarm = + Swarm::new_ephemeral(|_| request_response::Behaviour::::new(protocols, cfg)); + let peed_id = *swarm.local_peer_id(); + + (peed_id, swarm) +} + +fn new_swarm() -> (PeerId, Swarm>) 
{ + new_swarm_with_timeout(Duration::from_millis(100)) +} + +async fn wait_no_events(swarm: &mut Swarm>) { + loop { + if let Ok(ev) = swarm.select_next_some().await.try_into_behaviour_event() { + panic!("Unexpected event: {ev:?}") + } + } +} + +async fn wait_request( + swarm: &mut Swarm>, +) -> Result<(PeerId, InboundRequestId, Action, ResponseChannel)> { + loop { + match swarm.select_next_some().await.try_into_behaviour_event() { + Ok(request_response::Event::Message { + peer, + message: + request_response::Message::Request { + request_id, + request, + channel, + }, + }) => { + return Ok((peer, request_id, request, channel)); + } + Ok(ev) => bail!("Unexpected event: {ev:?}"), + Err(..) => {} + } + } +} + +async fn wait_response_sent( + swarm: &mut Swarm>, +) -> Result<(PeerId, InboundRequestId)> { + loop { + match swarm.select_next_some().await.try_into_behaviour_event() { + Ok(request_response::Event::ResponseSent { + peer, request_id, .. + }) => { + return Ok((peer, request_id)); + } + Ok(ev) => bail!("Unexpected event: {ev:?}"), + Err(..) => {} + } + } +} + +async fn wait_inbound_failure( + swarm: &mut Swarm>, +) -> Result<(PeerId, InboundRequestId, InboundFailure)> { + loop { + match swarm.select_next_some().await.try_into_behaviour_event() { + Ok(request_response::Event::InboundFailure { + peer, + request_id, + error, + }) => { + return Ok((peer, request_id, error)); + } + Ok(ev) => bail!("Unexpected event: {ev:?}"), + Err(..) => {} + } + } +} + +async fn wait_outbound_failure( + swarm: &mut Swarm>, +) -> Result<(PeerId, OutboundRequestId, OutboundFailure)> { + loop { + match swarm.select_next_some().await.try_into_behaviour_event() { + Ok(request_response::Event::OutboundFailure { + peer, + request_id, + error, + }) => { + return Ok((peer, request_id, error)); + } + Ok(ev) => bail!("Unexpected event: {ev:?}"), + Err(..) 
=> {} + } + } +} diff --git a/protocols/request-response/tests/ping.rs b/protocols/request-response/tests/ping.rs index 48860b5887f..b9e7878a78b 100644 --- a/protocols/request-response/tests/ping.rs +++ b/protocols/request-response/tests/ping.rs @@ -20,31 +20,35 @@ //! Integration tests for the `Behaviour`. -use async_trait::async_trait; -use futures::{prelude::*, AsyncWriteExt}; -use libp2p_core::upgrade::{read_length_prefixed, write_length_prefixed}; +use futures::prelude::*; use libp2p_identity::PeerId; use libp2p_request_response as request_response; use libp2p_request_response::ProtocolSupport; use libp2p_swarm::{StreamProtocol, Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt; use rand::{self, Rng}; +use serde::{Deserialize, Serialize}; use std::{io, iter}; +use tracing_subscriber::EnvFilter; #[async_std::test] +#[cfg(feature = "cbor")] async fn is_response_outbound() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let ping = Ping("ping".to_string().into_bytes()); let offline_peer = PeerId::random(); - let protocols = iter::once(( - StreamProtocol::new("/ping/1"), - request_response::ProtocolSupport::Full, - )); - let cfg = request_response::Config::default(); - - let mut swarm1 = - Swarm::new_ephemeral(|_| request_response::Behaviour::new(PingCodec(), protocols, cfg)); + let mut swarm1 = Swarm::new_ephemeral(|_| { + request_response::cbor::Behaviour::::new( + [( + StreamProtocol::new("/ping/1"), + request_response::ProtocolSupport::Full, + )], + request_response::Config::default(), + ) + }); let request_id1 = swarm1 .behaviour_mut() @@ -79,6 +83,7 @@ async fn is_response_outbound() { /// Exercises a simple ping protocol. 
#[async_std::test] +#[cfg(feature = "cbor")] async fn ping_protocol() { let ping = Ping("ping".to_string().into_bytes()); let pong = Pong("pong".to_string().into_bytes()); @@ -87,14 +92,15 @@ async fn ping_protocol() { let cfg = request_response::Config::default(); let mut swarm1 = Swarm::new_ephemeral(|_| { - request_response::Behaviour::new(PingCodec(), protocols.clone(), cfg.clone()) + request_response::cbor::Behaviour::::new(protocols.clone(), cfg.clone()) }); let peer1_id = *swarm1.local_peer_id(); - let mut swarm2 = - Swarm::new_ephemeral(|_| request_response::Behaviour::new(PingCodec(), protocols, cfg)); + let mut swarm2 = Swarm::new_ephemeral(|_| { + request_response::cbor::Behaviour::::new(protocols, cfg) + }); let peer2_id = *swarm2.local_peer_id(); - swarm1.listen().await; + swarm1.listen().with_memory_addr_external().await; swarm2.connect(&mut swarm1).await; let expected_ping = ping.clone(); @@ -171,6 +177,7 @@ async fn ping_protocol() { } #[async_std::test] +#[cfg(feature = "cbor")] async fn emits_inbound_connection_closed_failure() { let ping = Ping("ping".to_string().into_bytes()); @@ -178,14 +185,15 @@ async fn emits_inbound_connection_closed_failure() { let cfg = request_response::Config::default(); let mut swarm1 = Swarm::new_ephemeral(|_| { - request_response::Behaviour::new(PingCodec(), protocols.clone(), cfg.clone()) + request_response::cbor::Behaviour::::new(protocols.clone(), cfg.clone()) }); let peer1_id = *swarm1.local_peer_id(); - let mut swarm2 = - Swarm::new_ephemeral(|_| request_response::Behaviour::new(PingCodec(), protocols, cfg)); + let mut swarm2 = Swarm::new_ephemeral(|_| { + request_response::cbor::Behaviour::::new(protocols, cfg) + }); let peer2_id = *swarm2.local_peer_id(); - swarm1.listen().await; + swarm1.listen().with_memory_addr_external().await; swarm2.connect(&mut swarm1).await; swarm2.behaviour_mut().send_request(&peer1_id, ping.clone()); @@ -234,6 +242,7 @@ async fn emits_inbound_connection_closed_failure() { /// If the 
substream were not properly closed when dropped, the sender would instead /// run into a timeout waiting for the response. #[async_std::test] +#[cfg(feature = "cbor")] async fn emits_inbound_connection_closed_if_channel_is_dropped() { let ping = Ping("ping".to_string().into_bytes()); @@ -241,14 +250,15 @@ async fn emits_inbound_connection_closed_if_channel_is_dropped() { let cfg = request_response::Config::default(); let mut swarm1 = Swarm::new_ephemeral(|_| { - request_response::Behaviour::new(PingCodec(), protocols.clone(), cfg.clone()) + request_response::cbor::Behaviour::::new(protocols.clone(), cfg.clone()) }); let peer1_id = *swarm1.local_peer_id(); - let mut swarm2 = - Swarm::new_ephemeral(|_| request_response::Behaviour::new(PingCodec(), protocols, cfg)); + let mut swarm2 = Swarm::new_ephemeral(|_| { + request_response::cbor::Behaviour::::new(protocols, cfg) + }); let peer2_id = *swarm2.local_peer_id(); - swarm1.listen().await; + swarm1.listen().with_memory_addr_external().await; swarm2.connect(&mut swarm1).await; swarm2.behaviour_mut().send_request(&peer1_id, ping.clone()); @@ -281,81 +291,14 @@ async fn emits_inbound_connection_closed_if_channel_is_dropped() { e => panic!("unexpected event from peer 2: {e:?}"), }; - assert_eq!(error, request_response::OutboundFailure::ConnectionClosed); + assert!(matches!( + error, + request_response::OutboundFailure::Io(e) if e.kind() == io::ErrorKind::UnexpectedEof, + )); } // Simple Ping-Pong Protocol - -#[derive(Clone)] -struct PingCodec(); -#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] struct Ping(Vec); -#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] struct Pong(Vec); - -#[async_trait] -impl libp2p_request_response::Codec for PingCodec { - type Protocol = StreamProtocol; - type Request = Ping; - type Response = Pong; - - async fn read_request(&mut self, _: &StreamProtocol, io: &mut T) -> io::Result - where 
- T: AsyncRead + Unpin + Send, - { - let vec = read_length_prefixed(io, 1024).await?; - - if vec.is_empty() { - return Err(io::ErrorKind::UnexpectedEof.into()); - } - - Ok(Ping(vec)) - } - - async fn read_response( - &mut self, - _: &StreamProtocol, - io: &mut T, - ) -> io::Result - where - T: AsyncRead + Unpin + Send, - { - let vec = read_length_prefixed(io, 1024).await?; - - if vec.is_empty() { - return Err(io::ErrorKind::UnexpectedEof.into()); - } - - Ok(Pong(vec)) - } - - async fn write_request( - &mut self, - _: &StreamProtocol, - io: &mut T, - Ping(data): Ping, - ) -> io::Result<()> - where - T: AsyncWrite + Unpin + Send, - { - write_length_prefixed(io, data).await?; - io.close().await?; - - Ok(()) - } - - async fn write_response( - &mut self, - _: &StreamProtocol, - io: &mut T, - Pong(data): Pong, - ) -> io::Result<()> - where - T: AsyncWrite + Unpin + Send, - { - write_length_prefixed(io, data).await?; - io.close().await?; - - Ok(()) - } -} diff --git a/protocols/upnp/CHANGELOG.md b/protocols/upnp/CHANGELOG.md new file mode 100644 index 00000000000..806ed2e11d5 --- /dev/null +++ b/protocols/upnp/CHANGELOG.md @@ -0,0 +1,14 @@ +## 0.2.0 + + +## 0.1.1 + +- Fix high CPU usage due to repeated generation of failure events. + See [PR 4569](https://github.com/libp2p/rust-libp2p/pull/4569). + +- Fix port mapping protocol used for a UDP multiaddress. + See [PR 4542](https://github.com/libp2p/rust-libp2p/pull/4542). 
+ +## 0.1.0 + +- Initial version diff --git a/transports/wasm-ext/Cargo.toml b/protocols/upnp/Cargo.toml similarity index 53% rename from transports/wasm-ext/Cargo.toml rename to protocols/upnp/Cargo.toml index 98fc25362c4..2118dd4a728 100644 --- a/transports/wasm-ext/Cargo.toml +++ b/protocols/upnp/Cargo.toml @@ -1,27 +1,32 @@ [package] -name = "libp2p-wasm-ext" +name = "libp2p-upnp" edition = "2021" -rust-version = { workspace = true } -description = "Allows passing in an external transport in a WASM environment" -version = "0.40.0" -authors = ["Pierre Krieger "] +rust-version = "1.60.0" +description = "UPnP support for libp2p transports" +version = "0.2.0" license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" keywords = ["peer-to-peer", "libp2p", "networking"] categories = ["network-programming", "asynchronous"] +publish = true [dependencies] -futures = "0.3.28" -js-sys = "0.3.61" +futures = "0.3.30" +futures-timer = "3.0.2" +igd-next = "0.14.2" libp2p-core = { workspace = true } -parity-send-wrapper = "0.1.0" -wasm-bindgen = "0.2.42" -wasm-bindgen-futures = "0.4.34" +libp2p-swarm = { workspace = true } +tokio = { version = "1.35", default-features = false, features = ["rt"], optional = true } +tracing = "0.1.37" +void = "1.0.2" [features] -websocket = [] +tokio = ["igd-next/aio_tokio", "dep:tokio"] -# Passing arguments to the docsrs builder in order to properly document cfg's. +[lints] +workspace = true + +# Passing arguments to the docsrs builder in order to properly document cfg's. # More information: https://docs.rs/about/builds#cross-compiling [package.metadata.docs.rs] all-features = true diff --git a/protocols/upnp/src/behaviour.rs b/protocols/upnp/src/behaviour.rs new file mode 100644 index 00000000000..a94ef9526dd --- /dev/null +++ b/protocols/upnp/src/behaviour.rs @@ -0,0 +1,547 @@ +// Copyright 2023 Protocol Labs. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] + +use std::{ + borrow::Borrow, + collections::{HashMap, VecDeque}, + error::Error, + hash::{Hash, Hasher}, + net::{self, IpAddr, SocketAddr, SocketAddrV4}, + ops::{Deref, DerefMut}, + pin::Pin, + task::{Context, Poll}, + time::Duration, +}; + +use crate::tokio::{is_addr_global, Gateway}; +use futures::{channel::oneshot, Future, StreamExt}; +use futures_timer::Delay; +use igd_next::PortMappingProtocol; +use libp2p_core::{multiaddr, transport::ListenerId, Endpoint, Multiaddr}; +use libp2p_swarm::{ + derive_prelude::PeerId, dummy, ConnectionDenied, ConnectionId, ExpiredListenAddr, FromSwarm, + NetworkBehaviour, NewListenAddr, ToSwarm, +}; + +/// The duration in seconds of a port mapping on the gateway. +const MAPPING_DURATION: u32 = 3600; + +/// Renew the Mapping every half of `MAPPING_DURATION` to avoid the port being unmapped. 
+const MAPPING_TIMEOUT: u64 = MAPPING_DURATION as u64 / 2; + +/// A [`Gateway`] Request. +#[derive(Debug)] +pub(crate) enum GatewayRequest { + AddMapping { mapping: Mapping, duration: u32 }, + RemoveMapping(Mapping), +} + +/// A [`Gateway`] event. +#[derive(Debug)] +pub(crate) enum GatewayEvent { + /// Port was successfully mapped. + Mapped(Mapping), + /// There was a failure mapping port. + MapFailure(Mapping, Box), + /// Port was successfully removed. + Removed(Mapping), + /// There was a failure removing the mapped port. + RemovalFailure(Mapping, Box), +} + +/// Mapping of a Protocol and Port on the gateway. +#[derive(Debug, Clone)] +pub(crate) struct Mapping { + pub(crate) listener_id: ListenerId, + pub(crate) protocol: PortMappingProtocol, + pub(crate) multiaddr: Multiaddr, + pub(crate) internal_addr: SocketAddr, +} + +impl Mapping { + /// Given the input gateway address, calculate the + /// open external `Multiaddr`. + fn external_addr(&self, gateway_addr: IpAddr) -> Multiaddr { + let addr = match gateway_addr { + net::IpAddr::V4(ip) => multiaddr::Protocol::Ip4(ip), + net::IpAddr::V6(ip) => multiaddr::Protocol::Ip6(ip), + }; + self.multiaddr + .replace(0, |_| Some(addr)) + .expect("multiaddr should be valid") + } +} + +impl Hash for Mapping { + fn hash(&self, state: &mut H) { + self.listener_id.hash(state); + } +} + +impl PartialEq for Mapping { + fn eq(&self, other: &Self) -> bool { + self.listener_id == other.listener_id + } +} + +impl Eq for Mapping {} + +impl Borrow for Mapping { + fn borrow(&self) -> &ListenerId { + &self.listener_id + } +} + +/// Current state of a [`Mapping`]. +#[derive(Debug)] +enum MappingState { + /// Port mapping is inactive, will be requested or re-requested on the next iteration. + Inactive, + /// Port mapping/removal has been requested on the gateway. + Pending, + /// Port mapping is active with the inner timeout. + Active(Delay), + /// Port mapping failed, we will try again. 
+ Failed, +} + +/// Current state of the UPnP [`Gateway`]. +enum GatewayState { + Searching(oneshot::Receiver>>), + Available(Gateway), + GatewayNotFound, + NonRoutableGateway(IpAddr), +} + +/// The event produced by `Behaviour`. +#[derive(Debug)] +pub enum Event { + /// The multiaddress is reachable externally. + NewExternalAddr(Multiaddr), + /// The renewal of the multiaddress on the gateway failed. + ExpiredExternalAddr(Multiaddr), + /// The IGD gateway was not found. + GatewayNotFound, + /// The Gateway is not exposed directly to the public network. + NonRoutableGateway, +} + +/// A list of port mappings and its state. +#[derive(Debug, Default)] +struct MappingList(HashMap); + +impl Deref for MappingList { + type Target = HashMap; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl DerefMut for MappingList { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +impl MappingList { + /// Queue for renewal the current mapped ports on the `Gateway` that are expiring, + /// and try to activate the inactive. 
+ fn renew(&mut self, gateway: &mut Gateway, cx: &mut Context<'_>) { + for (mapping, state) in self.iter_mut() { + match state { + MappingState::Inactive | MappingState::Failed => { + let duration = MAPPING_DURATION; + if let Err(err) = gateway.sender.try_send(GatewayRequest::AddMapping { + mapping: mapping.clone(), + duration, + }) { + tracing::debug!( + multiaddress=%mapping.multiaddr, + "could not request port mapping for multiaddress on the gateway: {}", + err + ); + } + *state = MappingState::Pending; + } + MappingState::Active(timeout) => { + if Pin::new(timeout).poll(cx).is_ready() { + let duration = MAPPING_DURATION; + if let Err(err) = gateway.sender.try_send(GatewayRequest::AddMapping { + mapping: mapping.clone(), + duration, + }) { + tracing::debug!( + multiaddress=%mapping.multiaddr, + "could not request port mapping for multiaddress on the gateway: {}", + err + ); + } + } + } + MappingState::Pending => {} + } + } + } +} + +/// A [`NetworkBehaviour`] for UPnP port mapping. Automatically tries to map the external port +/// to an internal address on the gateway on a [`FromSwarm::NewListenAddr`]. +pub struct Behaviour { + /// UPnP interface state. + state: GatewayState, + + /// List of port mappings. + mappings: MappingList, + + /// Pending behaviour events to be emitted. 
+ pending_events: VecDeque, +} + +impl Default for Behaviour { + fn default() -> Self { + Self { + state: GatewayState::Searching(crate::tokio::search_gateway()), + mappings: Default::default(), + pending_events: VecDeque::new(), + } + } +} + +impl NetworkBehaviour for Behaviour { + type ConnectionHandler = dummy::ConnectionHandler; + + type ToSwarm = Event; + + fn handle_established_inbound_connection( + &mut self, + _connection_id: ConnectionId, + _peer: PeerId, + _local_addr: &Multiaddr, + _remote_addr: &Multiaddr, + ) -> Result, ConnectionDenied> { + Ok(dummy::ConnectionHandler) + } + + fn handle_established_outbound_connection( + &mut self, + _connection_id: ConnectionId, + _peer: PeerId, + _addr: &Multiaddr, + _role_override: Endpoint, + ) -> Result, libp2p_swarm::ConnectionDenied> { + Ok(dummy::ConnectionHandler) + } + + fn on_swarm_event(&mut self, event: FromSwarm) { + match event { + FromSwarm::NewListenAddr(NewListenAddr { + listener_id, + addr: multiaddr, + }) => { + let (addr, protocol) = match multiaddr_to_socketaddr_protocol(multiaddr.clone()) { + Ok(addr_port) => addr_port, + Err(()) => { + tracing::debug!("multiaddress not supported for UPnP {multiaddr}"); + return; + } + }; + + if let Some((mapping, _state)) = self + .mappings + .iter() + .find(|(mapping, _state)| mapping.internal_addr.port() == addr.port()) + { + tracing::debug!( + multiaddress=%multiaddr, + mapped_multiaddress=%mapping.multiaddr, + "port from multiaddress is already being mapped" + ); + return; + } + + match &mut self.state { + GatewayState::Searching(_) => { + // As the gateway is not yet available we add the mapping with `MappingState::Inactive` + // so that when and if it becomes available we map it. 
+ self.mappings.insert( + Mapping { + listener_id, + protocol, + internal_addr: addr, + multiaddr: multiaddr.clone(), + }, + MappingState::Inactive, + ); + } + GatewayState::Available(ref mut gateway) => { + let mapping = Mapping { + listener_id, + protocol, + internal_addr: addr, + multiaddr: multiaddr.clone(), + }; + + let duration = MAPPING_DURATION; + if let Err(err) = gateway.sender.try_send(GatewayRequest::AddMapping { + mapping: mapping.clone(), + duration, + }) { + tracing::debug!( + multiaddress=%mapping.multiaddr, + "could not request port mapping for multiaddress on the gateway: {}", + err + ); + } + + self.mappings.insert(mapping, MappingState::Pending); + } + GatewayState::GatewayNotFound => { + tracing::debug!( + multiaddres=%multiaddr, + "network gateway not found, UPnP port mapping of multiaddres discarded" + ); + } + GatewayState::NonRoutableGateway(addr) => { + tracing::debug!( + multiaddress=%multiaddr, + network_gateway_ip=%addr, + "the network gateway is not exposed to the public network. 
/ + UPnP port mapping of multiaddress discarded" + ); + } + }; + } + FromSwarm::ExpiredListenAddr(ExpiredListenAddr { + listener_id, + addr: _addr, + }) => { + if let GatewayState::Available(ref mut gateway) = &mut self.state { + if let Some((mapping, _state)) = self.mappings.remove_entry(&listener_id) { + if let Err(err) = gateway + .sender + .try_send(GatewayRequest::RemoveMapping(mapping.clone())) + { + tracing::debug!( + multiaddress=%mapping.multiaddr, + "could not request port removal for multiaddress on the gateway: {}", + err + ); + } + self.mappings.insert(mapping, MappingState::Pending); + } + } + } + _ => {} + } + } + + fn on_connection_handler_event( + &mut self, + _peer_id: PeerId, + _connection_id: ConnectionId, + event: libp2p_swarm::THandlerOutEvent, + ) { + void::unreachable(event) + } + + #[tracing::instrument(level = "trace", name = "NetworkBehaviour::poll", skip(self, cx))] + fn poll( + &mut self, + cx: &mut Context<'_>, + ) -> Poll>> { + // If there are pending addresses to be emitted we emit them. + if let Some(event) = self.pending_events.pop_front() { + return Poll::Ready(ToSwarm::GenerateEvent(event)); + } + + // Loop through the gateway state so that if it changes from `Searching` to `Available` + // we poll the pending mapping requests. 
+ loop { + match self.state { + GatewayState::Searching(ref mut fut) => match Pin::new(fut).poll(cx) { + Poll::Ready(result) => { + match result.expect("sender shouldn't have been dropped") { + Ok(gateway) => { + if !is_addr_global(gateway.external_addr) { + self.state = + GatewayState::NonRoutableGateway(gateway.external_addr); + tracing::debug!( + gateway_address=%gateway.external_addr, + "the gateway is not routable" + ); + return Poll::Ready(ToSwarm::GenerateEvent( + Event::NonRoutableGateway, + )); + } + self.state = GatewayState::Available(gateway); + } + Err(err) => { + tracing::debug!("could not find gateway: {err}"); + self.state = GatewayState::GatewayNotFound; + return Poll::Ready(ToSwarm::GenerateEvent(Event::GatewayNotFound)); + } + } + } + Poll::Pending => return Poll::Pending, + }, + GatewayState::Available(ref mut gateway) => { + // Poll pending mapping requests. + if let Poll::Ready(Some(result)) = gateway.receiver.poll_next_unpin(cx) { + match result { + GatewayEvent::Mapped(mapping) => { + let new_state = MappingState::Active(Delay::new( + Duration::from_secs(MAPPING_TIMEOUT), + )); + + match self + .mappings + .insert(mapping.clone(), new_state) + .expect("mapping should exist") + { + MappingState::Pending => { + let external_multiaddr = + mapping.external_addr(gateway.external_addr); + self.pending_events.push_back(Event::NewExternalAddr( + external_multiaddr.clone(), + )); + tracing::debug!( + address=%mapping.internal_addr, + protocol=%mapping.protocol, + "successfully mapped UPnP for protocol" + ); + return Poll::Ready(ToSwarm::ExternalAddrConfirmed( + external_multiaddr, + )); + } + MappingState::Active(_) => { + tracing::debug!( + address=%mapping.internal_addr, + protocol=%mapping.protocol, + "successfully renewed UPnP mapping for protocol" + ); + } + _ => unreachable!(), + } + } + GatewayEvent::MapFailure(mapping, err) => { + match self + .mappings + .insert(mapping.clone(), MappingState::Failed) + .expect("mapping should exist") + { + 
MappingState::Active(_) => { + tracing::debug!( + address=%mapping.internal_addr, + protocol=%mapping.protocol, + "failed to remap UPnP mapped for protocol: {err}" + ); + let external_multiaddr = + mapping.external_addr(gateway.external_addr); + self.pending_events.push_back(Event::ExpiredExternalAddr( + external_multiaddr.clone(), + )); + return Poll::Ready(ToSwarm::ExternalAddrExpired( + external_multiaddr, + )); + } + MappingState::Pending => { + tracing::debug!( + address=%mapping.internal_addr, + protocol=%mapping.protocol, + "failed to map UPnP mapped for protocol: {err}" + ); + } + _ => { + unreachable!() + } + } + } + GatewayEvent::Removed(mapping) => { + tracing::debug!( + address=%mapping.internal_addr, + protocol=%mapping.protocol, + "successfully removed UPnP mapping for protocol" + ); + self.mappings + .remove(&mapping) + .expect("mapping should exist"); + } + GatewayEvent::RemovalFailure(mapping, err) => { + tracing::debug!( + address=%mapping.internal_addr, + protocol=%mapping.protocol, + "could not remove UPnP mapping for protocol: {err}" + ); + if let Err(err) = gateway + .sender + .try_send(GatewayRequest::RemoveMapping(mapping.clone())) + { + tracing::debug!( + multiaddress=%mapping.multiaddr, + "could not request port removal for multiaddress on the gateway: {}", + err + ); + } + } + } + } + + // Renew expired and request inactive mappings. + self.mappings.renew(gateway, cx); + return Poll::Pending; + } + _ => return Poll::Pending, + } + } + } +} + +/// Extracts a [`SocketAddrV4`] and [`PortMappingProtocol`] from a given [`Multiaddr`]. +/// +/// Fails if the given [`Multiaddr`] does not begin with an IP +/// protocol encapsulating a TCP or UDP port. +fn multiaddr_to_socketaddr_protocol( + addr: Multiaddr, +) -> Result<(SocketAddr, PortMappingProtocol), ()> { + let mut iter = addr.into_iter(); + match iter.next() { + // Idg only supports Ipv4. 
+ Some(multiaddr::Protocol::Ip4(ipv4)) if ipv4.is_private() => match iter.next() { + Some(multiaddr::Protocol::Tcp(port)) => { + return Ok(( + SocketAddr::V4(SocketAddrV4::new(ipv4, port)), + PortMappingProtocol::TCP, + )); + } + Some(multiaddr::Protocol::Udp(port)) => { + return Ok(( + SocketAddr::V4(SocketAddrV4::new(ipv4, port)), + PortMappingProtocol::UDP, + )); + } + _ => {} + }, + _ => {} + } + Err(()) +} diff --git a/protocols/upnp/src/lib.rs b/protocols/upnp/src/lib.rs new file mode 100644 index 00000000000..8a74d7e8f63 --- /dev/null +++ b/protocols/upnp/src/lib.rs @@ -0,0 +1,37 @@ +// Copyright 2023 Protocol Labs. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +//! Implementation of UPnP port mapping for libp2p. +//! +//! This crate provides a `tokio::Behaviour` which +//! implements the [`libp2p_swarm::NetworkBehaviour`] trait. +//! This struct will automatically try to map the ports externally to internal +//! 
addresses on the gateway. +//! + +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] + +#[cfg(feature = "tokio")] +mod behaviour; +#[cfg(feature = "tokio")] +pub mod tokio; + +#[cfg(feature = "tokio")] +pub use behaviour::Event; diff --git a/protocols/upnp/src/tokio.rs b/protocols/upnp/src/tokio.rs new file mode 100644 index 00000000000..c6a40182b33 --- /dev/null +++ b/protocols/upnp/src/tokio.rs @@ -0,0 +1,169 @@ +// Copyright 2023 Protocol Labs. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +use std::{error::Error, net::IpAddr}; + +use crate::behaviour::{GatewayEvent, GatewayRequest}; +use futures::{ + channel::{mpsc, oneshot}, + SinkExt, StreamExt, +}; +use igd_next::SearchOptions; + +pub use crate::behaviour::Behaviour; + +//TODO: remove when `IpAddr::is_global` stabilizes. 
+pub(crate) fn is_addr_global(addr: IpAddr) -> bool { + match addr { + IpAddr::V4(ip) => { + !(ip.octets()[0] == 0 // "This network" + || ip.is_private() + // code for Ipv4::is_shared() + || (ip.octets()[0] == 100 && (ip.octets()[1] & 0b1100_0000 == 0b0100_0000)) + || ip.is_loopback() + || ip.is_link_local() + // addresses reserved for future protocols (`192.0.0.0/24`) + ||(ip.octets()[0] == 192 && ip.octets()[1] == 0 && ip.octets()[2] == 0) + || ip.is_documentation() + // code for Ipv4::is_benchmarking() + || (ip.octets()[0] == 198 && (ip.octets()[1] & 0xfe) == 18) + // code for Ipv4::is_reserved() + || (ip.octets()[0] & 240 == 240 && !ip.is_broadcast()) + || ip.is_broadcast()) + } + IpAddr::V6(ip) => { + !(ip.is_unspecified() + || ip.is_loopback() + // IPv4-mapped Address (`::ffff:0:0/96`) + || matches!(ip.segments(), [0, 0, 0, 0, 0, 0xffff, _, _]) + // IPv4-IPv6 Translat. (`64:ff9b:1::/48`) + || matches!(ip.segments(), [0x64, 0xff9b, 1, _, _, _, _, _]) + // Discard-Only Address Block (`100::/64`) + || matches!(ip.segments(), [0x100, 0, 0, 0, _, _, _, _]) + // IETF Protocol Assignments (`2001::/23`) + || (matches!(ip.segments(), [0x2001, b, _, _, _, _, _, _] if b < 0x200) + && !( + // Port Control Protocol Anycast (`2001:1::1`) + u128::from_be_bytes(ip.octets()) == 0x2001_0001_0000_0000_0000_0000_0000_0001 + // Traversal Using Relays around NAT Anycast (`2001:1::2`) + || u128::from_be_bytes(ip.octets()) == 0x2001_0001_0000_0000_0000_0000_0000_0002 + // AMT (`2001:3::/32`) + || matches!(ip.segments(), [0x2001, 3, _, _, _, _, _, _]) + // AS112-v6 (`2001:4:112::/48`) + || matches!(ip.segments(), [0x2001, 4, 0x112, _, _, _, _, _]) + // ORCHIDv2 (`2001:20::/28`) + || matches!(ip.segments(), [0x2001, b, _, _, _, _, _, _] if (0x20..=0x2F).contains(&b)) + )) + // code for Ipv4::is_documentation() + || (ip.segments()[0] == 0x2001) && (ip.segments()[1] == 0xdb8) + // code for Ipv4::is_unique_local() + || (ip.segments()[0] & 0xfe00) == 0xfc00 + // code for 
Ipv4::is_unicast_link_local() + || (ip.segments()[0] & 0xffc0) == 0xfe80) + } + } +} + +/// Interface that interacts with the inner gateway by messages, +/// `GatewayRequest`s and `GatewayEvent`s. +#[derive(Debug)] +pub(crate) struct Gateway { + pub(crate) sender: mpsc::Sender, + pub(crate) receiver: mpsc::Receiver, + pub(crate) external_addr: IpAddr, +} + +pub(crate) fn search_gateway() -> oneshot::Receiver>> { + let (search_result_sender, search_result_receiver) = oneshot::channel(); + + let (events_sender, mut task_receiver) = mpsc::channel(10); + let (mut task_sender, events_queue) = mpsc::channel(0); + + tokio::spawn(async move { + let gateway = match igd_next::aio::tokio::search_gateway(SearchOptions::default()).await { + Ok(gateway) => gateway, + Err(err) => { + search_result_sender + .send(Err(err.into())) + .expect("receiver shouldn't have been dropped"); + return; + } + }; + + let external_addr = match gateway.get_external_ip().await { + Ok(addr) => addr, + Err(err) => { + search_result_sender + .send(Err(err.into())) + .expect("receiver shouldn't have been dropped"); + return; + } + }; + + search_result_sender + .send(Ok(Gateway { + sender: events_sender, + receiver: events_queue, + external_addr, + })) + .expect("receiver shouldn't have been dropped"); + + loop { + // The task sender has dropped so we can return. 
+ let Some(req) = task_receiver.next().await else { + return; + }; + let event = match req { + GatewayRequest::AddMapping { mapping, duration } => { + let gateway = gateway.clone(); + match gateway + .add_port( + mapping.protocol, + mapping.internal_addr.port(), + mapping.internal_addr, + duration, + "rust-libp2p mapping", + ) + .await + { + Ok(()) => GatewayEvent::Mapped(mapping), + Err(err) => GatewayEvent::MapFailure(mapping, err.into()), + } + } + GatewayRequest::RemoveMapping(mapping) => { + let gateway = gateway.clone(); + match gateway + .remove_port(mapping.protocol, mapping.internal_addr.port()) + .await + { + Ok(()) => GatewayEvent::Removed(mapping), + Err(err) => GatewayEvent::RemovalFailure(mapping, err.into()), + } + } + }; + task_sender + .send(event) + .await + .expect("receiver should be available"); + } + }); + + search_result_receiver +} diff --git a/scripts/add-changelog-header.sh b/scripts/add-changelog-header.sh new file mode 100755 index 00000000000..4717940c8d7 --- /dev/null +++ b/scripts/add-changelog-header.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +header=$(head -n 1 "$CRATE_ROOT/CHANGELOG.md") +prefix="## $NEW_VERSION" + +if [[ $header == $prefix* ]]; then + exit +fi + +sed -i "1i ## ${NEW_VERSION}\n\n" "$CRATE_ROOT/CHANGELOG.md" diff --git a/scripts/build-interop-image.sh b/scripts/build-interop-image.sh new file mode 100755 index 00000000000..28a8db9188d --- /dev/null +++ b/scripts/build-interop-image.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +# This uses the same S3 cache as all test-plans images. Because we use `cargo-chef` in the Dockerfile, we have a layer available with all dependencies built. 
+ +CACHE_TO="" + +# If we have credentials, write to cache +if [[ -n "${AWS_SECRET_ACCESS_KEY}" ]]; then + CACHE_TO="--cache-to type=s3,mode=max,bucket=libp2p-by-tf-aws-bootstrap,region=us-east-1,prefix=buildCache,name=${FLAVOUR}-rust-libp2p-head" +fi + +docker buildx build \ + --load \ + $CACHE_TO \ + --cache-from type=s3,mode=max,bucket=libp2p-by-tf-aws-bootstrap,region=us-east-1,prefix=buildCache,name=${FLAVOUR}-rust-libp2p-head \ + -t ${FLAVOUR}-rust-libp2p-head \ + . \ + -f interop-tests/Dockerfile.${FLAVOUR} diff --git a/scripts/ensure-version-bump-and-changelog.sh b/scripts/ensure-version-bump-and-changelog.sh new file mode 100755 index 00000000000..a7a0992005a --- /dev/null +++ b/scripts/ensure-version-bump-and-changelog.sh @@ -0,0 +1,28 @@ +#!/bin/bash + +set -ex; + +MANIFEST_PATH=$(cargo metadata --format-version=1 --no-deps | jq -e -r '.packages[] | select(.name == "'"$CRATE"'") | .manifest_path') +DIR_TO_CRATE=$(dirname "$MANIFEST_PATH") + +MERGE_BASE=$(git merge-base "$HEAD_SHA" "$PR_BASE") # Find the merge base. This ensures we only diff what was actually added in the PR. + +SRC_DIFF_TO_BASE=$(git diff "$HEAD_SHA".."$MERGE_BASE" --name-status -- "$DIR_TO_CRATE/src" "$DIR_TO_CRATE/Cargo.toml") +CHANGELOG_DIFF=$(git diff "$HEAD_SHA".."$MERGE_BASE" --name-only -- "$DIR_TO_CRATE/CHANGELOG.md") + +# If the source files of this crate weren't touched in this PR, exit early. +if [ -z "$SRC_DIFF_TO_BASE" ]; then + exit 0; +fi + +# Code was touched, ensure changelog is updated too. +if [ -z "$CHANGELOG_DIFF" ]; then + echo "Files in $DIR_TO_CRATE have changed, please write a changelog entry in $DIR_TO_CRATE/CHANGELOG.md" + exit 1 +fi + +# Code was touched, ensure the version used in the manifest hasn't been released yet. +if git tag | grep -q "^$CRATE-v${CRATE_VERSION}$"; then + echo "v$CRATE_VERSION of '$CRATE' has already been released, please bump the version." 
+ exit 1 +fi diff --git a/scripts/list-external-contributors.sh b/scripts/list-external-contributors.sh new file mode 100755 index 00000000000..baf3ee032bf --- /dev/null +++ b/scripts/list-external-contributors.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +# Usage: ./scripts/list-external-contributors.sh + +set -e + +date_of_tag=$(git log -1 --format=%aI --date=iso-strict $1) +unique_authors=$(gh api "repos/libp2p/rust-libp2p/commits?since=$date_of_tag" --paginate -q '.[].author.login' | sort -u) +rust_libp2p_maintainers_team_members=$(gh api teams/6797340/members --paginate | jq -r '.[].login' | sort -u) + +echo "$unique_authors" | grep -vxF -f <(echo "$rust_libp2p_maintainers_team_members") | grep -vF "bot" | grep -vF "web-flow" diff --git a/swarm-derive/CHANGELOG.md b/swarm-derive/CHANGELOG.md index 9628e7ea777..74f35806eec 100644 --- a/swarm-derive/CHANGELOG.md +++ b/swarm-derive/CHANGELOG.md @@ -1,9 +1,30 @@ -## 0.33.0 - unreleased +## 0.34.2 + +- Restore support for generic constraints on behaviours combined with `out_event` generated by `NetworkBehaviour` where no where clause is used. + See [PR 5003](https://github.com/libp2p/rust-libp2p/pull/5003). + +## 0.34.1 + +- Always forward all variants of `FromSwarm`. + See [PR 4825](https://github.com/libp2p/rust-libp2p/pull/4825). + +## 0.34.0 + +- Adapt to interface changes in `libp2p-swarm`. + See [PR 4706](https://github.com/libp2p/rust-libp2p/pull/4706). +- Remove support for deprecated `#[behaviour(out_event = "...")]`. + The same functionality is available using `#[behaviour(to_swarm = "...")]`. + See [PR 4737](https://github.com/libp2p/rust-libp2p/pull/4737). + +## 0.33.0 
- Raise MSRV to 1.65. See [PR 3715]. +- Rename `out_event` to `to_swarm` and deprecate `out_event`. See [PR 3848].
+ [PR 3715]: https://github.com/libp2p/rust-libp2p/pull/3715 +[PR 3848]: https://github.com/libp2p/rust-libp2p/pull/3848 ## 0.32.0 diff --git a/swarm-derive/Cargo.toml b/swarm-derive/Cargo.toml index 8d31a389ae5..7740d57feb1 100644 --- a/swarm-derive/Cargo.toml +++ b/swarm-derive/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-swarm-derive" edition = "2021" rust-version = { workspace = true } description = "Procedural macros of libp2p-swarm" -version = "0.33.0" +version = "0.34.2" authors = ["Parity Technologies <admin@parity.io>"] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -16,7 +16,8 @@ proc-macro = true [dependencies] heck = "0.4" quote = "1.0" -syn = { version = "2.0.15", default-features = false, features = ["clone-impls", "derive", "parsing", "printing", "proc-macro"] } +syn = { version = "2.0.43", default-features = false, features = ["clone-impls", "derive", "parsing", "printing", "proc-macro"] } +proc-macro2 = "1.0" # Passing arguments to the docsrs builder in order to properly document cfg's. 
# More information: https://docs.rs/about/builds#cross-compiling @@ -24,3 +25,6 @@ syn = { version = "2.0.15", default-features = false, features = ["clone-impls", all-features = true rustdoc-args = ["--cfg", "docsrs"] rustc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true diff --git a/swarm-derive/src/lib.rs b/swarm-derive/src/lib.rs index 04a32cf91a7..2e7daf7acc4 100644 --- a/swarm-derive/src/lib.rs +++ b/swarm-derive/src/lib.rs @@ -21,43 +21,46 @@ #![recursion_limit = "256"] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +mod syn_ext; + +use crate::syn_ext::RequireStrLit; use heck::ToUpperCamelCase; use proc_macro::TokenStream; use quote::quote; use syn::punctuated::Punctuated; -use syn::{ - parse_macro_input, Data, DataStruct, DeriveInput, Expr, ExprLit, Lit, Meta, MetaNameValue, - Token, -}; +use syn::{parse_macro_input, Data, DataStruct, DeriveInput, Meta, Token}; /// Generates a delegating `NetworkBehaviour` implementation for the struct this is used for. See /// the trait documentation for better description. #[proc_macro_derive(NetworkBehaviour, attributes(behaviour))] pub fn hello_macro_derive(input: TokenStream) -> TokenStream { let ast = parse_macro_input!(input as DeriveInput); - build(&ast) + build(&ast).unwrap_or_else(|e| e.to_compile_error().into()) } /// The actual implementation. 
-fn build(ast: &DeriveInput) -> TokenStream { +fn build(ast: &DeriveInput) -> syn::Result<TokenStream> { match ast.data { Data::Struct(ref s) => build_struct(ast, s), - Data::Enum(_) => unimplemented!("Deriving NetworkBehaviour is not implemented for enums"), - Data::Union(_) => unimplemented!("Deriving NetworkBehaviour is not implemented for unions"), + Data::Enum(_) => Err(syn::Error::new_spanned( + ast, + "Cannot derive `NetworkBehaviour` on enums", + )), + Data::Union(_) => Err(syn::Error::new_spanned( + ast, + "Cannot derive `NetworkBehaviour` on union", + )), } } /// The version for structs -fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> TokenStream { +fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> syn::Result<TokenStream> { let name = &ast.ident; let (_, ty_generics, where_clause) = ast.generics.split_for_impl(); let BehaviourAttributes { prelude_path, user_specified_out_event, - } = match parse_attributes(ast) { - Ok(attrs) => attrs, - Err(e) => return e, - }; + } = parse_attributes(ast)?; let multiaddr = quote! { #prelude_path::Multiaddr }; let trait_to_impl = quote! { #prelude_path::NetworkBehaviour }; @@ -67,20 +70,7 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> TokenStream { let proto_select_ident = quote! { #prelude_path::ConnectionHandlerSelect }; let peer_id = quote! { #prelude_path::PeerId }; let connection_id = quote! { #prelude_path::ConnectionId }; - let poll_parameters = quote! { #prelude_path::PollParameters }; let from_swarm = quote! { #prelude_path::FromSwarm }; - let connection_established = quote! { #prelude_path::ConnectionEstablished }; - let address_change = quote! { #prelude_path::AddressChange }; - let connection_closed = quote! { #prelude_path::ConnectionClosed }; - let dial_failure = quote! { #prelude_path::DialFailure }; - let listen_failure = quote! { #prelude_path::ListenFailure }; - let new_listener = quote! { #prelude_path::NewListener }; - let new_listen_addr = quote!
{ #prelude_path::NewListenAddr }; - let expired_listen_addr = quote! { #prelude_path::ExpiredListenAddr }; - let new_external_addr = quote! { #prelude_path::NewExternalAddr }; - let expired_external_addr = quote! { #prelude_path::ExpiredExternalAddr }; - let listener_error = quote! { #prelude_path::ListenerError }; - let listener_closed = quote! { #prelude_path::ListenerClosed }; let t_handler = quote! { #prelude_path::THandler }; let t_handler_in_event = quote! { #prelude_path::THandlerInEvent }; let t_handler_out_event = quote! { #prelude_path::THandlerOutEvent }; @@ -96,11 +86,11 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> TokenStream { }; let (out_event_name, out_event_definition, out_event_from_clauses) = { - // If we find a `#[behaviour(out_event = "Foo")]` attribute on the - // struct, we set `Foo` as the out event. If not, the `OutEvent` is + // If we find a `#[behaviour(to_swarm = "Foo")]` attribute on the + // struct, we set `Foo` as the out event. If not, the `ToSwarm` is // generated. match user_specified_out_event { - // User provided `OutEvent`. + // User provided `ToSwarm`. Some(name) => { let definition = None; let from_clauses = data_struct @@ -108,12 +98,12 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> TokenStream { .iter() .map(|field| { let ty = &field.ty; - quote! {#name: From< <#ty as #trait_to_impl>::OutEvent >} + quote! {#name: From< <#ty as #trait_to_impl>::ToSwarm >} }) .collect::>(); (name, definition, from_clauses) } - // User did not provide `OutEvent`. Generate it. + // User did not provide `ToSwarm`. Generate it. None => { let enum_name_str = ast.ident.to_string() + "Event"; let enum_name: syn::Type = @@ -135,7 +125,7 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> TokenStream { let enum_variants = fields .clone() - .map(|(variant, ty)| quote! {#variant(<#ty as #trait_to_impl>::OutEvent)}); + .map(|(variant, ty)| quote! 
{#variant(<#ty as #trait_to_impl>::ToSwarm)}); let visibility = &ast.vis; @@ -146,7 +136,7 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> TokenStream { let additional_debug = fields .clone() - .map(|(_variant, ty)| quote! { <#ty as #trait_to_impl>::OutEvent : ::core::fmt::Debug }) + .map(|(_variant, ty)| quote! { <#ty as #trait_to_impl>::ToSwarm : ::core::fmt::Debug }) .collect::>(); let where_clause = { @@ -168,11 +158,11 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> TokenStream { .map(|where_clause| quote! {#where_clause, #(#additional_debug),*}); let match_variants = fields.map(|(variant, _ty)| variant); - let msg = format!("`NetworkBehaviour::OutEvent` produced by {name}."); + let msg = format!("`NetworkBehaviour::ToSwarm` produced by {name}."); Some(quote! { #[doc = #msg] - #visibility enum #enum_name #ty_generics + #visibility enum #enum_name #impl_generics #where_clause { #(#enum_variants),* @@ -218,308 +208,18 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> TokenStream { } }; - // Build the list of statements to put in the body of `on_swarm_event()` - // for the `FromSwarm::ConnectionEstablished` variant. - let on_connection_established_stmts = { - data_struct - .fields - .iter() - .enumerate() - .map(|(field_n, field)| match field.ident { - Some(ref i) => quote! { - self.#i.on_swarm_event(#from_swarm::ConnectionEstablished(#connection_established { - peer_id, - connection_id, - endpoint, - failed_addresses, - other_established, - })); - }, - None => quote! { - self.#field_n.on_swarm_event(#from_swarm::ConnectionEstablished(#connection_established { - peer_id, - connection_id, - endpoint, - failed_addresses, - other_established, - })); - }, - }) - }; - - // Build the list of statements to put in the body of `on_swarm_event()` - // for the `FromSwarm::AddressChange variant`. 
- let on_address_change_stmts = { - data_struct - .fields - .iter() - .enumerate() - .map(|(field_n, field)| match field.ident { - Some(ref i) => quote! { - self.#i.on_swarm_event(#from_swarm::AddressChange(#address_change { - peer_id, - connection_id, - old, - new, - })); - }, - None => quote! { - self.#field_n.on_swarm_event(#from_swarm::AddressChange(#address_change { - peer_id, - connection_id, - old, - new, - })); - }, - }) - }; - - // Build the list of statements to put in the body of `on_swarm_event()` - // for the `FromSwarm::ConnectionClosed` variant. - let on_connection_closed_stmts = { - data_struct - .fields - .iter() - .enumerate() - // The outmost handler belongs to the last behaviour. - .rev() - .enumerate() - .map(|(enum_n, (field_n, field))| { - let handler = if field_n == 0 { - // Given that the iterator is reversed, this is the innermost handler only. - quote! { let handler = handlers } - } else { - quote! { - let (handlers, handler) = handlers.into_inner() - } - }; - let inject = match field.ident { - Some(ref i) => quote! { - self.#i.on_swarm_event(#from_swarm::ConnectionClosed(#connection_closed { - peer_id, - connection_id, - endpoint, - handler, - remaining_established, - })); - }, - None => quote! { - self.#enum_n.on_swarm_event(#from_swarm::ConnectionClosed(#connection_closed { - peer_id, - connection_id, - endpoint, - handler, - remaining_established, - })); - }, - }; - - quote! { - #handler; - #inject; - } - }) - }; - - // Build the list of statements to put in the body of `on_swarm_event()` - // for the `FromSwarm::DialFailure` variant. - let on_dial_failure_stmts = data_struct - .fields - .iter() - .enumerate() - .map(|(enum_n, field)| match field.ident { - Some(ref i) => quote! { - self.#i.on_swarm_event(#from_swarm::DialFailure(#dial_failure { - peer_id, - connection_id, - error, - })); - }, - None => quote! 
{ - self.#enum_n.on_swarm_event(#from_swarm::DialFailure(#dial_failure { - peer_id, - connection_id, - error, - })); - }, - }); - - // Build the list of statements to put in the body of `on_swarm_event()` - // for the `FromSwarm::ListenFailure` variant. - let on_listen_failure_stmts = data_struct - .fields - .iter() - .enumerate() - .map(|(enum_n, field)| match field.ident { - Some(ref i) => quote! { - self.#i.on_swarm_event(#from_swarm::ListenFailure(#listen_failure { - local_addr, - send_back_addr, - connection_id, - error - })); - }, - None => quote! { - self.#enum_n.on_swarm_event(#from_swarm::ListenFailure(#listen_failure { - local_addr, - send_back_addr, - connection_id, - error - })); - }, - }); - - // Build the list of statements to put in the body of `on_swarm_event()` - // for the `FromSwarm::NewListener` variant. - let on_new_listener_stmts = { - data_struct - .fields - .iter() - .enumerate() - .map(|(field_n, field)| match field.ident { - Some(ref i) => quote! { - self.#i.on_swarm_event(#from_swarm::NewListener(#new_listener { - listener_id, - })); - }, - None => quote! { - self.#field_n.on_swarm_event(#from_swarm::NewListener(#new_listener { - listener_id, - })); - }, - }) - }; - - // Build the list of statements to put in the body of `on_swarm_event()` - // for the `FromSwarm::NewListenAddr` variant. - let on_new_listen_addr_stmts = { - data_struct - .fields - .iter() - .enumerate() - .map(|(field_n, field)| match field.ident { - Some(ref i) => quote! { - self.#i.on_swarm_event(#from_swarm::NewListenAddr(#new_listen_addr { - listener_id, - addr, - })); - }, - None => quote! { - self.#field_n.on_swarm_event(#from_swarm::NewListenAddr(#new_listen_addr { - listener_id, - addr, - })); - }, - }) - }; - - // Build the list of statements to put in the body of `on_swarm_event()` - // for the `FromSwarm::ExpiredListenAddr` variant. - let on_expired_listen_addr_stmts = { + // Build the list of statements to put in the body of `on_swarm_event()`. 
+ let on_swarm_event_stmts = { data_struct .fields .iter() .enumerate() .map(|(field_n, field)| match field.ident { Some(ref i) => quote! { - self.#i.on_swarm_event(#from_swarm::ExpiredListenAddr(#expired_listen_addr { - listener_id, - addr, - })); + self.#i.on_swarm_event(event); }, None => quote! { - self.#field_n.on_swarm_event(#from_swarm::ExpiredListenAddr(#expired_listen_addr { - listener_id, - addr, - })); - }, - }) - }; - - // Build the list of statements to put in the body of `on_swarm_event()` - // for the `FromSwarm::NewExternalAddr` variant. - let on_new_external_addr_stmts = { - data_struct - .fields - .iter() - .enumerate() - .map(|(field_n, field)| match field.ident { - Some(ref i) => quote! { - self.#i.on_swarm_event(#from_swarm::NewExternalAddr(#new_external_addr { - addr, - })); - }, - None => quote! { - self.#field_n.on_swarm_event(#from_swarm::NewExternalAddr(#new_external_addr { - addr, - })); - }, - }) - }; - - // Build the list of statements to put in the body of `on_swarm_event()` - // for the `FromSwarm::ExpiredExternalAddr` variant. - let on_expired_external_addr_stmts = { - data_struct - .fields - .iter() - .enumerate() - .map(|(field_n, field)| match field.ident { - Some(ref i) => quote! { - self.#i.on_swarm_event(#from_swarm::ExpiredExternalAddr(#expired_external_addr { - addr, - })); - }, - None => quote! { - self.#field_n.on_swarm_event(#from_swarm::ExpiredExternalAddr(#expired_external_addr { - addr, - })); - }, - }) - }; - - // Build the list of statements to put in the body of `on_swarm_event()` - // for the `FromSwarm::ListenerError` variant. - let on_listener_error_stmts = { - data_struct - .fields - .iter() - .enumerate() - .map(|(field_n, field)| match field.ident { - Some(ref i) => quote! { - self.#i.on_swarm_event(#from_swarm::ListenerError(#listener_error { - listener_id, - err, - })); - }, - None => quote! 
{ - self.#field_n.on_swarm_event(#from_swarm::ListenerError(#listener_error { - listener_id, - err, - })); - }, - }) - }; - - // Build the list of statements to put in the body of `on_swarm_event()` - // for the `FromSwarm::ListenerClosed` variant. - let on_listener_closed_stmts = { - data_struct - .fields - .iter() - .enumerate() - .map(|(field_n, field)| match field.ident { - Some(ref i) => quote! { - self.#i.on_swarm_event(#from_swarm::ListenerClosed(#listener_closed { - listener_id, - reason, - })); - }, - None => quote! { - self.#field_n.on_swarm_event(#from_swarm::ListenerClosed(#listener_closed { - listener_id, - reason, - })); + self.#field_n.on_swarm_event(event); }, }) }; @@ -661,67 +361,47 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> TokenStream { // List of statements to put in `poll()`. // // We poll each child one by one and wrap around the output. - let poll_stmts = data_struct.fields.iter().enumerate().map(|(field_n, field)| { - let field = field - .ident - .clone() - .expect("Fields of NetworkBehaviour implementation to be named."); - - let mut wrapped_event = if field_n != 0 { - quote!{ #either_ident::Right(event) } - } else { - quote!{ event } - }; - for _ in 0 .. data_struct.fields.len() - 1 - field_n { - wrapped_event = quote!{ #either_ident::Left(#wrapped_event) }; - } + let poll_stmts = data_struct + .fields + .iter() + .enumerate() + .map(|(field_n, field)| { + let field = field + .ident + .clone() + .expect("Fields of NetworkBehaviour implementation to be named."); + + let mut wrapped_event = if field_n != 0 { + quote! { #either_ident::Right(event) } + } else { + quote! { event } + }; + for _ in 0..data_struct.fields.len() - 1 - field_n { + wrapped_event = quote! { #either_ident::Left(#wrapped_event) }; + } - let generate_event_match_arm = { - // If the `NetworkBehaviour`'s `OutEvent` is generated by the derive macro, wrap the sub - // `NetworkBehaviour` `OutEvent` in the variant of the generated `OutEvent`. 
If the - // `NetworkBehaviour`'s `OutEvent` is provided by the user, use the corresponding `From` + // If the `NetworkBehaviour`'s `ToSwarm` is generated by the derive macro, wrap the sub + // `NetworkBehaviour` `ToSwarm` in the variant of the generated `ToSwarm`. If the + // `NetworkBehaviour`'s `ToSwarm` is provided by the user, use the corresponding `From` // implementation. - let into_out_event = if out_event_definition.is_some() { - let event_variant: syn::Variant = syn::parse_str( - &field - .to_string() - .to_upper_camel_case() - ).expect("uppercased field name to be a valid enum variant name"); - quote! { #out_event_name::#event_variant(event) } + let map_out_event = if out_event_definition.is_some() { + let event_variant: syn::Variant = + syn::parse_str(&field.to_string().to_upper_camel_case()) + .expect("uppercased field name to be a valid enum variant name"); + quote! { #out_event_name::#event_variant } } else { - quote! { event.into() } + quote! { |e| e.into() } }; - quote! { - std::task::Poll::Ready(#network_behaviour_action::GenerateEvent(event)) => { - return std::task::Poll::Ready(#network_behaviour_action::GenerateEvent(#into_out_event)) - } - } - }; + let map_in_event = quote! 
{ |event| #wrapped_event }; - quote!{ - match #trait_to_impl::poll(&mut self.#field, cx, poll_params) { - #generate_event_match_arm - std::task::Poll::Ready(#network_behaviour_action::Dial { opts }) => { - return std::task::Poll::Ready(#network_behaviour_action::Dial { opts }); - } - std::task::Poll::Ready(#network_behaviour_action::NotifyHandler { peer_id, handler, event }) => { - return std::task::Poll::Ready(#network_behaviour_action::NotifyHandler { - peer_id, - handler, - event: #wrapped_event, - }); - } - std::task::Poll::Ready(#network_behaviour_action::ReportObservedAddr { address, score }) => { - return std::task::Poll::Ready(#network_behaviour_action::ReportObservedAddr { address, score }); - } - std::task::Poll::Ready(#network_behaviour_action::CloseConnection { peer_id, connection }) => { - return std::task::Poll::Ready(#network_behaviour_action::CloseConnection { peer_id, connection }); + quote! { + match #trait_to_impl::poll(&mut self.#field, cx) { + std::task::Poll::Ready(e) => return std::task::Poll::Ready(e.map_out(#map_out_event).map_in(#map_in_event)), + std::task::Poll::Pending => {}, } - std::task::Poll::Pending => {}, } - } - }); + }); let out_event_reference = if out_event_definition.is_some() { quote! 
{ #out_event_name #ty_generics } @@ -737,7 +417,7 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> TokenStream { #where_clause { type ConnectionHandler = #connection_handler_ty; - type OutEvent = #out_event_reference; + type ToSwarm = #out_event_reference; #[allow(clippy::needless_question_mark)] fn handle_pending_inbound_connection( @@ -795,57 +475,18 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> TokenStream { } } - fn poll(&mut self, cx: &mut std::task::Context, poll_params: &mut impl #poll_parameters) -> std::task::Poll<#network_behaviour_action>> { - use #prelude_path::futures::*; + fn poll(&mut self, cx: &mut std::task::Context) -> std::task::Poll<#network_behaviour_action>> { #(#poll_stmts)* std::task::Poll::Pending } - fn on_swarm_event(&mut self, event: #from_swarm) { - match event { - #from_swarm::ConnectionEstablished( - #connection_established { peer_id, connection_id, endpoint, failed_addresses, other_established }) - => { #(#on_connection_established_stmts)* } - #from_swarm::AddressChange( - #address_change { peer_id, connection_id, old, new }) - => { #(#on_address_change_stmts)* } - #from_swarm::ConnectionClosed( - #connection_closed { peer_id, connection_id, endpoint, handler: handlers, remaining_established }) - => { #(#on_connection_closed_stmts)* } - #from_swarm::DialFailure( - #dial_failure { peer_id, connection_id, error }) - => { #(#on_dial_failure_stmts)* } - #from_swarm::ListenFailure( - #listen_failure { local_addr, send_back_addr, connection_id, error }) - => { #(#on_listen_failure_stmts)* } - #from_swarm::NewListener( - #new_listener { listener_id }) - => { #(#on_new_listener_stmts)* } - #from_swarm::NewListenAddr( - #new_listen_addr { listener_id, addr }) - => { #(#on_new_listen_addr_stmts)* } - #from_swarm::ExpiredListenAddr( - #expired_listen_addr { listener_id, addr }) - => { #(#on_expired_listen_addr_stmts)* } - #from_swarm::NewExternalAddr( - #new_external_addr { addr }) - => { 
#(#on_new_external_addr_stmts)* } - #from_swarm::ExpiredExternalAddr( - #expired_external_addr { addr }) - => { #(#on_expired_external_addr_stmts)* } - #from_swarm::ListenerError( - #listener_error { listener_id, err }) - => { #(#on_listener_error_stmts)* } - #from_swarm::ListenerClosed( - #listener_closed { listener_id, reason }) - => { #(#on_listener_closed_stmts)* } - _ => {} - } + fn on_swarm_event(&mut self, event: #from_swarm) { + #(#on_swarm_event_stmts)* } } }; - final_quote.into() + Ok(final_quote.into()) } struct BehaviourAttributes { @@ -854,7 +495,7 @@ struct BehaviourAttributes { } /// Parses the `value` of a key=value pair in the `#[behaviour]` attribute into the requested type. -fn parse_attributes(ast: &DeriveInput) -> Result { +fn parse_attributes(ast: &DeriveInput) -> syn::Result { let mut attributes = BehaviourAttributes { prelude_path: syn::parse_quote! { ::libp2p::swarm::derive_prelude }, user_specified_out_event: None, @@ -865,61 +506,21 @@ fn parse_attributes(ast: &DeriveInput) -> Result::parse_terminated) - .expect("`parse_args_with` never fails when parsing nested meta"); + let nested = attr.parse_args_with(Punctuated::::parse_terminated)?; for meta in nested { if meta.path().is_ident("prelude") { - match meta { - Meta::Path(_) => unimplemented!(), - Meta::List(_) => unimplemented!(), - Meta::NameValue(MetaNameValue { - value: - Expr::Lit(ExprLit { - lit: Lit::Str(s), .. - }), - .. 
- }) => { - attributes.prelude_path = syn::parse_str(&s.value()).unwrap(); - } - Meta::NameValue(name_value) => { - return Err(syn::Error::new_spanned( - name_value.value, - "`prelude` value must be a quoted path", - ) - .to_compile_error() - .into()); - } - } + let value = meta.require_name_value()?.value.require_str_lit()?; + + attributes.prelude_path = syn::parse_str(&value)?; continue; } - if meta.path().is_ident("out_event") { - match meta { - Meta::Path(_) => unimplemented!(), - Meta::List(_) => unimplemented!(), - - Meta::NameValue(MetaNameValue { - value: - Expr::Lit(ExprLit { - lit: Lit::Str(s), .. - }), - .. - }) => { - attributes.user_specified_out_event = - Some(syn::parse_str(&s.value()).unwrap()); - } - Meta::NameValue(name_value) => { - return Err(syn::Error::new_spanned( - name_value.value, - "`out_event` value must be a quoted type", - ) - .to_compile_error() - .into()); - } - } + if meta.path().is_ident("to_swarm") || meta.path().is_ident("out_event") { + let value = meta.require_name_value()?.value.require_str_lit()?; + + attributes.user_specified_out_event = Some(syn::parse_str(&value)?); continue; } diff --git a/swarm-derive/src/syn_ext.rs b/swarm-derive/src/syn_ext.rs new file mode 100644 index 00000000000..d57a8a5dc43 --- /dev/null +++ b/swarm-derive/src/syn_ext.rs @@ -0,0 +1,16 @@ +use syn::{Expr, ExprLit, Lit}; + +pub(crate) trait RequireStrLit { + fn require_str_lit(&self) -> syn::Result<String>; +} + +impl RequireStrLit for Expr { + fn require_str_lit(&self) -> syn::Result<String> { + match self { + Expr::Lit(ExprLit { + lit: Lit::Str(str), .. + }) => Ok(str.value()), + _ => Err(syn::Error::new_spanned(self, "expected a string literal")), + } + } +} diff --git a/swarm-test/CHANGELOG.md b/swarm-test/CHANGELOG.md index b3a1028e768..95223e60272 100644 --- a/swarm-test/CHANGELOG.md +++ b/swarm-test/CHANGELOG.md @@ -1,4 +1,7 @@ -## 0.2.0 - unreleased +## 0.3.0 + + +## 0.2.0 
- Raise MSRV to 1.65. See [PR 3715]. 
diff --git a/swarm-test/Cargo.toml b/swarm-test/Cargo.toml index 2422618c190..cc16a7bccc4 100644 --- a/swarm-test/Cargo.toml +++ b/swarm-test/Cargo.toml @@ -1,8 +1,8 @@ [package] name = "libp2p-swarm-test" -version = "0.2.0" +version = "0.3.0" edition = "2021" -rust-version = "1.65.0" +rust-version = { workspace = true } license = "MIT" description = "Test framework for code building on top of libp2p-swarm" repository = "https://github.com/libp2p/rust-libp2p" @@ -12,14 +12,17 @@ categories = ["network-programming", "asynchronous"] # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -async-trait = "0.1.68" +async-trait = "0.1.75" libp2p-core = { workspace = true } -libp2p-identity = { workspace = true } +libp2p-identity = { workspace = true, features = ["rand"] } libp2p-plaintext = { workspace = true } -libp2p-swarm = { workspace = true } +libp2p-swarm = { workspace = true, features = ["async-std"] } libp2p-tcp = { workspace = true, features = ["async-io"] } libp2p-yamux = { workspace = true } -futures = "0.3.28" -log = "0.4.17" +futures = "0.3.30" rand = "0.8.5" +tracing = "0.1.37" futures-timer = "3.0.2" + +[lints] +workspace = true diff --git a/swarm-test/src/lib.rs b/swarm-test/src/lib.rs index f93a1efcfe8..48f5bcbf4ef 100644 --- a/swarm-test/src/lib.rs +++ b/swarm-test/src/lib.rs @@ -19,21 +19,18 @@ // DEALINGS IN THE SOFTWARE. 
use async_trait::async_trait; -use futures::future::Either; -use futures::StreamExt; +use futures::future::{BoxFuture, Either}; +use futures::{FutureExt, StreamExt}; use libp2p_core::{ - identity::Keypair, multiaddr::Protocol, transport::MemoryTransport, upgrade::Version, - Multiaddr, Transport, + multiaddr::Protocol, transport::MemoryTransport, upgrade::Version, Multiaddr, Transport, }; -use libp2p_identity::PeerId; -use libp2p_plaintext::PlainText2Config; +use libp2p_identity::{Keypair, PeerId}; +use libp2p_plaintext as plaintext; use libp2p_swarm::dial_opts::PeerCondition; -use libp2p_swarm::{ - dial_opts::DialOpts, AddressScore, NetworkBehaviour, Swarm, SwarmBuilder, SwarmEvent, - THandlerErr, -}; +use libp2p_swarm::{self as swarm, dial_opts::DialOpts, NetworkBehaviour, Swarm, SwarmEvent}; use libp2p_yamux as yamux; use std::fmt::Debug; +use std::future::IntoFuture; use std::time::Duration; /// An extension trait for [`Swarm`] that makes it easier to set up a network of [`Swarm`]s for tests. @@ -43,18 +40,22 @@ pub trait SwarmExt { /// Create a new [`Swarm`] with an ephemeral identity. /// - /// The swarm will use a [`MemoryTransport`] together with [`PlainText2Config`] authentication layer and - /// yamux as the multiplexer. However, these details should not be relied upon by the test + /// The swarm will use a [`MemoryTransport`] together with a [`plaintext::Config`] authentication layer and + /// [`yamux::Config`] as the multiplexer. However, these details should not be relied upon by the test /// and may change at any time. fn new_ephemeral(behaviour_fn: impl FnOnce(Keypair) -> Self::NB) -> Self where Self: Sized; /// Establishes a connection to the given [`Swarm`], polling both of them until the connection is established. + /// + /// This will take addresses from the `other` [`Swarm`] via [`Swarm::external_addresses`]. + /// By default, this iterator will not yield any addresses. 
+ /// To add listen addresses as external addresses, use [`ListenFuture::with_memory_addr_external`] or [`ListenFuture::with_tcp_addr_external`]. async fn connect(&mut self, other: &mut Swarm) where T: NetworkBehaviour + Send, - ::OutEvent: Debug; + ::ToSwarm: Debug; /// Dial the provided address and wait until a connection has been established. /// @@ -67,27 +68,23 @@ pub trait SwarmExt { /// Wait for specified condition to return `Some`. async fn wait(&mut self, predicate: P) -> E where - P: Fn( - SwarmEvent<::OutEvent, THandlerErr>, - ) -> Option, + P: Fn(SwarmEvent<::ToSwarm>) -> Option, P: Send; /// Listens for incoming connections, polling the [`Swarm`] until the transport is ready to accept connections. /// /// The first address is for the memory transport, the second one for the TCP transport. - async fn listen(&mut self) -> (Multiaddr, Multiaddr); + fn listen(&mut self) -> ListenFuture<&mut Self>; /// Returns the next [`SwarmEvent`] or times out after 10 seconds. /// /// If the 10s timeout does not fit your usecase, please fall back to `StreamExt::next`. - async fn next_swarm_event( - &mut self, - ) -> SwarmEvent<::OutEvent, THandlerErr>; + async fn next_swarm_event(&mut self) -> SwarmEvent<::ToSwarm>; /// Returns the next behaviour event or times out after 10 seconds. /// /// If the 10s timeout does not fit your usecase, please fall back to `StreamExt::next`. - async fn next_behaviour_event(&mut self) -> ::OutEvent; + async fn next_behaviour_event(&mut self) -> ::ToSwarm; async fn loop_on_next(self); } @@ -108,7 +105,7 @@ pub trait SwarmExt { /// This function utilizes the [`TryIntoOutput`] trait. /// Similar as to the number of expected events, the type of event is inferred based on your usage. /// If you match against a [`SwarmEvent`], the first [`SwarmEvent`] will be returned. 
-/// If you match against your [`NetworkBehaviour::OutEvent`] type, [`SwarmEvent`]s which are not [`SwarmEvent::Behaviour`] will be skipped until the [`Swarm`] returns a behaviour event. +/// If you match against your [`NetworkBehaviour::ToSwarm`] type, [`SwarmEvent`]s which are not [`SwarmEvent::Behaviour`] will be skipped until the [`Swarm`] returns a behaviour event. /// /// You can implement the [`TryIntoOutput`] for any other type to further customize this behaviour. /// @@ -136,11 +133,11 @@ pub async fn drive< ) -> ([Out1; NUM_EVENTS_SWARM_1], [Out2; NUM_EVENTS_SWARM_2]) where TBehaviour2: NetworkBehaviour + Send, - TBehaviour2::OutEvent: Debug, + TBehaviour2::ToSwarm: Debug, TBehaviour1: NetworkBehaviour + Send, - TBehaviour1::OutEvent: Debug, - SwarmEvent>: TryIntoOutput, - SwarmEvent>: TryIntoOutput, + TBehaviour1::ToSwarm: Debug, + SwarmEvent: TryIntoOutput, + SwarmEvent: TryIntoOutput, Out1: Debug, Out2: Debug, { @@ -182,15 +179,15 @@ pub trait TryIntoOutput: Sized { fn try_into_output(self) -> Result; } -impl TryIntoOutput for SwarmEvent { +impl TryIntoOutput for SwarmEvent { fn try_into_output(self) -> Result { self.try_into_behaviour_event() } } -impl TryIntoOutput> - for SwarmEvent +impl TryIntoOutput> + for SwarmEvent { - fn try_into_output(self) -> Result, Self> { + fn try_into_output(self) -> Result, Self> { Ok(self) } } @@ -199,7 +196,7 @@ impl TryIntoOutput SwarmExt for Swarm where B: NetworkBehaviour + Send, - ::OutEvent: Debug, + ::ToSwarm: Debug, { type NB = B; @@ -213,26 +210,26 @@ where let transport = MemoryTransport::default() .or_transport(libp2p_tcp::async_io::Transport::default()) .upgrade(Version::V1) - .authenticate(PlainText2Config { - local_public_key: identity.public(), - }) + .authenticate(plaintext::Config::new(&identity)) .multiplex(yamux::Config::default()) .timeout(Duration::from_secs(20)) .boxed(); - SwarmBuilder::without_executor(transport, behaviour_fn(identity), peer_id).build() + Swarm::new( + transport, + 
behaviour_fn(identity), + peer_id, + swarm::Config::with_async_std_executor() + .with_idle_connection_timeout(Duration::from_secs(5)), // Some tests need connections to be kept alive beyond what the individual behaviour configures., + ) } async fn connect(&mut self, other: &mut Swarm) where T: NetworkBehaviour + Send, - ::OutEvent: Debug, + ::ToSwarm: Debug, { - let external_addresses = other - .external_addresses() - .cloned() - .map(|r| r.addr) - .collect(); + let external_addresses = other.external_addresses().cloned().collect(); let dial_opts = DialOpts::peer_id(*other.local_peer_id()) .addresses(external_addresses) @@ -253,10 +250,16 @@ where listener_done = true; } Either::Left((other, _)) => { - log::debug!("Ignoring event from dialer {:?}", other); + tracing::debug!( + dialer=?other, + "Ignoring event from dialer" + ); } Either::Right((other, _)) => { - log::debug!("Ignoring event from listener {:?}", other); + tracing::debug!( + listener=?other, + "Ignoring event from listener" + ); } } @@ -274,7 +277,10 @@ where endpoint, peer_id, .. 
} => (endpoint.get_remote_address() == &addr).then_some(peer_id), other => { - log::debug!("Ignoring event from dialer {:?}", other); + tracing::debug!( + dialer=?other, + "Ignoring event from dialer" + ); None } }) @@ -283,7 +289,7 @@ where async fn wait(&mut self, predicate: P) -> E where - P: Fn(SwarmEvent<::OutEvent, THandlerErr>) -> Option, + P: Fn(SwarmEvent<::ToSwarm>) -> Option, P: Send, { loop { @@ -294,58 +300,15 @@ where } } - async fn listen(&mut self) -> (Multiaddr, Multiaddr) { - let memory_addr_listener_id = self.listen_on(Protocol::Memory(0).into()).unwrap(); - - // block until we are actually listening - let memory_multiaddr = self - .wait(|e| match e { - SwarmEvent::NewListenAddr { - address, - listener_id, - } => (listener_id == memory_addr_listener_id).then_some(address), - other => { - log::debug!( - "Ignoring {:?} while waiting for listening to succeed", - other - ); - None - } - }) - .await; - - // Memory addresses are externally reachable because they all share the same memory-space. - self.add_external_address(memory_multiaddr.clone(), AddressScore::Infinite); - - let tcp_addr_listener_id = self - .listen_on("/ip4/0.0.0.0/tcp/0".parse().unwrap()) - .unwrap(); - - let tcp_multiaddr = self - .wait(|e| match e { - SwarmEvent::NewListenAddr { - address, - listener_id, - } => (listener_id == tcp_addr_listener_id).then_some(address), - other => { - log::debug!( - "Ignoring {:?} while waiting for listening to succeed", - other - ); - None - } - }) - .await; - - // We purposely don't add the TCP addr as an external one because we want to only use the memory transport for making connections in here. - // The TCP transport is only supported for protocols that manage their own connections. 
- - (memory_multiaddr, tcp_multiaddr) + fn listen(&mut self) -> ListenFuture<&mut Self> { + ListenFuture { + add_memory_external: false, + add_tcp_external: false, + swarm: self, + } } - async fn next_swarm_event( - &mut self, - ) -> SwarmEvent<::OutEvent, THandlerErr> { + async fn next_swarm_event(&mut self) -> SwarmEvent<::ToSwarm> { match futures::future::select( futures_timer::Delay::new(Duration::from_secs(10)), self.select_next_some(), @@ -354,14 +317,14 @@ where { Either::Left(((), _)) => panic!("Swarm did not emit an event within 10s"), Either::Right((event, _)) => { - log::trace!("Swarm produced: {:?}", event); + tracing::trace!(?event); event } } } - async fn next_behaviour_event(&mut self) -> ::OutEvent { + async fn next_behaviour_event(&mut self) -> ::ToSwarm { loop { if let Ok(event) = self.next_swarm_event().await.try_into_behaviour_event() { return event; @@ -371,7 +334,91 @@ where async fn loop_on_next(mut self) { while let Some(event) = self.next().await { - log::trace!("Swarm produced: {:?}", event); + tracing::trace!(?event); + } + } +} + +pub struct ListenFuture { + add_memory_external: bool, + add_tcp_external: bool, + swarm: S, +} + +impl ListenFuture { + /// Adds the memory address we are starting to listen on as an external address using [`Swarm::add_external_address`]. + /// + /// This is typically "safe" for tests because within a process, memory addresses are "globally" reachable. + /// However, some tests depend on which addresses are external and need this to be configurable so it is not a good default. + pub fn with_memory_addr_external(mut self) -> Self { + self.add_memory_external = true; + + self + } + + /// Adds the TCP address we are starting to listen on as an external address using [`Swarm::add_external_address`]. + /// + /// This is typically "safe" for tests because on the same machine, 127.0.0.1 is reachable for other [`Swarm`]s. 
+ /// However, some tests depend on which addresses are external and need this to be configurable so it is not a good default. + pub fn with_tcp_addr_external(mut self) -> Self { + self.add_tcp_external = true; + + self + } +} + +impl<'s, B> IntoFuture for ListenFuture<&'s mut Swarm> +where + B: NetworkBehaviour + Send, + ::ToSwarm: Debug, +{ + type Output = (Multiaddr, Multiaddr); + type IntoFuture = BoxFuture<'s, Self::Output>; + + fn into_future(self) -> Self::IntoFuture { + async move { + let swarm = self.swarm; + + let memory_addr_listener_id = swarm.listen_on(Protocol::Memory(0).into()).unwrap(); + + // block until we are actually listening + let memory_multiaddr = swarm + .wait(|e| match e { + SwarmEvent::NewListenAddr { + address, + listener_id, + } => (listener_id == memory_addr_listener_id).then_some(address), + other => { + panic!("Unexpected event while waiting for `NewListenAddr`: {other:?}") + } + }) + .await; + + let tcp_addr_listener_id = swarm + .listen_on("/ip4/127.0.0.1/tcp/0".parse().unwrap()) + .unwrap(); + + let tcp_multiaddr = swarm + .wait(|e| match e { + SwarmEvent::NewListenAddr { + address, + listener_id, + } => (listener_id == tcp_addr_listener_id).then_some(address), + other => { + panic!("Unexpected event while waiting for `NewListenAddr`: {other:?}") + } + }) + .await; + + if self.add_memory_external { + swarm.add_external_address(memory_multiaddr.clone()); + } + if self.add_tcp_external { + swarm.add_external_address(tcp_multiaddr.clone()); + } + + (memory_multiaddr, tcp_multiaddr) } + .boxed() } } diff --git a/swarm/CHANGELOG.md b/swarm/CHANGELOG.md index 365c0e771c2..65dce4b002a 100644 --- a/swarm/CHANGELOG.md +++ b/swarm/CHANGELOG.md @@ -1,4 +1,107 @@ -## 0.43.0 - unreleased +## 0.44.1 + +- Implement `Clone` & `Copy` for `FromSwarm. + This makes it easier to forward these events when wrapping other behaviours. + See [PR 4825](https://github.com/libp2p/rust-libp2p/pull/4825). 
+ +## 0.44.0 + +- Add `#[non_exhaustive]` to `FromSwarm`, `ToSwarm`, `SwarmEvent`, `ConnectionHandlerEvent`, `ConnectionEvent`. + See [PR 4581](https://github.com/libp2p/rust-libp2p/pull/4581). +- Remove `handler` field from `ConnectionClosed`. + If you need to transfer state from a `ConnectionHandler` to its `NetworkBehaviour` when a connection closes, use `ConnectionHandler::poll_close`. + See [PR 4076](https://github.com/libp2p/rust-libp2p/pull/4076). +- Remove deprecated `PollParameters` from `NetworkBehaviour::poll` function. + See [PR 4490](https://github.com/libp2p/rust-libp2p/pull/4490). +- Remove deprecated `ConnectionHandlerEvent::Close` and `ConnectionHandler::Error`. + `ConnectionHandler`s should not close connections directly as the connection might still be in use by other handlers. + See [PR 4755](https://github.com/libp2p/rust-libp2p/pull/4755). +- Add `PeerCondition::DisconnectedAndNotDialing` variant, combining pre-existing conditions. + This is the new default. + A new dialing attempt is iniated _only if_ the peer is both considered disconnected and there is currently no ongoing dialing attempt. + See [PR 4225](https://github.com/libp2p/rust-libp2p/pull/4225). +- Remove deprecated `keep_alive_timeout` in `OneShotHandlerConfig`. + See [PR 4677](https://github.com/libp2p/rust-libp2p/pull/4677). +- Don't close entire connection upon `DialUpgradeError`s within `OneShotHandler`. + Instead, the error is reported as `Err(e)` via `ConnectionHandler::ToBehaviour`. + See [PR 4715](https://github.com/libp2p/rust-libp2p/pull/4715). +- Log `PeerId` of `Swarm` even when constructed with new `SwarmBuilder`. + See [PR 4671](https://github.com/libp2p/rust-libp2p/pull/4671). +- Add `SwarmEvent::{NewExternalAddrCandidate,ExternalAddrConfirmed,ExternalAddrExpired}` variants. + See [PR 4721](https://github.com/libp2p/rust-libp2p/pull/4721). +- Remove deprecated symbols. + See [PR 4737](https://github.com/libp2p/rust-libp2p/pull/4737). 
+ +## 0.43.7 + +- Deprecate `ConnectionHandlerEvent::Close`. + See [issue 3591](https://github.com/libp2p/rust-libp2p/issues/3591) for details. + See [PR 4714](https://github.com/libp2p/rust-libp2p/pull/4714). + +## 0.43.6 + +- Deprecate `libp2p::swarm::SwarmBuilder`. + Most users should use `libp2p::SwarmBuilder`. + In some special cases, users may need to use `Swarm::new` and `Config` instead of the new `libp2p::SwarmBuilder`. + See [PR 4120]. +- Make the `Debug` implementation of `StreamProtocol` more concise. + See [PR 4631](https://github.com/libp2p/rust-libp2p/pull/4631). +- Fix overflow in `KeepAlive` computation that could occur panic at `Delay::new` if `SwarmBuilder::idle_connection_timeout` is configured too large. + See [PR 4644](https://github.com/libp2p/rust-libp2p/pull/4644). +- Deprecate `KeepAlive::Until`. + Individual protocols should not keep connections alive for longer than necessary. + Users should use `swarm::Config::idle_connection_timeout` instead. + See [PR 4656](https://github.com/libp2p/rust-libp2p/pull/4656). +- Deprecate `keep_alive_timeout` in `OneShotHandlerConfig`. + See [PR 4680](https://github.com/libp2p/rust-libp2p/pull/4680). + +[PR 4120]: https://github.com/libp2p/rust-libp2p/pull/4120 + +## 0.43.5 + +- Fix overflow in `KeepAlive` computation that could occur if `SwarmBuilder::idle_connection_timeout` is configured with `u64::MAX`. + See [PR 4559](https://github.com/libp2p/rust-libp2p/pull/4559). + +## 0.43.4 + +- Implement `Debug` for event structs. + See [PR 4426]. + +- Improve error message when `DialPeerCondition` prevents a dial. + See [PR 4409]. + +- Introduce `SwarmBuilder::idle_conncetion_timeout` and deprecate `keep_alive::Behaviour` as a result. + See [PR 4161]. + +[PR 4426]: https://github.com/libp2p/rust-libp2p/pull/4426 +[PR 4409]: https://github.com/libp2p/rust-libp2p/pull/4409 +[PR 4161]: https://github.com/libp2p/rust-libp2p/pull/4161 + +## 0.43.3 + +- Implement `Display` for `ConnectionId`. + See [PR 4278]. 
+ +[PR 4278]: https://github.com/libp2p/rust-libp2p/pull/4278 + +## 0.43.2 +- Display the cause of a `ListenError::Denied`. + See [PR 4232] + +[PR 4232]: https://github.com/libp2p/rust-libp2p/pull/4158 + +## 0.43.1 + +- Do not announce external address candidate before address translation, unless translation does not apply. + This will prevent ephemeral TCP addresses being announced as external address candidates. + See [PR 4158]. + +[PR 4158]: https://github.com/libp2p/rust-libp2p/pull/4158 + +## 0.43.0 + +- Allow `NetworkBehaviours` to create and remove listeners. + See [PR 3292]. - Raise MSRV to 1.65. See [PR 3715]. @@ -10,9 +113,77 @@ - Return a bool from `ExternalAddresses::on_swarm_event` and `ListenAddresses::on_swarm_event` indicating whether any state was changed. See [PR 3865]. +- Remove deprecated banning API from `Swarm`. + Users should migrate to `libp2p::allow_block_list`. + See [PR 3886]. + +- Remove `ConnectionHandlerUpgrErr::Timer` variant. + This variant was never constructed and thus dead code. + See [PR 3605]. + +- Remove deprecated `IntoConnectionHandler` and all its implementations. + This also removes the `NetworkBehaviour::new_handler` and `NetworkBehaviour::addresses_of_peer` methods. + See changelog for `0.42` on how to migrate. + See [PR 3884]. + +- Remove `ConnectionHandlerUpgrErr::Timer` variant. + This variant was never constructed and thus dead code. + See [PR 3605]. + +- Flatten `ConnectionHandlerUpgrErr` and rename to `StreamUpgradeError`. + See [PR 3882]. + +- Remove deprecated `ConnectionLimits`. + Users should migrate to `libp2p::connection_limits::Behaviour`. + See [PR 3885]. + +- Allow `ConnectionHandler`s to report and learn about the supported protocols on a connection. + The newly introduced API elements are: + - `ConnectionHandlerEvent::ReportRemoteProtocols` + - `ConnectionEvent::LocalProtocolsChange` + - `ConnectionEvent::RemoteProtocolsChange` + + See [PR 3651]. 
+ +- Deprecate the `NegotiatedSubstream` type and replace it with `Stream`. + See [PR 3912]. + +- Rename `NetworkBehaviour::OutEvent` to `NetworkBehaviour::ToSwarm`, `ConnectionHandler::InEvent` to `ConnectionHandler::FromBehaviour`, `ConnectionHandler::OutEvent` to `ConnectionHandler::ToBehaviour`. See [PR 3848]. + +- Remove deprecated `NetworkBehaviourAction` type. + See [PR 3919]. + +- Expose `ConnectionId` on `SwarmEvent::{ConnectionEstablished,ConnectionClosed,IncomingConnection,IncomingConnectionError,OutgoingConnectionError,Dialing}`. + Also emit `SwarmEvent::Dialing` for dials with unknown `PeerId`. + See [PR 3927]. + +- Rename `ConnectionHandlerEvent::Custom` to `ConnectionHandlerEvent::NotifyBehaviour`. See [PR 3955]. + +- Remove `DialError::InvalidPeerId` variant. With the move to `multiaddr` `v0.18.0` peer IDs in `/p2p` are type safe and thus usage of the contained peer ID can not result in a parsing error. + See [PR 4037]. + +- Remove deprecated items. See [PR 3956]. + +- Add ability to `downcast_ref` ConnectionDenied errors. See [PR 4020]. 
+ +[PR 3292]: https://github.com/libp2p/rust-libp2p/pull/3292 +[PR 3605]: https://github.com/libp2p/rust-libp2p/pull/3605 +[PR 3651]: https://github.com/libp2p/rust-libp2p/pull/3651 [PR 3715]: https://github.com/libp2p/rust-libp2p/pull/3715 [PR 3746]: https://github.com/libp2p/rust-libp2p/pull/3746 +[PR 3848]: https://github.com/libp2p/rust-libp2p/pull/3848 [PR 3865]: https://github.com/libp2p/rust-libp2p/pull/3865 +[PR 3882]: https://github.com/libp2p/rust-libp2p/pull/3882 +[PR 3884]: https://github.com/libp2p/rust-libp2p/pull/3884 +[PR 3885]: https://github.com/libp2p/rust-libp2p/pull/3885 +[PR 3886]: https://github.com/libp2p/rust-libp2p/pull/3886 +[PR 3912]: https://github.com/libp2p/rust-libp2p/pull/3912 +[PR 3919]: https://github.com/libp2p/rust-libp2p/pull/3919 +[PR 3927]: https://github.com/libp2p/rust-libp2p/pull/3927 +[PR 3955]: https://github.com/libp2p/rust-libp2p/pull/3955 +[PR 3956]: https://github.com/libp2p/rust-libp2p/pull/3956 +[PR 4020]: https://github.com/libp2p/rust-libp2p/pull/4020 +[PR 4037]: https://github.com/libp2p/rust-libp2p/pull/4037 ## 0.42.2 diff --git a/swarm/Cargo.toml b/swarm/Cargo.toml index 722f3610b95..9de312e76d7 100644 --- a/swarm/Cargo.toml +++ b/swarm/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-swarm" edition = "2021" rust-version = { workspace = true } description = "The libp2p swarm" -version = "0.43.0" +version = "0.44.1" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -11,24 +11,26 @@ keywords = ["peer-to-peer", "libp2p", "networking"] categories = ["network-programming", "asynchronous"] [dependencies] -either = "1.6.0" +either = "1.9.0" fnv = "1.0" -futures = "0.3.28" +futures = "0.3.30" futures-timer = "3.0.2" -instant = "0.1.11" +getrandom = { version = "0.2.11", features = ["js"], optional = true } # Explicit dependency to be used in `wasm-bindgen` feature +instant = "0.1.12" libp2p-core = { workspace = true } libp2p-identity = { workspace = true } 
libp2p-swarm-derive = { workspace = true, optional = true } -log = "0.4" +multistream-select = { workspace = true } +once_cell = "1.19.0" rand = "0.8" -smallvec = "1.6.1" +smallvec = "1.11.2" +tracing = "0.1.37" void = "1" -wasm-bindgen-futures = { version = "0.4.34", optional = true } -getrandom = { version = "0.2.9", features = ["js"], optional = true } # Explicit dependency to be used in `wasm-bindgen` feature +wasm-bindgen-futures = { version = "0.4.39", optional = true } [target.'cfg(not(any(target_os = "emscripten", target_os = "wasi", target_os = "unknown")))'.dependencies] async-std = { version = "1.6.2", optional = true } -tokio = { version = "1.28", features = ["rt"], optional = true } +tokio = { version = "1.35", features = ["rt"], optional = true } [features] macros = ["dep:libp2p-swarm-derive"] @@ -38,20 +40,22 @@ wasm-bindgen = ["dep:wasm-bindgen-futures", "dep:getrandom"] [dev-dependencies] async-std = { version = "1.6.2", features = ["attributes"] } -either = "1.6.0" -env_logger = "0.10" -futures = "0.3.28" -libp2p-identify = { workspace = true } +either = "1.9.0" +futures = "0.3.30" +libp2p-identify = { path = "../protocols/identify" } # Using `path` here because this is a cyclic dev-dependency which otherwise breaks releasing. libp2p-identity = { workspace = true, features = ["ed25519"] } -libp2p-kad = { workspace = true } -libp2p-ping = { workspace = true } -libp2p-plaintext = { workspace = true } -libp2p-swarm-derive = { workspace = true } -libp2p-swarm-test = { workspace = true } -libp2p-yamux = { workspace = true } +libp2p-kad = { path = "../protocols/kad" } # Using `path` here because this is a cyclic dev-dependency which otherwise breaks releasing. +libp2p-ping = { path = "../protocols/ping" } # Using `path` here because this is a cyclic dev-dependency which otherwise breaks releasing. +libp2p-plaintext = { path = "../transports/plaintext" } # Using `path` here because this is a cyclic dev-dependency which otherwise breaks releasing. 
+libp2p-swarm-derive = { path = "../swarm-derive" } # Using `path` here because this is a cyclic dev-dependency which otherwise breaks releasing. +libp2p-swarm-test = { path = "../swarm-test" } # Using `path` here because this is a cyclic dev-dependency which otherwise breaks releasing. +libp2p-yamux = { path = "../muxers/yamux" } # Using `path` here because this is a cyclic dev-dependency which otherwise breaks releasing. quickcheck = { workspace = true } void = "1" -once_cell = "1.17.1" +once_cell = "1.19.0" +trybuild = "1.0.86" +tokio = { version = "1.35.1", features = ["time", "rt", "macros", "rt-multi-thread"] } +tracing-subscriber = { version = "0.3", features = ["env-filter"] } [[test]] name = "swarm_derive" @@ -63,3 +67,6 @@ required-features = ["macros"] all-features = true rustdoc-args = ["--cfg", "docsrs"] rustc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true diff --git a/swarm/src/behaviour.rs b/swarm/src/behaviour.rs index 9fd014bdc47..4be129a4eea 100644 --- a/swarm/src/behaviour.rs +++ b/swarm/src/behaviour.rs @@ -28,11 +28,10 @@ pub use listen_addresses::ListenAddresses; use crate::connection::ConnectionId; use crate::dial_opts::DialOpts; -#[allow(deprecated)] -use crate::handler::IntoConnectionHandler; +use crate::listen_opts::ListenOpts; use crate::{ - AddressRecord, AddressScore, ConnectionDenied, DialError, ListenError, THandler, - THandlerInEvent, THandlerOutEvent, + ConnectionDenied, ConnectionHandler, DialError, ListenError, THandler, THandlerInEvent, + THandlerOutEvent, }; use libp2p_core::{transport::ListenerId, ConnectedPoint, Endpoint, Multiaddr}; use libp2p_identity::PeerId; @@ -77,18 +76,16 @@ use std::{task::Context, task::Poll}; /// implementation for the custom `struct`. Each [`NetworkBehaviour`] trait method is simply /// delegated to each `struct` member in the order the `struct` is defined. 
For example for /// [`NetworkBehaviour::poll`] it will first poll the first `struct` member until it returns -/// [`Poll::Pending`] before moving on to later members. For [`NetworkBehaviour::addresses_of_peer`] -/// it will delegate to each `struct` member and return a concatenated array of all addresses -/// returned by the struct members. +/// [`Poll::Pending`] before moving on to later members. /// -/// Events ([`NetworkBehaviour::OutEvent`]) returned by each `struct` member are wrapped in a new +/// Events ([`NetworkBehaviour::ToSwarm`]) returned by each `struct` member are wrapped in a new /// `enum` event, with an `enum` variant for each `struct` member. Users can define this event -/// `enum` themselves and provide the name to the derive macro via `#[behaviour(out_event = -/// "MyCustomOutEvent")]`. If the user does not specify an `out_event`, the derive macro generates +/// `enum` themselves and provide the name to the derive macro via `#[behaviour(to_swarm = +/// "MyCustomOutEvent")]`. If the user does not specify an `to_swarm`, the derive macro generates /// the event definition itself, naming it `Event`. /// /// The aforementioned conversion of each of the event types generated by the struct members to the -/// custom `out_event` is handled by [`From`] implementations which the user needs to define in +/// custom `to_swarm` is handled by [`From`] implementations which the user needs to define in /// addition to the event `enum` itself. 
/// /// ``` rust @@ -96,7 +93,7 @@ use std::{task::Context, task::Poll}; /// # use libp2p_ping as ping; /// # use libp2p_swarm_derive::NetworkBehaviour; /// #[derive(NetworkBehaviour)] -/// #[behaviour(out_event = "Event")] +/// #[behaviour(to_swarm = "Event")] /// # #[behaviour(prelude = "libp2p_swarm::derive_prelude")] /// struct MyBehaviour { /// identify: identify::Behaviour, @@ -122,36 +119,10 @@ use std::{task::Context, task::Poll}; /// ``` pub trait NetworkBehaviour: 'static { /// Handler for all the protocols the network behaviour supports. - #[allow(deprecated)] - type ConnectionHandler: IntoConnectionHandler; + type ConnectionHandler: ConnectionHandler; /// Event generated by the `NetworkBehaviour` and that the swarm will report back. - type OutEvent: Send + 'static; - - /// Creates a new [`ConnectionHandler`](crate::ConnectionHandler) for a connection with a peer. - /// - /// Every time an incoming connection is opened, and every time another [`NetworkBehaviour`] - /// emitted a dial request, this method is called. - /// - /// The returned object is a handler for that specific connection, and will be moved to a - /// background task dedicated to that connection. - /// - /// The network behaviour (ie. the implementation of this trait) and the handlers it has spawned - /// (ie. the objects returned by `new_handler`) can communicate by passing messages. Messages - /// sent from the handler to the behaviour are invoked with - /// [`NetworkBehaviour::on_connection_handler_event`], - /// and the behaviour can send a message to the handler by making [`NetworkBehaviour::poll`] - /// return [`ToSwarm::NotifyHandler`]. - /// - /// Note that the handler is returned to the [`NetworkBehaviour`] on connection failure and - /// connection closing. 
- #[deprecated( - since = "0.42.0", - note = "Use one or more of `NetworkBehaviour::{handle_pending_inbound_connection,handle_established_inbound_connection,handle_pending_outbound_connection,handle_established_outbound_connection}` instead." - )] - fn new_handler(&mut self) -> Self::ConnectionHandler { - panic!("You must implement `handle_established_inbound_connection` and `handle_established_outbound_connection`.") - } + type ToSwarm: Send + 'static; /// Callback that is invoked for every new inbound connection. /// @@ -173,7 +144,7 @@ pub trait NetworkBehaviour: 'static { /// This is invoked once another peer has successfully dialed us. /// /// At this point, we have verified their [`PeerId`] and we know, which particular [`Multiaddr`] succeeded in the dial. - /// In order to actually use this connection, this function must return a [`ConnectionHandler`](crate::ConnectionHandler). + /// In order to actually use this connection, this function must return a [`ConnectionHandler`]. /// Returning an error will immediately close the connection. fn handle_established_inbound_connection( &mut self, @@ -181,16 +152,7 @@ pub trait NetworkBehaviour: 'static { peer: PeerId, local_addr: &Multiaddr, remote_addr: &Multiaddr, - ) -> Result, ConnectionDenied> { - #[allow(deprecated)] - Ok(self.new_handler().into_handler( - &peer, - &ConnectedPoint::Listener { - local_addr: local_addr.clone(), - send_back_addr: remote_addr.clone(), - }, - )) - } + ) -> Result, ConnectionDenied>; /// Callback that is invoked for every outbound connection attempt. 
/// @@ -207,23 +169,18 @@ pub trait NetworkBehaviour: 'static { fn handle_pending_outbound_connection( &mut self, _connection_id: ConnectionId, - maybe_peer: Option, + _maybe_peer: Option, _addresses: &[Multiaddr], _effective_role: Endpoint, ) -> Result, ConnectionDenied> { - #[allow(deprecated)] - if let Some(peer_id) = maybe_peer { - Ok(self.addresses_of_peer(&peer_id)) - } else { - Ok(vec![]) - } + Ok(vec![]) } /// Callback that is invoked for every established outbound connection. /// /// This is invoked once we have successfully dialed a peer. /// At this point, we have verified their [`PeerId`] and we know, which particular [`Multiaddr`] succeeded in the dial. - /// In order to actually use this connection, this function must return a [`ConnectionHandler`](crate::ConnectionHandler). + /// In order to actually use this connection, this function must return a [`ConnectionHandler`]. /// Returning an error will immediately close the connection. fn handle_established_outbound_connection( &mut self, @@ -231,32 +188,12 @@ pub trait NetworkBehaviour: 'static { peer: PeerId, addr: &Multiaddr, role_override: Endpoint, - ) -> Result, ConnectionDenied> { - #[allow(deprecated)] - Ok(self.new_handler().into_handler( - &peer, - &ConnectedPoint::Dialer { - address: addr.clone(), - role_override, - }, - )) - } - - /// Addresses that this behaviour is aware of for this specific peer, and that may allow - /// reaching the peer. - /// - /// The addresses will be tried in the order returned by this function, which means that they - /// should be ordered by decreasing likelihood of reachability. In other words, the first - /// address should be the most likely to be reachable. - #[deprecated(note = "Use `NetworkBehaviour::handle_pending_outbound_connection` instead.")] - fn addresses_of_peer(&mut self, _: &PeerId) -> Vec { - vec![] - } + ) -> Result, ConnectionDenied>; /// Informs the behaviour about an event from the [`Swarm`](crate::Swarm). 
- fn on_swarm_event(&mut self, event: FromSwarm); + fn on_swarm_event(&mut self, event: FromSwarm); - /// Informs the behaviour about an event generated by the [`ConnectionHandler`](crate::ConnectionHandler) + /// Informs the behaviour about an event generated by the [`ConnectionHandler`] /// dedicated to the peer identified by `peer_id`. for the behaviour. /// /// The [`PeerId`] is guaranteed to be in a connected state. In other words, @@ -272,59 +209,15 @@ pub trait NetworkBehaviour: 'static { /// /// This API mimics the API of the `Stream` trait. The method may register the current task in /// order to wake it up at a later point in time. - fn poll( - &mut self, - cx: &mut Context<'_>, - params: &mut impl PollParameters, - ) -> Poll>>; -} - -/// Parameters passed to `poll()`, that the `NetworkBehaviour` has access to. -pub trait PollParameters { - /// Iterator returned by [`supported_protocols`](PollParameters::supported_protocols). - type SupportedProtocolsIter: ExactSizeIterator>; - /// Iterator returned by [`listened_addresses`](PollParameters::listened_addresses). - type ListenedAddressesIter: ExactSizeIterator; - /// Iterator returned by [`external_addresses`](PollParameters::external_addresses). - type ExternalAddressesIter: ExactSizeIterator; - - /// Returns the list of protocol the behaviour supports when a remote negotiates a protocol on - /// an inbound substream. - /// - /// The iterator's elements are the ASCII names as reported on the wire. - /// - /// Note that the list is computed once at initialization and never refreshed. - fn supported_protocols(&self) -> Self::SupportedProtocolsIter; - - /// Returns the list of the addresses we're listening on. - #[deprecated( - since = "0.42.0", - note = "Use `libp2p_swarm::ListenAddresses` instead." - )] - fn listened_addresses(&self) -> Self::ListenedAddressesIter; - - /// Returns the list of the addresses nodes can use to reach us. 
- #[deprecated( - since = "0.42.0", - note = "Use `libp2p_swarm::ExternalAddresses` instead." - )] - fn external_addresses(&self) -> Self::ExternalAddressesIter; - - /// Returns the peer id of the local node. - #[deprecated( - since = "0.42.0", - note = "Pass the node's `PeerId` into the behaviour instead." - )] - fn local_peer_id(&self) -> &PeerId; + fn poll(&mut self, cx: &mut Context<'_>) + -> Poll>>; } -#[deprecated(note = "Use `ToSwarm` instead.")] -pub type NetworkBehaviourAction = ToSwarm; - /// A command issued from a [`NetworkBehaviour`] for the [`Swarm`]. /// /// [`Swarm`]: super::Swarm #[derive(Debug)] +#[non_exhaustive] pub enum ToSwarm { /// Instructs the `Swarm` to return an event when it is being polled. GenerateEvent(TOutEvent), @@ -339,23 +232,29 @@ pub enum ToSwarm { /// This allows a [`NetworkBehaviour`] to identify a connection that resulted out of its own dial request. Dial { opts: DialOpts }, + /// Instructs the [`Swarm`](crate::Swarm) to listen on the provided address. + ListenOn { opts: ListenOpts }, + + /// Instructs the [`Swarm`](crate::Swarm) to remove the listener. + RemoveListener { id: ListenerId }, + /// Instructs the `Swarm` to send an event to the handler dedicated to a /// connection with a peer. /// - /// If the `Swarm` is connected to the peer, the message is delivered to the - /// [`ConnectionHandler`](crate::ConnectionHandler) instance identified by the peer ID and connection ID. + /// If the `Swarm` is connected to the peer, the message is delivered to the [`ConnectionHandler`] + /// instance identified by the peer ID and connection ID. /// /// If the specified connection no longer exists, the event is silently dropped. /// /// Typically the connection ID given is the same as the one passed to /// [`NetworkBehaviour::on_connection_handler_event`], i.e. 
whenever the behaviour wishes to /// respond to a request on the same connection (and possibly the same - /// substream, as per the implementation of [`ConnectionHandler`](crate::ConnectionHandler)). + /// substream, as per the implementation of [`ConnectionHandler`]). /// /// Note that even if the peer is currently connected, connections can get closed /// at any time and thus the event may not reach a handler. NotifyHandler { - /// The peer for whom a [`ConnectionHandler`](crate::ConnectionHandler) should be notified. + /// The peer for whom a [`ConnectionHandler`] should be notified. peer_id: PeerId, /// The options w.r.t. which connection handler to notify of the event. handler: NotifyHandler, @@ -363,32 +262,41 @@ pub enum ToSwarm { event: TInEvent, }, - /// Informs the `Swarm` about an address observed by a remote for - /// the local node by which the local node is supposedly publicly - /// reachable. + /// Reports a **new** candidate for an external address to the [`Swarm`](crate::Swarm). /// - /// It is advisable to issue `ReportObservedAddr` actions at a fixed frequency - /// per node. This way address information will be more accurate over time - /// and individual outliers carry less weight. - ReportObservedAddr { - /// The observed address of the local node. - address: Multiaddr, - /// The score to associate with this observation, i.e. - /// an indicator for the trusworthiness of this address - /// relative to other observed addresses. - score: AddressScore, - }, + /// The emphasis on a **new** candidate is important. + /// Protocols MUST take care to only emit a candidate once per "source". + /// For example, the observed address of a TCP connection does not change throughout its lifetime. + /// Thus, only one candidate should be emitted per connection. + /// + /// This makes the report frequency of an address a meaningful data-point for consumers of this event. 
+ /// This address will be shared with all [`NetworkBehaviour`]s via [`FromSwarm::NewExternalAddrCandidate`]. + /// + /// This address could come from a variety of sources: + /// - A protocol such as identify obtained it from a remote. + /// - The user provided it based on configuration. + /// - We made an educated guess based on one of our listen addresses. + NewExternalAddrCandidate(Multiaddr), - /// Instructs the `Swarm` to initiate a graceful close of one or all connections - /// with the given peer. + /// Indicates to the [`Swarm`](crate::Swarm) that the provided address is confirmed to be externally reachable. /// - /// Note: Closing a connection via - /// [`ToSwarm::CloseConnection`] does not inform the - /// corresponding [`ConnectionHandler`](crate::ConnectionHandler). - /// Closing a connection via a [`ConnectionHandler`](crate::ConnectionHandler) can be done - /// either in a collaborative manner across [`ConnectionHandler`](crate::ConnectionHandler)s - /// with [`ConnectionHandler::connection_keep_alive`](crate::ConnectionHandler::connection_keep_alive) or directly with - /// [`ConnectionHandlerEvent::Close`](crate::ConnectionHandlerEvent::Close). + /// This is intended to be issued in response to a [`FromSwarm::NewExternalAddrCandidate`] if we are indeed externally reachable on this address. + /// This address will be shared with all [`NetworkBehaviour`]s via [`FromSwarm::ExternalAddrConfirmed`]. + ExternalAddrConfirmed(Multiaddr), + + /// Indicates to the [`Swarm`](crate::Swarm) that we are no longer externally reachable under the provided address. + /// + /// This expires an address that was earlier confirmed via [`ToSwarm::ExternalAddrConfirmed`]. + /// This address will be shared with all [`NetworkBehaviour`]s via [`FromSwarm::ExternalAddrExpired`]. + ExternalAddrExpired(Multiaddr), + + /// Instructs the `Swarm` to initiate a graceful close of one or all connections with the given peer. 
+ /// + /// Closing a connection via [`ToSwarm::CloseConnection`] will poll [`ConnectionHandler::poll_close`] to completion. + /// In most cases, stopping to "use" a connection is enough to have it closed. + /// The keep-alive algorithm will close a connection automatically once all [`ConnectionHandler`]s are idle. + /// + /// Use this command if you want to close a connection _despite_ it still being in use by one or more handlers. CloseConnection { /// The peer to disconnect. peer_id: PeerId, @@ -406,6 +314,8 @@ impl ToSwarm { match self { ToSwarm::GenerateEvent(e) => ToSwarm::GenerateEvent(e), ToSwarm::Dial { opts } => ToSwarm::Dial { opts }, + ToSwarm::ListenOn { opts } => ToSwarm::ListenOn { opts }, + ToSwarm::RemoveListener { id } => ToSwarm::RemoveListener { id }, ToSwarm::NotifyHandler { peer_id, handler, @@ -415,9 +325,6 @@ impl ToSwarm { handler, event: f(event), }, - ToSwarm::ReportObservedAddr { address, score } => { - ToSwarm::ReportObservedAddr { address, score } - } ToSwarm::CloseConnection { peer_id, connection, @@ -425,6 +332,9 @@ impl ToSwarm { peer_id, connection, }, + ToSwarm::NewExternalAddrCandidate(addr) => ToSwarm::NewExternalAddrCandidate(addr), + ToSwarm::ExternalAddrConfirmed(addr) => ToSwarm::ExternalAddrConfirmed(addr), + ToSwarm::ExternalAddrExpired(addr) => ToSwarm::ExternalAddrExpired(addr), } } } @@ -435,6 +345,8 @@ impl ToSwarm { match self { ToSwarm::GenerateEvent(e) => ToSwarm::GenerateEvent(f(e)), ToSwarm::Dial { opts } => ToSwarm::Dial { opts }, + ToSwarm::ListenOn { opts } => ToSwarm::ListenOn { opts }, + ToSwarm::RemoveListener { id } => ToSwarm::RemoveListener { id }, ToSwarm::NotifyHandler { peer_id, handler, @@ -444,9 +356,9 @@ impl ToSwarm { handler, event, }, - ToSwarm::ReportObservedAddr { address, score } => { - ToSwarm::ReportObservedAddr { address, score } - } + ToSwarm::NewExternalAddrCandidate(addr) => ToSwarm::NewExternalAddrCandidate(addr), + ToSwarm::ExternalAddrConfirmed(addr) => 
ToSwarm::ExternalAddrConfirmed(addr), + ToSwarm::ExternalAddrExpired(addr) => ToSwarm::ExternalAddrExpired(addr), ToSwarm::CloseConnection { peer_id, connection, @@ -479,8 +391,9 @@ pub enum CloseConnection { /// Enumeration with the list of the possible events /// to pass to [`on_swarm_event`](NetworkBehaviour::on_swarm_event). -#[allow(deprecated)] -pub enum FromSwarm<'a, Handler: IntoConnectionHandler> { +#[derive(Debug, Clone, Copy)] +#[non_exhaustive] +pub enum FromSwarm<'a> { /// Informs the behaviour about a newly established connection to a peer. ConnectionEstablished(ConnectionEstablished<'a>), /// Informs the behaviour about a closed connection to a peer. @@ -488,7 +401,7 @@ pub enum FromSwarm<'a, Handler: IntoConnectionHandler> { /// This event is always paired with an earlier /// [`FromSwarm::ConnectionEstablished`] with the same peer ID, connection ID /// and endpoint. - ConnectionClosed(ConnectionClosed<'a, Handler>), + ConnectionClosed(ConnectionClosed<'a>), /// Informs the behaviour that the [`ConnectedPoint`] of an existing /// connection has changed. AddressChange(AddressChange<'a>), @@ -513,14 +426,16 @@ pub enum FromSwarm<'a, Handler: IntoConnectionHandler> { ListenerError(ListenerError<'a>), /// Informs the behaviour that a listener closed. ListenerClosed(ListenerClosed<'a>), - /// Informs the behaviour that we have discovered a new external address for us. - NewExternalAddr(NewExternalAddr<'a>), - /// Informs the behaviour that an external address was removed. - ExpiredExternalAddr(ExpiredExternalAddr<'a>), + /// Informs the behaviour that we have discovered a new candidate for an external address for us. + NewExternalAddrCandidate(NewExternalAddrCandidate<'a>), + /// Informs the behaviour that an external address of the local node was confirmed. + ExternalAddrConfirmed(ExternalAddrConfirmed<'a>), + /// Informs the behaviour that an external address of the local node expired, i.e. is no-longer confirmed. 
+ ExternalAddrExpired(ExternalAddrExpired<'a>), } /// [`FromSwarm`] variant that informs the behaviour about a newly established connection to a peer. -#[derive(Clone, Copy)] +#[derive(Debug, Clone, Copy)] pub struct ConnectionEstablished<'a> { pub peer_id: PeerId, pub connection_id: ConnectionId, @@ -534,18 +449,17 @@ pub struct ConnectionEstablished<'a> { /// This event is always paired with an earlier /// [`FromSwarm::ConnectionEstablished`] with the same peer ID, connection ID /// and endpoint. -#[allow(deprecated)] -pub struct ConnectionClosed<'a, Handler: IntoConnectionHandler> { +#[derive(Debug, Clone, Copy)] +pub struct ConnectionClosed<'a> { pub peer_id: PeerId, pub connection_id: ConnectionId, pub endpoint: &'a ConnectedPoint, - pub handler: ::Handler, pub remaining_established: usize, } /// [`FromSwarm`] variant that informs the behaviour that the [`ConnectedPoint`] of an existing /// connection has changed. -#[derive(Clone, Copy)] +#[derive(Debug, Clone, Copy)] pub struct AddressChange<'a> { pub peer_id: PeerId, pub connection_id: ConnectionId, @@ -555,7 +469,7 @@ pub struct AddressChange<'a> { /// [`FromSwarm`] variant that informs the behaviour that the dial to a known /// or unknown node failed. -#[derive(Clone, Copy)] +#[derive(Debug, Clone, Copy)] pub struct DialFailure<'a> { pub peer_id: Option, pub error: &'a DialError, @@ -567,7 +481,7 @@ pub struct DialFailure<'a> { /// /// This can include, for example, an error during the handshake of the encryption layer, or the /// connection unexpectedly closed. -#[derive(Clone, Copy)] +#[derive(Debug, Clone, Copy)] pub struct ListenFailure<'a> { pub local_addr: &'a Multiaddr, pub send_back_addr: &'a Multiaddr, @@ -576,14 +490,14 @@ pub struct ListenFailure<'a> { } /// [`FromSwarm`] variant that informs the behaviour that a new listener was created. 
-#[derive(Clone, Copy)] +#[derive(Debug, Clone, Copy)] pub struct NewListener { pub listener_id: ListenerId, } /// [`FromSwarm`] variant that informs the behaviour /// that we have started listening on a new multiaddr. -#[derive(Clone, Copy)] +#[derive(Debug, Clone, Copy)] pub struct NewListenAddr<'a> { pub listener_id: ListenerId, pub addr: &'a Multiaddr, @@ -592,152 +506,40 @@ pub struct NewListenAddr<'a> { /// [`FromSwarm`] variant that informs the behaviour that a multiaddr /// we were listening on has expired, /// which means that we are no longer listening on it. -#[derive(Clone, Copy)] +#[derive(Debug, Clone, Copy)] pub struct ExpiredListenAddr<'a> { pub listener_id: ListenerId, pub addr: &'a Multiaddr, } /// [`FromSwarm`] variant that informs the behaviour that a listener experienced an error. -#[derive(Clone, Copy)] +#[derive(Debug, Clone, Copy)] pub struct ListenerError<'a> { pub listener_id: ListenerId, pub err: &'a (dyn std::error::Error + 'static), } /// [`FromSwarm`] variant that informs the behaviour that a listener closed. -#[derive(Clone, Copy)] +#[derive(Debug, Clone, Copy)] pub struct ListenerClosed<'a> { pub listener_id: ListenerId, pub reason: Result<(), &'a std::io::Error>, } -/// [`FromSwarm`] variant that informs the behaviour -/// that we have discovered a new external address for us. -#[derive(Clone, Copy)] -pub struct NewExternalAddr<'a> { +/// [`FromSwarm`] variant that informs the behaviour about a new candidate for an external address for us. +#[derive(Debug, Clone, Copy)] +pub struct NewExternalAddrCandidate<'a> { pub addr: &'a Multiaddr, } -/// [`FromSwarm`] variant that informs the behaviour that an external address was removed. -#[derive(Clone, Copy)] -pub struct ExpiredExternalAddr<'a> { +/// [`FromSwarm`] variant that informs the behaviour that an external address was confirmed. 
+#[derive(Debug, Clone, Copy)] +pub struct ExternalAddrConfirmed<'a> { pub addr: &'a Multiaddr, } -#[allow(deprecated)] -impl<'a, Handler: IntoConnectionHandler> FromSwarm<'a, Handler> { - fn map_handler( - self, - map_handler: impl FnOnce( - ::Handler, - ) -> ::Handler, - ) -> FromSwarm<'a, NewHandler> - where - NewHandler: IntoConnectionHandler, - { - self.maybe_map_handler(|h| Some(map_handler(h))) - .expect("To return Some as all closures return Some.") - } - - fn maybe_map_handler( - self, - map_handler: impl FnOnce( - ::Handler, - ) -> Option<::Handler>, - ) -> Option> - where - NewHandler: IntoConnectionHandler, - { - match self { - FromSwarm::ConnectionClosed(ConnectionClosed { - peer_id, - connection_id, - endpoint, - handler, - remaining_established, - }) => Some(FromSwarm::ConnectionClosed(ConnectionClosed { - peer_id, - connection_id, - endpoint, - handler: map_handler(handler)?, - remaining_established, - })), - FromSwarm::ConnectionEstablished(ConnectionEstablished { - peer_id, - connection_id, - endpoint, - failed_addresses, - other_established, - }) => Some(FromSwarm::ConnectionEstablished(ConnectionEstablished { - peer_id, - connection_id, - endpoint, - failed_addresses, - other_established, - })), - FromSwarm::AddressChange(AddressChange { - peer_id, - connection_id, - old, - new, - }) => Some(FromSwarm::AddressChange(AddressChange { - peer_id, - connection_id, - old, - new, - })), - FromSwarm::DialFailure(DialFailure { - peer_id, - error, - connection_id, - }) => Some(FromSwarm::DialFailure(DialFailure { - peer_id, - error, - connection_id, - })), - FromSwarm::ListenFailure(ListenFailure { - local_addr, - send_back_addr, - connection_id, - error, - }) => Some(FromSwarm::ListenFailure(ListenFailure { - local_addr, - send_back_addr, - connection_id, - error, - })), - FromSwarm::NewListener(NewListener { listener_id }) => { - Some(FromSwarm::NewListener(NewListener { listener_id })) - } - FromSwarm::NewListenAddr(NewListenAddr { listener_id, addr }) 
=> { - Some(FromSwarm::NewListenAddr(NewListenAddr { - listener_id, - addr, - })) - } - FromSwarm::ExpiredListenAddr(ExpiredListenAddr { listener_id, addr }) => { - Some(FromSwarm::ExpiredListenAddr(ExpiredListenAddr { - listener_id, - addr, - })) - } - FromSwarm::ListenerError(ListenerError { listener_id, err }) => { - Some(FromSwarm::ListenerError(ListenerError { listener_id, err })) - } - FromSwarm::ListenerClosed(ListenerClosed { - listener_id, - reason, - }) => Some(FromSwarm::ListenerClosed(ListenerClosed { - listener_id, - reason, - })), - FromSwarm::NewExternalAddr(NewExternalAddr { addr }) => { - Some(FromSwarm::NewExternalAddr(NewExternalAddr { addr })) - } - FromSwarm::ExpiredExternalAddr(ExpiredExternalAddr { addr }) => { - Some(FromSwarm::ExpiredExternalAddr(ExpiredExternalAddr { addr })) - } - } - } +/// [`FromSwarm`] variant that informs the behaviour that an external address was removed. +#[derive(Debug, Clone, Copy)] +pub struct ExternalAddrExpired<'a> { + pub addr: &'a Multiaddr, } diff --git a/swarm/src/behaviour/either.rs b/swarm/src/behaviour/either.rs index bf59949ccfe..25da83fa11f 100644 --- a/swarm/src/behaviour/either.rs +++ b/swarm/src/behaviour/either.rs @@ -18,7 +18,7 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use crate::behaviour::{self, NetworkBehaviour, PollParameters, ToSwarm}; +use crate::behaviour::{self, NetworkBehaviour, ToSwarm}; use crate::connection::ConnectionId; use crate::{ConnectionDenied, THandler, THandlerInEvent, THandlerOutEvent}; use either::Either; @@ -33,7 +33,7 @@ where R: NetworkBehaviour, { type ConnectionHandler = Either, THandler>; - type OutEvent = Either; + type ToSwarm = Either; fn handle_pending_inbound_connection( &mut self, @@ -122,16 +122,10 @@ where Ok(handler) } - fn on_swarm_event(&mut self, event: behaviour::FromSwarm) { + fn on_swarm_event(&mut self, event: behaviour::FromSwarm) { match self { - Either::Left(b) => b.on_swarm_event(event.map_handler(|h| match h { - Either::Left(h) => h, - Either::Right(_) => unreachable!(), - })), - Either::Right(b) => b.on_swarm_event(event.map_handler(|h| match h { - Either::Right(h) => h, - Either::Left(_) => unreachable!(), - })), + Either::Left(b) => b.on_swarm_event(event), + Either::Right(b) => b.on_swarm_event(event), } } @@ -155,13 +149,12 @@ where fn poll( &mut self, cx: &mut Context<'_>, - params: &mut impl PollParameters, - ) -> Poll>> { + ) -> Poll>> { let event = match self { - Either::Left(behaviour) => futures::ready!(behaviour.poll(cx, params)) + Either::Left(behaviour) => futures::ready!(behaviour.poll(cx)) .map_out(Either::Left) .map_in(Either::Left), - Either::Right(behaviour) => futures::ready!(behaviour.poll(cx, params)) + Either::Right(behaviour) => futures::ready!(behaviour.poll(cx)) .map_out(Either::Right) .map_in(Either::Right), }; diff --git a/swarm/src/behaviour/external_addresses.rs b/swarm/src/behaviour/external_addresses.rs index 6f1d523ef37..579f46fe486 100644 --- a/swarm/src/behaviour/external_addresses.rs +++ b/swarm/src/behaviour/external_addresses.rs @@ -1,8 +1,5 @@ -use crate::behaviour::{ExpiredExternalAddr, FromSwarm, NewExternalAddr}; -#[allow(deprecated)] -use crate::IntoConnectionHandler; +use crate::behaviour::{ExternalAddrConfirmed, ExternalAddrExpired, 
FromSwarm}; use libp2p_core::Multiaddr; -use std::collections::HashSet; /// The maximum number of local external addresses. When reached any /// further externally reported addresses are ignored. The behaviour always @@ -10,19 +7,9 @@ use std::collections::HashSet; const MAX_LOCAL_EXTERNAL_ADDRS: usize = 20; /// Utility struct for tracking the external addresses of a [`Swarm`](crate::Swarm). -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Default)] pub struct ExternalAddresses { - addresses: HashSet, - limit: usize, -} - -impl Default for ExternalAddresses { - fn default() -> Self { - Self { - addresses: Default::default(), - limit: MAX_LOCAL_EXTERNAL_ADDRS, - } - } + addresses: Vec, } impl ExternalAddresses { @@ -31,68 +18,165 @@ impl ExternalAddresses { self.addresses.iter() } + pub fn as_slice(&self) -> &[Multiaddr] { + self.addresses.as_slice() + } + /// Feed a [`FromSwarm`] event to this struct. /// /// Returns whether the event changed our set of external addresses. - #[allow(deprecated)] - pub fn on_swarm_event(&mut self, event: &FromSwarm) -> bool - where - THandler: IntoConnectionHandler, - { + pub fn on_swarm_event(&mut self, event: &FromSwarm) -> bool { match event { - FromSwarm::NewExternalAddr(NewExternalAddr { addr, .. }) => { - if self.addresses.len() < self.limit { - return self.addresses.insert((*addr).clone()); + FromSwarm::ExternalAddrConfirmed(ExternalAddrConfirmed { addr }) => { + if let Some(pos) = self + .addresses + .iter() + .position(|candidate| candidate == *addr) + { + // Refresh the existing confirmed address. + self.addresses.remove(pos); + self.push_front(addr); + + tracing::debug!(address=%addr, "Refreshed external address"); + + return false; // No changes to our external addresses. 
+ } + + self.push_front(addr); + + if self.addresses.len() > MAX_LOCAL_EXTERNAL_ADDRS { + let expired = self.addresses.pop().expect("list to be not empty"); + + tracing::debug!( + external_address=%expired, + address_limit=%MAX_LOCAL_EXTERNAL_ADDRS, + "Removing previously confirmed external address because we reached the address limit" + ); } + + return true; } - FromSwarm::ExpiredExternalAddr(ExpiredExternalAddr { addr, .. }) => { - return self.addresses.remove(addr) + FromSwarm::ExternalAddrExpired(ExternalAddrExpired { + addr: expired_addr, .. + }) => { + let pos = match self + .addresses + .iter() + .position(|candidate| candidate == *expired_addr) + { + None => return false, + Some(p) => p, + }; + + self.addresses.remove(pos); + return true; } _ => {} } false } + + fn push_front(&mut self, addr: &Multiaddr) { + self.addresses.insert(0, addr.clone()); // We have at most `MAX_LOCAL_EXTERNAL_ADDRS` so this isn't very expensive. + } } #[cfg(test)] mod tests { use super::*; - use crate::dummy; use libp2p_core::multiaddr::Protocol; use once_cell::sync::Lazy; + use rand::Rng; #[test] fn new_external_addr_returns_correct_changed_value() { let mut addresses = ExternalAddresses::default(); - let changed = addresses.on_swarm_event(&new_external_addr()); + let changed = addresses.on_swarm_event(&new_external_addr1()); assert!(changed); - let changed = addresses.on_swarm_event(&new_external_addr()); + let changed = addresses.on_swarm_event(&new_external_addr1()); assert!(!changed) } #[test] fn expired_external_addr_returns_correct_changed_value() { let mut addresses = ExternalAddresses::default(); - addresses.on_swarm_event(&new_external_addr()); + addresses.on_swarm_event(&new_external_addr1()); - let changed = addresses.on_swarm_event(&expired_external_addr()); + let changed = addresses.on_swarm_event(&expired_external_addr1()); assert!(changed); - let changed = addresses.on_swarm_event(&expired_external_addr()); + let changed = 
addresses.on_swarm_event(&expired_external_addr1()); assert!(!changed) } - fn new_external_addr() -> FromSwarm<'static, dummy::ConnectionHandler> { - FromSwarm::NewExternalAddr(NewExternalAddr { addr: &MEMORY_ADDR }) + #[test] + fn more_recent_external_addresses_are_prioritized() { + let mut addresses = ExternalAddresses::default(); + + addresses.on_swarm_event(&new_external_addr1()); + addresses.on_swarm_event(&new_external_addr2()); + + assert_eq!( + addresses.as_slice(), + &[(*MEMORY_ADDR_2000).clone(), (*MEMORY_ADDR_1000).clone()] + ); + } + + #[test] + fn when_pushing_more_than_max_addresses_oldest_is_evicted() { + let mut addresses = ExternalAddresses::default(); + + while addresses.as_slice().len() < MAX_LOCAL_EXTERNAL_ADDRS { + let random_address = + Multiaddr::empty().with(Protocol::Memory(rand::thread_rng().gen_range(0..1000))); + addresses.on_swarm_event(&FromSwarm::ExternalAddrConfirmed(ExternalAddrConfirmed { + addr: &random_address, + })); + } + + addresses.on_swarm_event(&new_external_addr2()); + + assert_eq!(addresses.as_slice().len(), 20); + assert_eq!(addresses.as_slice()[0], (*MEMORY_ADDR_2000).clone()); + } + + #[test] + fn reporting_existing_external_address_moves_it_to_the_front() { + let mut addresses = ExternalAddresses::default(); + + addresses.on_swarm_event(&new_external_addr1()); + addresses.on_swarm_event(&new_external_addr2()); + addresses.on_swarm_event(&new_external_addr1()); + + assert_eq!( + addresses.as_slice(), + &[(*MEMORY_ADDR_1000).clone(), (*MEMORY_ADDR_2000).clone()] + ); + } + + fn new_external_addr1() -> FromSwarm<'static> { + FromSwarm::ExternalAddrConfirmed(ExternalAddrConfirmed { + addr: &MEMORY_ADDR_1000, + }) + } + + fn new_external_addr2() -> FromSwarm<'static> { + FromSwarm::ExternalAddrConfirmed(ExternalAddrConfirmed { + addr: &MEMORY_ADDR_2000, + }) } - fn expired_external_addr() -> FromSwarm<'static, dummy::ConnectionHandler> { - FromSwarm::ExpiredExternalAddr(ExpiredExternalAddr { addr: &MEMORY_ADDR }) + fn 
expired_external_addr1() -> FromSwarm<'static> { + FromSwarm::ExternalAddrExpired(ExternalAddrExpired { + addr: &MEMORY_ADDR_1000, + }) } - static MEMORY_ADDR: Lazy = + static MEMORY_ADDR_1000: Lazy = Lazy::new(|| Multiaddr::empty().with(Protocol::Memory(1000))); + static MEMORY_ADDR_2000: Lazy = + Lazy::new(|| Multiaddr::empty().with(Protocol::Memory(2000))); } diff --git a/swarm/src/behaviour/listen_addresses.rs b/swarm/src/behaviour/listen_addresses.rs index f2b61582850..6076f5e7923 100644 --- a/swarm/src/behaviour/listen_addresses.rs +++ b/swarm/src/behaviour/listen_addresses.rs @@ -1,6 +1,4 @@ use crate::behaviour::{ExpiredListenAddr, FromSwarm, NewListenAddr}; -#[allow(deprecated)] -use crate::IntoConnectionHandler; use libp2p_core::Multiaddr; use std::collections::HashSet; @@ -19,11 +17,7 @@ impl ListenAddresses { /// Feed a [`FromSwarm`] event to this struct. /// /// Returns whether the event changed our set of listen addresses. - #[allow(deprecated)] - pub fn on_swarm_event(&mut self, event: &FromSwarm) -> bool - where - THandler: IntoConnectionHandler, - { + pub fn on_swarm_event(&mut self, event: &FromSwarm) -> bool { match event { FromSwarm::NewListenAddr(NewListenAddr { addr, .. 
}) => { self.addresses.insert((*addr).clone()) @@ -39,8 +33,7 @@ impl ListenAddresses { #[cfg(test)] mod tests { use super::*; - use crate::dummy; - use libp2p_core::multiaddr::Protocol; + use libp2p_core::{multiaddr::Protocol, transport::ListenerId}; use once_cell::sync::Lazy; #[test] @@ -66,16 +59,16 @@ mod tests { assert!(!changed) } - fn new_listen_addr() -> FromSwarm<'static, dummy::ConnectionHandler> { + fn new_listen_addr() -> FromSwarm<'static> { FromSwarm::NewListenAddr(NewListenAddr { - listener_id: Default::default(), + listener_id: ListenerId::next(), addr: &MEMORY_ADDR, }) } - fn expired_listen_addr() -> FromSwarm<'static, dummy::ConnectionHandler> { + fn expired_listen_addr() -> FromSwarm<'static> { FromSwarm::ExpiredListenAddr(ExpiredListenAddr { - listener_id: Default::default(), + listener_id: ListenerId::next(), addr: &MEMORY_ADDR, }) } diff --git a/swarm/src/behaviour/toggle.rs b/swarm/src/behaviour/toggle.rs index bd4678a5e58..e81c5343701 100644 --- a/swarm/src/behaviour/toggle.rs +++ b/swarm/src/behaviour/toggle.rs @@ -21,14 +21,12 @@ use crate::behaviour::FromSwarm; use crate::connection::ConnectionId; use crate::handler::{ - AddressChange, ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, - ConnectionHandlerUpgrErr, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound, - KeepAlive, ListenUpgradeError, SubstreamProtocol, + AddressChange, ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, DialUpgradeError, + FullyNegotiatedInbound, FullyNegotiatedOutbound, ListenUpgradeError, SubstreamProtocol, }; use crate::upgrade::SendWrapper; use crate::{ - ConnectionDenied, NetworkBehaviour, PollParameters, THandler, THandlerInEvent, - THandlerOutEvent, ToSwarm, + ConnectionDenied, NetworkBehaviour, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }; use either::Either; use futures::future; @@ -71,7 +69,7 @@ where TBehaviour: NetworkBehaviour, { type ConnectionHandler = ToggleConnectionHandler>; - type OutEvent = 
TBehaviour::OutEvent; + type ToSwarm = TBehaviour::ToSwarm; fn handle_pending_inbound_connection( &mut self, @@ -159,11 +157,9 @@ where }) } - fn on_swarm_event(&mut self, event: FromSwarm) { + fn on_swarm_event(&mut self, event: FromSwarm) { if let Some(behaviour) = &mut self.inner { - if let Some(event) = event.maybe_map_handler(|h| h.inner) { - behaviour.on_swarm_event(event); - } + behaviour.on_swarm_event(event); } } @@ -181,10 +177,9 @@ where fn poll( &mut self, cx: &mut Context<'_>, - params: &mut impl PollParameters, - ) -> Poll>> { + ) -> Poll>> { if let Some(inner) = self.inner.as_mut() { - inner.poll(cx, params) + inner.poll(cx) } else { Poll::Pending } @@ -252,14 +247,8 @@ where }; let err = match err { - ConnectionHandlerUpgrErr::Timeout => ConnectionHandlerUpgrErr::Timeout, - ConnectionHandlerUpgrErr::Timer => ConnectionHandlerUpgrErr::Timer, - ConnectionHandlerUpgrErr::Upgrade(err) => { - ConnectionHandlerUpgrErr::Upgrade(err.map_err(|err| match err { - Either::Left(e) => e, - Either::Right(v) => void::unreachable(v), - })) - } + Either::Left(e) => e, + Either::Right(v) => void::unreachable(v), }; inner.on_connection_event(ConnectionEvent::ListenUpgradeError(ListenUpgradeError { @@ -273,9 +262,8 @@ impl ConnectionHandler for ToggleConnectionHandler where TInner: ConnectionHandler, { - type InEvent = TInner::InEvent; - type OutEvent = TInner::OutEvent; - type Error = TInner::Error; + type FromBehaviour = TInner::FromBehaviour; + type ToBehaviour = TInner::ToBehaviour; type InboundProtocol = Either, SendWrapper>; type OutboundProtocol = TInner::OutboundProtocol; type OutboundOpenInfo = TInner::OutboundOpenInfo; @@ -292,30 +280,25 @@ where } } - fn on_behaviour_event(&mut self, event: Self::InEvent) { + fn on_behaviour_event(&mut self, event: Self::FromBehaviour) { self.inner .as_mut() .expect("Can't receive events if disabled; QED") .on_behaviour_event(event) } - fn connection_keep_alive(&self) -> KeepAlive { + fn connection_keep_alive(&self) -> bool { 
self.inner .as_ref() .map(|h| h.connection_keep_alive()) - .unwrap_or(KeepAlive::No) + .unwrap_or(false) } fn poll( &mut self, cx: &mut Context<'_>, ) -> Poll< - ConnectionHandlerEvent< - Self::OutboundProtocol, - Self::OutboundOpenInfo, - Self::OutEvent, - Self::Error, - >, + ConnectionHandlerEvent, > { if let Some(inner) = self.inner.as_mut() { inner.poll(cx) @@ -368,36 +351,24 @@ where ConnectionEvent::ListenUpgradeError(listen_upgrade_error) => { self.on_listen_upgrade_error(listen_upgrade_error) } + ConnectionEvent::LocalProtocolsChange(change) => { + if let Some(inner) = self.inner.as_mut() { + inner.on_connection_event(ConnectionEvent::LocalProtocolsChange(change)); + } + } + ConnectionEvent::RemoteProtocolsChange(change) => { + if let Some(inner) = self.inner.as_mut() { + inner.on_connection_event(ConnectionEvent::RemoteProtocolsChange(change)); + } + } } } -} -#[cfg(test)] -mod tests { - use super::*; - use crate::dummy; - - /// A disabled [`ToggleConnectionHandler`] can receive listen upgrade errors in - /// the following two cases: - /// - /// 1. Protocol negotiation on an incoming stream failed with no protocol - /// being agreed on. - /// - /// 2. When combining [`ConnectionHandler`] implementations a single - /// [`ConnectionHandler`] might be notified of an inbound upgrade error - /// unrelated to its own upgrade logic. For example when nesting a - /// [`ToggleConnectionHandler`] in a - /// [`ConnectionHandlerSelect`](crate::connection_handler::ConnectionHandlerSelect) - /// the former might receive an inbound upgrade error even when disabled. - /// - /// [`ToggleConnectionHandler`] should ignore the error in both of these cases. 
- #[test] - fn ignore_listen_upgrade_error_when_disabled() { - let mut handler = ToggleConnectionHandler:: { inner: None }; - - handler.on_connection_event(ConnectionEvent::ListenUpgradeError(ListenUpgradeError { - info: Either::Right(()), - error: ConnectionHandlerUpgrErr::Timeout, - })); + fn poll_close(&mut self, cx: &mut Context<'_>) -> Poll> { + let Some(inner) = self.inner.as_mut() else { + return Poll::Ready(None); + }; + + inner.poll_close(cx) } } diff --git a/swarm/src/connection.rs b/swarm/src/connection.rs index e813ad0c66d..15c49bb7bd5 100644 --- a/swarm/src/connection.rs +++ b/swarm/src/connection.rs @@ -21,30 +21,39 @@ mod error; pub(crate) mod pool; +mod supported_protocols; pub use error::ConnectionError; pub(crate) use error::{ PendingConnectionError, PendingInboundConnectionError, PendingOutboundConnectionError, }; +pub use supported_protocols::SupportedProtocols; use crate::handler::{ AddressChange, ConnectionEvent, ConnectionHandler, DialUpgradeError, FullyNegotiatedInbound, - FullyNegotiatedOutbound, ListenUpgradeError, + FullyNegotiatedOutbound, ListenUpgradeError, ProtocolSupport, ProtocolsAdded, ProtocolsChange, + UpgradeInfoSend, }; -use crate::upgrade::{InboundUpgradeSend, OutboundUpgradeSend, SendWrapper}; -use crate::{ConnectionHandlerEvent, ConnectionHandlerUpgrErr, KeepAlive, SubstreamProtocol}; +use crate::stream::ActiveStreamCounter; +use crate::upgrade::{InboundUpgradeSend, OutboundUpgradeSend}; +use crate::{ + ConnectionHandlerEvent, Stream, StreamProtocol, StreamUpgradeError, SubstreamProtocol, +}; +use futures::future::BoxFuture; use futures::stream::FuturesUnordered; -use futures::FutureExt; use futures::StreamExt; +use futures::{stream, FutureExt}; use futures_timer::Delay; use instant::Instant; use libp2p_core::connection::ConnectedPoint; use libp2p_core::multiaddr::Multiaddr; use libp2p_core::muxing::{StreamMuxerBox, StreamMuxerEvent, StreamMuxerExt, SubstreamBox}; -use libp2p_core::upgrade::{InboundUpgradeApply, 
OutboundUpgradeApply}; +use libp2p_core::upgrade; +use libp2p_core::upgrade::{NegotiationError, ProtocolError}; use libp2p_core::Endpoint; -use libp2p_core::{upgrade, UpgradeError}; use libp2p_identity::PeerId; +use std::collections::HashSet; +use std::fmt::{Display, Formatter}; use std::future::Future; use std::sync::atomic::{AtomicUsize, Ordering}; use std::task::Waker; @@ -58,15 +67,6 @@ static NEXT_CONNECTION_ID: AtomicUsize = AtomicUsize::new(1); pub struct ConnectionId(usize); impl ConnectionId { - /// A "dummy" [`ConnectionId`]. - /// - /// Really, you should not use this, not even for testing but it is here if you need it. - #[deprecated( - since = "0.42.0", - note = "Don't use this, it will be removed at a later stage again." - )] - pub const DUMMY: ConnectionId = ConnectionId(0); - /// Creates an _unchecked_ [`ConnectionId`]. /// /// [`Swarm`](crate::Swarm) enforces that [`ConnectionId`]s are unique and not reused. @@ -83,6 +83,12 @@ impl ConnectionId { } } +impl Display for ConnectionId { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + write!(f, "{}", self.0) + } +} + /// Information about a successfully established connection. #[derive(Debug, Clone, PartialEq, Eq)] pub(crate) struct Connected { @@ -112,16 +118,18 @@ where handler: THandler, /// Futures that upgrade incoming substreams. negotiating_in: FuturesUnordered< - SubstreamUpgrade< + StreamUpgrade< THandler::InboundOpenInfo, - InboundUpgradeApply>, + ::Output, + ::Error, >, >, /// Futures that upgrade outgoing substreams. negotiating_out: FuturesUnordered< - SubstreamUpgrade< + StreamUpgrade< THandler::OutboundOpenInfo, - OutboundUpgradeApply>, + ::Output, + ::Error, >, >, /// The currently planned connection & handler shutdown. @@ -135,8 +143,7 @@ where /// Note: This only enforces a limit on the number of concurrently /// negotiating inbound streams. The total number of inbound streams on a /// connection is the sum of negotiating and negotiated streams. 
A limit on - /// the total number of streams can be enforced at the - /// [`StreamMuxerBox`](libp2p_core::muxing::StreamMuxerBox) level. + /// the total number of streams can be enforced at the [`StreamMuxerBox`] level. max_negotiating_inbound_streams: usize, /// Contains all upgrades that are waiting for a new outbound substream. /// @@ -145,6 +152,11 @@ where requested_substreams: FuturesUnordered< SubstreamRequested, >, + + local_supported_protocols: HashSet, + remote_supported_protocols: HashSet, + idle_timeout: Duration, + stream_counter: ActiveStreamCounter, } impl fmt::Debug for Connection @@ -169,10 +181,17 @@ where /// and connection handler. pub(crate) fn new( muxer: StreamMuxerBox, - handler: THandler, + mut handler: THandler, substream_upgrade_protocol_override: Option, max_negotiating_inbound_streams: usize, + idle_timeout: Duration, ) -> Self { + let initial_protocols = gather_supported_protocols(&handler); + if !initial_protocols.is_empty() { + handler.on_connection_event(ConnectionEvent::LocalProtocolsChange( + ProtocolsChange::Added(ProtocolsAdded::from_set(&initial_protocols)), + )); + } Connection { muxing: muxer, handler, @@ -182,26 +201,44 @@ where substream_upgrade_protocol_override, max_negotiating_inbound_streams, requested_substreams: Default::default(), + local_supported_protocols: initial_protocols, + remote_supported_protocols: Default::default(), + idle_timeout, + stream_counter: ActiveStreamCounter::default(), } } /// Notifies the connection handler of an event. - pub(crate) fn on_behaviour_event(&mut self, event: THandler::InEvent) { + pub(crate) fn on_behaviour_event(&mut self, event: THandler::FromBehaviour) { self.handler.on_behaviour_event(event); } - /// Begins an orderly shutdown of the connection, returning the connection - /// handler and a `Future` that resolves when connection shutdown is complete. 
- pub(crate) fn close(self) -> (THandler, impl Future>) { - (self.handler, self.muxing.close()) + /// Begins an orderly shutdown of the connection, returning a stream of final events and a `Future` that resolves when connection shutdown is complete. + pub(crate) fn close( + self, + ) -> ( + impl futures::Stream, + impl Future>, + ) { + let Connection { + mut handler, + muxing, + .. + } = self; + + ( + stream::poll_fn(move |cx| handler.poll_close(cx)), + muxing.close(), + ) } /// Polls the handler and the substream, forwarding events from the former to the latter and /// vice versa. + #[tracing::instrument(level = "debug", name = "Connection::poll", skip(self, cx))] pub(crate) fn poll( self: Pin<&mut Self>, cx: &mut Context<'_>, - ) -> Poll, ConnectionError>> { + ) -> Poll, ConnectionError>> { let Self { requested_substreams, muxing, @@ -211,6 +248,11 @@ where shutdown, max_negotiating_inbound_streams, substream_upgrade_protocol_override, + local_supported_protocols: supported_protocols, + remote_supported_protocols, + idle_timeout, + stream_counter, + .. } = self.get_mut(); loop { @@ -220,7 +262,7 @@ where handler.on_connection_event(ConnectionEvent::DialUpgradeError( DialUpgradeError { info, - error: ConnectionHandlerUpgrErr::Timeout, + error: StreamUpgradeError::Timeout, }, )); continue; @@ -238,11 +280,33 @@ where requested_substreams.push(SubstreamRequested::new(user_data, timeout, upgrade)); continue; // Poll handler until exhausted. 
} - Poll::Ready(ConnectionHandlerEvent::Custom(event)) => { + Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(event)) => { return Poll::Ready(Ok(Event::Handler(event))); } - Poll::Ready(ConnectionHandlerEvent::Close(err)) => { - return Poll::Ready(Err(ConnectionError::Handler(err))); + Poll::Ready(ConnectionHandlerEvent::ReportRemoteProtocols( + ProtocolSupport::Added(protocols), + )) => { + if let Some(added) = + ProtocolsChange::add(remote_supported_protocols, &protocols) + { + handler.on_connection_event(ConnectionEvent::RemoteProtocolsChange(added)); + remote_supported_protocols.extend(protocols); + } + + continue; + } + Poll::Ready(ConnectionHandlerEvent::ReportRemoteProtocols( + ProtocolSupport::Removed(protocols), + )) => { + if let Some(removed) = + ProtocolsChange::remove(remote_supported_protocols, &protocols) + { + handler + .on_connection_event(ConnectionEvent::RemoteProtocolsChange(removed)); + remote_supported_protocols.retain(|p| !protocols.contains(p)); + } + + continue; } } @@ -273,41 +337,39 @@ where )); continue; } - Poll::Ready(Some((info, Err(error)))) => { + Poll::Ready(Some((info, Err(StreamUpgradeError::Apply(error))))) => { handler.on_connection_event(ConnectionEvent::ListenUpgradeError( ListenUpgradeError { info, error }, )); continue; } - } - - // Ask the handler whether it wants the connection (and the handler itself) - // to be kept alive, which determines the planned shutdown, if any. 
- let keep_alive = handler.connection_keep_alive(); - match (&mut *shutdown, keep_alive) { - (Shutdown::Later(timer, deadline), KeepAlive::Until(t)) => { - if *deadline != t { - *deadline = t; - if let Some(dur) = deadline.checked_duration_since(Instant::now()) { - timer.reset(dur) - } - } + Poll::Ready(Some((_, Err(StreamUpgradeError::Io(e))))) => { + tracing::debug!("failed to upgrade inbound stream: {e}"); + continue; } - (_, KeepAlive::Until(t)) => { - if let Some(dur) = t.checked_duration_since(Instant::now()) { - *shutdown = Shutdown::Later(Delay::new(dur), t) - } + Poll::Ready(Some((_, Err(StreamUpgradeError::NegotiationFailed)))) => { + tracing::debug!("no protocol could be agreed upon for inbound stream"); + continue; } - (_, KeepAlive::No) => *shutdown = Shutdown::Asap, - (_, KeepAlive::Yes) => *shutdown = Shutdown::None, - }; + Poll::Ready(Some((_, Err(StreamUpgradeError::Timeout)))) => { + tracing::debug!("inbound stream upgrade timed out"); + continue; + } + } // Check if the connection (and handler) should be shut down. - // As long as we're still negotiating substreams, shutdown is always postponed. + // As long as we're still negotiating substreams or have any active streams shutdown is always postponed. if negotiating_in.is_empty() && negotiating_out.is_empty() && requested_substreams.is_empty() + && stream_counter.has_no_active_streams() { + if let Some(new_timeout) = + compute_new_shutdown(handler.connection_keep_alive(), shutdown, *idle_timeout) + { + *shutdown = new_timeout; + } + match shutdown { Shutdown::None => {} Shutdown::Asap => return Poll::Ready(Err(ConnectionError::KeepAliveTimeout)), @@ -318,6 +380,8 @@ where Poll::Pending => {} }, } + } else { + *shutdown = Shutdown::None; } match muxing.poll_unpin(cx)? 
{ @@ -336,12 +400,13 @@ where Poll::Ready(substream) => { let (user_data, timeout, upgrade) = requested_substream.extract(); - negotiating_out.push(SubstreamUpgrade::new_outbound( + negotiating_out.push(StreamUpgrade::new_outbound( substream, user_data, timeout, upgrade, *substream_upgrade_protocol_override, + stream_counter.clone(), )); continue; // Go back to the top, handler can potentially make progress again. @@ -355,16 +420,82 @@ where Poll::Ready(substream) => { let protocol = handler.listen_protocol(); - negotiating_in.push(SubstreamUpgrade::new_inbound(substream, protocol)); + negotiating_in.push(StreamUpgrade::new_inbound( + substream, + protocol, + stream_counter.clone(), + )); continue; // Go back to the top, handler can potentially make progress again. } } } + let new_protocols = gather_supported_protocols(handler); + let changes = ProtocolsChange::from_full_sets(supported_protocols, &new_protocols); + + if !changes.is_empty() { + for change in changes { + handler.on_connection_event(ConnectionEvent::LocalProtocolsChange(change)); + } + + *supported_protocols = new_protocols; + + continue; // Go back to the top, handler can potentially make progress again. + } + return Poll::Pending; // Nothing can make progress, return `Pending`. } } + + #[cfg(test)] + fn poll_noop_waker(&mut self) -> Poll, ConnectionError>> { + Pin::new(self).poll(&mut Context::from_waker(futures::task::noop_waker_ref())) + } +} + +fn gather_supported_protocols(handler: &impl ConnectionHandler) -> HashSet { + handler + .listen_protocol() + .upgrade() + .protocol_info() + .filter_map(|i| StreamProtocol::try_from_owned(i.as_ref().to_owned()).ok()) + .collect() +} + +fn compute_new_shutdown( + handler_keep_alive: bool, + current_shutdown: &Shutdown, + idle_timeout: Duration, +) -> Option { + match (current_shutdown, handler_keep_alive) { + (_, false) if idle_timeout == Duration::ZERO => Some(Shutdown::Asap), + (Shutdown::Later(_, _), false) => None, // Do nothing, i.e. 
let the shutdown timer continue to tick. + (_, false) => { + let now = Instant::now(); + let safe_keep_alive = checked_add_fraction(now, idle_timeout); + + Some(Shutdown::Later( + Delay::new(safe_keep_alive), + now + safe_keep_alive, + )) + } + (_, true) => Some(Shutdown::None), + } +} + +/// Repeatedly halves and adds the [`Duration`] to the [`Instant`] until [`Instant::checked_add`] succeeds. +/// +/// [`Instant`] depends on the underlying platform and has a limit of which points in time it can represent. +/// The [`Duration`] computed by the this function may not be the longest possible that we can add to `now` but it will work. +fn checked_add_fraction(start: Instant, mut duration: Duration) -> Duration { + while start.checked_add(duration).is_none() { + tracing::debug!(start=?start, duration=?duration, "start + duration cannot be presented, halving duration"); + + duration /= 2; + } + + duration } /// Borrowed information about an incoming connection currently being negotiated. @@ -386,52 +517,27 @@ impl<'a> IncomingInfo<'a> { } } -/// Information about a connection limit. -#[deprecated(note = "Use `libp2p::connection_limits` instead.", since = "0.42.1")] -#[derive(Debug, Clone, Copy)] -pub struct ConnectionLimit { - /// The maximum number of connections. - pub limit: u32, - /// The current number of connections. - pub current: u32, -} - -#[allow(deprecated)] -impl fmt::Display for ConnectionLimit { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!( - f, - "connection limit exceeded ({}/{})", - self.current, self.limit - ) - } -} - -/// A `ConnectionLimit` can represent an error if it has been exceeded. 
-#[allow(deprecated)] -impl std::error::Error for ConnectionLimit {} - -struct SubstreamUpgrade { +struct StreamUpgrade { user_data: Option, timeout: Delay, - upgrade: Upgrade, + upgrade: BoxFuture<'static, Result>>, } -impl - SubstreamUpgrade>> -where - Upgrade: Send + OutboundUpgradeSend, -{ - fn new_outbound( +impl StreamUpgrade { + fn new_outbound( substream: SubstreamBox, user_data: UserData, timeout: Delay, upgrade: Upgrade, version_override: Option, - ) -> Self { + counter: ActiveStreamCounter, + ) -> Self + where + Upgrade: OutboundUpgradeSend, + { let effective_version = match version_override { Some(version_override) if version_override != upgrade::Version::default() => { - log::debug!( + tracing::debug!( "Substream upgrade protocol override: {:?} -> {:?}", upgrade::Version::default(), version_override @@ -441,45 +547,78 @@ where } _ => upgrade::Version::default(), }; + let protocols = upgrade.protocol_info(); Self { user_data: Some(user_data), timeout, - upgrade: upgrade::apply_outbound(substream, SendWrapper(upgrade), effective_version), + upgrade: Box::pin(async move { + let (info, stream) = multistream_select::dialer_select_proto( + substream, + protocols, + effective_version, + ) + .await + .map_err(to_stream_upgrade_error)?; + + let output = upgrade + .upgrade_outbound(Stream::new(stream, counter), info) + .await + .map_err(StreamUpgradeError::Apply)?; + + Ok(output) + }), } } } -impl - SubstreamUpgrade>> -where - Upgrade: Send + InboundUpgradeSend, -{ - fn new_inbound( +impl StreamUpgrade { + fn new_inbound( substream: SubstreamBox, protocol: SubstreamProtocol, - ) -> Self { + counter: ActiveStreamCounter, + ) -> Self + where + Upgrade: InboundUpgradeSend, + { let timeout = *protocol.timeout(); let (upgrade, open_info) = protocol.into_upgrade(); + let protocols = upgrade.protocol_info(); Self { user_data: Some(open_info), timeout: Delay::new(timeout), - upgrade: upgrade::apply_inbound(substream, SendWrapper(upgrade)), + upgrade: Box::pin(async move 
{ + let (info, stream) = + multistream_select::listener_select_proto(substream, protocols) + .await + .map_err(to_stream_upgrade_error)?; + + let output = upgrade + .upgrade_inbound(Stream::new(stream, counter), info) + .await + .map_err(StreamUpgradeError::Apply)?; + + Ok(output) + }), } } } -impl Unpin for SubstreamUpgrade {} +fn to_stream_upgrade_error(e: NegotiationError) -> StreamUpgradeError { + match e { + NegotiationError::Failed => StreamUpgradeError::NegotiationFailed, + NegotiationError::ProtocolError(ProtocolError::IoError(e)) => StreamUpgradeError::Io(e), + NegotiationError::ProtocolError(other) => { + StreamUpgradeError::Io(io::Error::new(io::ErrorKind::Other, other)) + } + } +} -impl Future for SubstreamUpgrade -where - Upgrade: Future>> + Unpin, -{ - type Output = ( - UserData, - Result>, - ); +impl Unpin for StreamUpgrade {} + +impl Future for StreamUpgrade { + type Output = (UserData, Result>); fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll { match self.timeout.poll_unpin(cx) { @@ -488,28 +627,20 @@ where self.user_data .take() .expect("Future not to be polled again once ready."), - Err(ConnectionHandlerUpgrErr::Timeout), + Err(StreamUpgradeError::Timeout), )) } Poll::Pending => {} } - match self.upgrade.poll_unpin(cx) { - Poll::Ready(Ok(upgrade)) => Poll::Ready(( - self.user_data - .take() - .expect("Future not to be polled again once ready."), - Ok(upgrade), - )), - Poll::Ready(Err(err)) => Poll::Ready(( - self.user_data - .take() - .expect("Future not to be polled again once ready."), - Err(ConnectionHandlerUpgrErr::Upgrade(err)), - )), - Poll::Pending => Poll::Pending, - } + let result = futures::ready!(self.upgrade.poll_unpin(cx)); + let user_data = self + .user_data + .take() + .expect("Future not to be polled again once ready."); + + Poll::Ready((user_data, result)) } } @@ -609,33 +740,39 @@ enum Shutdown { #[cfg(test)] mod tests { use super::*; - use crate::keep_alive; + use crate::dummy; + use futures::future; use 
futures::AsyncRead; use futures::AsyncWrite; - use libp2p_core::upgrade::DeniedUpgrade; + use libp2p_core::upgrade::{DeniedUpgrade, InboundUpgrade, OutboundUpgrade, UpgradeInfo}; use libp2p_core::StreamMuxer; use quickcheck::*; use std::sync::{Arc, Weak}; + use std::time::Instant; + use tracing_subscriber::EnvFilter; use void::Void; #[test] fn max_negotiating_inbound_streams() { + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); + fn prop(max_negotiating_inbound_streams: u8) { let max_negotiating_inbound_streams: usize = max_negotiating_inbound_streams.into(); let alive_substream_counter = Arc::new(()); - let mut connection = Connection::new( StreamMuxerBox::new(DummyStreamMuxer { counter: alive_substream_counter.clone(), }), - keep_alive::ConnectionHandler, + MockConnectionHandler::new(Duration::from_secs(10)), None, max_negotiating_inbound_streams, + Duration::ZERO, ); - let result = Pin::new(&mut connection) - .poll(&mut Context::from_waker(futures::task::noop_waker_ref())); + let result = connection.poll_noop_waker(); assert!(result.is_pending()); assert_eq!( @@ -656,23 +793,201 @@ mod tests { MockConnectionHandler::new(upgrade_timeout), None, 2, + Duration::ZERO, ); connection.handler.open_new_outbound(); - let _ = Pin::new(&mut connection) - .poll(&mut Context::from_waker(futures::task::noop_waker_ref())); + let _ = connection.poll_noop_waker(); std::thread::sleep(upgrade_timeout + Duration::from_secs(1)); - let _ = Pin::new(&mut connection) - .poll(&mut Context::from_waker(futures::task::noop_waker_ref())); + let _ = connection.poll_noop_waker(); assert!(matches!( connection.handler.error.unwrap(), - ConnectionHandlerUpgrErr::Timeout + StreamUpgradeError::Timeout )) } + #[test] + fn propagates_changes_to_supported_inbound_protocols() { + let mut connection = Connection::new( + StreamMuxerBox::new(PendingStreamMuxer), + ConfigurableProtocolConnectionHandler::default(), + None, + 0, + Duration::ZERO, + ); + + 
// First, start listening on a single protocol. + connection.handler.listen_on(&["/foo"]); + let _ = connection.poll_noop_waker(); + + assert_eq!(connection.handler.local_added, vec![vec!["/foo"]]); + assert!(connection.handler.local_removed.is_empty()); + + // Second, listen on two protocols. + connection.handler.listen_on(&["/foo", "/bar"]); + let _ = connection.poll_noop_waker(); + + assert_eq!( + connection.handler.local_added, + vec![vec!["/foo"], vec!["/bar"]], + "expect to only receive an event for the newly added protocols" + ); + assert!(connection.handler.local_removed.is_empty()); + + // Third, stop listening on the first protocol. + connection.handler.listen_on(&["/bar"]); + let _ = connection.poll_noop_waker(); + + assert_eq!( + connection.handler.local_added, + vec![vec!["/foo"], vec!["/bar"]] + ); + assert_eq!(connection.handler.local_removed, vec![vec!["/foo"]]); + } + + #[test] + fn only_propagtes_actual_changes_to_remote_protocols_to_handler() { + let mut connection = Connection::new( + StreamMuxerBox::new(PendingStreamMuxer), + ConfigurableProtocolConnectionHandler::default(), + None, + 0, + Duration::ZERO, + ); + + // First, remote supports a single protocol. + connection.handler.remote_adds_support_for(&["/foo"]); + let _ = connection.poll_noop_waker(); + + assert_eq!(connection.handler.remote_added, vec![vec!["/foo"]]); + assert!(connection.handler.remote_removed.is_empty()); + + // Second, it adds a protocol but also still includes the first one. + connection + .handler + .remote_adds_support_for(&["/foo", "/bar"]); + let _ = connection.poll_noop_waker(); + + assert_eq!( + connection.handler.remote_added, + vec![vec!["/foo"], vec!["/bar"]], + "expect to only receive an event for the newly added protocol" + ); + assert!(connection.handler.remote_removed.is_empty()); + + // Third, stop listening on a protocol it never advertised (we can't control what handlers do so this needs to be handled gracefully). 
+ connection.handler.remote_removes_support_for(&["/baz"]); + let _ = connection.poll_noop_waker(); + + assert_eq!( + connection.handler.remote_added, + vec![vec!["/foo"], vec!["/bar"]] + ); + assert!(&connection.handler.remote_removed.is_empty()); + + // Fourth, stop listening on a protocol that was previously supported + connection.handler.remote_removes_support_for(&["/bar"]); + let _ = connection.poll_noop_waker(); + + assert_eq!( + connection.handler.remote_added, + vec![vec!["/foo"], vec!["/bar"]] + ); + assert_eq!(connection.handler.remote_removed, vec![vec!["/bar"]]); + } + + #[tokio::test] + async fn idle_timeout_with_keep_alive_no() { + let idle_timeout = Duration::from_millis(100); + + let mut connection = Connection::new( + StreamMuxerBox::new(PendingStreamMuxer), + dummy::ConnectionHandler, + None, + 0, + idle_timeout, + ); + + assert!(connection.poll_noop_waker().is_pending()); + + tokio::time::sleep(idle_timeout).await; + + assert!(matches!( + connection.poll_noop_waker(), + Poll::Ready(Err(ConnectionError::KeepAliveTimeout)) + )); + } + + #[test] + fn checked_add_fraction_can_add_u64_max() { + let _ = tracing_subscriber::fmt() + .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) + .try_init(); + let start = Instant::now(); + + let duration = checked_add_fraction(start, Duration::from_secs(u64::MAX)); + + assert!(start.checked_add(duration).is_some()) + } + + #[test] + fn compute_new_shutdown_does_not_panic() { + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); + + #[derive(Debug)] + struct ArbitraryShutdown(Shutdown); + + impl Clone for ArbitraryShutdown { + fn clone(&self) -> Self { + let shutdown = match self.0 { + Shutdown::None => Shutdown::None, + Shutdown::Asap => Shutdown::Asap, + Shutdown::Later(_, instant) => Shutdown::Later( + // compute_new_shutdown does not touch the delay. Delay does not + // implement Clone. Thus use a placeholder delay. 
+ Delay::new(Duration::from_secs(1)), + instant, + ), + }; + + ArbitraryShutdown(shutdown) + } + } + + impl Arbitrary for ArbitraryShutdown { + fn arbitrary(g: &mut Gen) -> Self { + let shutdown = match g.gen_range(1u8..4) { + 1 => Shutdown::None, + 2 => Shutdown::Asap, + 3 => Shutdown::Later( + Delay::new(Duration::from_secs(u32::arbitrary(g) as u64)), + Instant::now() + .checked_add(Duration::arbitrary(g)) + .unwrap_or(Instant::now()), + ), + _ => unreachable!(), + }; + + Self(shutdown) + } + } + + fn prop( + handler_keep_alive: bool, + current_shutdown: ArbitraryShutdown, + idle_timeout: Duration, + ) { + compute_new_shutdown(handler_keep_alive, ¤t_shutdown.0, idle_timeout); + } + + QuickCheck::new().quickcheck(prop as fn(_, _, _)); + } + struct DummyStreamMuxer { counter: Arc<()>, } @@ -772,7 +1087,7 @@ mod tests { struct MockConnectionHandler { outbound_requested: bool, - error: Option>, + error: Option>, upgrade_timeout: Duration, } @@ -790,10 +1105,43 @@ mod tests { } } + #[derive(Default)] + struct ConfigurableProtocolConnectionHandler { + events: Vec>, + active_protocols: HashSet, + local_added: Vec>, + local_removed: Vec>, + remote_added: Vec>, + remote_removed: Vec>, + } + + impl ConfigurableProtocolConnectionHandler { + fn listen_on(&mut self, protocols: &[&'static str]) { + self.active_protocols = protocols.iter().copied().map(StreamProtocol::new).collect(); + } + + fn remote_adds_support_for(&mut self, protocols: &[&'static str]) { + self.events + .push(ConnectionHandlerEvent::ReportRemoteProtocols( + ProtocolSupport::Added( + protocols.iter().copied().map(StreamProtocol::new).collect(), + ), + )); + } + + fn remote_removes_support_for(&mut self, protocols: &[&'static str]) { + self.events + .push(ConnectionHandlerEvent::ReportRemoteProtocols( + ProtocolSupport::Removed( + protocols.iter().copied().map(StreamProtocol::new).collect(), + ), + )); + } + } + impl ConnectionHandler for MockConnectionHandler { - type InEvent = Void; - type OutEvent = Void; 
- type Error = Void; + type FromBehaviour = Void; + type ToBehaviour = Void; type InboundProtocol = DeniedUpgrade; type OutboundProtocol = DeniedUpgrade; type InboundOpenInfo = (); @@ -826,16 +1174,19 @@ mod tests { ConnectionEvent::DialUpgradeError(DialUpgradeError { error, .. }) => { self.error = Some(error) } - ConnectionEvent::AddressChange(_) | ConnectionEvent::ListenUpgradeError(_) => {} + ConnectionEvent::AddressChange(_) + | ConnectionEvent::ListenUpgradeError(_) + | ConnectionEvent::LocalProtocolsChange(_) + | ConnectionEvent::RemoteProtocolsChange(_) => {} } } - fn on_behaviour_event(&mut self, event: Self::InEvent) { + fn on_behaviour_event(&mut self, event: Self::FromBehaviour) { void::unreachable(event) } - fn connection_keep_alive(&self) -> KeepAlive { - KeepAlive::Yes + fn connection_keep_alive(&self) -> bool { + true } fn poll( @@ -845,8 +1196,7 @@ mod tests { ConnectionHandlerEvent< Self::OutboundProtocol, Self::OutboundOpenInfo, - Self::OutEvent, - Self::Error, + Self::ToBehaviour, >, > { if self.outbound_requested { @@ -860,6 +1210,110 @@ mod tests { Poll::Pending } } + + impl ConnectionHandler for ConfigurableProtocolConnectionHandler { + type FromBehaviour = Void; + type ToBehaviour = Void; + type InboundProtocol = ManyProtocolsUpgrade; + type OutboundProtocol = DeniedUpgrade; + type InboundOpenInfo = (); + type OutboundOpenInfo = (); + + fn listen_protocol( + &self, + ) -> SubstreamProtocol { + SubstreamProtocol::new( + ManyProtocolsUpgrade { + protocols: Vec::from_iter(self.active_protocols.clone()), + }, + (), + ) + } + + fn on_connection_event( + &mut self, + event: ConnectionEvent< + Self::InboundProtocol, + Self::OutboundProtocol, + Self::InboundOpenInfo, + Self::OutboundOpenInfo, + >, + ) { + match event { + ConnectionEvent::LocalProtocolsChange(ProtocolsChange::Added(added)) => { + self.local_added.push(added.cloned().collect()) + } + ConnectionEvent::LocalProtocolsChange(ProtocolsChange::Removed(removed)) => { + 
self.local_removed.push(removed.cloned().collect()) + } + ConnectionEvent::RemoteProtocolsChange(ProtocolsChange::Added(added)) => { + self.remote_added.push(added.cloned().collect()) + } + ConnectionEvent::RemoteProtocolsChange(ProtocolsChange::Removed(removed)) => { + self.remote_removed.push(removed.cloned().collect()) + } + _ => {} + } + } + + fn on_behaviour_event(&mut self, event: Self::FromBehaviour) { + void::unreachable(event) + } + + fn connection_keep_alive(&self) -> bool { + true + } + + fn poll( + &mut self, + _: &mut Context<'_>, + ) -> Poll< + ConnectionHandlerEvent< + Self::OutboundProtocol, + Self::OutboundOpenInfo, + Self::ToBehaviour, + >, + > { + if let Some(event) = self.events.pop() { + return Poll::Ready(event); + } + + Poll::Pending + } + } + + struct ManyProtocolsUpgrade { + protocols: Vec, + } + + impl UpgradeInfo for ManyProtocolsUpgrade { + type Info = StreamProtocol; + type InfoIter = std::vec::IntoIter; + + fn protocol_info(&self) -> Self::InfoIter { + self.protocols.clone().into_iter() + } + } + + impl InboundUpgrade for ManyProtocolsUpgrade { + type Output = C; + type Error = Void; + type Future = future::Ready>; + + fn upgrade_inbound(self, stream: C, _: Self::Info) -> Self::Future { + future::ready(Ok(stream)) + } + } + + impl OutboundUpgrade for ManyProtocolsUpgrade { + type Output = C; + type Error = Void; + type Future = future::Ready>; + + fn upgrade_outbound(self, stream: C, _: Self::Info) -> Self::Future { + future::ready(Ok(stream)) + } + } } /// The endpoint roles associated with a pending peer-to-peer connection. diff --git a/swarm/src/connection/error.rs b/swarm/src/connection/error.rs index 9e0c58a4e7d..33aa81c19a9 100644 --- a/swarm/src/connection/error.rs +++ b/swarm/src/connection/error.rs @@ -18,8 +18,6 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-#[allow(deprecated)] -use crate::connection::ConnectionLimit; use crate::transport::TransportError; use crate::Multiaddr; use crate::{ConnectedPoint, PeerId}; @@ -27,47 +25,36 @@ use std::{fmt, io}; /// Errors that can occur in the context of an established `Connection`. #[derive(Debug)] -pub enum ConnectionError { +pub enum ConnectionError { /// An I/O error occurred on the connection. // TODO: Eventually this should also be a custom error? IO(io::Error), /// The connection keep-alive timeout expired. KeepAliveTimeout, - - /// The connection handler produced an error. - Handler(THandlerErr), } -impl fmt::Display for ConnectionError -where - THandlerErr: fmt::Display, -{ +impl fmt::Display for ConnectionError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { ConnectionError::IO(err) => write!(f, "Connection error: I/O error: {err}"), ConnectionError::KeepAliveTimeout => { write!(f, "Connection closed due to expired keep-alive timeout.") } - ConnectionError::Handler(err) => write!(f, "Connection error: Handler error: {err}"), } } } -impl std::error::Error for ConnectionError -where - THandlerErr: std::error::Error + 'static, -{ +impl std::error::Error for ConnectionError { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match self { ConnectionError::IO(err) => Some(err), ConnectionError::KeepAliveTimeout => None, - ConnectionError::Handler(err) => Some(err), } } } -impl From for ConnectionError { +impl From for ConnectionError { fn from(error: io::Error) -> Self { ConnectionError::IO(error) } @@ -90,15 +77,6 @@ pub enum PendingConnectionError { /// An error occurred while negotiating the transport protocol(s) on a connection. Transport(TTransErr), - /// The connection was dropped because the connection limit - /// for a peer has been reached. 
- #[deprecated( - note = "Use `libp2p::connection_limits` instead and handle `{Dial,Listen}Error::Denied::cause`.", - since = "0.42.1" - )] - #[allow(deprecated)] - ConnectionLimit(ConnectionLimit), - /// Pending connection attempt has been aborted. Aborted, @@ -117,10 +95,6 @@ impl PendingConnectionError { pub fn map(self, f: impl FnOnce(T) -> U) -> PendingConnectionError { match self { PendingConnectionError::Transport(t) => PendingConnectionError::Transport(f(t)), - #[allow(deprecated)] - PendingConnectionError::ConnectionLimit(l) => { - PendingConnectionError::ConnectionLimit(l) - } PendingConnectionError::Aborted => PendingConnectionError::Aborted, PendingConnectionError::WrongPeerId { obtained, endpoint } => { PendingConnectionError::WrongPeerId { obtained, endpoint } @@ -145,10 +119,6 @@ where "Pending connection: Transport error on connection: {err}" ) } - #[allow(deprecated)] - PendingConnectionError::ConnectionLimit(l) => { - write!(f, "Connection error: Connection limit: {l}.") - } PendingConnectionError::WrongPeerId { obtained, endpoint } => { write!( f, @@ -172,8 +142,6 @@ where PendingConnectionError::WrongPeerId { .. } => None, PendingConnectionError::LocalPeerId { .. } => None, PendingConnectionError::Aborted => None, - #[allow(deprecated)] - PendingConnectionError::ConnectionLimit(..) => None, } } } diff --git a/swarm/src/connection/pool.rs b/swarm/src/connection/pool.rs index 68ad16ad36a..9bcd1b446d3 100644 --- a/swarm/src/connection/pool.rs +++ b/swarm/src/connection/pool.rs @@ -18,10 +18,7 @@ // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-#[allow(deprecated)] -use crate::connection::{Connection, ConnectionId, ConnectionLimit, PendingPoint}; -#[allow(deprecated)] -use crate::IntoConnectionHandler; +use crate::connection::{Connection, ConnectionId, PendingPoint}; use crate::{ connection::{ Connected, ConnectionError, IncomingInfo, PendingConnectionError, @@ -40,19 +37,19 @@ use futures::{ ready, stream::FuturesUnordered, }; -use instant::Instant; +use instant::{Duration, Instant}; use libp2p_core::connection::Endpoint; use libp2p_core::muxing::{StreamMuxerBox, StreamMuxerExt}; use std::task::Waker; use std::{ collections::{hash_map, HashMap}, - convert::TryFrom as _, fmt, num::{NonZeroU8, NonZeroUsize}, pin::Pin, task::Context, task::Poll, }; +use tracing::Instrument; use void::Void; mod concurrent_dial; @@ -94,8 +91,10 @@ where counters: ConnectionCounters, /// The managed connections of each peer that are currently considered established. - established: - FnvHashMap>>, + established: FnvHashMap< + PeerId, + FnvHashMap>, + >, /// The pending connections that are currently being negotiated. pending: HashMap, @@ -133,10 +132,13 @@ where /// Receivers for events reported from established connections. established_connection_events: - SelectAll>>, + SelectAll>>, /// Receivers for [`NewConnection`] objects that are dropped. new_connection_dropped_listeners: FuturesUnordered>, + + /// How long a connection should be kept alive once it starts idling. + idle_connection_timeout: Duration, } #[derive(Debug)] @@ -224,7 +226,7 @@ impl fmt::Debug for Pool { /// Event that can happen on the `Pool`. #[derive(Debug)] -pub(crate) enum PoolEvent { +pub(crate) enum PoolEvent { /// A new connection has been established. ConnectionEstablished { id: ConnectionId, @@ -256,10 +258,9 @@ pub(crate) enum PoolEvent { connected: Connected, /// The error that occurred, if any. If `None`, the connection /// was closed by the local peer. - error: Option>, + error: Option, /// The remaining established connections to the same peer. 
remaining_established_connection_ids: Vec, - handler: THandler, }, /// An outbound connection attempt failed. @@ -289,7 +290,7 @@ pub(crate) enum PoolEvent { id: ConnectionId, peer_id: PeerId, /// The produced event. - event: THandler::OutEvent, + event: ToBehaviour, }, /// The connection to a node has changed its address. @@ -308,8 +309,7 @@ where THandler: ConnectionHandler, { /// Creates a new empty `Pool`. - #[allow(deprecated)] - pub(crate) fn new(local_id: PeerId, config: PoolConfig, limits: ConnectionLimits) -> Self { + pub(crate) fn new(local_id: PeerId, config: PoolConfig) -> Self { let (pending_connection_events_tx, pending_connection_events_rx) = mpsc::channel(0); let executor = match config.executor { Some(exec) => ExecSwitch::Executor(exec), @@ -317,7 +317,7 @@ where }; Pool { local_id, - counters: ConnectionCounters::new(limits), + counters: ConnectionCounters::new(), established: Default::default(), pending: Default::default(), task_command_buffer_size: config.task_command_buffer_size, @@ -325,6 +325,7 @@ where substream_upgrade_protocol_override: config.substream_upgrade_protocol_override, max_negotiating_inbound_streams: config.max_negotiating_inbound_streams, per_connection_event_buffer_size: config.per_connection_event_buffer_size, + idle_connection_timeout: config.idle_connection_timeout, executor, pending_connection_events_tx, pending_connection_events_rx, @@ -343,7 +344,7 @@ where pub(crate) fn get_established( &mut self, id: ConnectionId, - ) -> Option<&mut EstablishedConnection> { + ) -> Option<&mut EstablishedConnection> { self.established .values_mut() .find_map(|connections| connections.get_mut(&id)) @@ -409,10 +410,6 @@ where /// Adds a pending outgoing connection to the pool in the form of a `Future` /// that establishes and negotiates the connection. - /// - /// Returns an error if the limit of pending outgoing connections - /// has been reached. 
- #[allow(deprecated)] pub(crate) fn add_outgoing( &mut self, dials: Vec< @@ -428,23 +425,23 @@ where role_override: Endpoint, dial_concurrency_factor_override: Option, connection_id: ConnectionId, - ) -> Result<(), ConnectionLimit> { - self.counters.check_max_pending_outgoing()?; - - let dial = ConcurrentDial::new( - dials, - dial_concurrency_factor_override.unwrap_or(self.dial_concurrency_factor), - ); + ) { + let concurrency_factor = + dial_concurrency_factor_override.unwrap_or(self.dial_concurrency_factor); + let span = tracing::debug_span!(parent: tracing::Span::none(), "new_outgoing_connection", %concurrency_factor, num_dials=%dials.len(), id = %connection_id); + span.follows_from(tracing::Span::current()); let (abort_notifier, abort_receiver) = oneshot::channel(); - self.executor - .spawn(task::new_for_pending_outgoing_connection( + self.executor.spawn( + task::new_for_pending_outgoing_connection( connection_id, - dial, + ConcurrentDial::new(dials, concurrency_factor), abort_receiver, self.pending_connection_events_tx.clone(), - )); + ) + .instrument(span), + ); let endpoint = PendingPoint::Dialer { role_override }; @@ -458,38 +455,34 @@ where accepted_at: Instant::now(), }, ); - - Ok(()) } /// Adds a pending incoming connection to the pool in the form of a /// `Future` that establishes and negotiates the connection. - /// - /// Returns an error if the limit of pending incoming connections - /// has been reached. 
- #[allow(deprecated)] pub(crate) fn add_incoming( &mut self, future: TFut, info: IncomingInfo<'_>, connection_id: ConnectionId, - ) -> Result<(), ConnectionLimit> - where + ) where TFut: Future> + Send + 'static, { let endpoint = info.create_connected_point(); - self.counters.check_max_pending_incoming()?; - let (abort_notifier, abort_receiver) = oneshot::channel(); - self.executor - .spawn(task::new_for_pending_incoming_connection( + let span = tracing::debug_span!(parent: tracing::Span::none(), "new_incoming_connection", remote_addr = %info.send_back_addr, id = %connection_id); + span.follows_from(tracing::Span::current()); + + self.executor.spawn( + task::new_for_pending_incoming_connection( connection_id, future, abort_receiver, self.pending_connection_events_tx.clone(), - )); + ) + .instrument(span), + ); self.counters.inc_pending_incoming(); self.pending.insert( @@ -501,21 +494,17 @@ where accepted_at: Instant::now(), }, ); - - Ok(()) } - #[allow(deprecated)] pub(crate) fn spawn_connection( &mut self, id: ConnectionId, obtained_peer_id: PeerId, endpoint: &ConnectedPoint, connection: NewConnection, - handler: ::Handler, + handler: THandler, ) { let connection = connection.extract(); - let conns = self.established.entry(obtained_peer_id).or_default(); self.counters.inc_established(endpoint); @@ -539,19 +528,27 @@ where handler, self.substream_upgrade_protocol_override, self.max_negotiating_inbound_streams, + self.idle_connection_timeout, ); - self.executor.spawn(task::new_for_established_connection( - id, - obtained_peer_id, - connection, - command_receiver, - event_sender, - )) + let span = tracing::debug_span!(parent: tracing::Span::none(), "new_established_connection", remote_addr = %endpoint.get_remote_address(), %id, peer = %obtained_peer_id); + span.follows_from(tracing::Span::current()); + + self.executor.spawn( + task::new_for_established_connection( + id, + obtained_peer_id, + connection, + command_receiver, + event_sender, + ) + .instrument(span), + 
) } /// Polls the connection pool for events. - pub(crate) fn poll(&mut self, cx: &mut Context<'_>) -> Poll> + #[tracing::instrument(level = "debug", name = "Pool::poll", skip(self, cx))] + pub(crate) fn poll(&mut self, cx: &mut Context<'_>) -> Poll> where THandler: ConnectionHandler + 'static, ::OutboundOpenInfo: Send, @@ -592,12 +589,7 @@ where old_endpoint, }); } - Poll::Ready(Some(task::EstablishedConnectionEvent::Closed { - id, - peer_id, - error, - handler, - })) => { + Poll::Ready(Some(task::EstablishedConnectionEvent::Closed { id, peer_id, error })) => { let connections = self .established .get_mut(&peer_id) @@ -615,7 +607,6 @@ where connected: Connected { endpoint, peer_id }, error, remaining_established_connection_ids, - handler, }); } } @@ -686,55 +677,32 @@ where ), }; - #[allow(deprecated)] - // Remove once `PendingConnectionError::ConnectionLimit` is gone. - let error = self - .counters - // Check general established connection limit. - .check_max_established(&endpoint) - .map_err(PendingConnectionError::ConnectionLimit) - // Check per-peer established connection limit. - .and_then(|()| { - self.counters - .check_max_established_per_peer(num_peer_established( - &self.established, - obtained_peer_id, - )) - .map_err(PendingConnectionError::ConnectionLimit) - }) - // Check expected peer id matches. - .and_then(|()| { - if let Some(peer) = expected_peer_id { - if peer != obtained_peer_id { - Err(PendingConnectionError::WrongPeerId { - obtained: obtained_peer_id, - endpoint: endpoint.clone(), - }) - } else { - Ok(()) - } - } else { - Ok(()) - } - }) - // Check peer is not local peer. 
- .and_then(|()| { - if self.local_id == obtained_peer_id { - Err(PendingConnectionError::LocalPeerId { + let check_peer_id = || { + if let Some(peer) = expected_peer_id { + if peer != obtained_peer_id { + return Err(PendingConnectionError::WrongPeerId { + obtained: obtained_peer_id, endpoint: endpoint.clone(), - }) - } else { - Ok(()) + }); } - }); + } - if let Err(error) = error { + if self.local_id == obtained_peer_id { + return Err(PendingConnectionError::LocalPeerId { + endpoint: endpoint.clone(), + }); + } + + Ok(()) + }; + + if let Err(error) = check_peer_id() { self.executor.spawn(poll_fn(move |cx| { if let Err(e) = ready!(muxer.poll_close_unpin(cx)) { - log::debug!( - "Failed to close connection {:?} to peer {}: {:?}", - id, - obtained_peer_id, + tracing::debug!( + peer=%obtained_peer_id, + connection=%id, + "Failed to close connection to peer: {:?}", e ); } @@ -873,9 +841,6 @@ impl Drop for NewConnection { /// Network connection information. #[derive(Debug, Clone)] pub struct ConnectionCounters { - /// The effective connection limits. - #[allow(deprecated)] - limits: ConnectionLimits, /// The current number of incoming connections. pending_incoming: u32, /// The current number of outgoing connections. @@ -887,10 +852,8 @@ pub struct ConnectionCounters { } impl ConnectionCounters { - #[allow(deprecated)] - fn new(limits: ConnectionLimits) -> Self { + fn new() -> Self { Self { - limits, pending_incoming: 0, pending_outgoing: 0, established_incoming: 0, @@ -898,13 +861,6 @@ impl ConnectionCounters { } } - /// The effective connection limits. - #[deprecated(note = "Use the `libp2p::connection_limits` instead.")] - #[allow(deprecated)] - pub fn limits(&self) -> &ConnectionLimits { - &self.limits - } - /// The total number of connections, both pending and established. 
pub fn num_connections(&self) -> u32 { self.num_pending() + self.num_established() @@ -987,117 +943,6 @@ impl ConnectionCounters { } } } - - #[allow(deprecated)] - fn check_max_pending_outgoing(&self) -> Result<(), ConnectionLimit> { - Self::check(self.pending_outgoing, self.limits.max_pending_outgoing) - } - - #[allow(deprecated)] - fn check_max_pending_incoming(&self) -> Result<(), ConnectionLimit> { - Self::check(self.pending_incoming, self.limits.max_pending_incoming) - } - - #[allow(deprecated)] - fn check_max_established(&self, endpoint: &ConnectedPoint) -> Result<(), ConnectionLimit> { - // Check total connection limit. - Self::check(self.num_established(), self.limits.max_established_total)?; - // Check incoming/outgoing connection limits - match endpoint { - ConnectedPoint::Dialer { .. } => Self::check( - self.established_outgoing, - self.limits.max_established_outgoing, - ), - ConnectedPoint::Listener { .. } => Self::check( - self.established_incoming, - self.limits.max_established_incoming, - ), - } - } - - #[allow(deprecated)] - fn check_max_established_per_peer(&self, current: u32) -> Result<(), ConnectionLimit> { - Self::check(current, self.limits.max_established_per_peer) - } - - #[allow(deprecated)] - fn check(current: u32, limit: Option) -> Result<(), ConnectionLimit> { - if let Some(limit) = limit { - if current >= limit { - return Err(ConnectionLimit { limit, current }); - } - } - Ok(()) - } -} - -/// Counts the number of established connections to the given peer. -fn num_peer_established( - established: &FnvHashMap>>, - peer: PeerId, -) -> u32 { - established.get(&peer).map_or(0, |conns| { - u32::try_from(conns.len()).expect("Unexpectedly large number of connections for a peer.") - }) -} - -/// The configurable connection limits. -/// -/// By default no connection limits apply. 
-#[derive(Debug, Clone, Default)] -#[deprecated(note = "Use `libp2p::connection_limits` instead.", since = "0.42.1")] -pub struct ConnectionLimits { - max_pending_incoming: Option, - max_pending_outgoing: Option, - max_established_incoming: Option, - max_established_outgoing: Option, - max_established_per_peer: Option, - max_established_total: Option, -} - -#[allow(deprecated)] -impl ConnectionLimits { - /// Configures the maximum number of concurrently incoming connections being established. - pub fn with_max_pending_incoming(mut self, limit: Option) -> Self { - self.max_pending_incoming = limit; - self - } - - /// Configures the maximum number of concurrently outgoing connections being established. - pub fn with_max_pending_outgoing(mut self, limit: Option) -> Self { - self.max_pending_outgoing = limit; - self - } - - /// Configures the maximum number of concurrent established inbound connections. - pub fn with_max_established_incoming(mut self, limit: Option) -> Self { - self.max_established_incoming = limit; - self - } - - /// Configures the maximum number of concurrent established outbound connections. - pub fn with_max_established_outgoing(mut self, limit: Option) -> Self { - self.max_established_outgoing = limit; - self - } - - /// Configures the maximum number of concurrent established connections (both - /// inbound and outbound). - /// - /// Note: This should be used in conjunction with - /// [`ConnectionLimits::with_max_established_incoming`] to prevent possible - /// eclipse attacks (all connections being inbound). - pub fn with_max_established(mut self, limit: Option) -> Self { - self.max_established_total = limit; - self - } - - /// Configures the maximum number of concurrent established connections per peer, - /// regardless of direction (incoming or outgoing). - pub fn with_max_established_per_peer(mut self, limit: Option) -> Self { - self.max_established_per_peer = limit; - self - } } /// Configuration options when creating a [`Pool`]. 
@@ -1114,6 +959,8 @@ pub(crate) struct PoolConfig { pub(crate) per_connection_event_buffer_size: usize, /// Number of addresses concurrently dialed for a single outbound connection attempt. pub(crate) dial_concurrency_factor: NonZeroU8, + /// How long a connection should be kept alive once it is idling. + pub(crate) idle_connection_timeout: Duration, /// The configured override for substream protocol upgrades, if any. substream_upgrade_protocol_override: Option, @@ -1130,6 +977,7 @@ impl PoolConfig { task_command_buffer_size: 32, per_connection_event_buffer_size: 7, dial_concurrency_factor: NonZeroU8::new(8).expect("8 > 0"), + idle_connection_timeout: Duration::ZERO, substream_upgrade_protocol_override: None, max_negotiating_inbound_streams: 128, } @@ -1140,7 +988,7 @@ impl PoolConfig { /// delivery to the connection handler. /// /// When the buffer for a particular connection is full, `notify_handler` will no - /// longer be able to deliver events to the associated [`Connection`](super::Connection), + /// longer be able to deliver events to the associated [`Connection`], /// thus exerting back-pressure on the connection and peer API. pub(crate) fn with_notify_handler_buffer_size(mut self, n: NonZeroUsize) -> Self { self.task_command_buffer_size = n.get() - 1; diff --git a/swarm/src/connection/pool/task.rs b/swarm/src/connection/pool/task.rs index dd318f77d30..08674fd2ee5 100644 --- a/swarm/src/connection/pool/task.rs +++ b/swarm/src/connection/pool/task.rs @@ -66,7 +66,7 @@ pub(crate) enum PendingConnectionEvent { } #[derive(Debug)] -pub(crate) enum EstablishedConnectionEvent { +pub(crate) enum EstablishedConnectionEvent { /// A node we are connected to has changed its address. AddressChange { id: ConnectionId, @@ -77,7 +77,7 @@ pub(crate) enum EstablishedConnectionEvent { Notify { id: ConnectionId, peer_id: PeerId, - event: THandler::OutEvent, + event: ToBehaviour, }, /// A connection closed, possibly due to an error. 
/// @@ -86,8 +86,7 @@ pub(crate) enum EstablishedConnectionEvent { Closed { id: ConnectionId, peer_id: PeerId, - error: Option>, - handler: THandler, + error: Option, }, } @@ -171,8 +170,8 @@ pub(crate) async fn new_for_established_connection( connection_id: ConnectionId, peer_id: PeerId, mut connection: crate::connection::Connection, - mut command_receiver: mpsc::Receiver>, - mut events: mpsc::Sender>, + mut command_receiver: mpsc::Receiver>, + mut events: mpsc::Sender>, ) where THandler: ConnectionHandler, { @@ -187,15 +186,25 @@ pub(crate) async fn new_for_established_connection( Command::NotifyHandler(event) => connection.on_behaviour_event(event), Command::Close => { command_receiver.close(); - let (handler, closing_muxer) = connection.close(); + let (remaining_events, closing_muxer) = connection.close(); + + let _ = events + .send_all(&mut remaining_events.map(|event| { + Ok(EstablishedConnectionEvent::Notify { + id: connection_id, + event, + peer_id, + }) + })) + .await; let error = closing_muxer.await.err().map(ConnectionError::IO); + let _ = events .send(EstablishedConnectionEvent::Closed { id: connection_id, peer_id, error, - handler, }) .await; return; @@ -227,14 +236,24 @@ pub(crate) async fn new_for_established_connection( } Err(error) => { command_receiver.close(); - let (handler, _closing_muxer) = connection.close(); + let (remaining_events, _closing_muxer) = connection.close(); + + let _ = events + .send_all(&mut remaining_events.map(|event| { + Ok(EstablishedConnectionEvent::Notify { + id: connection_id, + event, + peer_id, + }) + })) + .await; + // Terminate the task with the error, dropping the connection. 
let _ = events .send(EstablishedConnectionEvent::Closed { id: connection_id, peer_id, error: Some(error), - handler, }) .await; return; diff --git a/swarm/src/connection/supported_protocols.rs b/swarm/src/connection/supported_protocols.rs new file mode 100644 index 00000000000..0575046bb44 --- /dev/null +++ b/swarm/src/connection/supported_protocols.rs @@ -0,0 +1,88 @@ +use crate::handler::ProtocolsChange; +use crate::StreamProtocol; +use std::collections::HashSet; + +#[derive(Default, Clone, Debug)] +pub struct SupportedProtocols { + protocols: HashSet, +} + +impl SupportedProtocols { + pub fn on_protocols_change(&mut self, change: ProtocolsChange) -> bool { + match change { + ProtocolsChange::Added(added) => { + let mut changed = false; + + for p in added { + changed |= self.protocols.insert(p.clone()); + } + + changed + } + ProtocolsChange::Removed(removed) => { + let mut changed = false; + + for p in removed { + changed |= self.protocols.remove(p); + } + + changed + } + } + } + + pub fn iter(&self) -> impl Iterator { + self.protocols.iter() + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::handler::{ProtocolsAdded, ProtocolsRemoved}; + use once_cell::sync::Lazy; + + #[test] + fn protocols_change_added_returns_correct_changed_value() { + let mut protocols = SupportedProtocols::default(); + + let changed = protocols.on_protocols_change(add_foo()); + assert!(changed); + + let changed = protocols.on_protocols_change(add_foo()); + assert!(!changed); + + let changed = protocols.on_protocols_change(add_foo_bar()); + assert!(changed); + } + + #[test] + fn protocols_change_removed_returns_correct_changed_value() { + let mut protocols = SupportedProtocols::default(); + + let changed = protocols.on_protocols_change(remove_foo()); + assert!(!changed); + + protocols.on_protocols_change(add_foo()); + + let changed = protocols.on_protocols_change(remove_foo()); + assert!(changed); + } + + fn add_foo() -> ProtocolsChange<'static> { + 
ProtocolsChange::Added(ProtocolsAdded::from_set(&FOO_PROTOCOLS)) + } + + fn add_foo_bar() -> ProtocolsChange<'static> { + ProtocolsChange::Added(ProtocolsAdded::from_set(&FOO_BAR_PROTOCOLS)) + } + + fn remove_foo() -> ProtocolsChange<'static> { + ProtocolsChange::Removed(ProtocolsRemoved::from_set(&FOO_PROTOCOLS)) + } + + static FOO_PROTOCOLS: Lazy> = + Lazy::new(|| HashSet::from([StreamProtocol::new("/foo")])); + static FOO_BAR_PROTOCOLS: Lazy> = + Lazy::new(|| HashSet::from([StreamProtocol::new("/foo"), StreamProtocol::new("/bar")])); +} diff --git a/swarm/src/dial_opts.rs b/swarm/src/dial_opts.rs index 6ee6083174d..4442d913847 100644 --- a/swarm/src/dial_opts.rs +++ b/swarm/src/dial_opts.rs @@ -22,7 +22,6 @@ use crate::ConnectionId; use libp2p_core::connection::Endpoint; use libp2p_core::multiaddr::Protocol; -use libp2p_core::multihash::Multihash; use libp2p_core::Multiaddr; use libp2p_identity::PeerId; use std::num::NonZeroU8; @@ -81,9 +80,21 @@ impl DialOpts { WithoutPeerId {} } - /// Get the [`PeerId`] specified in a [`DialOpts`] if any. + /// Retrieves the [`PeerId`] from the [`DialOpts`] if specified or otherwise tries to extract it + /// from the multihash in the `/p2p` part of the address, if present. pub fn get_peer_id(&self) -> Option { - self.peer_id + if let Some(peer_id) = self.peer_id { + return Some(peer_id); + } + + let first_address = self.addresses.first()?; + let last_protocol = first_address.iter().last()?; + + if let Protocol::P2p(p) = last_protocol { + return Some(p); + } + + None } /// Get the [`ConnectionId`] of this dial attempt. @@ -94,40 +105,6 @@ impl DialOpts { self.connection_id } - /// Retrieves the [`PeerId`] from the [`DialOpts`] if specified or otherwise tries to parse it - /// from the multihash in the `/p2p` part of the address, if present. - /// - /// Note: A [`Multiaddr`] with something else other than a [`PeerId`] within the `/p2p` protocol is invalid as per specification. 
- /// Unfortunately, we are not making good use of the type system here. - /// Really, this function should be merged with [`DialOpts::get_peer_id`] above. - /// If it weren't for the parsing error, the function signatures would be the same. - /// - /// See . - pub(crate) fn get_or_parse_peer_id(&self) -> Result, Multihash> { - if let Some(peer_id) = self.peer_id { - return Ok(Some(peer_id)); - } - - let first_address = match self.addresses.first() { - Some(first_address) => first_address, - None => return Ok(None), - }; - - let maybe_peer_id = first_address - .iter() - .last() - .and_then(|p| { - if let Protocol::P2p(ma) = p { - Some(PeerId::try_from(ma)) - } else { - None - } - }) - .transpose()?; - - Ok(maybe_peer_id) - } - pub(crate) fn get_addresses(&self) -> Vec { self.addresses.clone() } @@ -207,9 +184,6 @@ impl WithPeerId { } /// Build the final [`DialOpts`]. - /// - /// Addresses to dial the peer are retrieved via - /// [`NetworkBehaviour::addresses_of_peer`](crate::behaviour::NetworkBehaviour::addresses_of_peer). pub fn build(self) -> DialOpts { DialOpts { peer_id: Some(self.peer_id), @@ -241,7 +215,7 @@ impl WithPeerIdWithAddresses { } /// In addition to the provided addresses, extend the set via - /// [`NetworkBehaviour::addresses_of_peer`](crate::behaviour::NetworkBehaviour::addresses_of_peer). + /// [`NetworkBehaviour::handle_pending_outbound_connection`](crate::behaviour::NetworkBehaviour::handle_pending_outbound_connection). pub fn extend_addresses_through_behaviour(mut self) -> Self { self.extend_addresses_through_behaviour = true; self @@ -337,14 +311,18 @@ impl WithoutPeerIdWithAddress { #[derive(Debug, Copy, Clone, Default)] pub enum PeerCondition { /// A new dialing attempt is initiated _only if_ the peer is currently - /// considered disconnected, i.e. there is no established connection - /// and no ongoing dialing attempt. - #[default] + /// considered disconnected, i.e. there is no established connection. 
Disconnected, /// A new dialing attempt is initiated _only if_ there is currently /// no ongoing dialing attempt, i.e. the peer is either considered /// disconnected or connected but without an ongoing dialing attempt. NotDialing, + /// A combination of [`Disconnected`](PeerCondition::Disconnected) and + /// [`NotDialing`](PeerCondition::NotDialing). A new dialing attempt is + /// iniated _only if_ the peer is both considered disconnected and there + /// is currently no ongoing dialing attempt. + #[default] + DisconnectedAndNotDialing, /// A new dialing attempt is always initiated, only subject to the /// configured connection limits. Always, diff --git a/swarm/src/dummy.rs b/swarm/src/dummy.rs index 4497540a42b..86df676443b 100644 --- a/swarm/src/dummy.rs +++ b/swarm/src/dummy.rs @@ -1,15 +1,15 @@ -use crate::behaviour::{FromSwarm, NetworkBehaviour, PollParameters, ToSwarm}; +use crate::behaviour::{FromSwarm, NetworkBehaviour, ToSwarm}; use crate::connection::ConnectionId; use crate::handler::{ ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound, }; use crate::{ - ConnectionDenied, ConnectionHandlerEvent, ConnectionHandlerUpgrErr, KeepAlive, - SubstreamProtocol, THandler, THandlerInEvent, THandlerOutEvent, + ConnectionDenied, ConnectionHandlerEvent, StreamUpgradeError, SubstreamProtocol, THandler, + THandlerInEvent, THandlerOutEvent, }; use libp2p_core::upgrade::DeniedUpgrade; use libp2p_core::Endpoint; -use libp2p_core::{Multiaddr, UpgradeError}; +use libp2p_core::Multiaddr; use libp2p_identity::PeerId; use std::task::{Context, Poll}; use void::Void; @@ -19,7 +19,7 @@ pub struct Behaviour; impl NetworkBehaviour for Behaviour { type ConnectionHandler = ConnectionHandler; - type OutEvent = Void; + type ToSwarm = Void; fn handle_established_inbound_connection( &mut self, @@ -50,30 +50,11 @@ impl NetworkBehaviour for Behaviour { void::unreachable(event) } - fn poll( - &mut self, - _: &mut Context<'_>, - _: &mut impl PollParameters, - ) 
-> Poll>> { + fn poll(&mut self, _: &mut Context<'_>) -> Poll>> { Poll::Pending } - fn on_swarm_event(&mut self, event: FromSwarm) { - match event { - FromSwarm::ConnectionEstablished(_) - | FromSwarm::ConnectionClosed(_) - | FromSwarm::AddressChange(_) - | FromSwarm::DialFailure(_) - | FromSwarm::ListenFailure(_) - | FromSwarm::NewListener(_) - | FromSwarm::NewListenAddr(_) - | FromSwarm::ExpiredListenAddr(_) - | FromSwarm::ListenerError(_) - | FromSwarm::ListenerClosed(_) - | FromSwarm::NewExternalAddr(_) - | FromSwarm::ExpiredExternalAddr(_) => {} - } - } + fn on_swarm_event(&mut self, _event: FromSwarm) {} } /// An implementation of [`ConnectionHandler`] that neither handles any protocols nor does it keep the connection alive. @@ -81,9 +62,8 @@ impl NetworkBehaviour for Behaviour { pub struct ConnectionHandler; impl crate::handler::ConnectionHandler for ConnectionHandler { - type InEvent = Void; - type OutEvent = Void; - type Error = Void; + type FromBehaviour = Void; + type ToBehaviour = Void; type InboundProtocol = DeniedUpgrade; type OutboundProtocol = DeniedUpgrade; type InboundOpenInfo = (); @@ -93,24 +73,15 @@ impl crate::handler::ConnectionHandler for ConnectionHandler { SubstreamProtocol::new(DeniedUpgrade, ()) } - fn on_behaviour_event(&mut self, event: Self::InEvent) { + fn on_behaviour_event(&mut self, event: Self::FromBehaviour) { void::unreachable(event) } - fn connection_keep_alive(&self) -> KeepAlive { - KeepAlive::No - } - fn poll( &mut self, _: &mut Context<'_>, ) -> Poll< - ConnectionHandlerEvent< - Self::OutboundProtocol, - Self::OutboundOpenInfo, - Self::OutEvent, - Self::Error, - >, + ConnectionHandlerEvent, > { Poll::Pending } @@ -132,14 +103,16 @@ impl crate::handler::ConnectionHandler for ConnectionHandler { protocol, .. 
}) => void::unreachable(protocol), ConnectionEvent::DialUpgradeError(DialUpgradeError { info: _, error }) => match error { - ConnectionHandlerUpgrErr::Timeout => unreachable!(), - ConnectionHandlerUpgrErr::Timer => unreachable!(), - ConnectionHandlerUpgrErr::Upgrade(UpgradeError::Apply(e)) => void::unreachable(e), - ConnectionHandlerUpgrErr::Upgrade(UpgradeError::Select(_)) => { + StreamUpgradeError::Timeout => unreachable!(), + StreamUpgradeError::Apply(e) => void::unreachable(e), + StreamUpgradeError::NegotiationFailed | StreamUpgradeError::Io(_) => { unreachable!("Denied upgrade does not support any protocols") } }, - ConnectionEvent::AddressChange(_) | ConnectionEvent::ListenUpgradeError(_) => {} + ConnectionEvent::AddressChange(_) + | ConnectionEvent::ListenUpgradeError(_) + | ConnectionEvent::LocalProtocolsChange(_) + | ConnectionEvent::RemoteProtocolsChange(_) => {} } } } diff --git a/swarm/src/handler.rs b/swarm/src/handler.rs index 1917117c44e..31d2c91e391 100644 --- a/swarm/src/handler.rs +++ b/swarm/src/handler.rs @@ -47,17 +47,22 @@ mod pending; mod select; pub use crate::upgrade::{InboundUpgradeSend, OutboundUpgradeSend, SendWrapper, UpgradeInfoSend}; - -use instant::Instant; -use libp2p_core::{upgrade::UpgradeError, ConnectedPoint, Multiaddr}; -use libp2p_identity::PeerId; -use std::{cmp::Ordering, error, fmt, task::Context, task::Poll, time::Duration}; - pub use map_in::MapInEvent; pub use map_out::MapOutEvent; pub use one_shot::{OneShotHandler, OneShotHandlerConfig}; pub use pending::PendingConnectionHandler; -pub use select::{ConnectionHandlerSelect, IntoConnectionHandlerSelect}; +pub use select::ConnectionHandlerSelect; + +use crate::StreamProtocol; +use ::either::Either; +use libp2p_core::Multiaddr; +use once_cell::sync::Lazy; +use smallvec::SmallVec; +use std::collections::hash_map::RandomState; +use std::collections::hash_set::{Difference, Intersection}; +use std::collections::HashSet; +use std::iter::Peekable; +use std::{error, fmt, io, 
task::Context, task::Poll, time::Duration}; /// A handler for a set of protocols used on a connection with a remote. /// @@ -93,12 +98,10 @@ pub use select::{ConnectionHandlerSelect, IntoConnectionHandlerSelect}; /// When a connection is closed gracefully, the substreams used by the handler may still /// continue reading data until the remote closes its side of the connection. pub trait ConnectionHandler: Send + 'static { - /// Custom event that can be received from the outside. - type InEvent: fmt::Debug + Send + 'static; - /// Custom event that can be produced by the handler and that will be returned to the outside. - type OutEvent: fmt::Debug + Send + 'static; - /// The type of errors returned by [`ConnectionHandler::poll`]. - type Error: error::Error + fmt::Debug + Send + 'static; + /// A type representing the message(s) a [`NetworkBehaviour`](crate::behaviour::NetworkBehaviour) can send to a [`ConnectionHandler`] via [`ToSwarm::NotifyHandler`](crate::behaviour::ToSwarm::NotifyHandler) + type FromBehaviour: fmt::Debug + Send + 'static; + /// A type representing message(s) a [`ConnectionHandler`] can send to a [`NetworkBehaviour`](crate::behaviour::NetworkBehaviour) via [`ConnectionHandlerEvent::NotifyBehaviour`]. + type ToBehaviour: fmt::Debug + Send + 'static; /// The inbound upgrade for the protocol(s) used by the handler. type InboundProtocol: InboundUpgradeSend; /// The outbound upgrade for the protocol(s) used by the handler. @@ -117,46 +120,58 @@ pub trait ConnectionHandler: Send + 'static { /// > This allows a remote to put the list of supported protocols in a cache. fn listen_protocol(&self) -> SubstreamProtocol; - /// Returns until when the connection should be kept alive. + /// Returns whether the connection should be kept alive. 
+ /// + /// ## Keep alive algorithm /// - /// This method is called by the `Swarm` after each invocation of - /// [`ConnectionHandler::poll`] to determine if the connection and the associated - /// [`ConnectionHandler`]s should be kept alive as far as this handler is concerned - /// and if so, for how long. + /// A connection is always kept alive: /// - /// Returning [`KeepAlive::No`] indicates that the connection should be - /// closed and this handler destroyed immediately. + /// - Whilst a [`ConnectionHandler`] returns [`Poll::Ready`]. + /// - We are negotiating inbound or outbound streams. + /// - There are active [`Stream`](crate::Stream)s on the connection. /// - /// Returning [`KeepAlive::Until`] indicates that the connection may be closed - /// and this handler destroyed after the specified `Instant`. + /// The combination of the above means that _most_ protocols will not need to override this method. + /// This method is only invoked when all of the above are `false`, i.e. when the connection is entirely idle. /// - /// Returning [`KeepAlive::Yes`] indicates that the connection should - /// be kept alive until the next call to this method. + /// ## Exceptions /// - /// > **Note**: The connection is always closed and the handler destroyed - /// > when [`ConnectionHandler::poll`] returns an error. Furthermore, the - /// > connection may be closed for reasons outside of the control - /// > of the handler. - fn connection_keep_alive(&self) -> KeepAlive; + /// - Protocols like [circuit-relay v2](https://github.com/libp2p/specs/blob/master/relay/circuit-v2.md) need to keep a connection alive beyond these circumstances and can thus override this method. + /// - Protocols like [ping](https://github.com/libp2p/specs/blob/master/ping/ping.md) **don't** want to keep a connection alive despite an active streams. 
+ /// In that case, protocol authors can use [`Stream::ignore_for_keep_alive`](crate::Stream::ignore_for_keep_alive) to opt-out a particular stream from the keep-alive algorithm. + fn connection_keep_alive(&self) -> bool { + false + } /// Should behave like `Stream::poll()`. fn poll( &mut self, cx: &mut Context<'_>, ) -> Poll< - ConnectionHandlerEvent< - Self::OutboundProtocol, - Self::OutboundOpenInfo, - Self::OutEvent, - Self::Error, - >, + ConnectionHandlerEvent, >; + /// Gracefully close the [`ConnectionHandler`]. + /// + /// The contract for this function is equivalent to a [`Stream`](futures::Stream). + /// When a connection is being shut down, we will first poll this function to completion. + /// Following that, the physical connection will be shut down. + /// + /// This is also called when the shutdown was initiated due to an error on the connection. + /// We therefore cannot guarantee that performing IO within here will succeed. + /// + /// To signal completion, [`Poll::Ready(None)`] should be returned. + /// + /// Implementations MUST have a [`fuse`](futures::StreamExt::fuse)-like behaviour. + /// That is, [`Poll::Ready(None)`] MUST be returned on repeated calls to [`ConnectionHandler::poll_close`]. + fn poll_close(&mut self, _: &mut Context<'_>) -> Poll> { + Poll::Ready(None) + } + /// Adds a closure that turns the input event into something else. fn map_in_event(self, map: TMap) -> MapInEvent where Self: Sized, - TMap: Fn(&TNewIn) -> Option<&Self::InEvent>, + TMap: Fn(&TNewIn) -> Option<&Self::FromBehaviour>, { MapInEvent::new(self, map) } @@ -165,17 +180,13 @@ pub trait ConnectionHandler: Send + 'static { fn map_out_event(self, map: TMap) -> MapOutEvent where Self: Sized, - TMap: FnMut(Self::OutEvent) -> TNewOut, + TMap: FnMut(Self::ToBehaviour) -> TNewOut, { MapOutEvent::new(self, map) } /// Creates a new [`ConnectionHandler`] that selects either this handler or /// `other` by delegating methods calls appropriately. 
- /// - /// > **Note**: The largest `KeepAlive` returned by the two handlers takes precedence, - /// > i.e. is returned from [`ConnectionHandler::connection_keep_alive`] by the returned - /// > handler. fn select(self, other: TProto2) -> ConnectionHandlerSelect where Self: Sized, @@ -184,7 +195,7 @@ pub trait ConnectionHandler: Send + 'static { } /// Informs the handler about an event from the [`NetworkBehaviour`](super::NetworkBehaviour). - fn on_behaviour_event(&mut self, _event: Self::InEvent); + fn on_behaviour_event(&mut self, _event: Self::FromBehaviour); fn on_connection_event( &mut self, @@ -199,6 +210,7 @@ pub trait ConnectionHandler: Send + 'static { /// Enumeration with the list of the possible stream events /// to pass to [`on_connection_event`](ConnectionHandler::on_connection_event). +#[non_exhaustive] pub enum ConnectionEvent<'a, IP: InboundUpgradeSend, OP: OutboundUpgradeSend, IOI, OOI> { /// Informs the handler about the output of a successful upgrade on a new inbound substream. FullyNegotiatedInbound(FullyNegotiatedInbound), @@ -210,6 +222,46 @@ pub enum ConnectionEvent<'a, IP: InboundUpgradeSend, OP: OutboundUpgradeSend, IO DialUpgradeError(DialUpgradeError), /// Informs the handler that upgrading an inbound substream to the given protocol has failed. ListenUpgradeError(ListenUpgradeError), + /// The local [`ConnectionHandler`] added or removed support for one or more protocols. + LocalProtocolsChange(ProtocolsChange<'a>), + /// The remote [`ConnectionHandler`] now supports a different set of protocols. 
+ RemoteProtocolsChange(ProtocolsChange<'a>), +} + +impl<'a, IP, OP, IOI, OOI> fmt::Debug for ConnectionEvent<'a, IP, OP, IOI, OOI> +where + IP: InboundUpgradeSend + fmt::Debug, + IP::Output: fmt::Debug, + IP::Error: fmt::Debug, + OP: OutboundUpgradeSend + fmt::Debug, + OP::Output: fmt::Debug, + OP::Error: fmt::Debug, + IOI: fmt::Debug, + OOI: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + ConnectionEvent::FullyNegotiatedInbound(v) => { + f.debug_tuple("FullyNegotiatedInbound").field(v).finish() + } + ConnectionEvent::FullyNegotiatedOutbound(v) => { + f.debug_tuple("FullyNegotiatedOutbound").field(v).finish() + } + ConnectionEvent::AddressChange(v) => f.debug_tuple("AddressChange").field(v).finish(), + ConnectionEvent::DialUpgradeError(v) => { + f.debug_tuple("DialUpgradeError").field(v).finish() + } + ConnectionEvent::ListenUpgradeError(v) => { + f.debug_tuple("ListenUpgradeError").field(v).finish() + } + ConnectionEvent::LocalProtocolsChange(v) => { + f.debug_tuple("LocalProtocolsChange").field(v).finish() + } + ConnectionEvent::RemoteProtocolsChange(v) => { + f.debug_tuple("RemoteProtocolsChange").field(v).finish() + } + } + } } impl<'a, IP: InboundUpgradeSend, OP: OutboundUpgradeSend, IOI, OOI> @@ -223,22 +275,22 @@ impl<'a, IP: InboundUpgradeSend, OP: OutboundUpgradeSend, IOI, OOI> } ConnectionEvent::FullyNegotiatedInbound(_) | ConnectionEvent::AddressChange(_) + | ConnectionEvent::LocalProtocolsChange(_) + | ConnectionEvent::RemoteProtocolsChange(_) | ConnectionEvent::ListenUpgradeError(_) => false, } } /// Whether the event concerns an inbound stream. pub fn is_inbound(&self) -> bool { - // Note: This will get simpler with https://github.com/libp2p/rust-libp2p/pull/3605. 
match self { - ConnectionEvent::FullyNegotiatedInbound(_) - | ConnectionEvent::ListenUpgradeError(ListenUpgradeError { - error: ConnectionHandlerUpgrErr::Upgrade(UpgradeError::Select(_)), // Only `Select` is relevant, the others may be for other handlers too. - .. - }) => true, + ConnectionEvent::FullyNegotiatedInbound(_) | ConnectionEvent::ListenUpgradeError(_) => { + true + } ConnectionEvent::FullyNegotiatedOutbound(_) - | ConnectionEvent::ListenUpgradeError(_) | ConnectionEvent::AddressChange(_) + | ConnectionEvent::LocalProtocolsChange(_) + | ConnectionEvent::RemoteProtocolsChange(_) | ConnectionEvent::DialUpgradeError(_) => false, } } @@ -252,6 +304,7 @@ impl<'a, IP: InboundUpgradeSend, OP: OutboundUpgradeSend, IOI, OOI> /// of simultaneously open negotiated inbound substreams. In other words it is up to the /// [`ConnectionHandler`] implementation to stop a malicious remote node to open and keep alive /// an excessive amount of inbound substreams. +#[derive(Debug)] pub struct FullyNegotiatedInbound { pub protocol: IP::Output, pub info: IOI, @@ -261,28 +314,148 @@ pub struct FullyNegotiatedInbound { /// /// The `protocol` field is the information that was previously passed to /// [`ConnectionHandlerEvent::OutboundSubstreamRequest`]. +#[derive(Debug)] pub struct FullyNegotiatedOutbound { pub protocol: OP::Output, pub info: OOI, } /// [`ConnectionEvent`] variant that informs the handler about a change in the address of the remote. +#[derive(Debug)] pub struct AddressChange<'a> { pub new_address: &'a Multiaddr, } +/// [`ConnectionEvent`] variant that informs the handler about a change in the protocols supported on the connection. +#[derive(Debug, Clone)] +pub enum ProtocolsChange<'a> { + Added(ProtocolsAdded<'a>), + Removed(ProtocolsRemoved<'a>), +} + +impl<'a> ProtocolsChange<'a> { + /// Compute the [`ProtocolsChange`] that results from adding `to_add` to `existing_protocols`. + /// + /// Returns `None` if the change is a no-op, i.e. 
`to_add` is a subset of `existing_protocols`. + pub(crate) fn add( + existing_protocols: &'a HashSet, + to_add: &'a HashSet, + ) -> Option { + let mut actually_added_protocols = to_add.difference(existing_protocols).peekable(); + + actually_added_protocols.peek()?; + + Some(ProtocolsChange::Added(ProtocolsAdded { + protocols: actually_added_protocols, + })) + } + + /// Compute the [`ProtocolsChange`] that results from removing `to_remove` from `existing_protocols`. + /// + /// Returns `None` if the change is a no-op, i.e. none of the protocols in `to_remove` are in `existing_protocols`. + pub(crate) fn remove( + existing_protocols: &'a HashSet, + to_remove: &'a HashSet, + ) -> Option { + let mut actually_removed_protocols = existing_protocols.intersection(to_remove).peekable(); + + actually_removed_protocols.peek()?; + + Some(ProtocolsChange::Removed(ProtocolsRemoved { + protocols: Either::Right(actually_removed_protocols), + })) + } + + /// Compute the [`ProtocolsChange`]s required to go from `existing_protocols` to `new_protocols`. + pub(crate) fn from_full_sets( + existing_protocols: &'a HashSet, + new_protocols: &'a HashSet, + ) -> SmallVec<[Self; 2]> { + if existing_protocols == new_protocols { + return SmallVec::new(); + } + + let mut changes = SmallVec::new(); + + let mut added_protocols = new_protocols.difference(existing_protocols).peekable(); + let mut removed_protocols = existing_protocols.difference(new_protocols).peekable(); + + if added_protocols.peek().is_some() { + changes.push(ProtocolsChange::Added(ProtocolsAdded { + protocols: added_protocols, + })); + } + + if removed_protocols.peek().is_some() { + changes.push(ProtocolsChange::Removed(ProtocolsRemoved { + protocols: Either::Left(removed_protocols), + })); + } + + changes + } +} + +/// An [`Iterator`] over all protocols that have been added. 
+#[derive(Debug, Clone)] +pub struct ProtocolsAdded<'a> { + protocols: Peekable>, +} + +impl<'a> ProtocolsAdded<'a> { + pub(crate) fn from_set(protocols: &'a HashSet) -> Self { + ProtocolsAdded { + protocols: protocols.difference(&EMPTY_HASHSET).peekable(), + } + } +} + +/// An [`Iterator`] over all protocols that have been removed. +#[derive(Debug, Clone)] +pub struct ProtocolsRemoved<'a> { + protocols: Either< + Peekable>, + Peekable>, + >, +} + +impl<'a> ProtocolsRemoved<'a> { + #[cfg(test)] + pub(crate) fn from_set(protocols: &'a HashSet) -> Self { + ProtocolsRemoved { + protocols: Either::Left(protocols.difference(&EMPTY_HASHSET).peekable()), + } + } +} + +impl<'a> Iterator for ProtocolsAdded<'a> { + type Item = &'a StreamProtocol; + fn next(&mut self) -> Option { + self.protocols.next() + } +} + +impl<'a> Iterator for ProtocolsRemoved<'a> { + type Item = &'a StreamProtocol; + fn next(&mut self) -> Option { + self.protocols.next() + } +} + /// [`ConnectionEvent`] variant that informs the handler /// that upgrading an outbound substream to the given protocol has failed. +#[derive(Debug)] pub struct DialUpgradeError { pub info: OOI, - pub error: ConnectionHandlerUpgrErr, + pub error: StreamUpgradeError, } /// [`ConnectionEvent`] variant that informs the handler /// that upgrading an inbound substream to the given protocol has failed. +#[derive(Debug)] pub struct ListenUpgradeError { pub info: IOI, - pub error: ConnectionHandlerUpgrErr, + pub error: IP::Error, } /// Configuration of inbound or outbound substream protocol(s) @@ -362,38 +535,39 @@ impl SubstreamProtocol { } /// Event produced by a handler. -#[derive(Debug, Copy, Clone, PartialEq, Eq)] -pub enum ConnectionHandlerEvent { +#[derive(Debug, Clone, PartialEq, Eq)] +#[non_exhaustive] +pub enum ConnectionHandlerEvent { /// Request a new outbound substream to be opened with the remote. OutboundSubstreamRequest { /// The protocol(s) to apply on the substream. 
protocol: SubstreamProtocol, }, + /// We learned something about the protocols supported by the remote. + ReportRemoteProtocols(ProtocolSupport), - /// Close the connection for the given reason. - /// - /// Note this will affect all [`ConnectionHandler`]s handling this - /// connection, in other words it will close the connection for all - /// [`ConnectionHandler`]s. To signal that one has no more need for the - /// connection, while allowing other [`ConnectionHandler`]s to continue using - /// the connection, return [`KeepAlive::No`] in - /// [`ConnectionHandler::connection_keep_alive`]. - Close(TErr), - - /// Other event. - Custom(TCustom), + /// Event that is sent to a [`NetworkBehaviour`](crate::behaviour::NetworkBehaviour). + NotifyBehaviour(TCustom), +} + +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum ProtocolSupport { + /// The remote now supports these additional protocols. + Added(HashSet), + /// The remote no longer supports these protocols. + Removed(HashSet), } /// Event produced by a handler. -impl - ConnectionHandlerEvent +impl + ConnectionHandlerEvent { /// If this is an `OutboundSubstreamRequest`, maps the `info` member from a /// `TOutboundOpenInfo` to something else. pub fn map_outbound_open_info( self, map: F, - ) -> ConnectionHandlerEvent + ) -> ConnectionHandlerEvent where F: FnOnce(TOutboundOpenInfo) -> I, { @@ -403,17 +577,18 @@ impl protocol: protocol.map_info(map), } } - ConnectionHandlerEvent::Custom(val) => ConnectionHandlerEvent::Custom(val), - ConnectionHandlerEvent::Close(val) => ConnectionHandlerEvent::Close(val), + ConnectionHandlerEvent::NotifyBehaviour(val) => { + ConnectionHandlerEvent::NotifyBehaviour(val) + } + ConnectionHandlerEvent::ReportRemoteProtocols(support) => { + ConnectionHandlerEvent::ReportRemoteProtocols(support) + } } } /// If this is an `OutboundSubstreamRequest`, maps the protocol (`TConnectionUpgrade`) /// to something else. 
- pub fn map_protocol( - self, - map: F, - ) -> ConnectionHandlerEvent + pub fn map_protocol(self, map: F) -> ConnectionHandlerEvent where F: FnOnce(TConnectionUpgrade) -> I, { @@ -423,8 +598,12 @@ impl protocol: protocol.map_upgrade(map), } } - ConnectionHandlerEvent::Custom(val) => ConnectionHandlerEvent::Custom(val), - ConnectionHandlerEvent::Close(val) => ConnectionHandlerEvent::Close(val), + ConnectionHandlerEvent::NotifyBehaviour(val) => { + ConnectionHandlerEvent::NotifyBehaviour(val) + } + ConnectionHandlerEvent::ReportRemoteProtocols(support) => { + ConnectionHandlerEvent::ReportRemoteProtocols(support) + } } } @@ -432,7 +611,7 @@ impl pub fn map_custom( self, map: F, - ) -> ConnectionHandlerEvent + ) -> ConnectionHandlerEvent where F: FnOnce(TCustom) -> I, { @@ -440,166 +619,77 @@ impl ConnectionHandlerEvent::OutboundSubstreamRequest { protocol } => { ConnectionHandlerEvent::OutboundSubstreamRequest { protocol } } - ConnectionHandlerEvent::Custom(val) => ConnectionHandlerEvent::Custom(map(val)), - ConnectionHandlerEvent::Close(val) => ConnectionHandlerEvent::Close(val), - } - } - - /// If this is a `Close` event, maps the content to something else. - pub fn map_close( - self, - map: F, - ) -> ConnectionHandlerEvent - where - F: FnOnce(TErr) -> I, - { - match self { - ConnectionHandlerEvent::OutboundSubstreamRequest { protocol } => { - ConnectionHandlerEvent::OutboundSubstreamRequest { protocol } + ConnectionHandlerEvent::NotifyBehaviour(val) => { + ConnectionHandlerEvent::NotifyBehaviour(map(val)) + } + ConnectionHandlerEvent::ReportRemoteProtocols(support) => { + ConnectionHandlerEvent::ReportRemoteProtocols(support) } - ConnectionHandlerEvent::Custom(val) => ConnectionHandlerEvent::Custom(val), - ConnectionHandlerEvent::Close(val) => ConnectionHandlerEvent::Close(map(val)), } } } /// Error that can happen on an outbound substream opening attempt. 
#[derive(Debug)] -pub enum ConnectionHandlerUpgrErr { +pub enum StreamUpgradeError { /// The opening attempt timed out before the negotiation was fully completed. Timeout, - /// There was an error in the timer used. - Timer, - /// Error while upgrading the substream to the protocol we want. - Upgrade(UpgradeError), + /// The upgrade produced an error. + Apply(TUpgrErr), + /// No protocol could be agreed upon. + NegotiationFailed, + /// An IO or otherwise unrecoverable error happened. + Io(io::Error), } -impl ConnectionHandlerUpgrErr { - /// Map the inner [`UpgradeError`] type. - pub fn map_upgrade_err(self, f: F) -> ConnectionHandlerUpgrErr +impl StreamUpgradeError { + /// Map the inner [`StreamUpgradeError`] type. + pub fn map_upgrade_err(self, f: F) -> StreamUpgradeError where - F: FnOnce(UpgradeError) -> UpgradeError, + F: FnOnce(TUpgrErr) -> E, { match self { - ConnectionHandlerUpgrErr::Timeout => ConnectionHandlerUpgrErr::Timeout, - ConnectionHandlerUpgrErr::Timer => ConnectionHandlerUpgrErr::Timer, - ConnectionHandlerUpgrErr::Upgrade(e) => ConnectionHandlerUpgrErr::Upgrade(f(e)), + StreamUpgradeError::Timeout => StreamUpgradeError::Timeout, + StreamUpgradeError::Apply(e) => StreamUpgradeError::Apply(f(e)), + StreamUpgradeError::NegotiationFailed => StreamUpgradeError::NegotiationFailed, + StreamUpgradeError::Io(e) => StreamUpgradeError::Io(e), } } } -impl fmt::Display for ConnectionHandlerUpgrErr +impl fmt::Display for StreamUpgradeError where TUpgrErr: error::Error + 'static, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { - ConnectionHandlerUpgrErr::Timeout => { + StreamUpgradeError::Timeout => { write!(f, "Timeout error while opening a substream") } - ConnectionHandlerUpgrErr::Timer => { - write!(f, "Timer error while opening a substream") - } - ConnectionHandlerUpgrErr::Upgrade(err) => { - write!(f, "Upgrade: ")?; + StreamUpgradeError::Apply(err) => { + write!(f, "Apply: ")?; crate::print_error_chain(f, err) } + 
StreamUpgradeError::NegotiationFailed => { + write!(f, "no protocols could be agreed upon") + } + StreamUpgradeError::Io(e) => { + write!(f, "IO error: ")?; + crate::print_error_chain(f, e) + } } } } -impl error::Error for ConnectionHandlerUpgrErr +impl error::Error for StreamUpgradeError where TUpgrErr: error::Error + 'static, { fn source(&self) -> Option<&(dyn error::Error + 'static)> { - match self { - ConnectionHandlerUpgrErr::Timeout => None, - ConnectionHandlerUpgrErr::Timer => None, - ConnectionHandlerUpgrErr::Upgrade(_) => None, - } + None } } -/// Prototype for a [`ConnectionHandler`]. -#[deprecated( - note = "Implement `ConnectionHandler` directly and use `NetworkBehaviour::{handle_pending_inbound_connection,handle_pending_outbound_connection}` to handle pending connections." -)] -pub trait IntoConnectionHandler: Send + 'static { - /// The protocols handler. - type Handler: ConnectionHandler; - - /// Builds the protocols handler. - /// - /// The `PeerId` is the id of the node the handler is going to handle. - fn into_handler( - self, - remote_peer_id: &PeerId, - connected_point: &ConnectedPoint, - ) -> Self::Handler; - - /// Return the handler's inbound protocol. - fn inbound_protocol(&self) -> ::InboundProtocol; - - /// Builds an implementation of [`IntoConnectionHandler`] that handles both this protocol and the - /// other one together. - fn select(self, other: TProto2) -> IntoConnectionHandlerSelect - where - Self: Sized, - { - IntoConnectionHandlerSelect::new(self, other) - } -} - -#[allow(deprecated)] -impl IntoConnectionHandler for T -where - T: ConnectionHandler, -{ - type Handler = Self; - - fn into_handler(self, _: &PeerId, _: &ConnectedPoint) -> Self { - self - } - - fn inbound_protocol(&self) -> ::InboundProtocol { - self.listen_protocol().into_upgrade().0 - } -} - -/// How long the connection should be kept alive. 
-#[derive(Debug, Copy, Clone, PartialEq, Eq)] -pub enum KeepAlive { - /// If nothing new happens, the connection should be closed at the given `Instant`. - Until(Instant), - /// Keep the connection alive. - Yes, - /// Close the connection as soon as possible. - No, -} - -impl KeepAlive { - /// Returns true for `Yes`, false otherwise. - pub fn is_yes(&self) -> bool { - matches!(*self, KeepAlive::Yes) - } -} - -impl PartialOrd for KeepAlive { - fn partial_cmp(&self, other: &KeepAlive) -> Option { - Some(self.cmp(other)) - } -} - -impl Ord for KeepAlive { - fn cmp(&self, other: &KeepAlive) -> Ordering { - use self::KeepAlive::*; - - match (self, other) { - (No, No) | (Yes, Yes) => Ordering::Equal, - (No, _) | (_, Yes) => Ordering::Less, - (_, No) | (Yes, _) => Ordering::Greater, - (Until(t1), Until(t2)) => t1.cmp(t2), - } - } -} +/// A statically declared, empty [`HashSet`] allows us to work around borrow-checker rules for +/// [`ProtocolsAdded::from_set`]. The lifetimes don't work unless we have a [`HashSet`] with a `'static' lifetime. +static EMPTY_HASHSET: Lazy> = Lazy::new(HashSet::new); diff --git a/swarm/src/handler/either.rs b/swarm/src/handler/either.rs index 92d82371163..a5aab9b5fee 100644 --- a/swarm/src/handler/either.rs +++ b/swarm/src/handler/either.rs @@ -18,81 +18,15 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-#[allow(deprecated)] -use crate::handler::IntoConnectionHandler; use crate::handler::{ ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, FullyNegotiatedInbound, - InboundUpgradeSend, KeepAlive, ListenUpgradeError, SubstreamProtocol, + InboundUpgradeSend, ListenUpgradeError, SubstreamProtocol, }; use crate::upgrade::SendWrapper; -use crate::ConnectionHandlerUpgrErr; use either::Either; use futures::future; -use libp2p_core::{ConnectedPoint, UpgradeError}; -use libp2p_identity::PeerId; use std::task::{Context, Poll}; -/// Auxiliary type to allow implementing [`IntoConnectionHandler`]. As [`IntoConnectionHandler`] is -/// already implemented for T, we cannot implement it for Either. -pub enum IntoEitherHandler { - Left(L), - Right(R), -} - -/// Implementation of a [`IntoConnectionHandler`] that represents either of two [`IntoConnectionHandler`] -/// implementations. -#[allow(deprecated)] -impl IntoConnectionHandler for IntoEitherHandler -where - L: IntoConnectionHandler, - R: IntoConnectionHandler, -{ - type Handler = Either; - - fn into_handler(self, p: &PeerId, c: &ConnectedPoint) -> Self::Handler { - match self { - IntoEitherHandler::Left(into_handler) => Either::Left(into_handler.into_handler(p, c)), - IntoEitherHandler::Right(into_handler) => { - Either::Right(into_handler.into_handler(p, c)) - } - } - } - - fn inbound_protocol(&self) -> ::InboundProtocol { - match self { - IntoEitherHandler::Left(into_handler) => { - Either::Left(SendWrapper(into_handler.inbound_protocol())) - } - IntoEitherHandler::Right(into_handler) => { - Either::Right(SendWrapper(into_handler.inbound_protocol())) - } - } - } -} - -// Taken from https://github.com/bluss/either. -impl IntoEitherHandler { - /// Returns the left value. - pub fn unwrap_left(self) -> L { - match self { - IntoEitherHandler::Left(l) => l, - IntoEitherHandler::Right(_) => { - panic!("called `IntoEitherHandler::unwrap_left()` on a `Right` value.",) - } - } - } - - /// Returns the right value. 
- pub fn unwrap_right(self) -> R { - match self { - IntoEitherHandler::Right(r) => r, - IntoEitherHandler::Left(_) => { - panic!("called `IntoEitherHandler::unwrap_right()` on a `Left` value.",) - } - } - } -} - impl FullyNegotiatedInbound, SendWrapper>, Either> where @@ -125,61 +59,13 @@ where fn transpose(self) -> Either, ListenUpgradeError> { match self { ListenUpgradeError { - error: ConnectionHandlerUpgrErr::Upgrade(UpgradeError::Apply(Either::Left(error))), - info: Either::Left(info), - } => Either::Left(ListenUpgradeError { - error: ConnectionHandlerUpgrErr::Upgrade(UpgradeError::Apply(error)), - info, - }), - ListenUpgradeError { - error: ConnectionHandlerUpgrErr::Upgrade(UpgradeError::Apply(Either::Right(error))), - info: Either::Right(info), - } => Either::Right(ListenUpgradeError { - error: ConnectionHandlerUpgrErr::Upgrade(UpgradeError::Apply(error)), - info, - }), - ListenUpgradeError { - error: ConnectionHandlerUpgrErr::Upgrade(UpgradeError::Select(error)), - info: Either::Left(info), - } => Either::Left(ListenUpgradeError { - error: ConnectionHandlerUpgrErr::Upgrade(UpgradeError::Select(error)), - info, - }), - ListenUpgradeError { - error: ConnectionHandlerUpgrErr::Upgrade(UpgradeError::Select(error)), - info: Either::Right(info), - } => Either::Right(ListenUpgradeError { - error: ConnectionHandlerUpgrErr::Upgrade(UpgradeError::Select(error)), - info, - }), - ListenUpgradeError { - error: ConnectionHandlerUpgrErr::Timer, - info: Either::Left(info), - } => Either::Left(ListenUpgradeError { - error: ConnectionHandlerUpgrErr::Timer, - info, - }), - ListenUpgradeError { - error: ConnectionHandlerUpgrErr::Timer, - info: Either::Right(info), - } => Either::Right(ListenUpgradeError { - error: ConnectionHandlerUpgrErr::Timer, - info, - }), - ListenUpgradeError { - error: ConnectionHandlerUpgrErr::Timeout, + error: Either::Left(error), info: Either::Left(info), - } => Either::Left(ListenUpgradeError { - error: ConnectionHandlerUpgrErr::Timeout, - info, - }), 
+ } => Either::Left(ListenUpgradeError { error, info }), ListenUpgradeError { - error: ConnectionHandlerUpgrErr::Timeout, + error: Either::Right(error), info: Either::Right(info), - } => Either::Right(ListenUpgradeError { - error: ConnectionHandlerUpgrErr::Timeout, - info, - }), + } => Either::Right(ListenUpgradeError { error, info }), _ => unreachable!(), } } @@ -192,9 +78,8 @@ where L: ConnectionHandler, R: ConnectionHandler, { - type InEvent = Either; - type OutEvent = Either; - type Error = Either; + type FromBehaviour = Either; + type ToBehaviour = Either; type InboundProtocol = Either, SendWrapper>; type OutboundProtocol = Either, SendWrapper>; @@ -214,7 +99,7 @@ where } } - fn on_behaviour_event(&mut self, event: Self::InEvent) { + fn on_behaviour_event(&mut self, event: Self::FromBehaviour) { match (self, event) { (Either::Left(handler), Either::Left(event)) => handler.on_behaviour_event(event), (Either::Right(handler), Either::Right(event)) => handler.on_behaviour_event(event), @@ -222,7 +107,7 @@ where } } - fn connection_keep_alive(&self) -> KeepAlive { + fn connection_keep_alive(&self) -> bool { match self { Either::Left(handler) => handler.connection_keep_alive(), Either::Right(handler) => handler.connection_keep_alive(), @@ -233,22 +118,15 @@ where &mut self, cx: &mut Context<'_>, ) -> Poll< - ConnectionHandlerEvent< - Self::OutboundProtocol, - Self::OutboundOpenInfo, - Self::OutEvent, - Self::Error, - >, + ConnectionHandlerEvent, > { let event = match self { Either::Left(handler) => futures::ready!(handler.poll(cx)) .map_custom(Either::Left) - .map_close(Either::Left) .map_protocol(|p| Either::Left(SendWrapper(p))) .map_outbound_open_info(Either::Left), Either::Right(handler) => futures::ready!(handler.poll(cx)) .map_custom(Either::Right) - .map_close(Either::Right) .map_protocol(|p| Either::Right(SendWrapper(p))) .map_outbound_open_info(Either::Right), }; @@ -256,6 +134,15 @@ where Poll::Ready(event) } + fn poll_close(&mut self, cx: &mut 
Context<'_>) -> Poll> { + let event = match self { + Either::Left(handler) => futures::ready!(handler.poll_close(cx)).map(Either::Left), + Either::Right(handler) => futures::ready!(handler.poll_close(cx)).map(Either::Right), + }; + + Poll::Ready(event) + } + fn on_connection_event( &mut self, event: ConnectionEvent< @@ -322,6 +209,22 @@ where handler.on_connection_event(ConnectionEvent::AddressChange(address_change)) } }, + ConnectionEvent::LocalProtocolsChange(supported_protocols) => match self { + Either::Left(handler) => handler.on_connection_event( + ConnectionEvent::LocalProtocolsChange(supported_protocols), + ), + Either::Right(handler) => handler.on_connection_event( + ConnectionEvent::LocalProtocolsChange(supported_protocols), + ), + }, + ConnectionEvent::RemoteProtocolsChange(supported_protocols) => match self { + Either::Left(handler) => handler.on_connection_event( + ConnectionEvent::RemoteProtocolsChange(supported_protocols), + ), + Either::Right(handler) => handler.on_connection_event( + ConnectionEvent::RemoteProtocolsChange(supported_protocols), + ), + }, } } } diff --git a/swarm/src/handler/map_in.rs b/swarm/src/handler/map_in.rs index 3564de919bb..9316ef4d2ce 100644 --- a/swarm/src/handler/map_in.rs +++ b/swarm/src/handler/map_in.rs @@ -19,11 +19,12 @@ // DEALINGS IN THE SOFTWARE. use crate::handler::{ - ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, KeepAlive, SubstreamProtocol, + ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, SubstreamProtocol, }; use std::{fmt::Debug, marker::PhantomData, task::Context, task::Poll}; /// Wrapper around a protocol handler that turns the input event into something else. 
+#[derive(Debug)] pub struct MapInEvent { inner: TConnectionHandler, map: TMap, @@ -45,13 +46,12 @@ impl ConnectionHandler for MapInEvent where TConnectionHandler: ConnectionHandler, - TMap: Fn(TNewIn) -> Option, + TMap: Fn(TNewIn) -> Option, TNewIn: Debug + Send + 'static, TMap: Send + 'static, { - type InEvent = TNewIn; - type OutEvent = TConnectionHandler::OutEvent; - type Error = TConnectionHandler::Error; + type FromBehaviour = TNewIn; + type ToBehaviour = TConnectionHandler::ToBehaviour; type InboundProtocol = TConnectionHandler::InboundProtocol; type OutboundProtocol = TConnectionHandler::OutboundProtocol; type InboundOpenInfo = TConnectionHandler::InboundOpenInfo; @@ -67,7 +67,7 @@ where } } - fn connection_keep_alive(&self) -> KeepAlive { + fn connection_keep_alive(&self) -> bool { self.inner.connection_keep_alive() } @@ -75,16 +75,15 @@ where &mut self, cx: &mut Context<'_>, ) -> Poll< - ConnectionHandlerEvent< - Self::OutboundProtocol, - Self::OutboundOpenInfo, - Self::OutEvent, - Self::Error, - >, + ConnectionHandlerEvent, > { self.inner.poll(cx) } + fn poll_close(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_close(cx) + } + fn on_connection_event( &mut self, event: ConnectionEvent< diff --git a/swarm/src/handler/map_out.rs b/swarm/src/handler/map_out.rs index 773df2b6681..f877bfa6f64 100644 --- a/swarm/src/handler/map_out.rs +++ b/swarm/src/handler/map_out.rs @@ -19,12 +19,14 @@ // DEALINGS IN THE SOFTWARE. use crate::handler::{ - ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, KeepAlive, SubstreamProtocol, + ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, SubstreamProtocol, }; +use futures::ready; use std::fmt::Debug; use std::task::{Context, Poll}; /// Wrapper around a protocol handler that turns the output event into something else. 
+#[derive(Debug)] pub struct MapOutEvent { inner: TConnectionHandler, map: TMap, @@ -40,13 +42,12 @@ impl MapOutEvent { impl ConnectionHandler for MapOutEvent where TConnectionHandler: ConnectionHandler, - TMap: FnMut(TConnectionHandler::OutEvent) -> TNewOut, + TMap: FnMut(TConnectionHandler::ToBehaviour) -> TNewOut, TNewOut: Debug + Send + 'static, TMap: Send + 'static, { - type InEvent = TConnectionHandler::InEvent; - type OutEvent = TNewOut; - type Error = TConnectionHandler::Error; + type FromBehaviour = TConnectionHandler::FromBehaviour; + type ToBehaviour = TNewOut; type InboundProtocol = TConnectionHandler::InboundProtocol; type OutboundProtocol = TConnectionHandler::OutboundProtocol; type InboundOpenInfo = TConnectionHandler::InboundOpenInfo; @@ -56,11 +57,11 @@ where self.inner.listen_protocol() } - fn on_behaviour_event(&mut self, event: Self::InEvent) { + fn on_behaviour_event(&mut self, event: Self::FromBehaviour) { self.inner.on_behaviour_event(event) } - fn connection_keep_alive(&self) -> KeepAlive { + fn connection_keep_alive(&self) -> bool { self.inner.connection_keep_alive() } @@ -68,22 +69,29 @@ where &mut self, cx: &mut Context<'_>, ) -> Poll< - ConnectionHandlerEvent< - Self::OutboundProtocol, - Self::OutboundOpenInfo, - Self::OutEvent, - Self::Error, - >, + ConnectionHandlerEvent, > { self.inner.poll(cx).map(|ev| match ev { - ConnectionHandlerEvent::Custom(ev) => ConnectionHandlerEvent::Custom((self.map)(ev)), - ConnectionHandlerEvent::Close(err) => ConnectionHandlerEvent::Close(err), + ConnectionHandlerEvent::NotifyBehaviour(ev) => { + ConnectionHandlerEvent::NotifyBehaviour((self.map)(ev)) + } ConnectionHandlerEvent::OutboundSubstreamRequest { protocol } => { ConnectionHandlerEvent::OutboundSubstreamRequest { protocol } } + ConnectionHandlerEvent::ReportRemoteProtocols(support) => { + ConnectionHandlerEvent::ReportRemoteProtocols(support) + } }) } + fn poll_close(&mut self, cx: &mut Context<'_>) -> Poll> { + let Some(e) = 
ready!(self.inner.poll_close(cx)) else { + return Poll::Ready(None); + }; + + Poll::Ready(Some((self.map)(e))) + } + fn on_connection_event( &mut self, event: ConnectionEvent< diff --git a/swarm/src/handler/multi.rs b/swarm/src/handler/multi.rs index e14c75376cc..0b4549ed733 100644 --- a/swarm/src/handler/multi.rs +++ b/swarm/src/handler/multi.rs @@ -21,19 +21,13 @@ //! A [`ConnectionHandler`] implementation that combines multiple other [`ConnectionHandler`]s //! indexed by some key. -#[allow(deprecated)] -use crate::handler::IntoConnectionHandler; use crate::handler::{ - AddressChange, ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, - ConnectionHandlerUpgrErr, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound, - KeepAlive, ListenUpgradeError, SubstreamProtocol, + AddressChange, ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, DialUpgradeError, + FullyNegotiatedInbound, FullyNegotiatedOutbound, ListenUpgradeError, SubstreamProtocol, }; use crate::upgrade::{InboundUpgradeSend, OutboundUpgradeSend, UpgradeInfoSend}; -use crate::NegotiatedSubstream; -use futures::{future::BoxFuture, prelude::*}; -use libp2p_core::upgrade::{NegotiationError, ProtocolError, UpgradeError}; -use libp2p_core::ConnectedPoint; -use libp2p_identity::PeerId; +use crate::Stream; +use futures::{future::BoxFuture, prelude::*, ready}; use rand::Rng; use std::{ cmp, @@ -89,128 +83,20 @@ where fn on_listen_upgrade_error( &mut self, - ListenUpgradeError { error, mut info }: ListenUpgradeError< + ListenUpgradeError { + error: (key, error), + mut info, + }: ListenUpgradeError< ::InboundOpenInfo, ::InboundProtocol, >, ) { - match error { - ConnectionHandlerUpgrErr::Timer => { - for (k, h) in &mut self.handlers { - if let Some(i) = info.take(k) { - h.on_connection_event(ConnectionEvent::ListenUpgradeError( - ListenUpgradeError { - info: i, - error: ConnectionHandlerUpgrErr::Timer, - }, - )); - } - } - } - ConnectionHandlerUpgrErr::Timeout => { - for (k, h) in &mut 
self.handlers { - if let Some(i) = info.take(k) { - h.on_connection_event(ConnectionEvent::ListenUpgradeError( - ListenUpgradeError { - info: i, - error: ConnectionHandlerUpgrErr::Timeout, - }, - )); - } - } - } - ConnectionHandlerUpgrErr::Upgrade(UpgradeError::Select(NegotiationError::Failed)) => { - for (k, h) in &mut self.handlers { - if let Some(i) = info.take(k) { - h.on_connection_event(ConnectionEvent::ListenUpgradeError( - ListenUpgradeError { - info: i, - error: ConnectionHandlerUpgrErr::Upgrade(UpgradeError::Select( - NegotiationError::Failed, - )), - }, - )); - } - } - } - ConnectionHandlerUpgrErr::Upgrade(UpgradeError::Select( - NegotiationError::ProtocolError(e), - )) => match e { - ProtocolError::IoError(e) => { - for (k, h) in &mut self.handlers { - if let Some(i) = info.take(k) { - let e = NegotiationError::ProtocolError(ProtocolError::IoError( - e.kind().into(), - )); - h.on_connection_event(ConnectionEvent::ListenUpgradeError( - ListenUpgradeError { - info: i, - error: ConnectionHandlerUpgrErr::Upgrade(UpgradeError::Select( - e, - )), - }, - )); - } - } - } - ProtocolError::InvalidMessage => { - for (k, h) in &mut self.handlers { - if let Some(i) = info.take(k) { - let e = NegotiationError::ProtocolError(ProtocolError::InvalidMessage); - h.on_connection_event(ConnectionEvent::ListenUpgradeError( - ListenUpgradeError { - info: i, - error: ConnectionHandlerUpgrErr::Upgrade(UpgradeError::Select( - e, - )), - }, - )); - } - } - } - ProtocolError::InvalidProtocol => { - for (k, h) in &mut self.handlers { - if let Some(i) = info.take(k) { - let e = NegotiationError::ProtocolError(ProtocolError::InvalidProtocol); - h.on_connection_event(ConnectionEvent::ListenUpgradeError( - ListenUpgradeError { - info: i, - error: ConnectionHandlerUpgrErr::Upgrade(UpgradeError::Select( - e, - )), - }, - )); - } - } - } - ProtocolError::TooManyProtocols => { - for (k, h) in &mut self.handlers { - if let Some(i) = info.take(k) { - let e = - 
NegotiationError::ProtocolError(ProtocolError::TooManyProtocols); - h.on_connection_event(ConnectionEvent::ListenUpgradeError( - ListenUpgradeError { - info: i, - error: ConnectionHandlerUpgrErr::Upgrade(UpgradeError::Select( - e, - )), - }, - )); - } - } - } - }, - ConnectionHandlerUpgrErr::Upgrade(UpgradeError::Apply((k, e))) => { - if let Some(h) = self.handlers.get_mut(&k) { - if let Some(i) = info.take(&k) { - h.on_connection_event(ConnectionEvent::ListenUpgradeError( - ListenUpgradeError { - info: i, - error: ConnectionHandlerUpgrErr::Upgrade(UpgradeError::Apply(e)), - }, - )); - } - } + if let Some(h) = self.handlers.get_mut(&key) { + if let Some(i) = info.take(&key) { + h.on_connection_event(ConnectionEvent::ListenUpgradeError(ListenUpgradeError { + info: i, + error, + })); } } } @@ -223,9 +109,8 @@ where H::InboundProtocol: InboundUpgradeSend, H::OutboundProtocol: OutboundUpgradeSend, { - type InEvent = (K, ::InEvent); - type OutEvent = (K, ::OutEvent); - type Error = ::Error; + type FromBehaviour = (K, ::FromBehaviour); + type ToBehaviour = (K, ::ToBehaviour); type InboundProtocol = Upgrade::InboundProtocol>; type OutboundProtocol = ::OutboundProtocol; type InboundOpenInfo = Info::InboundOpenInfo>; @@ -275,7 +160,7 @@ where }, )); } else { - log::error!("FullyNegotiatedOutbound: no handler for key") + tracing::error!("FullyNegotiatedOutbound: no handler for key") } } ConnectionEvent::FullyNegotiatedInbound(FullyNegotiatedInbound { @@ -292,7 +177,7 @@ where )); } } else { - log::error!("FullyNegotiatedInbound: no handler for key") + tracing::error!("FullyNegotiatedInbound: no handler for key") } } ConnectionEvent::AddressChange(AddressChange { new_address }) => { @@ -312,41 +197,50 @@ where error, })); } else { - log::error!("DialUpgradeError: no handler for protocol") + tracing::error!("DialUpgradeError: no handler for protocol") } } ConnectionEvent::ListenUpgradeError(listen_upgrade_error) => { self.on_listen_upgrade_error(listen_upgrade_error) } + 
ConnectionEvent::LocalProtocolsChange(supported_protocols) => { + for h in self.handlers.values_mut() { + h.on_connection_event(ConnectionEvent::LocalProtocolsChange( + supported_protocols.clone(), + )); + } + } + ConnectionEvent::RemoteProtocolsChange(supported_protocols) => { + for h in self.handlers.values_mut() { + h.on_connection_event(ConnectionEvent::RemoteProtocolsChange( + supported_protocols.clone(), + )); + } + } } } - fn on_behaviour_event(&mut self, (key, event): Self::InEvent) { + fn on_behaviour_event(&mut self, (key, event): Self::FromBehaviour) { if let Some(h) = self.handlers.get_mut(&key) { h.on_behaviour_event(event) } else { - log::error!("on_behaviour_event: no handler for key") + tracing::error!("on_behaviour_event: no handler for key") } } - fn connection_keep_alive(&self) -> KeepAlive { + fn connection_keep_alive(&self) -> bool { self.handlers .values() .map(|h| h.connection_keep_alive()) .max() - .unwrap_or(KeepAlive::No) + .unwrap_or(false) } fn poll( &mut self, cx: &mut Context<'_>, ) -> Poll< - ConnectionHandlerEvent< - Self::OutboundProtocol, - Self::OutboundOpenInfo, - Self::OutEvent, - Self::Error, - >, + ConnectionHandlerEvent, > { // Calling `gen_range(0, 0)` (see below) would panic, so we have return early to avoid // that situation. @@ -377,6 +271,17 @@ where Poll::Pending } + + fn poll_close(&mut self, cx: &mut Context<'_>) -> Poll> { + for (k, h) in self.handlers.iter_mut() { + let Some(e) = ready!(h.poll_close(cx)) else { + continue; + }; + return Poll::Ready(Some((k.clone(), e))); + } + + Poll::Ready(None) + } } /// Split [`MultiHandler`] into parts. @@ -389,76 +294,6 @@ impl IntoIterator for MultiHandler { } } -/// A [`IntoConnectionHandler`] for multiple other `IntoConnectionHandler`s. 
-#[derive(Clone)] -#[deprecated(note = "Use `MultiHandler` directly.")] -pub struct IntoMultiHandler { - handlers: HashMap, -} - -#[allow(deprecated)] -impl fmt::Debug for IntoMultiHandler -where - K: fmt::Debug + Eq + Hash, - H: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("IntoMultiHandler") - .field("handlers", &self.handlers) - .finish() - } -} - -#[allow(deprecated)] -impl IntoMultiHandler -where - K: Hash + Eq, - H: IntoConnectionHandler, -{ - /// Create and populate an `IntoMultiHandler` from the given iterator. - /// - /// It is an error for any two protocols handlers to share the same protocol name. - pub fn try_from_iter(iter: I) -> Result - where - I: IntoIterator, - { - let m = IntoMultiHandler { - handlers: HashMap::from_iter(iter), - }; - uniq_proto_names(m.handlers.values().map(|h| h.inbound_protocol()))?; - Ok(m) - } -} - -#[allow(deprecated)] -impl IntoConnectionHandler for IntoMultiHandler -where - K: Debug + Clone + Eq + Hash + Send + 'static, - H: IntoConnectionHandler, -{ - type Handler = MultiHandler; - - fn into_handler(self, p: &PeerId, c: &ConnectedPoint) -> Self::Handler { - MultiHandler { - handlers: self - .handlers - .into_iter() - .map(|(k, h)| (k, h.into_handler(p, c))) - .collect(), - } - } - - fn inbound_protocol(&self) -> ::InboundProtocol { - Upgrade { - upgrades: self - .handlers - .iter() - .map(|(k, h)| (k.clone(), h.inbound_protocol())) - .collect(), - } - } -} - /// Index and protocol name pair used as `UpgradeInfo::Info`. 
#[derive(Debug, Clone)] pub struct IndexedProtoName(usize, H); @@ -542,7 +377,7 @@ where type Error = (K, ::Error); type Future = BoxFuture<'static, Result>; - fn upgrade_inbound(mut self, resource: NegotiatedSubstream, info: Self::Info) -> Self::Future { + fn upgrade_inbound(mut self, resource: Stream, info: Self::Info) -> Self::Future { let IndexedProtoName(index, info) = info; let (key, upgrade) = self.upgrades.remove(index); upgrade @@ -564,7 +399,7 @@ where type Error = (K, ::Error); type Future = BoxFuture<'static, Result>; - fn upgrade_outbound(mut self, resource: NegotiatedSubstream, info: Self::Info) -> Self::Future { + fn upgrade_outbound(mut self, resource: Stream, info: Self::Info) -> Self::Future { let IndexedProtoName(index, info) = info; let (key, upgrade) = self.upgrades.remove(index); upgrade diff --git a/swarm/src/handler/one_shot.rs b/swarm/src/handler/one_shot.rs index 29ba45ab678..b1fc41e9098 100644 --- a/swarm/src/handler/one_shot.rs +++ b/swarm/src/handler/one_shot.rs @@ -19,12 +19,11 @@ // DEALINGS IN THE SOFTWARE. use crate::handler::{ - ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, ConnectionHandlerUpgrErr, - DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound, KeepAlive, - SubstreamProtocol, + ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, DialUpgradeError, + FullyNegotiatedInbound, FullyNegotiatedOutbound, SubstreamProtocol, }; use crate::upgrade::{InboundUpgradeSend, OutboundUpgradeSend}; -use instant::Instant; +use crate::StreamUpgradeError; use smallvec::SmallVec; use std::{error, fmt::Debug, task::Context, task::Poll, time::Duration}; @@ -36,16 +35,12 @@ where { /// The upgrade for inbound substreams. listen_protocol: SubstreamProtocol, - /// If `Some`, something bad happened and we should shut down the handler with an error. - pending_error: Option::Error>>, /// Queue of events to produce in `poll()`. 
- events_out: SmallVec<[TEvent; 4]>, + events_out: SmallVec<[Result>; 4]>, /// Queue of outbound substreams to open. dial_queue: SmallVec<[TOutbound; 4]>, /// Current number of concurrent outbound substreams being opened. dial_negotiated: u32, - /// Value to return from `connection_keep_alive`. - keep_alive: KeepAlive, /// The configuration container for the handler config: OneShotHandlerConfig, } @@ -61,11 +56,9 @@ where ) -> Self { OneShotHandler { listen_protocol, - pending_error: None, events_out: SmallVec::new(), dial_queue: SmallVec::new(), dial_negotiated: 0, - keep_alive: KeepAlive::Yes, config, } } @@ -93,7 +86,6 @@ where /// Opens an outbound substream with `upgrade`. pub fn send_request(&mut self, upgrade: TOutbound) { - self.keep_alive = KeepAlive::Yes; self.dial_queue.push(upgrade); } } @@ -121,9 +113,8 @@ where SubstreamProtocol: Clone, TEvent: Debug + Send + 'static, { - type InEvent = TOutbound; - type OutEvent = TEvent; - type Error = ConnectionHandlerUpgrErr<::Error>; + type FromBehaviour = TOutbound; + type ToBehaviour = Result>; type InboundProtocol = TInbound; type OutboundProtocol = TOutbound; type OutboundOpenInfo = (); @@ -133,31 +124,20 @@ where self.listen_protocol.clone() } - fn on_behaviour_event(&mut self, event: Self::InEvent) { + fn on_behaviour_event(&mut self, event: Self::FromBehaviour) { self.send_request(event); } - fn connection_keep_alive(&self) -> KeepAlive { - self.keep_alive - } - fn poll( &mut self, _: &mut Context<'_>, ) -> Poll< - ConnectionHandlerEvent< - Self::OutboundProtocol, - Self::OutboundOpenInfo, - Self::OutEvent, - Self::Error, - >, + ConnectionHandlerEvent, > { - if let Some(err) = self.pending_error.take() { - return Poll::Ready(ConnectionHandlerEvent::Close(err)); - } - if !self.events_out.is_empty() { - return Poll::Ready(ConnectionHandlerEvent::Custom(self.events_out.remove(0))); + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + self.events_out.remove(0), + )); } else { 
self.events_out.shrink_to_fit(); } @@ -173,10 +153,6 @@ where } } else { self.dial_queue.shrink_to_fit(); - - if self.dial_negotiated == 0 && self.keep_alive.is_yes() { - self.keep_alive = KeepAlive::Until(Instant::now() + self.config.keep_alive_timeout); - } } Poll::Pending @@ -196,28 +172,22 @@ where protocol: out, .. }) => { - // If we're shutting down the connection for inactivity, reset the timeout. - if !self.keep_alive.is_yes() { - self.keep_alive = - KeepAlive::Until(Instant::now() + self.config.keep_alive_timeout); - } - - self.events_out.push(out.into()); + self.events_out.push(Ok(out.into())); } ConnectionEvent::FullyNegotiatedOutbound(FullyNegotiatedOutbound { protocol: out, .. }) => { self.dial_negotiated -= 1; - self.events_out.push(out.into()); + self.events_out.push(Ok(out.into())); } ConnectionEvent::DialUpgradeError(DialUpgradeError { error, .. }) => { - if self.pending_error.is_none() { - log::debug!("DialUpgradeError: {error}"); - self.keep_alive = KeepAlive::No; - } + self.events_out.push(Err(error)); } - ConnectionEvent::AddressChange(_) | ConnectionEvent::ListenUpgradeError(_) => {} + ConnectionEvent::AddressChange(_) + | ConnectionEvent::ListenUpgradeError(_) + | ConnectionEvent::LocalProtocolsChange(_) + | ConnectionEvent::RemoteProtocolsChange(_) => {} } } } @@ -225,8 +195,6 @@ where /// Configuration parameters for the `OneShotHandler` #[derive(Debug)] pub struct OneShotHandlerConfig { - /// Keep-alive timeout for idle connections. - pub keep_alive_timeout: Duration, /// Timeout for outbound substream upgrades. pub outbound_substream_timeout: Duration, /// Maximum number of concurrent outbound substreams being opened. 
@@ -236,7 +204,6 @@ pub struct OneShotHandlerConfig { impl Default for OneShotHandlerConfig { fn default() -> Self { OneShotHandlerConfig { - keep_alive_timeout: Duration::from_secs(10), outbound_substream_timeout: Duration::from_secs(10), max_dial_negotiated: 8, } @@ -265,9 +232,6 @@ mod tests { } })); - assert!(matches!( - handler.connection_keep_alive(), - KeepAlive::Until(_) - )); + assert!(matches!(handler.connection_keep_alive(), false)); } } diff --git a/swarm/src/handler/pending.rs b/swarm/src/handler/pending.rs index a39e498c3f2..23b9adcfd90 100644 --- a/swarm/src/handler/pending.rs +++ b/swarm/src/handler/pending.rs @@ -21,7 +21,7 @@ use crate::handler::{ ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, FullyNegotiatedInbound, - FullyNegotiatedOutbound, KeepAlive, SubstreamProtocol, + FullyNegotiatedOutbound, SubstreamProtocol, }; use libp2p_core::upgrade::PendingUpgrade; use std::task::{Context, Poll}; @@ -40,9 +40,8 @@ impl PendingConnectionHandler { } impl ConnectionHandler for PendingConnectionHandler { - type InEvent = Void; - type OutEvent = Void; - type Error = Void; + type FromBehaviour = Void; + type ToBehaviour = Void; type InboundProtocol = PendingUpgrade; type OutboundProtocol = PendingUpgrade; type OutboundOpenInfo = Void; @@ -52,24 +51,15 @@ impl ConnectionHandler for PendingConnectionHandler { SubstreamProtocol::new(PendingUpgrade::new(self.protocol_name.clone()), ()) } - fn on_behaviour_event(&mut self, v: Self::InEvent) { + fn on_behaviour_event(&mut self, v: Self::FromBehaviour) { void::unreachable(v) } - fn connection_keep_alive(&self) -> KeepAlive { - KeepAlive::No - } - fn poll( &mut self, _: &mut Context<'_>, ) -> Poll< - ConnectionHandlerEvent< - Self::OutboundProtocol, - Self::OutboundOpenInfo, - Self::OutEvent, - Self::Error, - >, + ConnectionHandlerEvent, > { Poll::Pending } @@ -99,7 +89,9 @@ impl ConnectionHandler for PendingConnectionHandler { } ConnectionEvent::AddressChange(_) | ConnectionEvent::DialUpgradeError(_) 
- | ConnectionEvent::ListenUpgradeError(_) => {} + | ConnectionEvent::ListenUpgradeError(_) + | ConnectionEvent::LocalProtocolsChange(_) + | ConnectionEvent::RemoteProtocolsChange(_) => {} } } } diff --git a/swarm/src/handler/select.rs b/swarm/src/handler/select.rs index edb9a9154b1..e049252d448 100644 --- a/swarm/src/handler/select.rs +++ b/swarm/src/handler/select.rs @@ -18,70 +18,17 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -#[allow(deprecated)] -use crate::handler::IntoConnectionHandler; use crate::handler::{ - AddressChange, ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, - ConnectionHandlerUpgrErr, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound, - InboundUpgradeSend, KeepAlive, ListenUpgradeError, OutboundUpgradeSend, SubstreamProtocol, + AddressChange, ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, DialUpgradeError, + FullyNegotiatedInbound, FullyNegotiatedOutbound, InboundUpgradeSend, ListenUpgradeError, + OutboundUpgradeSend, StreamUpgradeError, SubstreamProtocol, }; use crate::upgrade::SendWrapper; use either::Either; -use futures::future; -use libp2p_core::{ - upgrade::{NegotiationError, ProtocolError, SelectUpgrade, UpgradeError}, - ConnectedPoint, -}; -use libp2p_identity::PeerId; +use futures::{future, ready}; +use libp2p_core::upgrade::SelectUpgrade; use std::{cmp, task::Context, task::Poll}; -/// Implementation of `IntoConnectionHandler` that combines two protocols into one. -#[derive(Debug, Clone)] -pub struct IntoConnectionHandlerSelect { - /// The first protocol. - proto1: TProto1, - /// The second protocol. - proto2: TProto2, -} - -impl IntoConnectionHandlerSelect { - /// Builds a `IntoConnectionHandlerSelect`. 
- pub(crate) fn new(proto1: TProto1, proto2: TProto2) -> Self { - IntoConnectionHandlerSelect { proto1, proto2 } - } - - pub fn into_inner(self) -> (TProto1, TProto2) { - (self.proto1, self.proto2) - } -} - -#[allow(deprecated)] -impl IntoConnectionHandler for IntoConnectionHandlerSelect -where - TProto1: IntoConnectionHandler, - TProto2: IntoConnectionHandler, -{ - type Handler = ConnectionHandlerSelect; - - fn into_handler( - self, - remote_peer_id: &PeerId, - connected_point: &ConnectedPoint, - ) -> Self::Handler { - ConnectionHandlerSelect { - proto1: self.proto1.into_handler(remote_peer_id, connected_point), - proto2: self.proto2.into_handler(remote_peer_id, connected_point), - } - } - - fn inbound_protocol(&self) -> ::InboundProtocol { - SelectUpgrade::new( - SendWrapper(self.proto1.inbound_protocol()), - SendWrapper(self.proto2.inbound_protocol()), - ) - } -} - /// Implementation of [`ConnectionHandler`] that combines two protocols into one. #[derive(Debug, Clone)] pub struct ConnectionHandlerSelect { @@ -163,61 +110,32 @@ where match self { DialUpgradeError { info: Either::Left(info), - error: ConnectionHandlerUpgrErr::Timer, + error: StreamUpgradeError::Apply(Either::Left(err)), } => Either::Left(DialUpgradeError { info, - error: ConnectionHandlerUpgrErr::Timer, - }), - DialUpgradeError { - info: Either::Left(info), - error: ConnectionHandlerUpgrErr::Timeout, - } => Either::Left(DialUpgradeError { - info, - error: ConnectionHandlerUpgrErr::Timeout, - }), - DialUpgradeError { - info: Either::Left(info), - error: ConnectionHandlerUpgrErr::Upgrade(UpgradeError::Select(err)), - } => Either::Left(DialUpgradeError { - info, - error: ConnectionHandlerUpgrErr::Upgrade(UpgradeError::Select(err)), - }), - DialUpgradeError { - info: Either::Left(info), - error: ConnectionHandlerUpgrErr::Upgrade(UpgradeError::Apply(Either::Left(err))), - } => Either::Left(DialUpgradeError { - info, - error: ConnectionHandlerUpgrErr::Upgrade(UpgradeError::Apply(err)), + error: 
StreamUpgradeError::Apply(err), }), DialUpgradeError { info: Either::Right(info), - error: ConnectionHandlerUpgrErr::Timer, + error: StreamUpgradeError::Apply(Either::Right(err)), } => Either::Right(DialUpgradeError { info, - error: ConnectionHandlerUpgrErr::Timer, + error: StreamUpgradeError::Apply(err), }), DialUpgradeError { - info: Either::Right(info), - error: ConnectionHandlerUpgrErr::Timeout, - } => Either::Right(DialUpgradeError { - info, - error: ConnectionHandlerUpgrErr::Timeout, - }), - DialUpgradeError { - info: Either::Right(info), - error: ConnectionHandlerUpgrErr::Upgrade(UpgradeError::Select(err)), - } => Either::Right(DialUpgradeError { + info: Either::Left(info), + error: e, + } => Either::Left(DialUpgradeError { info, - error: ConnectionHandlerUpgrErr::Upgrade(UpgradeError::Select(err)), + error: e.map_upgrade_err(|_| panic!("already handled above")), }), DialUpgradeError { info: Either::Right(info), - error: ConnectionHandlerUpgrErr::Upgrade(UpgradeError::Apply(Either::Right(err))), + error: e, } => Either::Right(DialUpgradeError { info, - error: ConnectionHandlerUpgrErr::Upgrade(UpgradeError::Apply(err)), + error: e.map_upgrade_err(|_| panic!("already handled above")), }), - _ => panic!("Wrong API usage; the upgrade error doesn't match the outbound open info"), } } } @@ -238,96 +156,18 @@ where >, ) { match error { - ConnectionHandlerUpgrErr::Timer => { - self.proto1 - .on_connection_event(ConnectionEvent::ListenUpgradeError(ListenUpgradeError { - info: i1, - error: ConnectionHandlerUpgrErr::Timer, - })); - - self.proto2 - .on_connection_event(ConnectionEvent::ListenUpgradeError(ListenUpgradeError { - info: i2, - error: ConnectionHandlerUpgrErr::Timer, - })); - } - ConnectionHandlerUpgrErr::Timeout => { - self.proto1 - .on_connection_event(ConnectionEvent::ListenUpgradeError(ListenUpgradeError { - info: i1, - error: ConnectionHandlerUpgrErr::Timeout, - })); - - self.proto2 - 
.on_connection_event(ConnectionEvent::ListenUpgradeError(ListenUpgradeError { - info: i2, - error: ConnectionHandlerUpgrErr::Timeout, - })); - } - ConnectionHandlerUpgrErr::Upgrade(UpgradeError::Select(NegotiationError::Failed)) => { + Either::Left(error) => { self.proto1 .on_connection_event(ConnectionEvent::ListenUpgradeError(ListenUpgradeError { info: i1, - error: ConnectionHandlerUpgrErr::Upgrade(UpgradeError::Select( - NegotiationError::Failed, - )), - })); - - self.proto2 - .on_connection_event(ConnectionEvent::ListenUpgradeError(ListenUpgradeError { - info: i2, - error: ConnectionHandlerUpgrErr::Upgrade(UpgradeError::Select( - NegotiationError::Failed, - )), + error, })); } - ConnectionHandlerUpgrErr::Upgrade(UpgradeError::Select( - NegotiationError::ProtocolError(e), - )) => { - let (e1, e2); - match e { - ProtocolError::IoError(e) => { - e1 = NegotiationError::ProtocolError(ProtocolError::IoError( - e.kind().into(), - )); - e2 = NegotiationError::ProtocolError(ProtocolError::IoError(e)) - } - ProtocolError::InvalidMessage => { - e1 = NegotiationError::ProtocolError(ProtocolError::InvalidMessage); - e2 = NegotiationError::ProtocolError(ProtocolError::InvalidMessage) - } - ProtocolError::InvalidProtocol => { - e1 = NegotiationError::ProtocolError(ProtocolError::InvalidProtocol); - e2 = NegotiationError::ProtocolError(ProtocolError::InvalidProtocol) - } - ProtocolError::TooManyProtocols => { - e1 = NegotiationError::ProtocolError(ProtocolError::TooManyProtocols); - e2 = NegotiationError::ProtocolError(ProtocolError::TooManyProtocols) - } - } - self.proto1 - .on_connection_event(ConnectionEvent::ListenUpgradeError(ListenUpgradeError { - info: i1, - error: ConnectionHandlerUpgrErr::Upgrade(UpgradeError::Select(e1)), - })); + Either::Right(error) => { self.proto2 .on_connection_event(ConnectionEvent::ListenUpgradeError(ListenUpgradeError { info: i2, - error: ConnectionHandlerUpgrErr::Upgrade(UpgradeError::Select(e2)), - })); - } - 
ConnectionHandlerUpgrErr::Upgrade(UpgradeError::Apply(Either::Left(e))) => { - self.proto1 - .on_connection_event(ConnectionEvent::ListenUpgradeError(ListenUpgradeError { - info: i1, - error: ConnectionHandlerUpgrErr::Upgrade(UpgradeError::Apply(e)), - })); - } - ConnectionHandlerUpgrErr::Upgrade(UpgradeError::Apply(Either::Right(e))) => { - self.proto2 - .on_connection_event(ConnectionEvent::ListenUpgradeError(ListenUpgradeError { - info: i2, - error: ConnectionHandlerUpgrErr::Upgrade(UpgradeError::Apply(e)), + error, })); } } @@ -339,9 +179,8 @@ where TProto1: ConnectionHandler, TProto2: ConnectionHandler, { - type InEvent = Either; - type OutEvent = Either; - type Error = Either; + type FromBehaviour = Either; + type ToBehaviour = Either; type InboundProtocol = SelectUpgrade< SendWrapper<::InboundProtocol>, SendWrapper<::InboundProtocol>, @@ -361,14 +200,14 @@ where SubstreamProtocol::new(choice, (i1, i2)).with_timeout(timeout) } - fn on_behaviour_event(&mut self, event: Self::InEvent) { + fn on_behaviour_event(&mut self, event: Self::FromBehaviour) { match event { Either::Left(event) => self.proto1.on_behaviour_event(event), Either::Right(event) => self.proto2.on_behaviour_event(event), } } - fn connection_keep_alive(&self) -> KeepAlive { + fn connection_keep_alive(&self) -> bool { cmp::max( self.proto1.connection_keep_alive(), self.proto2.connection_keep_alive(), @@ -379,19 +218,11 @@ where &mut self, cx: &mut Context<'_>, ) -> Poll< - ConnectionHandlerEvent< - Self::OutboundProtocol, - Self::OutboundOpenInfo, - Self::OutEvent, - Self::Error, - >, + ConnectionHandlerEvent, > { match self.proto1.poll(cx) { - Poll::Ready(ConnectionHandlerEvent::Custom(event)) => { - return Poll::Ready(ConnectionHandlerEvent::Custom(Either::Left(event))); - } - Poll::Ready(ConnectionHandlerEvent::Close(event)) => { - return Poll::Ready(ConnectionHandlerEvent::Close(Either::Left(event))); + Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(event)) => { + return 
Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(Either::Left(event))); } Poll::Ready(ConnectionHandlerEvent::OutboundSubstreamRequest { protocol }) => { return Poll::Ready(ConnectionHandlerEvent::OutboundSubstreamRequest { @@ -400,15 +231,17 @@ where .map_info(Either::Left), }); } + Poll::Ready(ConnectionHandlerEvent::ReportRemoteProtocols(support)) => { + return Poll::Ready(ConnectionHandlerEvent::ReportRemoteProtocols(support)); + } Poll::Pending => (), }; match self.proto2.poll(cx) { - Poll::Ready(ConnectionHandlerEvent::Custom(event)) => { - return Poll::Ready(ConnectionHandlerEvent::Custom(Either::Right(event))); - } - Poll::Ready(ConnectionHandlerEvent::Close(event)) => { - return Poll::Ready(ConnectionHandlerEvent::Close(Either::Right(event))); + Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(event)) => { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(Either::Right( + event, + ))); } Poll::Ready(ConnectionHandlerEvent::OutboundSubstreamRequest { protocol }) => { return Poll::Ready(ConnectionHandlerEvent::OutboundSubstreamRequest { @@ -417,12 +250,27 @@ where .map_info(Either::Right), }); } + Poll::Ready(ConnectionHandlerEvent::ReportRemoteProtocols(support)) => { + return Poll::Ready(ConnectionHandlerEvent::ReportRemoteProtocols(support)); + } Poll::Pending => (), }; Poll::Pending } + fn poll_close(&mut self, cx: &mut Context<'_>) -> Poll> { + if let Some(e) = ready!(self.proto1.poll_close(cx)) { + return Poll::Ready(Some(Either::Left(e))); + } + + if let Some(e) = ready!(self.proto2.poll_close(cx)) { + return Poll::Ready(Some(Either::Right(e))); + } + + Poll::Ready(None) + } + fn on_connection_event( &mut self, event: ConnectionEvent< @@ -477,6 +325,26 @@ where ConnectionEvent::ListenUpgradeError(listen_upgrade_error) => { self.on_listen_upgrade_error(listen_upgrade_error) } + ConnectionEvent::LocalProtocolsChange(supported_protocols) => { + self.proto1 + .on_connection_event(ConnectionEvent::LocalProtocolsChange( + 
supported_protocols.clone(), + )); + self.proto2 + .on_connection_event(ConnectionEvent::LocalProtocolsChange( + supported_protocols, + )); + } + ConnectionEvent::RemoteProtocolsChange(supported_protocols) => { + self.proto1 + .on_connection_event(ConnectionEvent::RemoteProtocolsChange( + supported_protocols.clone(), + )); + self.proto2 + .on_connection_event(ConnectionEvent::RemoteProtocolsChange( + supported_protocols, + )); + } } } } diff --git a/swarm/src/keep_alive.rs b/swarm/src/keep_alive.rs deleted file mode 100644 index c22a926afe4..00000000000 --- a/swarm/src/keep_alive.rs +++ /dev/null @@ -1,142 +0,0 @@ -use crate::behaviour::{FromSwarm, NetworkBehaviour, PollParameters, ToSwarm}; -use crate::connection::ConnectionId; -use crate::handler::{ - ConnectionEvent, ConnectionHandlerEvent, FullyNegotiatedInbound, FullyNegotiatedOutbound, - KeepAlive, SubstreamProtocol, -}; -use crate::{ConnectionDenied, THandler, THandlerInEvent, THandlerOutEvent}; -use libp2p_core::upgrade::DeniedUpgrade; -use libp2p_core::{Endpoint, Multiaddr}; -use libp2p_identity::PeerId; -use std::task::{Context, Poll}; -use void::Void; - -/// Implementation of [`NetworkBehaviour`] that doesn't do anything other than keep all connections alive. -/// -/// This is primarily useful for test code. In can however occasionally be useful for production code too. -/// The caveat is that open connections consume system resources and should thus be shutdown when -/// they are not in use. Connections can also fail at any time so really, your application should be -/// designed to establish them when necessary, making the use of this behaviour likely redundant. 
-#[derive(Default)] -pub struct Behaviour; - -impl NetworkBehaviour for Behaviour { - type ConnectionHandler = ConnectionHandler; - type OutEvent = Void; - - fn handle_established_inbound_connection( - &mut self, - _: ConnectionId, - _: PeerId, - _: &Multiaddr, - _: &Multiaddr, - ) -> Result, ConnectionDenied> { - Ok(ConnectionHandler) - } - - fn handle_established_outbound_connection( - &mut self, - _: ConnectionId, - _: PeerId, - _: &Multiaddr, - _: Endpoint, - ) -> Result, ConnectionDenied> { - Ok(ConnectionHandler) - } - - fn on_connection_handler_event( - &mut self, - _: PeerId, - _: ConnectionId, - event: THandlerOutEvent, - ) { - void::unreachable(event) - } - - fn poll( - &mut self, - _: &mut Context<'_>, - _: &mut impl PollParameters, - ) -> Poll>> { - Poll::Pending - } - - fn on_swarm_event(&mut self, event: FromSwarm) { - match event { - FromSwarm::ConnectionEstablished(_) - | FromSwarm::ConnectionClosed(_) - | FromSwarm::AddressChange(_) - | FromSwarm::DialFailure(_) - | FromSwarm::ListenFailure(_) - | FromSwarm::NewListener(_) - | FromSwarm::NewListenAddr(_) - | FromSwarm::ExpiredListenAddr(_) - | FromSwarm::ListenerError(_) - | FromSwarm::ListenerClosed(_) - | FromSwarm::NewExternalAddr(_) - | FromSwarm::ExpiredExternalAddr(_) => {} - } - } -} - -/// Implementation of [`ConnectionHandler`] that doesn't handle anything but keeps the connection alive. 
-#[derive(Clone, Debug)] -pub struct ConnectionHandler; - -impl crate::handler::ConnectionHandler for ConnectionHandler { - type InEvent = Void; - type OutEvent = Void; - type Error = Void; - type InboundProtocol = DeniedUpgrade; - type OutboundProtocol = DeniedUpgrade; - type InboundOpenInfo = (); - type OutboundOpenInfo = Void; - - fn listen_protocol(&self) -> SubstreamProtocol { - SubstreamProtocol::new(DeniedUpgrade, ()) - } - - fn on_behaviour_event(&mut self, v: Self::InEvent) { - void::unreachable(v) - } - - fn connection_keep_alive(&self) -> KeepAlive { - KeepAlive::Yes - } - - fn poll( - &mut self, - _: &mut Context<'_>, - ) -> Poll< - ConnectionHandlerEvent< - Self::OutboundProtocol, - Self::OutboundOpenInfo, - Self::OutEvent, - Self::Error, - >, - > { - Poll::Pending - } - - fn on_connection_event( - &mut self, - event: ConnectionEvent< - Self::InboundProtocol, - Self::OutboundProtocol, - Self::InboundOpenInfo, - Self::OutboundOpenInfo, - >, - ) { - match event { - ConnectionEvent::FullyNegotiatedInbound(FullyNegotiatedInbound { - protocol, .. - }) => void::unreachable(protocol), - ConnectionEvent::FullyNegotiatedOutbound(FullyNegotiatedOutbound { - protocol, .. - }) => void::unreachable(protocol), - ConnectionEvent::DialUpgradeError(_) - | ConnectionEvent::ListenUpgradeError(_) - | ConnectionEvent::AddressChange(_) => {} - } - } -} diff --git a/swarm/src/lib.rs b/swarm/src/lib.rs index 9c87389334e..0354f39cfdc 100644 --- a/swarm/src/lib.rs +++ b/swarm/src/lib.rs @@ -57,7 +57,7 @@ mod connection; mod executor; -mod registry; +mod stream; mod stream_protocol; #[cfg(test)] mod test; @@ -67,7 +67,7 @@ pub mod behaviour; pub mod dial_opts; pub mod dummy; pub mod handler; -pub mod keep_alive; +mod listen_opts; /// Bundles all symbols required for the [`libp2p_swarm_derive::NetworkBehaviour`] macro. 
#[doc(hidden)] @@ -76,13 +76,14 @@ pub mod derive_prelude { pub use crate::behaviour::ConnectionClosed; pub use crate::behaviour::ConnectionEstablished; pub use crate::behaviour::DialFailure; - pub use crate::behaviour::ExpiredExternalAddr; pub use crate::behaviour::ExpiredListenAddr; + pub use crate::behaviour::ExternalAddrConfirmed; + pub use crate::behaviour::ExternalAddrExpired; pub use crate::behaviour::FromSwarm; pub use crate::behaviour::ListenFailure; pub use crate::behaviour::ListenerClosed; pub use crate::behaviour::ListenerError; - pub use crate::behaviour::NewExternalAddr; + pub use crate::behaviour::NewExternalAddrCandidate; pub use crate::behaviour::NewListenAddr; pub use crate::behaviour::NewListener; pub use crate::connection::ConnectionId; @@ -91,9 +92,6 @@ pub mod derive_prelude { pub use crate::ConnectionHandlerSelect; pub use crate::DialError; pub use crate::NetworkBehaviour; - #[allow(deprecated)] - pub use crate::NetworkBehaviourAction; - pub use crate::PollParameters; pub use crate::THandler; pub use crate::THandlerInEvent; pub use crate::THandlerOutEvent; @@ -107,32 +105,26 @@ pub mod derive_prelude { pub use libp2p_identity::PeerId; } -#[allow(deprecated)] -pub use crate::connection::ConnectionLimit; -#[allow(deprecated)] -pub use behaviour::NetworkBehaviourAction; pub use behaviour::{ - AddressChange, CloseConnection, ConnectionClosed, DialFailure, ExpiredExternalAddr, - ExpiredListenAddr, ExternalAddresses, FromSwarm, ListenAddresses, ListenFailure, - ListenerClosed, ListenerError, NetworkBehaviour, NewExternalAddr, NewListenAddr, NotifyHandler, - PollParameters, ToSwarm, + AddressChange, CloseConnection, ConnectionClosed, DialFailure, ExpiredListenAddr, + ExternalAddrExpired, ExternalAddresses, FromSwarm, ListenAddresses, ListenFailure, + ListenerClosed, ListenerError, NetworkBehaviour, NewExternalAddrCandidate, NewListenAddr, + NotifyHandler, ToSwarm, }; -#[allow(deprecated)] -pub use connection::pool::{ConnectionCounters, 
ConnectionLimits}; -pub use connection::{ConnectionError, ConnectionId}; +pub use connection::pool::ConnectionCounters; +pub use connection::{ConnectionError, ConnectionId, SupportedProtocols}; pub use executor::Executor; -#[allow(deprecated)] -pub use handler::IntoConnectionHandler; pub use handler::{ - ConnectionHandler, ConnectionHandlerEvent, ConnectionHandlerSelect, ConnectionHandlerUpgrErr, - IntoConnectionHandlerSelect, KeepAlive, OneShotHandler, OneShotHandlerConfig, - SubstreamProtocol, + ConnectionHandler, ConnectionHandlerEvent, ConnectionHandlerSelect, OneShotHandler, + OneShotHandlerConfig, StreamUpgradeError, SubstreamProtocol, }; #[cfg(feature = "macros")] pub use libp2p_swarm_derive::NetworkBehaviour; -pub use registry::{AddAddressResult, AddressRecord, AddressScore}; +pub use listen_opts::ListenOpts; +pub use stream::Stream; pub use stream_protocol::{InvalidProtocol, StreamProtocol}; +use crate::behaviour::ExternalAddrConfirmed; use crate::handler::UpgradeInfoSend; use connection::pool::{EstablishedConnection, Pool, PoolConfig, PoolEvent}; use connection::IncomingInfo; @@ -140,62 +132,52 @@ use connection::{ PendingConnectionError, PendingInboundConnectionError, PendingOutboundConnectionError, }; use dial_opts::{DialOpts, PeerCondition}; -use futures::{executor::ThreadPoolBuilder, prelude::*, stream::FusedStream}; -use libp2p_core::muxing::SubstreamBox; +use futures::{prelude::*, stream::FusedStream}; use libp2p_core::{ connection::ConnectedPoint, - multiaddr, - multihash::Multihash, muxing::StreamMuxerBox, transport::{self, ListenerId, TransportError, TransportEvent}, - Endpoint, Multiaddr, Negotiated, Transport, + Endpoint, Multiaddr, Transport, }; use libp2p_identity::PeerId; -use registry::{AddressIntoIter, Addresses}; use smallvec::SmallVec; -use std::collections::{HashMap, HashSet}; +use std::collections::{HashMap, HashSet, VecDeque}; use std::num::{NonZeroU32, NonZeroU8, NonZeroUsize}; +use std::time::Duration; use std::{ convert::TryFrom, 
error, fmt, io, pin::Pin, task::{Context, Poll}, }; - -/// Substream for which a protocol has been chosen. -/// -/// Implements the [`AsyncRead`](futures::io::AsyncRead) and -/// [`AsyncWrite`](futures::io::AsyncWrite) traits. -pub type NegotiatedSubstream = Negotiated; +use tracing::Instrument; /// Event generated by the [`NetworkBehaviour`] that the swarm will report back. -type TBehaviourOutEvent = ::OutEvent; +type TBehaviourOutEvent = ::ToSwarm; /// [`ConnectionHandler`] of the [`NetworkBehaviour`] for all the protocols the [`NetworkBehaviour`] /// supports. -#[allow(deprecated)] -pub type THandler = - <::ConnectionHandler as IntoConnectionHandler>::Handler; +pub type THandler = ::ConnectionHandler; /// Custom event that can be received by the [`ConnectionHandler`] of the /// [`NetworkBehaviour`]. -pub type THandlerInEvent = as ConnectionHandler>::InEvent; +pub type THandlerInEvent = as ConnectionHandler>::FromBehaviour; /// Custom event that can be produced by the [`ConnectionHandler`] of the [`NetworkBehaviour`]. -pub type THandlerOutEvent = as ConnectionHandler>::OutEvent; - -/// Custom error that can be produced by the [`ConnectionHandler`] of the [`NetworkBehaviour`]. -pub type THandlerErr = as ConnectionHandler>::Error; +pub type THandlerOutEvent = as ConnectionHandler>::ToBehaviour; /// Event generated by the `Swarm`. #[derive(Debug)] -pub enum SwarmEvent { +#[non_exhaustive] +pub enum SwarmEvent { /// Event generated by the `NetworkBehaviour`. Behaviour(TBehaviourOutEvent), /// A connection to the given peer has been opened. ConnectionEstablished { /// Identity of the peer that we have connected to. peer_id: PeerId, + /// Identifier of the connection. + connection_id: ConnectionId, /// Endpoint of the connection that has been opened. 
endpoint: ConnectedPoint, /// Number of established connections to this peer, including the one that has just been @@ -213,21 +195,24 @@ pub enum SwarmEvent { ConnectionClosed { /// Identity of the peer that we have connected to. peer_id: PeerId, + /// Identifier of the connection. + connection_id: ConnectionId, /// Endpoint of the connection that has been closed. endpoint: ConnectedPoint, /// Number of other remaining connections to this same peer. num_established: u32, /// Reason for the disconnection, if it was not a successful /// active close. - cause: Option>, + cause: Option, }, /// A new connection arrived on a listener and is in the process of protocol negotiation. /// - /// A corresponding [`ConnectionEstablished`](SwarmEvent::ConnectionEstablished), - /// [`BannedPeer`](SwarmEvent::BannedPeer), or + /// A corresponding [`ConnectionEstablished`](SwarmEvent::ConnectionEstablished) or /// [`IncomingConnectionError`](SwarmEvent::IncomingConnectionError) event will later be /// generated for this connection. IncomingConnection { + /// Identifier of the connection. + connection_id: ConnectionId, /// Local connection address. /// This address has been earlier reported with a [`NewListenAddr`](SwarmEvent::NewListenAddr) /// event. @@ -240,6 +225,8 @@ pub enum SwarmEvent { /// This can include, for example, an error during the handshake of the encryption layer, or /// the connection unexpectedly closed. IncomingConnectionError { + /// Identifier of the connection. + connection_id: ConnectionId, /// Local connection address. /// This address has been earlier reported with a [`NewListenAddr`](SwarmEvent::NewListenAddr) /// event. @@ -251,19 +238,13 @@ pub enum SwarmEvent { }, /// An error happened on an outbound connection. OutgoingConnectionError { + /// Identifier of the connection. + connection_id: ConnectionId, /// If known, [`PeerId`] of the peer we tried to reach. peer_id: Option, /// Error that has been encountered. 
error: DialError, }, - /// We connected to a peer, but we immediately closed the connection because that peer is banned. - #[deprecated(note = "Use `libp2p::allow_block_list` instead.", since = "0.42.1")] - BannedPeer { - /// Identity of the banned peer. - peer_id: PeerId, - /// Endpoint of the connection that has been closed. - endpoint: ConnectedPoint, - }, /// One of our listeners has reported a new local listening address. NewListenAddr { /// The listener that is listening on the new address. @@ -304,10 +285,22 @@ pub enum SwarmEvent { /// reported if the dialing attempt succeeds, otherwise a /// [`OutgoingConnectionError`](SwarmEvent::OutgoingConnectionError) event /// is reported. - Dialing(PeerId), + Dialing { + /// Identity of the peer that we are connecting to. + peer_id: Option, + + /// Identifier of the connection. + connection_id: ConnectionId, + }, + /// We have discovered a new candidate for an external address for us. + NewExternalAddrCandidate { address: Multiaddr }, + /// An external address of the local node was confirmed. + ExternalAddrConfirmed { address: Multiaddr }, + /// An external address of the local node expired, i.e. is no-longer confirmed. + ExternalAddrExpired { address: Multiaddr }, } -impl SwarmEvent { +impl SwarmEvent { /// Extract the `TBehaviourOutEvent` from this [`SwarmEvent`] in case it is the `Behaviour` variant, otherwise fail. #[allow(clippy::result_large_err)] pub fn try_into_behaviour_event(self) -> Result { @@ -342,137 +335,46 @@ where /// List of protocols that the behaviour says it supports. supported_protocols: SmallVec<[Vec; 16]>, + confirmed_external_addr: HashSet, + /// Multiaddresses that our listeners are listening on, listened_addrs: HashMap>, - /// List of multiaddresses we're listening on, after account for external IP addresses and - /// similar mechanisms. - external_addrs: Addresses, - - /// List of nodes for which we deny any incoming connection. 
- banned_peers: HashSet, - /// Pending event to be delivered to connection handlers /// (or dropped if the peer disconnected) before the `behaviour` /// can be polled again. - pending_event: Option<(PeerId, PendingNotifyHandler, THandlerInEvent)>, + pending_handler_event: Option<(PeerId, PendingNotifyHandler, THandlerInEvent)>, + + pending_swarm_events: VecDeque>, } impl Unpin for Swarm where TBehaviour: NetworkBehaviour {} -#[allow(deprecated)] impl Swarm where TBehaviour: NetworkBehaviour, { - /// Builds a new `Swarm` with a provided executor. - #[deprecated(note = "Use `SwarmBuilder::with_executor` instead.")] - pub fn with_executor( + /// Creates a new [`Swarm`] from the given [`Transport`], [`NetworkBehaviour`], [`PeerId`] and + /// [`Config`]. + pub fn new( transport: transport::Boxed<(PeerId, StreamMuxerBox)>, behaviour: TBehaviour, local_peer_id: PeerId, - executor: impl Executor + Send + 'static, + config: Config, ) -> Self { - SwarmBuilder::with_executor(transport, behaviour, local_peer_id, executor).build() - } + tracing::info!(%local_peer_id); - /// Builds a new `Swarm` with a tokio executor. - #[cfg(all( - feature = "tokio", - not(any(target_os = "emscripten", target_os = "wasi", target_os = "unknown")) - ))] - #[deprecated(note = "Use `SwarmBuilder::with_tokio_executor` instead.")] - pub fn with_tokio_executor( - transport: transport::Boxed<(PeerId, StreamMuxerBox)>, - behaviour: TBehaviour, - local_peer_id: PeerId, - ) -> Self { - Self::with_executor( - transport, - behaviour, + Swarm { local_peer_id, - crate::executor::TokioExecutor, - ) - } - - /// Builds a new `Swarm` with an async-std executor. 
- #[cfg(all( - feature = "async-std", - not(any(target_os = "emscripten", target_os = "wasi", target_os = "unknown")) - ))] - #[deprecated(note = "Use `SwarmBuilder::with_async_std_executor` instead.")] - pub fn with_async_std_executor( - transport: transport::Boxed<(PeerId, StreamMuxerBox)>, - behaviour: TBehaviour, - local_peer_id: PeerId, - ) -> Self { - Self::with_executor( transport, + pool: Pool::new(local_peer_id, config.pool_config), behaviour, - local_peer_id, - crate::executor::AsyncStdExecutor, - ) - } - - /// Builds a new `Swarm` with a threadpool executor. - #[deprecated( - note = "The `futures::executor::ThreadPool` executor is deprecated. See https://github.com/libp2p/rust-libp2p/issues/3107." - )] - pub fn with_threadpool_executor( - transport: transport::Boxed<(PeerId, StreamMuxerBox)>, - behaviour: TBehaviour, - local_peer_id: PeerId, - ) -> Self { - let builder = match ThreadPoolBuilder::new() - .name_prefix("libp2p-swarm-task-") - .create() - { - Ok(tp) => SwarmBuilder::with_executor(transport, behaviour, local_peer_id, tp), - Err(err) => { - log::warn!("Failed to create executor thread pool: {:?}", err); - SwarmBuilder::without_executor(transport, behaviour, local_peer_id) - } - }; - builder.build() - } - - /// Builds a new `Swarm` with a wasm executor. - /// Background tasks will be executed by the browser on the next micro-tick. - /// - /// Spawning a task is similar too: - /// ```typescript - /// function spawn(task: () => Promise) { - /// task() - /// } - /// ``` - #[cfg(feature = "wasm-bindgen")] - #[deprecated(note = "Use `SwarmBuilder::with_wasm_executor` instead.")] - pub fn with_wasm_executor( - transport: transport::Boxed<(PeerId, StreamMuxerBox)>, - behaviour: TBehaviour, - local_peer_id: PeerId, - ) -> Self { - Self::with_executor( - transport, - behaviour, - local_peer_id, - crate::executor::WasmBindgenExecutor, - ) - } - - /// Builds a new `Swarm` without an executor, instead using the current task. 
- /// - /// ## ⚠️ Performance warning - /// All connections will be polled on the current task, thus quite bad performance - /// characteristics should be expected. Whenever possible use an executor and - /// [`Swarm::with_executor`]. - #[deprecated(note = "Use `SwarmBuilder::without_executor` instead.")] - pub fn without_executor( - transport: transport::Boxed<(PeerId, StreamMuxerBox)>, - behaviour: TBehaviour, - local_peer_id: PeerId, - ) -> Self { - SwarmBuilder::without_executor(transport, behaviour, local_peer_id).build() + supported_protocols: Default::default(), + confirmed_external_addr: Default::default(), + listened_addrs: HashMap::new(), + pending_handler_event: None, + pending_swarm_events: VecDeque::default(), + } } /// Returns information about the connections underlying the [`Swarm`]. @@ -491,11 +393,9 @@ where /// Listeners report their new listening addresses as [`SwarmEvent::NewListenAddr`]. /// Depending on the underlying transport, one listener may have multiple listening addresses. pub fn listen_on(&mut self, addr: Multiaddr) -> Result> { - let id = self.transport.listen_on(addr)?; - self.behaviour - .on_swarm_event(FromSwarm::NewListener(behaviour::NewListener { - listener_id: id, - })); + let opts = ListenOpts::new(addr); + let id = opts.listener_id(); + self.add_listener(opts)?; Ok(id) } @@ -512,39 +412,43 @@ where /// See also [`DialOpts`]. 
/// /// ``` - /// # use libp2p_swarm::SwarmBuilder; + /// # use libp2p_swarm::Swarm; /// # use libp2p_swarm::dial_opts::{DialOpts, PeerCondition}; - /// # use libp2p_core::{Multiaddr, PeerId, Transport}; + /// # use libp2p_core::{Multiaddr, Transport}; /// # use libp2p_core::transport::dummy::DummyTransport; /// # use libp2p_swarm::dummy; + /// # use libp2p_identity::PeerId; /// # - /// let mut swarm = SwarmBuilder::without_executor( - /// DummyTransport::new().boxed(), - /// dummy::Behaviour, - /// PeerId::random(), - /// ).build(); + /// # #[tokio::main] + /// # async fn main() { + /// let mut swarm = build_swarm(); /// /// // Dial a known peer. /// swarm.dial(PeerId::random()); /// /// // Dial an unknown peer. /// swarm.dial("/ip6/::1/tcp/12345".parse::().unwrap()); + /// # } + /// + /// # fn build_swarm() -> Swarm { + /// # Swarm::new(DummyTransport::new().boxed(), dummy::Behaviour, PeerId::random(), libp2p_swarm::Config::with_tokio_executor()) + /// # } /// ``` pub fn dial(&mut self, opts: impl Into) -> Result<(), DialError> { let dial_opts = opts.into(); - let peer_id = dial_opts - .get_or_parse_peer_id() - .map_err(DialError::InvalidPeerId)?; + let peer_id = dial_opts.get_peer_id(); let condition = dial_opts.peer_condition(); let connection_id = dial_opts.connection_id(); let should_dial = match (condition, peer_id) { + (_, None) => true, (PeerCondition::Always, _) => true, - (PeerCondition::Disconnected, None) => true, - (PeerCondition::NotDialing, None) => true, (PeerCondition::Disconnected, Some(peer_id)) => !self.pool.is_connected(peer_id), (PeerCondition::NotDialing, Some(peer_id)) => !self.pool.is_dialing(peer_id), + (PeerCondition::DisconnectedAndNotDialing, Some(peer_id)) => { + !self.pool.is_dialing(peer_id) && !self.pool.is_connected(peer_id) + } }; if !should_dial { @@ -560,22 +464,6 @@ where return Err(e); } - if let Some(peer_id) = peer_id { - // Check if peer is banned. 
- if self.banned_peers.contains(&peer_id) { - #[allow(deprecated)] - let error = DialError::Banned; - self.behaviour - .on_swarm_event(FromSwarm::DialFailure(DialFailure { - peer_id: Some(peer_id), - error: &error, - connection_id, - })); - - return Err(error); - } - } - let addresses = { let mut addresses_from_opts = dial_opts.get_addresses(); @@ -592,7 +480,11 @@ where let num_addresses = addresses.len(); if num_addresses > 0 { - log::debug!("discarding {num_addresses} addresses from `NetworkBehaviour` because `DialOpts::extend_addresses_through_behaviour is `false` for connection {connection_id:?}") + tracing::debug!( + connection=%connection_id, + discarded_addresses_count=%num_addresses, + "discarding addresses from `NetworkBehaviour` because `DialOpts::extend_addresses_through_behaviour is `false` for connection" + ) } } } @@ -632,15 +524,24 @@ where let dials = addresses .into_iter() - .map(|a| match p2p_addr(peer_id, a) { + .map(|a| match peer_id.map_or(Ok(a.clone()), |p| a.with_p2p(p)) { Ok(address) => { - let dial = match dial_opts.role_override() { - Endpoint::Dialer => self.transport.dial(address.clone()), - Endpoint::Listener => self.transport.dial_as_listener(address.clone()), + let (dial, span) = match dial_opts.role_override() { + Endpoint::Dialer => ( + self.transport.dial(address.clone()), + tracing::debug_span!(parent: tracing::Span::none(), "Transport::dial", %address), + ), + Endpoint::Listener => ( + self.transport.dial_as_listener(address.clone()), + tracing::debug_span!(parent: tracing::Span::none(), "Transport::dial_as_listener", %address), + ), }; + span.follows_from(tracing::Span::current()); + match dial { Ok(fut) => fut .map(|r| (address, r.map_err(TransportError::Other))) + .instrument(span) .boxed(), Err(err) => futures::future::ready((address, Err(err))).boxed(), } @@ -653,27 +554,15 @@ where }) .collect(); - match self.pool.add_outgoing( + self.pool.add_outgoing( dials, peer_id, dial_opts.role_override(), 
dial_opts.dial_concurrency_override(), connection_id, - ) { - Ok(()) => Ok(()), - Err(connection_limit) => { - #[allow(deprecated)] - let error = DialError::ConnectionLimit(connection_limit); - self.behaviour - .on_swarm_event(FromSwarm::DialFailure(DialFailure { - peer_id, - error: &error, - connection_id, - })); + ); - Err(error) - } - } + Ok(()) } /// Returns an iterator that produces the list of addresses we're listening on. @@ -686,93 +575,60 @@ where &self.local_peer_id } - /// Returns an iterator for [`AddressRecord`]s of external addresses - /// of the local node, in decreasing order of their current - /// [score](AddressScore). - pub fn external_addresses(&self) -> impl Iterator { - self.external_addrs.iter() + /// List all **confirmed** external address for the local node. + pub fn external_addresses(&self) -> impl Iterator { + self.confirmed_external_addr.iter() } - /// Adds an external address record for the local node. - /// - /// An external address is an address of the local node known to - /// be (likely) reachable for other nodes, possibly taking into - /// account NAT. The external addresses of the local node may be - /// shared with other nodes by the `NetworkBehaviour`. - /// - /// The associated score determines both the position of the address - /// in the list of external addresses (which can determine the - /// order in which addresses are used to connect to) as well as - /// how long the address is retained in the list, depending on - /// how frequently it is reported by the `NetworkBehaviour` via - /// [`ToSwarm::ReportObservedAddr`] or explicitly - /// through this method. 
- pub fn add_external_address(&mut self, a: Multiaddr, s: AddressScore) -> AddAddressResult { - let result = self.external_addrs.add(a.clone(), s); - let expired = match &result { - AddAddressResult::Inserted { expired } => { - self.behaviour - .on_swarm_event(FromSwarm::NewExternalAddr(NewExternalAddr { addr: &a })); - expired - } - AddAddressResult::Updated { expired } => expired, - }; - for a in expired { + fn add_listener(&mut self, opts: ListenOpts) -> Result<(), TransportError> { + let addr = opts.address(); + let listener_id = opts.listener_id(); + + if let Err(e) = self.transport.listen_on(listener_id, addr.clone()) { self.behaviour - .on_swarm_event(FromSwarm::ExpiredExternalAddr(ExpiredExternalAddr { - addr: &a.addr, + .on_swarm_event(FromSwarm::ListenerError(behaviour::ListenerError { + listener_id, + err: &e, })); - } - result - } - /// Removes an external address of the local node, regardless of - /// its current score. See [`Swarm::add_external_address`] - /// for details. - /// - /// Returns `true` if the address existed and was removed, `false` - /// otherwise. - pub fn remove_external_address(&mut self, addr: &Multiaddr) -> bool { - if self.external_addrs.remove(addr) { - self.behaviour - .on_swarm_event(FromSwarm::ExpiredExternalAddr(ExpiredExternalAddr { addr })); - true - } else { - false + return Err(e); } + + self.behaviour + .on_swarm_event(FromSwarm::NewListener(behaviour::NewListener { + listener_id, + })); + + Ok(()) } - /// Bans a peer by its peer ID. + /// Add a **confirmed** external address for the local node. /// - /// Any incoming connection and any dialing attempt will immediately be rejected. - /// This function has no effect if the peer is already banned. 
- #[deprecated(note = "Use `libp2p::allow_block_list` instead.", since = "0.42.1")] - pub fn ban_peer_id(&mut self, peer_id: PeerId) { - if self.banned_peers.insert(peer_id) { - // Note that established connections to the now banned peer are closed but not - // added to [`Swarm::banned_peer_connections`]. They have been previously reported - // as open to the behaviour and need be reported as closed once closing the - // connection finishes. - self.pool.disconnect(peer_id); - } + /// This function should only be called with addresses that are guaranteed to be reachable. + /// The address is broadcast to all [`NetworkBehaviour`]s via [`FromSwarm::ExternalAddrConfirmed`]. + pub fn add_external_address(&mut self, a: Multiaddr) { + self.behaviour + .on_swarm_event(FromSwarm::ExternalAddrConfirmed(ExternalAddrConfirmed { + addr: &a, + })); + self.confirmed_external_addr.insert(a); } - /// Unbans a peer. - #[deprecated(note = "Use `libp2p::allow_block_list` instead.", since = "0.42.1")] - pub fn unban_peer_id(&mut self, peer_id: PeerId) { - self.banned_peers.remove(&peer_id); + /// Remove an external address for the local node. + /// + /// The address is broadcast to all [`NetworkBehaviour`]s via [`FromSwarm::ExternalAddrExpired`]. + pub fn remove_external_address(&mut self, addr: &Multiaddr) { + self.behaviour + .on_swarm_event(FromSwarm::ExternalAddrExpired(ExternalAddrExpired { addr })); + self.confirmed_external_addr.remove(addr); } /// Disconnects a peer by its peer ID, closing all connections to said peer. /// /// Returns `Ok(())` if there was one or more established connections to the peer. /// - /// Note: Closing a connection via [`Swarm::disconnect_peer_id`] does - /// not inform the corresponding [`ConnectionHandler`]. 
- /// Closing a connection via a [`ConnectionHandler`] can be done either in a - /// collaborative manner across [`ConnectionHandler`]s - /// with [`ConnectionHandler::connection_keep_alive`] or directly with - /// [`ConnectionHandlerEvent::Close`]. + /// Closing a connection via [`Swarm::disconnect_peer_id`] will poll [`ConnectionHandler::poll_close`] to completion. + /// Use this function if you want to close a connection _despite_ it still being in use by one or more handlers. #[allow(clippy::result_unit_err)] pub fn disconnect_peer_id(&mut self, peer_id: PeerId) -> Result<(), ()> { let was_connected = self.pool.is_connected(peer_id); @@ -785,6 +641,24 @@ where } } + /// Attempt to gracefully close a connection. + /// + /// Closing a connection is asynchronous but this function will return immediately. + /// A [`SwarmEvent::ConnectionClosed`] event will be emitted once the connection is actually closed. + /// + /// # Returns + /// + /// - `true` if the connection was established and is now being closed. + /// - `false` if the connection was not found or is no longer established. + pub fn close_connection(&mut self, connection_id: ConnectionId) -> bool { + if let Some(established) = self.pool.get_established(connection_id) { + established.start_close(); + return true; + } + + false + } + /// Checks whether there is an established connection to a peer. 
pub fn is_connected(&self, peer_id: &PeerId) -> bool { self.pool.is_connected(*peer_id) @@ -805,10 +679,7 @@ where &mut self.behaviour } - fn handle_pool_event( - &mut self, - event: PoolEvent>, - ) -> Option>> { + fn handle_pool_event(&mut self, event: PoolEvent>) { match event { PoolEvent::ConnectionEstablished { peer_id, @@ -818,11 +689,6 @@ where concurrent_dial_errors, established_in, } => { - if self.banned_peers.contains(&peer_id) { - #[allow(deprecated)] - return Some(SwarmEvent::BannedPeer { peer_id, endpoint }); - } - let handler = match endpoint.clone() { ConnectedPoint::Dialer { address, @@ -845,10 +711,14 @@ where }, )); - return Some(SwarmEvent::OutgoingConnectionError { - peer_id: Some(peer_id), - error: dial_error, - }); + self.pending_swarm_events.push_back( + SwarmEvent::OutgoingConnectionError { + peer_id: Some(peer_id), + connection_id: id, + error: dial_error, + }, + ); + return; } } } @@ -874,11 +744,15 @@ where }, )); - return Some(SwarmEvent::IncomingConnectionError { - send_back_addr, - local_addr, - error: listen_error, - }); + self.pending_swarm_events.push_back( + SwarmEvent::IncomingConnectionError { + connection_id: id, + send_back_addr, + local_addr, + error: listen_error, + }, + ); + return; } } } @@ -902,11 +776,11 @@ where self.pool .spawn_connection(id, peer_id, &endpoint, connection, handler); - log::debug!( - "Connection established: {:?} {:?}; Total (peer): {}.", - peer_id, - endpoint, - num_established, + tracing::debug!( + peer=%peer_id, + ?endpoint, + total_peers=%num_established, + "Connection established" ); let failed_addresses = concurrent_dial_errors .as_ref() @@ -928,13 +802,15 @@ where }, )); self.supported_protocols = supported_protocols; - return Some(SwarmEvent::ConnectionEstablished { - peer_id, - num_established, - endpoint, - concurrent_dial_errors, - established_in, - }); + self.pending_swarm_events + .push_back(SwarmEvent::ConnectionEstablished { + peer_id, + connection_id: id, + num_established, + endpoint, + 
concurrent_dial_errors, + established_in, + }); } PoolEvent::PendingOutboundConnectionError { id: connection_id, @@ -951,15 +827,17 @@ where })); if let Some(peer) = peer { - log::debug!("Connection attempt to {:?} failed with {:?}.", peer, error,); + tracing::debug!(%peer, "Connection attempt to peer failed with {:?}.", error,); } else { - log::debug!("Connection attempt to unknown peer failed with {:?}", error); + tracing::debug!("Connection attempt to unknown peer failed with {:?}", error); } - return Some(SwarmEvent::OutgoingConnectionError { - peer_id: peer, - error, - }); + self.pending_swarm_events + .push_back(SwarmEvent::OutgoingConnectionError { + peer_id: peer, + connection_id, + error, + }); } PoolEvent::PendingInboundConnectionError { id, @@ -969,7 +847,7 @@ where } => { let error = error.into(); - log::debug!("Incoming connection failed: {:?}", error); + tracing::debug!("Incoming connection failed: {:?}", error); self.behaviour .on_swarm_event(FromSwarm::ListenFailure(ListenFailure { local_addr: &local_addr, @@ -977,32 +855,33 @@ where error: &error, connection_id: id, })); - return Some(SwarmEvent::IncomingConnectionError { - local_addr, - send_back_addr, - error, - }); + self.pending_swarm_events + .push_back(SwarmEvent::IncomingConnectionError { + connection_id: id, + local_addr, + send_back_addr, + error, + }); } PoolEvent::ConnectionClosed { id, connected, error, remaining_established_connection_ids, - handler, .. 
} => { if let Some(error) = error.as_ref() { - log::debug!( - "Connection closed with error {:?}: {:?}; Total (peer): {}.", + tracing::debug!( + total_peers=%remaining_established_connection_ids.len(), + "Connection closed with error {:?}: {:?}", error, connected, - remaining_established_connection_ids.len() ); } else { - log::debug!( - "Connection closed: {:?}; Total (peer): {}.", - connected, - remaining_established_connection_ids.len() + tracing::debug!( + total_peers=%remaining_established_connection_ids.len(), + "Connection closed: {:?}", + connected ); } let peer_id = connected.peer_id; @@ -1015,15 +894,16 @@ where peer_id, connection_id: id, endpoint: &endpoint, - handler, remaining_established: num_established as usize, })); - return Some(SwarmEvent::ConnectionClosed { - peer_id, - endpoint, - cause: error, - num_established, - }); + self.pending_swarm_events + .push_back(SwarmEvent::ConnectionClosed { + peer_id, + connection_id: id, + endpoint, + cause: error, + num_established, + }); } PoolEvent::ConnectionEvent { peer_id, id, event } => { self.behaviour @@ -1044,8 +924,6 @@ where })); } } - - None } fn handle_transport_event( @@ -1054,7 +932,7 @@ where as Transport>::ListenerUpgrade, io::Error, >, - ) -> Option>> { + ) { match event { TransportEvent::Incoming { listener_id: _, @@ -1081,47 +959,42 @@ where connection_id, })); - return Some(SwarmEvent::IncomingConnectionError { - local_addr, - send_back_addr, - error: listen_error, - }); + self.pending_swarm_events + .push_back(SwarmEvent::IncomingConnectionError { + connection_id, + local_addr, + send_back_addr, + error: listen_error, + }); + return; } } - match self.pool.add_incoming( + self.pool.add_incoming( upgrade, IncomingInfo { local_addr: &local_addr, send_back_addr: &send_back_addr, }, connection_id, - ) { - Ok(()) => { - return Some(SwarmEvent::IncomingConnection { - local_addr, - send_back_addr, - }); - } - Err(connection_limit) => { - #[allow(deprecated)] - let error = 
ListenError::ConnectionLimit(connection_limit); - self.behaviour - .on_swarm_event(FromSwarm::ListenFailure(ListenFailure { - local_addr: &local_addr, - send_back_addr: &send_back_addr, - error: &error, - connection_id, - })); - log::debug!("Incoming connection rejected: {:?}", connection_limit); - } - }; + ); + + self.pending_swarm_events + .push_back(SwarmEvent::IncomingConnection { + connection_id, + local_addr, + send_back_addr, + }) } TransportEvent::NewAddress { listener_id, listen_addr, } => { - log::debug!("Listener {:?}; New address: {:?}", listener_id, listen_addr); + tracing::debug!( + listener=?listener_id, + address=%listen_addr, + "New listener address" + ); let addrs = self.listened_addrs.entry(listener_id).or_default(); if !addrs.contains(&listen_addr) { addrs.push(listen_addr.clone()) @@ -1131,19 +1004,20 @@ where listener_id, addr: &listen_addr, })); - return Some(SwarmEvent::NewListenAddr { - listener_id, - address: listen_addr, - }); + self.pending_swarm_events + .push_back(SwarmEvent::NewListenAddr { + listener_id, + address: listen_addr, + }) } TransportEvent::AddressExpired { listener_id, listen_addr, } => { - log::debug!( - "Listener {:?}; Expired address {:?}.", - listener_id, - listen_addr + tracing::debug!( + listener=?listener_id, + address=%listen_addr, + "Expired listener address" ); if let Some(addrs) = self.listened_addrs.get_mut(&listener_id) { addrs.retain(|a| a != &listen_addr); @@ -1153,16 +1027,21 @@ where listener_id, addr: &listen_addr, })); - return Some(SwarmEvent::ExpiredListenAddr { - listener_id, - address: listen_addr, - }); + self.pending_swarm_events + .push_back(SwarmEvent::ExpiredListenAddr { + listener_id, + address: listen_addr, + }) } TransportEvent::ListenerClosed { listener_id, reason, } => { - log::debug!("Listener {:?}; Closed by {:?}.", listener_id, reason); + tracing::debug!( + listener=?listener_id, + ?reason, + "Listener closed" + ); let addrs = self.listened_addrs.remove(&listener_id).unwrap_or_default(); 
for addr in addrs.iter() { self.behaviour.on_swarm_event(FromSwarm::ExpiredListenAddr( @@ -1174,11 +1053,12 @@ where listener_id, reason: reason.as_ref().copied(), })); - return Some(SwarmEvent::ListenerClosed { - listener_id, - addresses: addrs.to_vec(), - reason, - }); + self.pending_swarm_events + .push_back(SwarmEvent::ListenerClosed { + listener_id, + addresses: addrs.to_vec(), + reason, + }) } TransportEvent::ListenerError { listener_id, error } => { self.behaviour @@ -1186,32 +1066,44 @@ where listener_id, err: &error, })); - return Some(SwarmEvent::ListenerError { listener_id, error }); + self.pending_swarm_events + .push_back(SwarmEvent::ListenerError { listener_id, error }) } } - None } fn handle_behaviour_event( &mut self, - event: ToSwarm>, - ) -> Option>> { + event: ToSwarm>, + ) { match event { - ToSwarm::GenerateEvent(event) => return Some(SwarmEvent::Behaviour(event)), + ToSwarm::GenerateEvent(event) => { + self.pending_swarm_events + .push_back(SwarmEvent::Behaviour(event)); + } ToSwarm::Dial { opts } => { - let peer_id = opts.get_or_parse_peer_id(); + let peer_id = opts.get_peer_id(); + let connection_id = opts.connection_id(); if let Ok(()) = self.dial(opts) { - if let Ok(Some(peer_id)) = peer_id { - return Some(SwarmEvent::Dialing(peer_id)); - } + self.pending_swarm_events.push_back(SwarmEvent::Dialing { + peer_id, + connection_id, + }); } } + ToSwarm::ListenOn { opts } => { + // Error is dispatched internally, safe to ignore. 
+ let _ = self.add_listener(opts); + } + ToSwarm::RemoveListener { id } => { + self.remove_listener(id); + } ToSwarm::NotifyHandler { peer_id, handler, event, } => { - assert!(self.pending_event.is_none()); + assert!(self.pending_handler_event.is_none()); let handler = match handler { NotifyHandler::One(connection) => PendingNotifyHandler::One(connection), NotifyHandler::Any => { @@ -1223,27 +1115,17 @@ where } }; - self.pending_event = Some((peer_id, handler, event)); + self.pending_handler_event = Some((peer_id, handler, event)); } - ToSwarm::ReportObservedAddr { address, score } => { - // Maps the given `observed_addr`, representing an address of the local - // node observed by a remote peer, onto the locally known listen addresses - // to yield one or more addresses of the local node that may be publicly - // reachable. - // - // I.e. self method incorporates the view of other peers into the listen - // addresses seen by the local node to account for possible IP and port - // mappings performed by intermediate network devices in an effort to - // obtain addresses for the local peer that are also reachable for peers - // other than the peer who reported the `observed_addr`. - // - // The translation is transport-specific. See [`Transport::address_translation`]. + ToSwarm::NewExternalAddrCandidate(addr) => { + // Apply address translation to the candidate address. + // For TCP without port-reuse, the observed address contains an ephemeral port which needs to be replaced by the port of a listen address. 
let translated_addresses = { let mut addrs: Vec<_> = self .listened_addrs .values() .flatten() - .filter_map(|server| self.transport.address_translation(server, &address)) + .filter_map(|server| self.transport.address_translation(server, &addr)) .collect(); // remove duplicates @@ -1251,10 +1133,36 @@ where addrs.dedup(); addrs }; - for addr in translated_addresses { - self.add_external_address(addr, score); + + // If address translation yielded nothing, broacast the original candidate address. + if translated_addresses.is_empty() { + self.behaviour + .on_swarm_event(FromSwarm::NewExternalAddrCandidate( + NewExternalAddrCandidate { addr: &addr }, + )); + self.pending_swarm_events + .push_back(SwarmEvent::NewExternalAddrCandidate { address: addr }); + } else { + for addr in translated_addresses { + self.behaviour + .on_swarm_event(FromSwarm::NewExternalAddrCandidate( + NewExternalAddrCandidate { addr: &addr }, + )); + self.pending_swarm_events + .push_back(SwarmEvent::NewExternalAddrCandidate { address: addr }); + } } } + ToSwarm::ExternalAddrConfirmed(addr) => { + self.add_external_address(addr.clone()); + self.pending_swarm_events + .push_back(SwarmEvent::ExternalAddrConfirmed { address: addr }); + } + ToSwarm::ExternalAddrExpired(addr) => { + self.remove_external_address(&addr); + self.pending_swarm_events + .push_back(SwarmEvent::ExternalAddrExpired { address: addr }); + } ToSwarm::CloseConnection { peer_id, connection, @@ -1269,17 +1177,16 @@ where } }, } - - None } /// Internal function used by everything event-related. /// /// Polls the `Swarm` for the next event. + #[tracing::instrument(level = "debug", name = "Swarm::poll", skip(self, cx))] fn poll_next_event( mut self: Pin<&mut Self>, cx: &mut Context<'_>, - ) -> Poll>> { + ) -> Poll> { // We use a `this` variable because the compiler can't mutably borrow multiple times // across a `Deref`. 
let this = &mut *self; @@ -1294,7 +1201,11 @@ where // // (2) is polled before (3) to prioritize existing connections over upgrading new incoming connections. loop { - match this.pending_event.take() { + if let Some(swarm_event) = this.pending_swarm_events.pop_front() { + return Poll::Ready(swarm_event); + } + + match this.pending_handler_event.take() { // Try to deliver the pending event emitted by the [`NetworkBehaviour`] in the previous // iteration to the connection handler(s). Some((peer_id, handler, event)) => match handler { @@ -1303,7 +1214,7 @@ where Some(conn) => match notify_one(conn, event, cx) { None => continue, Some(event) => { - this.pending_event = Some((peer_id, handler, event)); + this.pending_handler_event = Some((peer_id, handler, event)); } }, None => continue, @@ -1314,57 +1225,36 @@ where None => continue, Some((event, ids)) => { let handler = PendingNotifyHandler::Any(ids); - this.pending_event = Some((peer_id, handler, event)); + this.pending_handler_event = Some((peer_id, handler, event)); } } } }, // No pending event. Allow the [`NetworkBehaviour`] to make progress. - None => { - let behaviour_poll = { - let mut parameters = SwarmPollParameters { - local_peer_id: &this.local_peer_id, - supported_protocols: &this.supported_protocols, - listened_addrs: this.listened_addrs.values().flatten().collect(), - external_addrs: &this.external_addrs, - }; - this.behaviour.poll(cx, &mut parameters) - }; + None => match this.behaviour.poll(cx) { + Poll::Pending => {} + Poll::Ready(behaviour_event) => { + this.handle_behaviour_event(behaviour_event); - match behaviour_poll { - Poll::Pending => {} - Poll::Ready(behaviour_event) => { - if let Some(swarm_event) = this.handle_behaviour_event(behaviour_event) - { - return Poll::Ready(swarm_event); - } - - continue; - } + continue; } - } + }, } // Poll the known peers. 
match this.pool.poll(cx) { Poll::Pending => {} Poll::Ready(pool_event) => { - if let Some(swarm_event) = this.handle_pool_event(pool_event) { - return Poll::Ready(swarm_event); - } - + this.handle_pool_event(pool_event); continue; } - }; + } // Poll the listener(s) for new connections. match Pin::new(&mut this.transport).poll(cx) { Poll::Pending => {} Poll::Ready(transport_event) => { - if let Some(swarm_event) = this.handle_transport_event(transport_event) { - return Poll::Ready(swarm_event); - } - + this.handle_transport_event(transport_event); continue; } } @@ -1428,8 +1318,8 @@ fn notify_any( where TBehaviour: NetworkBehaviour, THandler: ConnectionHandler< - InEvent = THandlerInEvent, - OutEvent = THandlerOutEvent, + FromBehaviour = THandlerInEvent, + ToBehaviour = THandlerOutEvent, >, { let mut pending = SmallVec::new(); @@ -1466,12 +1356,12 @@ where /// connection and listener status. See [`SwarmEvent`] for details. /// /// Note: This stream is infinite and it is guaranteed that -/// [`Stream::poll_next`] will never return `Poll::Ready(None)`. -impl Stream for Swarm +/// [`futures::Stream::poll_next`] will never return `Poll::Ready(None)`. +impl futures::Stream for Swarm where TBehaviour: NetworkBehaviour, { - type Item = SwarmEvent, THandlerErr>; + type Item = SwarmEvent>; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { self.as_mut().poll_next_event(cx).map(Some) @@ -1488,66 +1378,16 @@ where } } -/// Parameters passed to `poll()`, that the `NetworkBehaviour` has access to. 
-// TODO: #[derive(Debug)] -pub struct SwarmPollParameters<'a> { - local_peer_id: &'a PeerId, - supported_protocols: &'a [Vec], - listened_addrs: Vec<&'a Multiaddr>, - external_addrs: &'a Addresses, -} - -impl<'a> PollParameters for SwarmPollParameters<'a> { - type SupportedProtocolsIter = std::iter::Cloned>>; - type ListenedAddressesIter = std::iter::Cloned>; - type ExternalAddressesIter = AddressIntoIter; - - fn supported_protocols(&self) -> Self::SupportedProtocolsIter { - self.supported_protocols.iter().cloned() - } - - fn listened_addresses(&self) -> Self::ListenedAddressesIter { - self.listened_addrs.clone().into_iter().cloned() - } - - fn external_addresses(&self) -> Self::ExternalAddressesIter { - self.external_addrs.clone().into_iter() - } - - fn local_peer_id(&self) -> &PeerId { - self.local_peer_id - } -} - -/// A [`SwarmBuilder`] provides an API for configuring and constructing a [`Swarm`]. -pub struct SwarmBuilder { - local_peer_id: PeerId, - transport: transport::Boxed<(PeerId, StreamMuxerBox)>, - behaviour: TBehaviour, +pub struct Config { pool_config: PoolConfig, - #[allow(deprecated)] - connection_limits: ConnectionLimits, } -impl SwarmBuilder -where - TBehaviour: NetworkBehaviour, -{ - /// Creates a new [`SwarmBuilder`] from the given transport, behaviour, local peer ID and - /// executor. The `Swarm` with its underlying `Network` is obtained via - /// [`SwarmBuilder::build`]. - pub fn with_executor( - transport: transport::Boxed<(PeerId, StreamMuxerBox)>, - behaviour: TBehaviour, - local_peer_id: PeerId, - executor: impl Executor + Send + 'static, - ) -> Self { +impl Config { + /// Creates a new [`Config`] from the given executor. The [`Swarm`] is obtained via + /// [`Swarm::new`]. 
+ pub fn with_executor(executor: impl Executor + Send + 'static) -> Self { Self { - local_peer_id, - transport, - behaviour, pool_config: PoolConfig::new(Some(Box::new(executor))), - connection_limits: Default::default(), } } @@ -1561,76 +1401,26 @@ where /// } /// ``` #[cfg(feature = "wasm-bindgen")] - pub fn with_wasm_executor( - transport: transport::Boxed<(PeerId, StreamMuxerBox)>, - behaviour: TBehaviour, - local_peer_id: PeerId, - ) -> Self { - Self::with_executor( - transport, - behaviour, - local_peer_id, - crate::executor::WasmBindgenExecutor, - ) + pub fn with_wasm_executor() -> Self { + Self::with_executor(crate::executor::WasmBindgenExecutor) } - /// Builds a new [`SwarmBuilder`] from the given transport, behaviour, local peer ID and a - /// `tokio` executor. + /// Builds a new [`Config`] from the given `tokio` executor. #[cfg(all( feature = "tokio", not(any(target_os = "emscripten", target_os = "wasi", target_os = "unknown")) ))] - pub fn with_tokio_executor( - transport: transport::Boxed<(PeerId, StreamMuxerBox)>, - behaviour: TBehaviour, - local_peer_id: PeerId, - ) -> Self { - Self::with_executor( - transport, - behaviour, - local_peer_id, - crate::executor::TokioExecutor, - ) + pub fn with_tokio_executor() -> Self { + Self::with_executor(crate::executor::TokioExecutor) } - /// Builds a new [`SwarmBuilder`] from the given transport, behaviour, local peer ID and a - /// `async-std` executor. + /// Builds a new [`Config`] from the given `async-std` executor. #[cfg(all( feature = "async-std", not(any(target_os = "emscripten", target_os = "wasi", target_os = "unknown")) ))] - pub fn with_async_std_executor( - transport: transport::Boxed<(PeerId, StreamMuxerBox)>, - behaviour: TBehaviour, - local_peer_id: PeerId, - ) -> Self { - Self::with_executor( - transport, - behaviour, - local_peer_id, - crate::executor::AsyncStdExecutor, - ) - } - - /// Creates a new [`SwarmBuilder`] from the given transport, behaviour and local peer ID. 
The - /// `Swarm` with its underlying `Network` is obtained via [`SwarmBuilder::build`]. - /// - /// ## ⚠️ Performance warning - /// All connections will be polled on the current task, thus quite bad performance - /// characteristics should be expected. Whenever possible use an executor and - /// [`SwarmBuilder::with_executor`]. - pub fn without_executor( - transport: transport::Boxed<(PeerId, StreamMuxerBox)>, - behaviour: TBehaviour, - local_peer_id: PeerId, - ) -> Self { - Self { - local_peer_id, - transport, - behaviour, - pool_config: PoolConfig::new(None), - connection_limits: Default::default(), - } + pub fn with_async_std_executor() -> Self { + Self::with_executor(crate::executor::AsyncStdExecutor) } /// Configures the number of events from the [`NetworkBehaviour`] in @@ -1642,7 +1432,7 @@ where /// volume of events. If this value is too low, then the [`Swarm`] will /// be sleeping more often than necessary. Increasing this value increases /// the overall memory usage. - pub fn notify_handler_buffer_size(mut self, n: NonZeroUsize) -> Self { + pub fn with_notify_handler_buffer_size(mut self, n: NonZeroUsize) -> Self { self.pool_config = self.pool_config.with_notify_handler_buffer_size(n); self } @@ -1658,24 +1448,17 @@ where /// usage, and more importantly the latency between the moment when an /// event is emitted and the moment when it is received by the /// [`NetworkBehaviour`]. - pub fn per_connection_event_buffer_size(mut self, n: usize) -> Self { + pub fn with_per_connection_event_buffer_size(mut self, n: usize) -> Self { self.pool_config = self.pool_config.with_per_connection_event_buffer_size(n); self } /// Number of addresses concurrently dialed for a single outbound connection attempt. 
- pub fn dial_concurrency_factor(mut self, factor: NonZeroU8) -> Self { + pub fn with_dial_concurrency_factor(mut self, factor: NonZeroU8) -> Self { self.pool_config = self.pool_config.with_dial_concurrency_factor(factor); self } - /// Configures the connection limits. - #[allow(deprecated)] - pub fn connection_limits(mut self, limits: ConnectionLimits) -> Self { - self.connection_limits = limits; - self - } - /// Configures an override for the substream upgrade protocol to use. /// /// The subtream upgrade protocol is the multistream-select protocol @@ -1686,7 +1469,10 @@ where /// > **Note**: If configured, specific upgrade protocols for /// > individual [`SubstreamProtocol`]s emitted by the `NetworkBehaviour` /// > are ignored. - pub fn substream_upgrade_protocol_override(mut self, v: libp2p_core::upgrade::Version) -> Self { + pub fn with_substream_upgrade_protocol_override( + mut self, + v: libp2p_core::upgrade::Version, + ) -> Self { self.pool_config = self.pool_config.with_substream_upgrade_protocol_override(v); self } @@ -1699,56 +1485,35 @@ where /// negotiating inbound streams. The total number of inbound streams on a /// connection is the sum of negotiating and negotiated streams. A limit on /// the total number of streams can be enforced at the - /// [`StreamMuxerBox`](libp2p_core::muxing::StreamMuxerBox) level. - pub fn max_negotiating_inbound_streams(mut self, v: usize) -> Self { + /// [`StreamMuxerBox`] level. + pub fn with_max_negotiating_inbound_streams(mut self, v: usize) -> Self { self.pool_config = self.pool_config.with_max_negotiating_inbound_streams(v); self } - /// Builds a `Swarm` with the current configuration. 
- pub fn build(self) -> Swarm { - Swarm { - local_peer_id: self.local_peer_id, - transport: self.transport, - pool: Pool::new(self.local_peer_id, self.pool_config, self.connection_limits), - behaviour: self.behaviour, - supported_protocols: Default::default(), - listened_addrs: HashMap::new(), - external_addrs: Addresses::default(), - banned_peers: HashSet::new(), - pending_event: None, - } + /// How long to keep a connection alive once it is idling. + /// + /// Defaults to 0. + pub fn with_idle_connection_timeout(mut self, timeout: Duration) -> Self { + self.pool_config.idle_connection_timeout = timeout; + self } } /// Possible errors when trying to establish or upgrade an outbound connection. #[derive(Debug)] pub enum DialError { - /// The peer is currently banned. - #[deprecated(note = "Use `libp2p::allow_block_list` instead.", since = "0.42.1")] - Banned, - /// The configured limit for simultaneous outgoing connections - /// has been reached. - #[deprecated( - note = "Use `libp2p::connection_limits` instead and handle `{Dial,Listen}Error::Denied::cause`.", - since = "0.42.1" - )] - #[allow(deprecated)] - ConnectionLimit(ConnectionLimit), /// The peer identity obtained on the connection matches the local peer. LocalPeerId { endpoint: ConnectedPoint, }, - /// [`NetworkBehaviour::addresses_of_peer`] returned no addresses - /// for the peer to dial. + /// No addresses have been provided by [`NetworkBehaviour::handle_pending_outbound_connection`] and [`DialOpts`]. NoAddresses, /// The provided [`dial_opts::PeerCondition`] evaluated to false and thus /// the dial was aborted. DialPeerConditionFalse(dial_opts::PeerCondition), /// Pending connection attempt has been aborted. Aborted, - /// The provided peer identity is invalid. - InvalidPeerId(Multihash), /// The peer identity obtained on the connection did not match the one that was expected. 
WrongPeerId { obtained: PeerId, @@ -1764,8 +1529,6 @@ pub enum DialError { impl From for DialError { fn from(error: PendingOutboundConnectionError) -> Self { match error { - #[allow(deprecated)] - PendingConnectionError::ConnectionLimit(limit) => DialError::ConnectionLimit(limit), PendingConnectionError::Aborted => DialError::Aborted, PendingConnectionError::WrongPeerId { obtained, endpoint } => { DialError::WrongPeerId { obtained, endpoint } @@ -1779,25 +1542,19 @@ impl From for DialError { impl fmt::Display for DialError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { - #[allow(deprecated)] - DialError::ConnectionLimit(err) => write!(f, "Dial error: {err}"), DialError::NoAddresses => write!(f, "Dial error: no addresses for peer."), DialError::LocalPeerId { endpoint } => write!( f, "Dial error: tried to dial local peer id at {endpoint:?}." ), - #[allow(deprecated)] - DialError::Banned => write!(f, "Dial error: peer is banned."), - DialError::DialPeerConditionFalse(c) => { - write!(f, "Dial error: condition {c:?} for dialing peer was false.") - } + DialError::DialPeerConditionFalse(PeerCondition::Disconnected) => write!(f, "Dial error: dial condition was configured to only happen when disconnected (`PeerCondition::Disconnected`), but node is already connected, thus cancelling new dial."), + DialError::DialPeerConditionFalse(PeerCondition::NotDialing) => write!(f, "Dial error: dial condition was configured to only happen if there is currently no ongoing dialing attempt (`PeerCondition::NotDialing`), but a dial is in progress, thus cancelling new dial."), + DialError::DialPeerConditionFalse(PeerCondition::DisconnectedAndNotDialing) => write!(f, "Dial error: dial condition was configured to only happen when both disconnected (`PeerCondition::Disconnected`) and there is currently no ongoing dialing attempt (`PeerCondition::NotDialing`), but node is already connected or dial is in progress, thus cancelling new dial."), + 
DialError::DialPeerConditionFalse(PeerCondition::Always) => unreachable!("Dial peer condition is by definition true."), DialError::Aborted => write!( f, "Dial error: Pending connection attempt has been aborted." ), - DialError::InvalidPeerId(multihash) => { - write!(f, "Dial error: multihash {multihash:?} is not a PeerId") - } DialError::WrongPeerId { obtained, endpoint } => write!( f, "Dial error: Unexpected peer ID {obtained} at {endpoint:?}." @@ -1834,15 +1591,10 @@ fn print_error_chain(f: &mut fmt::Formatter<'_>, e: &dyn error::Error) -> fmt::R impl error::Error for DialError { fn source(&self) -> Option<&(dyn error::Error + 'static)> { match self { - #[allow(deprecated)] - DialError::ConnectionLimit(err) => Some(err), DialError::LocalPeerId { .. } => None, DialError::NoAddresses => None, - #[allow(deprecated)] - DialError::Banned => None, DialError::DialPeerConditionFalse(_) => None, DialError::Aborted => None, - DialError::InvalidPeerId { .. } => None, DialError::WrongPeerId { .. } => None, DialError::Transport(_) => None, DialError::Denied { cause } => Some(cause), @@ -1853,14 +1605,6 @@ impl error::Error for DialError { /// Possible errors when upgrading an inbound connection. #[derive(Debug)] pub enum ListenError { - /// The configured limit for simultaneous outgoing connections - /// has been reached. - #[deprecated( - note = "Use `libp2p::connection_limits` instead and handle `{Dial,Listen}Error::Denied::cause`.", - since = "0.42.1" - )] - #[allow(deprecated)] - ConnectionLimit(ConnectionLimit), /// Pending connection attempt has been aborted. Aborted, /// The peer identity obtained on the connection did not match the one that was expected. 
@@ -1883,10 +1627,6 @@ impl From for ListenError { fn from(error: PendingInboundConnectionError) -> Self { match error { PendingInboundConnectionError::Transport(inner) => ListenError::Transport(inner), - #[allow(deprecated)] - PendingInboundConnectionError::ConnectionLimit(inner) => { - ListenError::ConnectionLimit(inner) - } PendingInboundConnectionError::Aborted => ListenError::Aborted, PendingInboundConnectionError::WrongPeerId { obtained, endpoint } => { ListenError::WrongPeerId { obtained, endpoint } @@ -1901,8 +1641,6 @@ impl From for ListenError { impl fmt::Display for ListenError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { - #[allow(deprecated)] - ListenError::ConnectionLimit(_) => write!(f, "Listen error"), ListenError::Aborted => write!( f, "Listen error: Pending connection attempt has been aborted." @@ -1914,8 +1652,8 @@ impl fmt::Display for ListenError { ListenError::Transport(_) => { write!(f, "Listen error: Failed to negotiate transport protocol(s)") } - ListenError::Denied { .. } => { - write!(f, "Listen error") + ListenError::Denied { cause } => { + write!(f, "Listen error: Denied: {cause}") } ListenError::LocalPeerId { endpoint } => { write!(f, "Listen error: Local peer ID at {endpoint:?}.") @@ -1927,8 +1665,6 @@ impl fmt::Display for ListenError { impl error::Error for ListenError { fn source(&self) -> Option<&(dyn error::Error + 'static)> { match self { - #[allow(deprecated)] - ListenError::ConnectionLimit(err) => Some(err), ListenError::WrongPeerId { .. 
} => None, ListenError::Transport(err) => Some(err), ListenError::Aborted => None, @@ -1947,9 +1683,9 @@ pub struct ConnectionDenied { } impl ConnectionDenied { - pub fn new(cause: impl error::Error + Send + Sync + 'static) -> Self { + pub fn new(cause: impl Into>) -> Self { Self { - inner: Box::new(cause), + inner: cause.into(), } } @@ -1965,6 +1701,14 @@ impl ConnectionDenied { Ok(*inner) } + + /// Attempt to downcast to a particular reason for why the connection was denied. + pub fn downcast_ref(&self) -> Option<&E> + where + E: error::Error + Send + Sync + 'static, + { + self.inner.downcast_ref::() + } } impl fmt::Display for ConnectionDenied { @@ -2001,51 +1745,21 @@ impl NetworkInfo { } } -/// Ensures a given `Multiaddr` is a `/p2p/...` address for the given peer. -/// -/// If the given address is already a `p2p` address for the given peer, -/// i.e. the last encapsulated protocol is `/p2p/`, this is a no-op. -/// -/// If the given address is already a `p2p` address for a different peer -/// than the one given, the given `Multiaddr` is returned as an `Err`. -/// -/// If the given address is not yet a `p2p` address for the given peer, -/// the `/p2p/` protocol is appended to the returned address. 
-fn p2p_addr(peer: Option, addr: Multiaddr) -> Result { - let peer = match peer { - Some(p) => p, - None => return Ok(addr), - }; - - if let Some(multiaddr::Protocol::P2p(hash)) = addr.iter().last() { - if &hash != peer.as_ref() { - return Err(addr); - } - Ok(addr) - } else { - Ok(addr.with(multiaddr::Protocol::P2p(peer.into()))) - } -} - #[cfg(test)] mod tests { use super::*; + use crate::dummy; use crate::test::{CallTraceBehaviour, MockBehaviour}; - use either::Either; - use futures::executor::block_on; - use futures::executor::ThreadPool; - use futures::future::poll_fn; - use futures::{executor, future, ready}; + use futures::future; use libp2p_core::multiaddr::multiaddr; use libp2p_core::transport::memory::MemoryTransportError; use libp2p_core::transport::TransportEvent; + use libp2p_core::Endpoint; use libp2p_core::{multiaddr, transport, upgrade}; - use libp2p_core::{Endpoint, UpgradeError}; use libp2p_identity as identity; use libp2p_plaintext as plaintext; use libp2p_yamux as yamux; use quickcheck::*; - use void::Void; // Test execution state. // Connection => Disconnecting => Connecting. 
@@ -2054,30 +1768,24 @@ mod tests { Disconnecting, } - fn new_test_swarm( - handler_proto: T, - ) -> SwarmBuilder>> - where - T: ConnectionHandler + Clone, - T::OutEvent: Clone, - O: Send + 'static, - { + fn new_test_swarm( + config: Config, + ) -> Swarm>> { let id_keys = identity::Keypair::generate_ed25519(); let local_public_key = id_keys.public(); let transport = transport::MemoryTransport::default() .upgrade(upgrade::Version::V1) - .authenticate(plaintext::PlainText2Config { - local_public_key: local_public_key.clone(), - }) + .authenticate(plaintext::Config::new(&id_keys)) .multiplex(yamux::Config::default()) .boxed(); - let behaviour = CallTraceBehaviour::new(MockBehaviour::new(handler_proto)); - match ThreadPool::new().ok() { - Some(tp) => { - SwarmBuilder::with_executor(transport, behaviour, local_public_key.into(), tp) - } - None => SwarmBuilder::without_executor(transport, behaviour, local_public_key.into()), - } + let behaviour = CallTraceBehaviour::new(MockBehaviour::new(dummy::ConnectionHandler)); + + Swarm::new( + transport, + behaviour, + local_public_key.into(), + config.with_idle_connection_timeout(Duration::from_secs(5)), + ) } fn swarms_connected( @@ -2121,149 +1829,15 @@ mod tests { && !swarm2.is_connected(swarm1.local_peer_id()) } - /// Establishes multiple connections between two peers, - /// after which one peer bans the other. - /// - /// The test expects both behaviours to be notified via calls to [`NetworkBehaviour::on_swarm_event`] - /// with pairs of [`FromSwarm::ConnectionEstablished`] / [`FromSwarm::ConnectionClosed`] - /// while unbanned. - /// - /// While the ban is in effect, further dials occur. For these connections no - /// [`FromSwarm::ConnectionEstablished`], [`FromSwarm::ConnectionClosed`] - /// calls should be registered. 
- #[test] - #[allow(deprecated)] - fn test_connect_disconnect_ban() { - let _ = env_logger::try_init(); - - // Since the test does not try to open any substreams, we can - // use keep alive protocols handler. - let handler_proto = keep_alive::ConnectionHandler; - - let mut swarm1 = new_test_swarm::<_, ()>(handler_proto.clone()).build(); - let mut swarm2 = new_test_swarm::<_, ()>(handler_proto).build(); - - let addr1: Multiaddr = multiaddr::Protocol::Memory(rand::random::()).into(); - let addr2: Multiaddr = multiaddr::Protocol::Memory(rand::random::()).into(); - - swarm1.listen_on(addr1).unwrap(); - swarm2.listen_on(addr2.clone()).unwrap(); - - let swarm1_id = *swarm1.local_peer_id(); - - #[derive(Debug)] - enum Stage { - /// Waiting for the peers to connect. Banning has not occurred. - Connecting, - /// Ban occurred. - Banned, - // Ban is in place and a dial is ongoing. - BannedDial, - // Mid-ban dial was registered and the peer was unbanned. - Unbanned, - // There are dial attempts ongoing for the no longer banned peers. - Reconnecting, - } - - let num_connections = 10; - - for _ in 0..num_connections { - swarm1.dial(addr2.clone()).unwrap(); - } - - let mut s1_expected_conns = num_connections; - let mut s2_expected_conns = num_connections; - - let mut stage = Stage::Connecting; - - executor::block_on(future::poll_fn(move |cx| loop { - let poll1 = Swarm::poll_next_event(Pin::new(&mut swarm1), cx); - let poll2 = Swarm::poll_next_event(Pin::new(&mut swarm2), cx); - match stage { - Stage::Connecting => { - if swarm1.behaviour.assert_connected(s1_expected_conns, 1) - && swarm2.behaviour.assert_connected(s2_expected_conns, 1) - { - // Setup to test that already established connections are correctly closed - // and reported as such after the peer is banned. 
- swarm2.ban_peer_id(swarm1_id); - stage = Stage::Banned; - } - } - Stage::Banned => { - if swarm1.behaviour.assert_disconnected(s1_expected_conns, 1) - && swarm2.behaviour.assert_disconnected(s2_expected_conns, 1) - { - // Setup to test that new connections of banned peers are not reported. - swarm1.dial(addr2.clone()).unwrap(); - s1_expected_conns += 1; - stage = Stage::BannedDial; - } - } - Stage::BannedDial => { - if swarm1.behaviour.assert_disconnected(s1_expected_conns, 2) { - // The banned connection was established. Given the ban, swarm2 closed the - // connection. Check that it was not reported to the behaviour of the - // banning swarm. - assert_eq!( - swarm2.behaviour.on_connection_established.len(), - s2_expected_conns, - "No additional closed connections should be reported for the banned peer" - ); - - // Setup to test that the banned connection is not reported upon closing - // even if the peer is unbanned. - swarm2.unban_peer_id(swarm1_id); - stage = Stage::Unbanned; - } - } - Stage::Unbanned => { - if swarm1.network_info().num_peers() == 0 - && swarm2.network_info().num_peers() == 0 - { - // The banned connection has closed. Check that it was not reported. - assert_eq!( - swarm2.behaviour.on_connection_closed.len(), s2_expected_conns, - "No additional closed connections should be reported for the banned peer" - ); - - // Setup to test that a ban lifted does not affect future connections. 
- for _ in 0..num_connections { - swarm1.dial(addr2.clone()).unwrap(); - } - s1_expected_conns += num_connections; - s2_expected_conns += num_connections; - stage = Stage::Reconnecting; - } - } - Stage::Reconnecting => { - if swarm1.behaviour.on_connection_established.len() == s1_expected_conns - && swarm2.behaviour.assert_connected(s2_expected_conns, 2) - { - return Poll::Ready(()); - } - } - } - - if poll1.is_pending() && poll2.is_pending() { - return Poll::Pending; - } - })) - } - /// Establishes multiple connections between two peers, /// after which one peer disconnects the other using [`Swarm::disconnect_peer_id`]. /// /// The test expects both behaviours to be notified via calls to [`NetworkBehaviour::on_swarm_event`] /// with pairs of [`FromSwarm::ConnectionEstablished`] / [`FromSwarm::ConnectionClosed`] - #[test] - fn test_swarm_disconnect() { - // Since the test does not try to open any substreams, we can - // use the dummy protocols handler. - let handler_proto = keep_alive::ConnectionHandler; - - let mut swarm1 = new_test_swarm::<_, ()>(handler_proto.clone()).build(); - let mut swarm2 = new_test_swarm::<_, ()>(handler_proto).build(); + #[tokio::test] + async fn test_swarm_disconnect() { + let mut swarm1 = new_test_swarm(Config::with_tokio_executor()); + let mut swarm2 = new_test_swarm(Config::with_tokio_executor()); let addr1: Multiaddr = multiaddr::Protocol::Memory(rand::random::()).into(); let addr2: Multiaddr = multiaddr::Protocol::Memory(rand::random::()).into(); @@ -2281,7 +1855,7 @@ mod tests { } let mut state = State::Connecting; - executor::block_on(future::poll_fn(move |cx| loop { + future::poll_fn(move |cx| loop { let poll1 = Swarm::poll_next_event(Pin::new(&mut swarm1), cx); let poll2 = Swarm::poll_next_event(Pin::new(&mut swarm2), cx); match state { @@ -2313,7 +1887,8 @@ mod tests { if poll1.is_pending() && poll2.is_pending() { return Poll::Pending; } - })) + }) + .await } /// Establishes multiple connections between two peers, @@ -2322,14 
+1897,10 @@ mod tests { /// /// The test expects both behaviours to be notified via calls to [`NetworkBehaviour::on_swarm_event`] /// with pairs of [`FromSwarm::ConnectionEstablished`] / [`FromSwarm::ConnectionClosed`] - #[test] - fn test_behaviour_disconnect_all() { - // Since the test does not try to open any substreams, we can - // use the dummy protocols handler. - let handler_proto = keep_alive::ConnectionHandler; - - let mut swarm1 = new_test_swarm::<_, ()>(handler_proto.clone()).build(); - let mut swarm2 = new_test_swarm::<_, ()>(handler_proto).build(); + #[tokio::test] + async fn test_behaviour_disconnect_all() { + let mut swarm1 = new_test_swarm(Config::with_tokio_executor()); + let mut swarm2 = new_test_swarm(Config::with_tokio_executor()); let addr1: Multiaddr = multiaddr::Protocol::Memory(rand::random::()).into(); let addr2: Multiaddr = multiaddr::Protocol::Memory(rand::random::()).into(); @@ -2347,7 +1918,7 @@ mod tests { } let mut state = State::Connecting; - executor::block_on(future::poll_fn(move |cx| loop { + future::poll_fn(move |cx| loop { let poll1 = Swarm::poll_next_event(Pin::new(&mut swarm1), cx); let poll2 = Swarm::poll_next_event(Pin::new(&mut swarm2), cx); match state { @@ -2383,7 +1954,8 @@ mod tests { if poll1.is_pending() && poll2.is_pending() { return Poll::Pending; } - })) + }) + .await } /// Establishes multiple connections between two peers, @@ -2392,14 +1964,10 @@ mod tests { /// /// The test expects both behaviours to be notified via calls to [`NetworkBehaviour::on_swarm_event`] /// with pairs of [`FromSwarm::ConnectionEstablished`] / [`FromSwarm::ConnectionClosed`] - #[test] - fn test_behaviour_disconnect_one() { - // Since the test does not try to open any substreams, we can - // use the dummy protocols handler. 
- let handler_proto = keep_alive::ConnectionHandler; - - let mut swarm1 = new_test_swarm::<_, ()>(handler_proto.clone()).build(); - let mut swarm2 = new_test_swarm::<_, ()>(handler_proto).build(); + #[tokio::test] + async fn test_behaviour_disconnect_one() { + let mut swarm1 = new_test_swarm(Config::with_tokio_executor()); + let mut swarm2 = new_test_swarm(Config::with_tokio_executor()); let addr1: Multiaddr = multiaddr::Protocol::Memory(rand::random::()).into(); let addr2: Multiaddr = multiaddr::Protocol::Memory(rand::random::()).into(); @@ -2417,7 +1985,7 @@ mod tests { let mut state = State::Connecting; let mut disconnected_conn_id = None; - executor::block_on(future::poll_fn(move |cx| loop { + future::poll_fn(move |cx| loop { let poll1 = Swarm::poll_next_event(Pin::new(&mut swarm1), cx); let poll2 = Swarm::poll_next_event(Pin::new(&mut swarm2), cx); match state { @@ -2461,7 +2029,8 @@ mod tests { if poll1.is_pending() && poll2.is_pending() { return Poll::Pending; } - })) + }) + .await } #[test] @@ -2476,10 +2045,11 @@ mod tests { } fn prop(concurrency_factor: DialConcurrencyFactor) { - block_on(async { - let mut swarm = new_test_swarm::<_, ()>(keep_alive::ConnectionHandler) - .dial_concurrency_factor(concurrency_factor.0) - .build(); + tokio::runtime::Runtime::new().unwrap().block_on(async { + let mut swarm = new_test_swarm( + Config::with_tokio_executor() + .with_dial_concurrency_factor(concurrency_factor.0), + ); // Listen on `concurrency_factor + 1` addresses. // @@ -2489,7 +2059,9 @@ mod tests { let mut transports = Vec::new(); for _ in 0..num_listen_addrs { let mut transport = transport::MemoryTransport::default().boxed(); - transport.listen_on("/memory/0".parse().unwrap()).unwrap(); + transport + .listen_on(ListenerId::next(), "/memory/0".parse().unwrap()) + .unwrap(); match transport.select_next_some().await { TransportEvent::NewAddress { listen_addr, .. 
} => { @@ -2511,19 +2083,14 @@ mod tests { ) .unwrap(); for mut transport in transports.into_iter() { - loop { - match futures::future::select(transport.select_next_some(), swarm.next()) - .await - { - future::Either::Left((TransportEvent::Incoming { .. }, _)) => { - break; - } - future::Either::Left(_) => { - panic!("Unexpected transport event.") - } - future::Either::Right((e, _)) => { - panic!("Expect swarm to not emit any event {e:?}") - } + match futures::future::select(transport.select_next_some(), swarm.next()).await + { + future::Either::Left((TransportEvent::Incoming { .. }, _)) => {} + future::Either::Left(_) => { + panic!("Unexpected transport event.") + } + future::Either::Right((e, _)) => { + panic!("Expect swarm to not emit any event {e:?}") } } } @@ -2538,203 +2105,29 @@ mod tests { QuickCheck::new().tests(10).quickcheck(prop as fn(_) -> _); } - #[test] - #[allow(deprecated)] - fn max_outgoing() { - use rand::Rng; - - let outgoing_limit = rand::thread_rng().gen_range(1..10); - - let limits = ConnectionLimits::default().with_max_pending_outgoing(Some(outgoing_limit)); - let mut network = new_test_swarm::<_, ()>(keep_alive::ConnectionHandler) - .connection_limits(limits) - .build(); - - let addr: Multiaddr = "/memory/1234".parse().unwrap(); - - let target = PeerId::random(); - for _ in 0..outgoing_limit { - network - .dial( - DialOpts::peer_id(target) - .addresses(vec![addr.clone()]) - .build(), - ) - .expect("Unexpected connection limit."); - } - - match network - .dial(DialOpts::peer_id(target).addresses(vec![addr]).build()) - .expect_err("Unexpected dialing success.") - { - #[allow(deprecated)] - DialError::ConnectionLimit(limit) => { - assert_eq!(limit.current, outgoing_limit); - assert_eq!(limit.limit, outgoing_limit); - } - e => panic!("Unexpected error: {e:?}"), - } - - let info = network.network_info(); - assert_eq!(info.num_peers(), 0); - assert_eq!( - info.connection_counters().num_pending_outgoing(), - outgoing_limit - ); - } - - #[test] - 
fn max_established_incoming() { - #[derive(Debug, Clone)] - struct Limit(u32); - - impl Arbitrary for Limit { - fn arbitrary(g: &mut Gen) -> Self { - Self(g.gen_range(1..10)) - } - } - - #[allow(deprecated)] - fn limits(limit: u32) -> ConnectionLimits { - ConnectionLimits::default().with_max_established_incoming(Some(limit)) - } - - fn prop(limit: Limit) { - let limit = limit.0; - - #[allow(deprecated)] - let mut network1 = new_test_swarm::<_, ()>(keep_alive::ConnectionHandler) - .connection_limits(limits(limit)) - .build(); - #[allow(deprecated)] - let mut network2 = new_test_swarm::<_, ()>(keep_alive::ConnectionHandler) - .connection_limits(limits(limit)) - .build(); - - let _ = network1.listen_on(multiaddr![Memory(0u64)]).unwrap(); - let listen_addr = async_std::task::block_on(poll_fn(|cx| { - match ready!(network1.poll_next_unpin(cx)).unwrap() { - SwarmEvent::NewListenAddr { address, .. } => Poll::Ready(address), - e => panic!("Unexpected network event: {e:?}"), - } - })); - - // Spawn and block on the dialer. - async_std::task::block_on({ - let mut n = 0; - network2.dial(listen_addr.clone()).unwrap(); - - let mut expected_closed = false; - let mut network_1_established = false; - let mut network_2_established = false; - let mut network_1_limit_reached = false; - let mut network_2_limit_reached = false; - poll_fn(move |cx| { - loop { - let mut network_1_pending = false; - let mut network_2_pending = false; - - match network1.poll_next_unpin(cx) { - Poll::Ready(Some(SwarmEvent::IncomingConnection { .. })) => {} - Poll::Ready(Some(SwarmEvent::ConnectionEstablished { .. })) => { - network_1_established = true; - } - #[allow(deprecated)] - Poll::Ready(Some(SwarmEvent::IncomingConnectionError { - error: ListenError::ConnectionLimit(err), - .. 
- })) => { - assert_eq!(err.limit, limit); - assert_eq!(err.limit, err.current); - let info = network1.network_info(); - let counters = info.connection_counters(); - assert_eq!(counters.num_established_incoming(), limit); - assert_eq!(counters.num_established(), limit); - network_1_limit_reached = true; - } - Poll::Pending => { - network_1_pending = true; - } - e => panic!("Unexpected network event: {e:?}"), - } - - match network2.poll_next_unpin(cx) { - Poll::Ready(Some(SwarmEvent::ConnectionEstablished { .. })) => { - network_2_established = true; - } - Poll::Ready(Some(SwarmEvent::ConnectionClosed { .. })) => { - assert!(expected_closed); - let info = network2.network_info(); - let counters = info.connection_counters(); - assert_eq!(counters.num_established_outgoing(), limit); - assert_eq!(counters.num_established(), limit); - network_2_limit_reached = true; - } - Poll::Pending => { - network_2_pending = true; - } - e => panic!("Unexpected network event: {e:?}"), - } - - if network_1_pending && network_2_pending { - return Poll::Pending; - } - - if network_1_established && network_2_established { - network_1_established = false; - network_2_established = false; - - if n <= limit { - // Dial again until the limit is exceeded. - n += 1; - network2.dial(listen_addr.clone()).unwrap(); - - if n == limit { - // The the next dialing attempt exceeds the limit, this - // is the connection we expected to get closed. - expected_closed = true; - } - } else { - panic!("Expect networks not to establish connections beyond the limit.") - } - } - - if network_1_limit_reached && network_2_limit_reached { - return Poll::Ready(()); - } - } - }) - }); - } - - quickcheck(prop as fn(_)); - } - - #[test] - fn invalid_peer_id() { + #[tokio::test] + async fn invalid_peer_id() { // Checks whether dialing an address containing the wrong peer id raises an error // for the expected peer id instead of the obtained peer id. 
- let mut swarm1 = new_test_swarm::<_, ()>(dummy::ConnectionHandler).build(); - let mut swarm2 = new_test_swarm::<_, ()>(dummy::ConnectionHandler).build(); + let mut swarm1 = new_test_swarm(Config::with_tokio_executor()); + let mut swarm2 = new_test_swarm(Config::with_tokio_executor()); swarm1.listen_on("/memory/0".parse().unwrap()).unwrap(); - let address = - futures::executor::block_on(future::poll_fn(|cx| match swarm1.poll_next_unpin(cx) { - Poll::Ready(Some(SwarmEvent::NewListenAddr { address, .. })) => { - Poll::Ready(address) - } - Poll::Pending => Poll::Pending, - _ => panic!("Was expecting the listen address to be reported"), - })); + let address = future::poll_fn(|cx| match swarm1.poll_next_unpin(cx) { + Poll::Ready(Some(SwarmEvent::NewListenAddr { address, .. })) => Poll::Ready(address), + Poll::Pending => Poll::Pending, + _ => panic!("Was expecting the listen address to be reported"), + }) + .await; let other_id = PeerId::random(); - let other_addr = address.with(multiaddr::Protocol::P2p(other_id.into())); + let other_addr = address.with(multiaddr::Protocol::P2p(other_id)); swarm2.dial(other_addr.clone()).unwrap(); - let (peer_id, error) = futures::executor::block_on(future::poll_fn(|cx| { + let (peer_id, error) = future::poll_fn(|cx| { if let Poll::Ready(Some(SwarmEvent::IncomingConnection { .. })) = swarm1.poll_next_unpin(cx) {} @@ -2746,7 +2139,8 @@ mod tests { Poll::Ready(x) => panic!("unexpected {x:?}"), Poll::Pending => Poll::Pending, } - })); + }) + .await; assert_eq!(peer_id.unwrap(), other_id); match error { DialError::WrongPeerId { obtained, endpoint } => { @@ -2763,8 +2157,8 @@ mod tests { } } - #[test] - fn dial_self() { + #[tokio::test] + async fn dial_self() { // Check whether dialing ourselves correctly fails. // // Dialing the same address we're listening should result in three events: @@ -2775,17 +2169,15 @@ mod tests { // // The last two can happen in any order. 
- let mut swarm = new_test_swarm::<_, ()>(dummy::ConnectionHandler).build(); + let mut swarm = new_test_swarm(Config::with_tokio_executor()); swarm.listen_on("/memory/0".parse().unwrap()).unwrap(); - let local_address = - futures::executor::block_on(future::poll_fn(|cx| match swarm.poll_next_unpin(cx) { - Poll::Ready(Some(SwarmEvent::NewListenAddr { address, .. })) => { - Poll::Ready(address) - } - Poll::Pending => Poll::Pending, - _ => panic!("Was expecting the listen address to be reported"), - })); + let local_address = future::poll_fn(|cx| match swarm.poll_next_unpin(cx) { + Poll::Ready(Some(SwarmEvent::NewListenAddr { address, .. })) => Poll::Ready(address), + Poll::Pending => Poll::Pending, + _ => panic!("Was expecting the listen address to be reported"), + }) + .await; swarm.listened_addrs.clear(); // This is a hack to actually execute the dial to ourselves which would otherwise be filtered. @@ -2793,7 +2185,7 @@ mod tests { let mut got_dial_err = false; let mut got_inc_err = false; - futures::executor::block_on(future::poll_fn(|cx| -> Poll> { + future::poll_fn(|cx| -> Poll> { loop { match swarm.poll_next_unpin(cx) { Poll::Ready(Some(SwarmEvent::OutgoingConnectionError { @@ -2827,26 +2219,27 @@ mod tests { Poll::Pending => break Poll::Pending, } } - })) + }) + .await .unwrap(); } - #[test] - fn dial_self_by_id() { + #[tokio::test] + async fn dial_self_by_id() { // Trying to dial self by passing the same `PeerId` shouldn't even be possible in the first // place. - let swarm = new_test_swarm::<_, ()>(dummy::ConnectionHandler).build(); + let swarm = new_test_swarm(Config::with_tokio_executor()); let peer_id = *swarm.local_peer_id(); assert!(!swarm.is_connected(&peer_id)); } - #[async_std::test] + #[tokio::test] async fn multiple_addresses_err() { // Tries dialing multiple addresses, and makes sure there's one dialing error per address. 
let target = PeerId::random(); - let mut swarm = new_test_swarm::<_, ()>(dummy::ConnectionHandler).build(); + let mut swarm = new_test_swarm(Config::with_tokio_executor()); let addresses = HashSet::from([ multiaddr![Ip4([0, 0, 0, 0]), Tcp(rand::random::())], @@ -2872,13 +2265,14 @@ mod tests { peer_id, // multiaddr, error: DialError::Transport(errors), + .. } => { assert_eq!(target, peer_id.unwrap()); let failed_addresses = errors.into_iter().map(|(addr, _)| addr).collect::>(); let expected_addresses = addresses .into_iter() - .map(|addr| addr.with(multiaddr::Protocol::P2p(target.into()))) + .map(|addr| addr.with(multiaddr::Protocol::P2p(target))) .collect::>(); assert_eq!(expected_addresses, failed_addresses); @@ -2887,16 +2281,18 @@ mod tests { } } - #[test] - fn aborting_pending_connection_surfaces_error() { - let _ = env_logger::try_init(); + #[tokio::test] + async fn aborting_pending_connection_surfaces_error() { + let _ = tracing_subscriber::fmt() + .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) + .try_init(); - let mut dialer = new_test_swarm::<_, ()>(dummy::ConnectionHandler).build(); - let mut listener = new_test_swarm::<_, ()>(dummy::ConnectionHandler).build(); + let mut dialer = new_test_swarm(Config::with_tokio_executor()); + let mut listener = new_test_swarm(Config::with_tokio_executor()); let listener_peer_id = *listener.local_peer_id(); listener.listen_on(multiaddr![Memory(0u64)]).unwrap(); - let listener_address = match block_on(listener.next()).unwrap() { + let listener_address = match listener.next().await.unwrap() { SwarmEvent::NewListenAddr { address, .. } => address, e => panic!("Unexpected network event: {e:?}"), }; @@ -2913,7 +2309,7 @@ mod tests { .disconnect_peer_id(listener_peer_id) .expect_err("Expect peer to not yet be connected."); - match block_on(dialer.next()).unwrap() { + match dialer.next().await.unwrap() { SwarmEvent::OutgoingConnectionError { error: DialError::Aborted, .. 
@@ -2929,15 +2325,13 @@ mod tests { "/ip4/127.0.0.1/tcp/80".parse().unwrap(), TransportError::Other(io::Error::new( io::ErrorKind::Other, - Either::<_, Void>::Left(Either::::Right(UpgradeError::Apply( - MemoryTransportError::Unreachable, - ))), + MemoryTransportError::Unreachable, )), )]); let string = format!("{error}"); // Unfortunately, we have some "empty" errors that lead to multiple colons without text but that is the best we can do. - assert_eq!("Failed to negotiate transport protocol(s): [(/ip4/127.0.0.1/tcp/80: : Handshake failed: No listener on the given port.)]", string) + assert_eq!("Failed to negotiate transport protocol(s): [(/ip4/127.0.0.1/tcp/80: : No listener on the given port.)]", string) } } diff --git a/swarm/src/listen_opts.rs b/swarm/src/listen_opts.rs new file mode 100644 index 00000000000..9c4d69a6fa0 --- /dev/null +++ b/swarm/src/listen_opts.rs @@ -0,0 +1,33 @@ +use crate::ListenerId; +use libp2p_core::Multiaddr; + +#[derive(Debug)] +pub struct ListenOpts { + id: ListenerId, + address: Multiaddr, +} + +impl ListenOpts { + pub fn new(address: Multiaddr) -> ListenOpts { + ListenOpts { + id: ListenerId::next(), + address, + } + } + + /// Get the [`ListenerId`] of this listen attempt + pub fn listener_id(&self) -> ListenerId { + self.id + } + + /// Get the [`Multiaddr`] that is being listened on + pub fn address(&self) -> &Multiaddr { + &self.address + } +} + +impl From for ListenOpts { + fn from(addr: Multiaddr) -> Self { + ListenOpts::new(addr) + } +} diff --git a/swarm/src/registry.rs b/swarm/src/registry.rs deleted file mode 100644 index 7f8225a6a25..00000000000 --- a/swarm/src/registry.rs +++ /dev/null @@ -1,504 +0,0 @@ -// Copyright 2019 Parity Technologies (UK) Ltd. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - -use libp2p_core::Multiaddr; -use smallvec::SmallVec; -use std::ops::{Add, Sub}; -use std::{cmp::Ordering, collections::VecDeque, num::NonZeroUsize}; - -/// A ranked collection of [`Multiaddr`] values. -/// -/// Every address has an associated [score](`AddressScore`) and iterating -/// over the addresses will return them in order from highest to lowest score. -/// -/// In addition to the currently held addresses and their score, the collection -/// keeps track of a limited history of the most-recently added addresses. -/// This history determines how address scores are reduced over time as old -/// scores expire in the context of new addresses being added: -/// -/// * An address's score is increased by a given amount whenever it is -/// [(re-)added](Addresses::add) to the collection. 
-/// * An address's score is decreased by the same amount used when it -/// was added when the least-recently seen addition is (as per the -/// limited history) for this address in the context of [`Addresses::add`]. -/// * If an address's score reaches 0 in the context of [`Addresses::add`], -/// it is removed from the collection. -/// -#[derive(Debug, Clone)] -pub(crate) struct Addresses { - /// The ranked sequence of addresses, from highest to lowest score. - /// - /// By design, the number of finitely scored addresses stored here is - /// never larger (but may be smaller) than the number of historic `reports` - /// at any time. - registry: SmallVec<[AddressRecord; 8]>, - /// The configured limit of the `reports` history of added addresses, - /// and thus also of the size of the `registry` w.r.t. finitely scored - /// addresses. - limit: NonZeroUsize, - /// The limited history of added addresses. If the queue reaches the `limit`, - /// the first record, i.e. the least-recently added, is removed in the - /// context of [`Addresses::add`] and the corresponding record in the - /// `registry` has its score reduced accordingly. - reports: VecDeque, -} - -/// An record in a prioritised list of addresses. -#[derive(Clone, Debug, PartialEq, Eq)] -#[non_exhaustive] -pub struct AddressRecord { - pub addr: Multiaddr, - pub score: AddressScore, -} - -/// A report tracked for a finitely scored address. -#[derive(Debug, Clone)] -struct Report { - addr: Multiaddr, - score: u32, -} - -impl AddressRecord { - fn new(addr: Multiaddr, score: AddressScore) -> Self { - AddressRecord { addr, score } - } -} - -/// The "score" of an address w.r.t. an ordered collection of addresses. -/// -/// A score is a measure of the trusworthyness of a particular -/// observation of an address. The same address may be repeatedly -/// reported with the same or differing scores. -#[derive(PartialEq, Eq, Debug, Clone, Copy, Hash)] -pub enum AddressScore { - /// The score is "infinite", i.e. 
an address with this score is never - /// purged from the associated address records and remains sorted at - /// the beginning (possibly with other `Infinite`ly scored addresses). - Infinite, - /// The score is finite, i.e. an address with this score has - /// its score increased and decreased as per the frequency of - /// reports (i.e. additions) of the same address relative to - /// the reports of other addresses. - Finite(u32), -} - -impl AddressScore { - fn is_zero(&self) -> bool { - &AddressScore::Finite(0) == self - } -} - -impl PartialOrd for AddressScore { - fn partial_cmp(&self, other: &AddressScore) -> Option { - Some(self.cmp(other)) - } -} - -impl Ord for AddressScore { - fn cmp(&self, other: &AddressScore) -> Ordering { - // Semantics of cardinal numbers with a single infinite cardinal. - match (self, other) { - (AddressScore::Infinite, AddressScore::Infinite) => Ordering::Equal, - (AddressScore::Infinite, AddressScore::Finite(_)) => Ordering::Greater, - (AddressScore::Finite(_), AddressScore::Infinite) => Ordering::Less, - (AddressScore::Finite(a), AddressScore::Finite(b)) => a.cmp(b), - } - } -} - -impl Add for AddressScore { - type Output = AddressScore; - - fn add(self, rhs: AddressScore) -> Self::Output { - // Semantics of cardinal numbers with a single infinite cardinal. - match (self, rhs) { - (AddressScore::Infinite, AddressScore::Infinite) => AddressScore::Infinite, - (AddressScore::Infinite, AddressScore::Finite(_)) => AddressScore::Infinite, - (AddressScore::Finite(_), AddressScore::Infinite) => AddressScore::Infinite, - (AddressScore::Finite(a), AddressScore::Finite(b)) => { - AddressScore::Finite(a.saturating_add(b)) - } - } - } -} - -impl Sub for AddressScore { - type Output = AddressScore; - - fn sub(self, rhs: u32) -> Self::Output { - // Semantics of cardinal numbers with a single infinite cardinal. 
- match self { - AddressScore::Infinite => AddressScore::Infinite, - AddressScore::Finite(score) => AddressScore::Finite(score.saturating_sub(rhs)), - } - } -} - -impl Default for Addresses { - fn default() -> Self { - Addresses::new(NonZeroUsize::new(200).expect("200 > 0")) - } -} - -/// The result of adding an address to an ordered list of -/// addresses with associated scores. -pub enum AddAddressResult { - Inserted { - expired: SmallVec<[AddressRecord; 8]>, - }, - Updated { - expired: SmallVec<[AddressRecord; 8]>, - }, -} - -impl Addresses { - /// Create a new ranked address collection with the given size limit - /// for [finitely scored](AddressScore::Finite) addresses. - pub(crate) fn new(limit: NonZeroUsize) -> Self { - Addresses { - registry: SmallVec::new(), - limit, - reports: VecDeque::with_capacity(limit.get()), - } - } - - /// Add a [`Multiaddr`] to the collection. - /// - /// If the given address already exists in the collection, - /// the given score is added to the current score of the address. - /// - /// If the collection has already observed the configured - /// number of address additions, the least-recently added address - /// as per this limited history has its score reduced by the amount - /// used in this prior report, with removal from the collection - /// occurring when the score drops to 0. - pub(crate) fn add(&mut self, addr: Multiaddr, score: AddressScore) -> AddAddressResult { - // If enough reports (i.e. address additions) occurred, reduce - // the score of the least-recently added address. - if self.reports.len() == self.limit.get() { - let old_report = self.reports.pop_front().expect("len = limit > 0"); - // If the address is still in the collection, decrease its score. - if let Some(record) = self.registry.iter_mut().find(|r| r.addr == old_report.addr) { - record.score = record.score - old_report.score; - isort(&mut self.registry); - } - } - - // Remove addresses that have a score of 0. 
- let mut expired = SmallVec::new(); - while self - .registry - .last() - .map(|e| e.score.is_zero()) - .unwrap_or(false) - { - if let Some(addr) = self.registry.pop() { - expired.push(addr); - } - } - - // If the address score is finite, remember this report. - if let AddressScore::Finite(score) = score { - self.reports.push_back(Report { - addr: addr.clone(), - score, - }); - } - - // If the address is already in the collection, increase its score. - for r in &mut self.registry { - if r.addr == addr { - r.score = r.score + score; - isort(&mut self.registry); - return AddAddressResult::Updated { expired }; - } - } - - // It is a new record. - self.registry.push(AddressRecord::new(addr, score)); - AddAddressResult::Inserted { expired } - } - - /// Explicitly remove an address from the collection. - /// - /// Returns `true` if the address existed in the collection - /// and was thus removed, false otherwise. - pub(crate) fn remove(&mut self, addr: &Multiaddr) -> bool { - if let Some(pos) = self.registry.iter().position(|r| &r.addr == addr) { - self.registry.remove(pos); - true - } else { - false - } - } - - /// Return an iterator over all [`Multiaddr`] values. - /// - /// The iteration is ordered by descending score. - pub(crate) fn iter(&self) -> AddressIter<'_> { - AddressIter { - items: &self.registry, - offset: 0, - } - } - - /// Return an iterator over all [`Multiaddr`] values. - /// - /// The iteration is ordered by descending score. - pub(crate) fn into_iter(self) -> AddressIntoIter { - AddressIntoIter { - items: self.registry, - } - } -} - -/// An iterator over [`Multiaddr`] values. 
-#[derive(Clone)] -pub(crate) struct AddressIter<'a> { - items: &'a [AddressRecord], - offset: usize, -} - -impl<'a> Iterator for AddressIter<'a> { - type Item = &'a AddressRecord; - - fn next(&mut self) -> Option { - if self.offset == self.items.len() { - return None; - } - let item = &self.items[self.offset]; - self.offset += 1; - Some(item) - } - - fn size_hint(&self) -> (usize, Option) { - let n = self.items.len() - self.offset; - (n, Some(n)) - } -} - -impl<'a> ExactSizeIterator for AddressIter<'a> {} - -/// An iterator over [`Multiaddr`] values. -#[derive(Clone)] -pub struct AddressIntoIter { - items: SmallVec<[AddressRecord; 8]>, -} - -impl Iterator for AddressIntoIter { - type Item = AddressRecord; - - fn next(&mut self) -> Option { - if !self.items.is_empty() { - Some(self.items.remove(0)) - } else { - None - } - } - - fn size_hint(&self) -> (usize, Option) { - let n = self.items.len(); - (n, Some(n)) - } -} - -impl ExactSizeIterator for AddressIntoIter {} - -// Reverse insertion sort. 
-fn isort(xs: &mut [AddressRecord]) { - for i in 1..xs.len() { - for j in (1..=i).rev() { - if xs[j].score <= xs[j - 1].score { - break; - } - xs.swap(j, j - 1) - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use libp2p_core::multiaddr::{Multiaddr, Protocol}; - use quickcheck::*; - use std::num::{NonZeroU8, NonZeroUsize}; - - impl Arbitrary for AddressScore { - fn arbitrary(g: &mut Gen) -> AddressScore { - if g.gen_range(0..10u8) == 0 { - // ~10% "Infinitely" scored addresses - AddressScore::Infinite - } else { - AddressScore::Finite(Arbitrary::arbitrary(g)) - } - } - } - - impl Arbitrary for AddressRecord { - fn arbitrary(g: &mut Gen) -> Self { - let addr = Protocol::Tcp(g.gen_range(0..256)).into(); - let score = AddressScore::arbitrary(g); - AddressRecord::new(addr, score) - } - } - - #[test] - fn isort_sorts() { - fn property(xs: Vec) { - let mut xs = xs - .into_iter() - .map(|score| AddressRecord::new(Multiaddr::empty(), score)) - .collect::>(); - - isort(&mut xs); - - for i in 1..xs.len() { - assert!(xs[i - 1].score >= xs[i].score) - } - } - - quickcheck(property as fn(_)); - } - - #[test] - fn score_retention() { - fn prop(first: AddressRecord, other: AddressRecord) -> TestResult { - if first.addr == other.addr || first.score.is_zero() { - return TestResult::discard(); - } - - let mut addresses = Addresses::default(); - - // Add the first address. - addresses.add(first.addr.clone(), first.score); - assert!(addresses.iter().any(|a| a.addr == first.addr)); - - // Add another address so often that the initial report of - // the first address may be purged and, since it was the - // only report, the address removed. - for _ in 0..addresses.limit.get() + 1 { - addresses.add(other.addr.clone(), other.score); - } - - let exists = addresses.iter().any(|a| a.addr == first.addr); - - match (first.score, other.score) { - // Only finite scores push out other finite scores. 
- (AddressScore::Finite(_), AddressScore::Finite(_)) => assert!(!exists), - _ => assert!(exists), - } - - TestResult::passed() - } - - quickcheck(prop as fn(_, _) -> _); - } - - #[test] - fn score_retention_finite_0() { - let first = { - let addr = Protocol::Tcp(42).into(); - let score = AddressScore::Finite(0); - AddressRecord::new(addr, score) - }; - let other = { - let addr = Protocol::Udp(42).into(); - let score = AddressScore::Finite(42); - AddressRecord::new(addr, score) - }; - - let mut addresses = Addresses::default(); - - // Add the first address. - addresses.add(first.addr.clone(), first.score); - assert!(addresses.iter().any(|a| a.addr == first.addr)); - - // Add another address so the first will address be purged, - // because its score is finite(0) - addresses.add(other.addr.clone(), other.score); - - assert!(addresses.iter().any(|a| a.addr == other.addr)); - assert!(!addresses.iter().any(|a| a.addr == first.addr)); - } - - #[test] - fn finitely_scored_address_limit() { - fn prop(reports: Vec, limit: NonZeroU8) { - let mut addresses = Addresses::new(limit.into()); - - // Add all reports. - for r in reports { - addresses.add(r.addr, r.score); - } - - // Count the finitely scored addresses. - let num_finite = addresses - .iter() - .filter(|r| { - matches!( - r, - AddressRecord { - score: AddressScore::Finite(_), - .. - } - ) - }) - .count(); - - // Check against the limit. - assert!(num_finite <= limit.get() as usize); - } - - quickcheck(prop as fn(_, _)); - } - - #[test] - fn record_score_sum() { - fn prop(records: Vec) -> bool { - // Make sure the address collection can hold all reports. - let n = std::cmp::max(records.len(), 1); - let mut addresses = Addresses::new(NonZeroUsize::new(n).unwrap()); - - // Add all address reports to the collection. - for r in records.iter() { - addresses.add(r.addr.clone(), r.score); - } - - // Check that each address in the registry has the expected score. 
- for r in &addresses.registry { - let expected_score = records.iter().fold(None::, |sum, rec| { - if rec.addr == r.addr { - sum.map_or(Some(rec.score), |s| Some(s + rec.score)) - } else { - sum - } - }); - - if Some(r.score) != expected_score { - return false; - } - } - - true - } - - quickcheck(prop as fn(_) -> _) - } -} diff --git a/swarm/src/stream.rs b/swarm/src/stream.rs new file mode 100644 index 00000000000..871352f3c6a --- /dev/null +++ b/swarm/src/stream.rs @@ -0,0 +1,98 @@ +use futures::{AsyncRead, AsyncWrite}; +use libp2p_core::muxing::SubstreamBox; +use libp2p_core::Negotiated; +use std::{ + io::{IoSlice, IoSliceMut}, + pin::Pin, + sync::Arc, + task::{Context, Poll}, +}; + +/// Counter for the number of active streams on a connection. +#[derive(Debug, Clone)] +pub(crate) struct ActiveStreamCounter(Arc<()>); + +impl ActiveStreamCounter { + pub(crate) fn default() -> Self { + Self(Arc::new(())) + } + + pub(crate) fn has_no_active_streams(&self) -> bool { + self.num_alive_streams() == 1 + } + + fn num_alive_streams(&self) -> usize { + Arc::strong_count(&self.0) + } +} + +#[derive(Debug)] +pub struct Stream { + stream: Negotiated, + counter: Option, +} + +impl Stream { + pub(crate) fn new(stream: Negotiated, counter: ActiveStreamCounter) -> Self { + Self { + stream, + counter: Some(counter), + } + } + + /// Ignore this stream in the [Swarm](crate::Swarm)'s connection-keep-alive algorithm. + /// + /// By default, any active stream keeps a connection alive. For most protocols, + /// this is a good default as it ensures that the protocol is completed before + /// a connection is shut down. + /// Some protocols like libp2p's [ping](https://github.com/libp2p/specs/blob/master/ping/ping.md) + /// for example never complete and are of an auxiliary nature. + /// These protocols should opt-out of the keep alive algorithm using this method. 
+ pub fn ignore_for_keep_alive(&mut self) { + self.counter.take(); + } +} + +impl AsyncRead for Stream { + fn poll_read( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &mut [u8], + ) -> Poll> { + Pin::new(&mut self.get_mut().stream).poll_read(cx, buf) + } + + fn poll_read_vectored( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + bufs: &mut [IoSliceMut<'_>], + ) -> Poll> { + Pin::new(&mut self.get_mut().stream).poll_read_vectored(cx, bufs) + } +} + +impl AsyncWrite for Stream { + fn poll_write( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &[u8], + ) -> Poll> { + Pin::new(&mut self.get_mut().stream).poll_write(cx, buf) + } + + fn poll_write_vectored( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + bufs: &[IoSlice<'_>], + ) -> Poll> { + Pin::new(&mut self.get_mut().stream).poll_write_vectored(cx, bufs) + } + + fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + Pin::new(&mut self.get_mut().stream).poll_flush(cx) + } + + fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + Pin::new(&mut self.get_mut().stream).poll_close(cx) + } +} diff --git a/swarm/src/stream_protocol.rs b/swarm/src/stream_protocol.rs index bce0ec51279..f746429a3d7 100644 --- a/swarm/src/stream_protocol.rs +++ b/swarm/src/stream_protocol.rs @@ -7,7 +7,7 @@ use std::sync::Arc; /// /// libp2p nodes use stream protocols to negotiate what to do with a newly opened stream. /// Stream protocols are string-based and must start with a forward slash: `/`. 
-#[derive(Debug, Clone, Eq)] +#[derive(Clone, Eq)] pub struct StreamProtocol { inner: Either<&'static str, Arc>, } @@ -50,6 +50,12 @@ impl AsRef for StreamProtocol { } } +impl fmt::Debug for StreamProtocol { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + either::for_both!(&self.inner, s => s.fmt(f)) + } +} + impl fmt::Display for StreamProtocol { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { self.inner.fmt(f) @@ -102,3 +108,25 @@ impl fmt::Display for InvalidProtocol { } impl std::error::Error for InvalidProtocol {} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn stream_protocol_print() { + let protocol = StreamProtocol::new("/foo/bar/1.0.0"); + + let debug = format!("{protocol:?}"); + let display = format!("{protocol}"); + + assert_eq!( + debug, r#""/foo/bar/1.0.0""#, + "protocol to debug print as string with quotes" + ); + assert_eq!( + display, "/foo/bar/1.0.0", + "protocol to display print as string without quotes" + ); + } +} diff --git a/swarm/src/test.rs b/swarm/src/test.rs index 90738b6a6cf..547277550bb 100644 --- a/swarm/src/test.rs +++ b/swarm/src/test.rs @@ -19,12 +19,12 @@ // DEALINGS IN THE SOFTWARE. 
use crate::behaviour::{ - ConnectionClosed, ConnectionEstablished, DialFailure, ExpiredExternalAddr, ExpiredListenAddr, - FromSwarm, ListenerClosed, ListenerError, NewExternalAddr, NewListenAddr, NewListener, + ConnectionClosed, ConnectionEstablished, DialFailure, ExpiredListenAddr, ExternalAddrExpired, + FromSwarm, ListenerClosed, ListenerError, NewExternalAddrCandidate, NewListenAddr, NewListener, }; use crate::{ - ConnectionDenied, ConnectionHandler, ConnectionId, NetworkBehaviour, PollParameters, THandler, - THandlerInEvent, THandlerOutEvent, ToSwarm, + ConnectionDenied, ConnectionHandler, ConnectionId, NetworkBehaviour, THandler, THandlerInEvent, + THandlerOutEvent, ToSwarm, }; use libp2p_core::{multiaddr::Multiaddr, transport::ListenerId, ConnectedPoint, Endpoint}; use libp2p_identity::PeerId; @@ -37,24 +37,24 @@ use std::task::{Context, Poll}; pub(crate) struct MockBehaviour where THandler: ConnectionHandler + Clone, - THandler::OutEvent: Clone, + THandler::ToBehaviour: Clone, TOutEvent: Send + 'static, { /// The prototype protocols handler that is cloned for every - /// invocation of `new_handler`. + /// invocation of [`NetworkBehaviour::handle_established_inbound_connection`] and [`NetworkBehaviour::handle_established_outbound_connection`] pub(crate) handler_proto: THandler, - /// The addresses to return from `addresses_of_peer`. + /// The addresses to return from [`NetworkBehaviour::handle_established_outbound_connection`]. pub(crate) addresses: HashMap>, /// The next action to return from `poll`. /// /// An action is only returned once. 
- pub(crate) next_action: Option>, + pub(crate) next_action: Option>, } impl MockBehaviour where THandler: ConnectionHandler + Clone, - THandler::OutEvent: Clone, + THandler::ToBehaviour: Clone, TOutEvent: Send + 'static, { pub(crate) fn new(handler_proto: THandler) -> Self { @@ -69,11 +69,11 @@ where impl NetworkBehaviour for MockBehaviour where THandler: ConnectionHandler + Clone, - THandler::OutEvent: Clone, + THandler::ToBehaviour: Clone, TOutEvent: Send + 'static, { type ConnectionHandler = THandler; - type OutEvent = TOutEvent; + type ToSwarm = TOutEvent; fn handle_established_inbound_connection( &mut self, @@ -110,30 +110,11 @@ where Ok(self.addresses.get(&p).map_or(Vec::new(), |v| v.clone())) } - fn poll( - &mut self, - _: &mut Context, - _: &mut impl PollParameters, - ) -> Poll>> { + fn poll(&mut self, _: &mut Context<'_>) -> Poll>> { self.next_action.take().map_or(Poll::Pending, Poll::Ready) } - fn on_swarm_event(&mut self, event: FromSwarm) { - match event { - FromSwarm::ConnectionEstablished(_) - | FromSwarm::ConnectionClosed(_) - | FromSwarm::AddressChange(_) - | FromSwarm::DialFailure(_) - | FromSwarm::ListenFailure(_) - | FromSwarm::NewListener(_) - | FromSwarm::NewListenAddr(_) - | FromSwarm::ExpiredListenAddr(_) - | FromSwarm::ListenerError(_) - | FromSwarm::ListenerClosed(_) - | FromSwarm::NewExternalAddr(_) - | FromSwarm::ExpiredExternalAddr(_) => {} - } - } + fn on_swarm_event(&mut self, _event: FromSwarm) {} fn on_connection_handler_event( &mut self, @@ -235,29 +216,6 @@ where .count() } - /// Checks that when the expected number of closed connection notifications are received, a - /// given number of expected disconnections have been received as well. - /// - /// Returns if the first condition is met. 
- pub(crate) fn assert_disconnected( - &self, - expected_closed_connections: usize, - expected_disconnections: usize, - ) -> bool { - if self.on_connection_closed.len() == expected_closed_connections { - assert_eq!( - self.on_connection_closed - .iter() - .filter(|(.., remaining_established)| { *remaining_established == 0 }) - .count(), - expected_disconnections - ); - return true; - } - - false - } - /// Checks that when the expected number of established connection notifications are received, /// a given number of expected connections have been received as well. /// @@ -342,9 +300,8 @@ where peer_id, connection_id, endpoint, - handler, remaining_established, - }: ConnectionClosed<::ConnectionHandler>, + }: ConnectionClosed, ) { let mut other_closed_connections = self .on_connection_established @@ -392,7 +349,6 @@ where peer_id, connection_id, endpoint, - handler, remaining_established, })); } @@ -404,7 +360,7 @@ where THandlerOutEvent: Clone, { type ConnectionHandler = TInner::ConnectionHandler; - type OutEvent = TInner::OutEvent; + type ToSwarm = TInner::ToSwarm; fn handle_pending_inbound_connection( &mut self, @@ -480,7 +436,9 @@ where .handle_established_outbound_connection(connection_id, peer, addr, role_override) } - fn on_swarm_event(&mut self, event: FromSwarm) { + fn on_swarm_event(&mut self, event: FromSwarm) { + self.inner.on_swarm_event(event); + match event { FromSwarm::ConnectionEstablished(connection_established) => { self.on_connection_established(connection_established) @@ -488,66 +446,33 @@ where FromSwarm::ConnectionClosed(connection_closed) => { self.on_connection_closed(connection_closed) } - FromSwarm::DialFailure(DialFailure { - peer_id, - connection_id, - error, - }) => { + FromSwarm::DialFailure(DialFailure { peer_id, .. 
}) => { self.on_dial_failure.push(peer_id); - self.inner - .on_swarm_event(FromSwarm::DialFailure(DialFailure { - peer_id, - connection_id, - error, - })); } FromSwarm::NewListener(NewListener { listener_id }) => { self.on_new_listener.push(listener_id); - self.inner - .on_swarm_event(FromSwarm::NewListener(NewListener { listener_id })); } FromSwarm::NewListenAddr(NewListenAddr { listener_id, addr }) => { self.on_new_listen_addr.push((listener_id, addr.clone())); - self.inner - .on_swarm_event(FromSwarm::NewListenAddr(NewListenAddr { - listener_id, - addr, - })); } FromSwarm::ExpiredListenAddr(ExpiredListenAddr { listener_id, addr }) => { self.on_expired_listen_addr .push((listener_id, addr.clone())); - self.inner - .on_swarm_event(FromSwarm::ExpiredListenAddr(ExpiredListenAddr { - listener_id, - addr, - })); } - FromSwarm::NewExternalAddr(NewExternalAddr { addr }) => { + FromSwarm::NewExternalAddrCandidate(NewExternalAddrCandidate { addr }) => { self.on_new_external_addr.push(addr.clone()); - self.inner - .on_swarm_event(FromSwarm::NewExternalAddr(NewExternalAddr { addr })); } - FromSwarm::ExpiredExternalAddr(ExpiredExternalAddr { addr }) => { + FromSwarm::ExternalAddrExpired(ExternalAddrExpired { addr }) => { self.on_expired_external_addr.push(addr.clone()); - self.inner - .on_swarm_event(FromSwarm::ExpiredExternalAddr(ExpiredExternalAddr { addr })); } - FromSwarm::ListenerError(ListenerError { listener_id, err }) => { + FromSwarm::ListenerError(ListenerError { listener_id, .. 
}) => { self.on_listener_error.push(listener_id); - self.inner - .on_swarm_event(FromSwarm::ListenerError(ListenerError { listener_id, err })); } FromSwarm::ListenerClosed(ListenerClosed { listener_id, reason, }) => { self.on_listener_closed.push((listener_id, reason.is_ok())); - self.inner - .on_swarm_event(FromSwarm::ListenerClosed(ListenerClosed { - listener_id, - reason, - })); } _ => {} } @@ -579,10 +504,9 @@ where fn poll( &mut self, - cx: &mut Context, - args: &mut impl PollParameters, - ) -> Poll>> { + cx: &mut Context<'_>, + ) -> Poll>> { self.poll += 1; - self.inner.poll(cx, args) + self.inner.poll(cx) } } diff --git a/swarm/src/upgrade.rs b/swarm/src/upgrade.rs index b584dfae9fd..53b627458c9 100644 --- a/swarm/src/upgrade.rs +++ b/swarm/src/upgrade.rs @@ -18,7 +18,7 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use crate::NegotiatedSubstream; +use crate::Stream; use futures::prelude::*; use libp2p_core::upgrade; @@ -66,12 +66,12 @@ pub trait OutboundUpgradeSend: UpgradeInfoSend { type Future: Future> + Send + 'static; /// Equivalent to [`OutboundUpgrade::upgrade_outbound`](upgrade::OutboundUpgrade::upgrade_outbound). 
- fn upgrade_outbound(self, socket: NegotiatedSubstream, info: Self::Info) -> Self::Future; + fn upgrade_outbound(self, socket: Stream, info: Self::Info) -> Self::Future; } impl OutboundUpgradeSend for T where - T: upgrade::OutboundUpgrade + UpgradeInfoSend, + T: upgrade::OutboundUpgrade + UpgradeInfoSend, TInfo: AsRef + Clone + Send + 'static, T::Output: Send + 'static, T::Error: Send + 'static, @@ -81,7 +81,7 @@ where type Error = T::Error; type Future = T::Future; - fn upgrade_outbound(self, socket: NegotiatedSubstream, info: TInfo) -> Self::Future { + fn upgrade_outbound(self, socket: Stream, info: TInfo) -> Self::Future { upgrade::OutboundUpgrade::upgrade_outbound(self, socket, info) } } @@ -100,12 +100,12 @@ pub trait InboundUpgradeSend: UpgradeInfoSend { type Future: Future> + Send + 'static; /// Equivalent to [`InboundUpgrade::upgrade_inbound`](upgrade::InboundUpgrade::upgrade_inbound). - fn upgrade_inbound(self, socket: NegotiatedSubstream, info: Self::Info) -> Self::Future; + fn upgrade_inbound(self, socket: Stream, info: Self::Info) -> Self::Future; } impl InboundUpgradeSend for T where - T: upgrade::InboundUpgrade + UpgradeInfoSend, + T: upgrade::InboundUpgrade + UpgradeInfoSend, TInfo: AsRef + Clone + Send + 'static, T::Output: Send + 'static, T::Error: Send + 'static, @@ -115,7 +115,7 @@ where type Error = T::Error; type Future = T::Future; - fn upgrade_inbound(self, socket: NegotiatedSubstream, info: TInfo) -> Self::Future { + fn upgrade_inbound(self, socket: Stream, info: TInfo) -> Self::Future { upgrade::InboundUpgrade::upgrade_inbound(self, socket, info) } } @@ -137,22 +137,22 @@ impl upgrade::UpgradeInfo for SendWrapper { } } -impl upgrade::OutboundUpgrade for SendWrapper { +impl upgrade::OutboundUpgrade for SendWrapper { type Output = T::Output; type Error = T::Error; type Future = T::Future; - fn upgrade_outbound(self, socket: NegotiatedSubstream, info: T::Info) -> Self::Future { + fn upgrade_outbound(self, socket: Stream, info: T::Info) -> 
Self::Future { OutboundUpgradeSend::upgrade_outbound(self.0, socket, info) } } -impl upgrade::InboundUpgrade for SendWrapper { +impl upgrade::InboundUpgrade for SendWrapper { type Output = T::Output; type Error = T::Error; type Future = T::Future; - fn upgrade_inbound(self, socket: NegotiatedSubstream, info: T::Info) -> Self::Future { + fn upgrade_inbound(self, socket: Stream, info: T::Info) -> Self::Future { InboundUpgradeSend::upgrade_inbound(self.0, socket, info) } } diff --git a/swarm/tests/connection_close.rs b/swarm/tests/connection_close.rs new file mode 100644 index 00000000000..4efe8d17e49 --- /dev/null +++ b/swarm/tests/connection_close.rs @@ -0,0 +1,146 @@ +use libp2p_core::upgrade::DeniedUpgrade; +use libp2p_core::{Endpoint, Multiaddr}; +use libp2p_identity::PeerId; +use libp2p_swarm::handler::ConnectionEvent; +use libp2p_swarm::{ + ConnectionDenied, ConnectionHandler, ConnectionHandlerEvent, ConnectionId, FromSwarm, + NetworkBehaviour, SubstreamProtocol, Swarm, SwarmEvent, THandler, THandlerInEvent, + THandlerOutEvent, ToSwarm, +}; +use libp2p_swarm_test::SwarmExt; +use std::task::{Context, Poll}; +use void::Void; + +#[async_std::test] +async fn sends_remaining_events_to_behaviour_on_connection_close() { + let mut swarm1 = Swarm::new_ephemeral(|_| Behaviour::new(3)); + let mut swarm2 = Swarm::new_ephemeral(|_| Behaviour::new(3)); + + swarm2.listen().with_memory_addr_external().await; + swarm1.connect(&mut swarm2).await; + + swarm1.disconnect_peer_id(*swarm2.local_peer_id()).unwrap(); + + match libp2p_swarm_test::drive(&mut swarm1, &mut swarm2).await { + ([SwarmEvent::ConnectionClosed { .. }], [SwarmEvent::ConnectionClosed { .. 
}]) => { + assert_eq!(swarm1.behaviour().state, 0); + assert_eq!(swarm2.behaviour().state, 0); + } + (e1, e2) => panic!("Unexpected events: {:?} {:?}", e1, e2), + } +} + +struct HandlerWithState { + precious_state: u64, +} + +struct Behaviour { + state: u64, +} + +impl Behaviour { + fn new(state: u64) -> Self { + Behaviour { state } + } +} + +impl NetworkBehaviour for Behaviour { + type ConnectionHandler = HandlerWithState; + type ToSwarm = (); + + fn handle_established_inbound_connection( + &mut self, + _: ConnectionId, + _: PeerId, + _: &Multiaddr, + _: &Multiaddr, + ) -> Result, ConnectionDenied> { + Ok(HandlerWithState { + precious_state: self.state, + }) + } + + fn handle_established_outbound_connection( + &mut self, + _: ConnectionId, + _: PeerId, + _: &Multiaddr, + _: Endpoint, + ) -> Result, ConnectionDenied> { + Ok(HandlerWithState { + precious_state: self.state, + }) + } + + fn on_swarm_event(&mut self, event: FromSwarm) { + if let FromSwarm::ConnectionClosed(_) = event { + assert_eq!(self.state, 0); + } + } + + fn on_connection_handler_event( + &mut self, + _peer_id: PeerId, + _connection_id: ConnectionId, + event: THandlerOutEvent, + ) { + assert_eq!(self.state, event); + self.state -= 1; + } + + fn poll(&mut self, _: &mut Context<'_>) -> Poll>> { + Poll::Pending + } +} + +impl ConnectionHandler for HandlerWithState { + type FromBehaviour = Void; + type ToBehaviour = u64; + type InboundProtocol = DeniedUpgrade; + type OutboundProtocol = DeniedUpgrade; + type InboundOpenInfo = (); + type OutboundOpenInfo = (); + + fn listen_protocol(&self) -> SubstreamProtocol { + SubstreamProtocol::new(DeniedUpgrade, ()) + } + + fn connection_keep_alive(&self) -> bool { + true + } + + fn poll( + &mut self, + _: &mut Context<'_>, + ) -> Poll< + ConnectionHandlerEvent, + > { + Poll::Pending + } + + fn poll_close(&mut self, _: &mut Context<'_>) -> Poll> { + if self.precious_state > 0 { + let state = self.precious_state; + self.precious_state -= 1; + + return 
Poll::Ready(Some(state)); + } + + Poll::Ready(None) + } + + fn on_behaviour_event(&mut self, event: Self::FromBehaviour) { + void::unreachable(event) + } + + fn on_connection_event( + &mut self, + _: ConnectionEvent< + Self::InboundProtocol, + Self::OutboundProtocol, + Self::InboundOpenInfo, + Self::OutboundOpenInfo, + >, + ) { + } +} diff --git a/swarm/tests/listener.rs b/swarm/tests/listener.rs new file mode 100644 index 00000000000..8d22acc90e2 --- /dev/null +++ b/swarm/tests/listener.rs @@ -0,0 +1,139 @@ +use std::{ + collections::{HashSet, VecDeque}, + task::{Context, Poll}, +}; + +use libp2p_core::{multiaddr::Protocol, transport::ListenerId, Endpoint, Multiaddr}; +use libp2p_identity::PeerId; +use libp2p_swarm::{ + derive_prelude::NewListener, dummy, ConnectionDenied, ConnectionId, FromSwarm, ListenOpts, + ListenerClosed, ListenerError, NetworkBehaviour, NewListenAddr, Swarm, SwarmEvent, THandler, + THandlerInEvent, THandlerOutEvent, ToSwarm, +}; + +use libp2p_swarm_test::SwarmExt; + +#[async_std::test] +async fn behaviour_listener() { + let mut swarm = Swarm::new_ephemeral(|_| Behaviour::default()); + let addr: Multiaddr = Protocol::Memory(0).into(); + let id = swarm.behaviour_mut().listen(addr.clone()); + + let address = swarm + .wait(|e| match e { + SwarmEvent::NewListenAddr { + listener_id, + address, + } => { + assert_eq!(listener_id, id); + Some(address) + } + _ => None, + }) + .await; + + swarm.behaviour_mut().stop_listening(id); + + swarm + .wait(|e| match e { + SwarmEvent::ListenerClosed { + listener_id, + addresses, + reason, + } => { + assert_eq!(listener_id, id); + assert!(addresses.contains(&address)); + assert!(reason.is_ok()); + Some(()) + } + _ => None, + }) + .await; +} + +#[derive(Default)] +struct Behaviour { + events: VecDeque::ToSwarm, THandlerInEvent>>, + listeners: HashSet, +} + +impl Behaviour { + pub(crate) fn listen(&mut self, addr: Multiaddr) -> ListenerId { + let opts = ListenOpts::new(addr); + let listener_id = opts.listener_id(); 
+ assert!(!self.listeners.contains(&listener_id)); + self.events.push_back(ToSwarm::ListenOn { opts }); + self.listeners.insert(listener_id); + + listener_id + } + + pub(crate) fn stop_listening(&mut self, id: ListenerId) { + self.events.push_back(ToSwarm::RemoveListener { id }); + } +} + +impl NetworkBehaviour for Behaviour { + type ConnectionHandler = dummy::ConnectionHandler; + type ToSwarm = void::Void; + + fn handle_established_inbound_connection( + &mut self, + _: ConnectionId, + _: PeerId, + _: &Multiaddr, + _: &Multiaddr, + ) -> Result, ConnectionDenied> { + Ok(dummy::ConnectionHandler) + } + + fn handle_established_outbound_connection( + &mut self, + _: ConnectionId, + _: PeerId, + _: &Multiaddr, + _: Endpoint, + ) -> Result, ConnectionDenied> { + Ok(dummy::ConnectionHandler) + } + + fn on_connection_handler_event( + &mut self, + _: PeerId, + _: ConnectionId, + _: THandlerOutEvent, + ) { + } + + fn on_swarm_event(&mut self, event: FromSwarm) { + match event { + FromSwarm::NewListener(NewListener { listener_id }) => { + assert!(self.listeners.contains(&listener_id)); + } + FromSwarm::NewListenAddr(NewListenAddr { listener_id, .. 
}) => { + assert!(self.listeners.contains(&listener_id)); + } + FromSwarm::ListenerError(ListenerError { listener_id, err }) => { + panic!("Error for listener {listener_id:?}: {err}"); + } + FromSwarm::ListenerClosed(ListenerClosed { + listener_id, + reason, + }) => { + assert!(self.listeners.contains(&listener_id)); + assert!(reason.is_ok()); + self.listeners.remove(&listener_id); + assert!(!self.listeners.contains(&listener_id)); + } + _ => {} + } + } + + fn poll(&mut self, _: &mut Context<'_>) -> Poll>> { + if let Some(event) = self.events.pop_front() { + return Poll::Ready(event); + } + + Poll::Pending + } +} diff --git a/swarm/tests/swarm_derive.rs b/swarm/tests/swarm_derive.rs index 87f99e35736..707abb03d6e 100644 --- a/swarm/tests/swarm_derive.rs +++ b/swarm/tests/swarm_derive.rs @@ -56,7 +56,7 @@ fn one_field() { clippy::used_underscore_binding )] fn foo() { - let _out_event: ::OutEvent = unimplemented!(); + let _out_event: ::ToSwarm = unimplemented!(); match _out_event { FooEvent::Ping(ping::Event { .. }) => {} } @@ -80,7 +80,7 @@ fn two_fields() { clippy::used_underscore_binding )] fn foo() { - let _out_event: ::OutEvent = unimplemented!(); + let _out_event: ::ToSwarm = unimplemented!(); match _out_event { FooEvent::Ping(ping::Event { .. }) => {} FooEvent::Identify(event) => { @@ -98,7 +98,7 @@ fn three_fields() { struct Foo { ping: ping::Behaviour, identify: identify::Behaviour, - kad: libp2p_kad::Kademlia, + kad: libp2p_kad::Behaviour, } #[allow( @@ -108,14 +108,14 @@ fn three_fields() { clippy::used_underscore_binding )] fn foo() { - let _out_event: ::OutEvent = unimplemented!(); + let _out_event: ::ToSwarm = unimplemented!(); match _out_event { FooEvent::Ping(ping::Event { .. 
}) => {} FooEvent::Identify(event) => { let _: identify::Event = event; } FooEvent::Kad(event) => { - let _: libp2p_kad::KademliaEvent = event; + let _: libp2p_kad::Event = event; } } } @@ -125,7 +125,7 @@ fn three_fields() { fn custom_event() { #[allow(dead_code)] #[derive(NetworkBehaviour)] - #[behaviour(out_event = "MyEvent", prelude = "libp2p_swarm::derive_prelude")] + #[behaviour(to_swarm = "MyEvent", prelude = "libp2p_swarm::derive_prelude")] struct Foo { ping: ping::Behaviour, identify: identify::Behaviour, @@ -159,7 +159,7 @@ fn custom_event() { fn custom_event_mismatching_field_names() { #[allow(dead_code)] #[derive(NetworkBehaviour)] - #[behaviour(out_event = "MyEvent", prelude = "libp2p_swarm::derive_prelude")] + #[behaviour(to_swarm = "MyEvent", prelude = "libp2p_swarm::derive_prelude")] struct Foo { a: ping::Behaviour, b: identify::Behaviour, @@ -196,7 +196,7 @@ fn bound() { #[behaviour(prelude = "libp2p_swarm::derive_prelude")] struct Foo where - ::OutEvent: Debug, + ::ToSwarm: Debug, { ping: ping::Behaviour, bar: T, @@ -211,7 +211,7 @@ fn where_clause() { struct Foo where T: Copy + NetworkBehaviour, - ::OutEvent: Debug, + ::ToSwarm: Debug, { ping: ping::Behaviour, bar: T, @@ -241,7 +241,7 @@ fn nested_derives_with_import() { clippy::used_underscore_binding )] fn foo() { - let _out_event: ::OutEvent = unimplemented!(); + let _out_event: ::ToSwarm = unimplemented!(); match _out_event { BarEvent::Foo(FooEvent::Ping(ping::Event { .. 
})) => {} } @@ -271,7 +271,7 @@ fn custom_event_emit_event_through_poll() { #[allow(dead_code, clippy::large_enum_variant)] #[derive(NetworkBehaviour)] #[behaviour( - out_event = "BehaviourOutEvent", + to_swarm = "BehaviourOutEvent", prelude = "libp2p_swarm::derive_prelude" )] struct Foo { @@ -327,7 +327,7 @@ fn with_either() { #[derive(NetworkBehaviour)] #[behaviour(prelude = "libp2p_swarm::derive_prelude")] struct Foo { - kad: libp2p_kad::Kademlia, + kad: libp2p_kad::Behaviour, ping_or_identify: Either, } @@ -350,10 +350,7 @@ fn with_generics() { #[allow(dead_code)] fn foo() { require_net_behaviour::< - Foo< - libp2p_kad::Kademlia, - libp2p_ping::Behaviour, - >, + Foo, libp2p_ping::Behaviour>, >(); } } @@ -370,8 +367,92 @@ fn with_generics_mixed() { #[allow(dead_code)] fn foo() { - require_net_behaviour::>>( - ); + require_net_behaviour::>>(); + } +} + +#[test] +fn with_generics_constrained() { + use std::task::{Context, Poll}; + trait Mark {} + struct Marked; + impl Mark for Marked {} + + /// A struct with a generic constraint, for which we manually implement `NetworkBehaviour`. 
+ #[allow(dead_code)] + struct Bar { + a: A, + } + + impl NetworkBehaviour for Bar { + type ConnectionHandler = dummy::ConnectionHandler; + type ToSwarm = void::Void; + + fn handle_established_inbound_connection( + &mut self, + _: libp2p_swarm::ConnectionId, + _: libp2p_identity::PeerId, + _: &Multiaddr, + _: &Multiaddr, + ) -> Result, ConnectionDenied> { + Ok(dummy::ConnectionHandler) + } + + fn handle_established_outbound_connection( + &mut self, + _: libp2p_swarm::ConnectionId, + _: libp2p_identity::PeerId, + _: &Multiaddr, + _: Endpoint, + ) -> Result, ConnectionDenied> { + Ok(dummy::ConnectionHandler) + } + + fn on_swarm_event(&mut self, _event: FromSwarm) {} + + fn on_connection_handler_event( + &mut self, + _: libp2p_identity::PeerId, + _: libp2p_swarm::ConnectionId, + _: THandlerOutEvent, + ) { + } + + fn poll( + &mut self, + _: &mut Context<'_>, + ) -> Poll>> { + Poll::Pending + } + } + + /// A struct which uses the above, inheriting the generic constraint, + /// for which we want to derive the `NetworkBehaviour`. + #[allow(dead_code)] + #[derive(NetworkBehaviour)] + #[behaviour(prelude = "libp2p_swarm::derive_prelude")] + struct Foo1 { + bar: Bar, + } + + /// A struct which uses the above, inheriting the generic constraint, + /// for which we want to derive the `NetworkBehaviour`. + /// + /// Using a where clause instead of inline constraint. 
+ #[allow(dead_code)] + #[derive(NetworkBehaviour)] + #[behaviour(prelude = "libp2p_swarm::derive_prelude")] + struct Foo2 + where + A: Mark, + { + bar: Bar, + } + + #[allow(dead_code)] + fn foo() { + require_net_behaviour::>(); + require_net_behaviour::>(); } } @@ -381,12 +462,12 @@ fn custom_event_with_either() { #[allow(clippy::large_enum_variant)] enum BehaviourOutEvent { - Kad(libp2p_kad::KademliaEvent), + Kad(libp2p_kad::Event), PingOrIdentify(Either), } - impl From for BehaviourOutEvent { - fn from(event: libp2p_kad::KademliaEvent) -> Self { + impl From for BehaviourOutEvent { + fn from(event: libp2p_kad::Event) -> Self { BehaviourOutEvent::Kad(event) } } @@ -400,11 +481,11 @@ fn custom_event_with_either() { #[allow(dead_code)] #[derive(NetworkBehaviour)] #[behaviour( - out_event = "BehaviourOutEvent", + to_swarm = "BehaviourOutEvent", prelude = "libp2p_swarm::derive_prelude" )] struct Foo { - kad: libp2p_kad::Kademlia, + kad: libp2p_kad::Behaviour, ping_or_identify: Either, } @@ -426,7 +507,7 @@ fn generated_out_event_derive_debug() { fn require_debug() where T: NetworkBehaviour, - ::OutEvent: Debug, + ::ToSwarm: Debug, { } @@ -437,7 +518,7 @@ fn generated_out_event_derive_debug() { fn multiple_behaviour_attributes() { #[allow(dead_code)] #[derive(NetworkBehaviour)] - #[behaviour(out_event = "FooEvent")] + #[behaviour(to_swarm = "FooEvent")] #[behaviour(prelude = "libp2p_swarm::derive_prelude")] struct Foo { ping: ping::Behaviour, @@ -457,7 +538,7 @@ fn multiple_behaviour_attributes() { #[test] fn custom_out_event_no_type_parameters() { use libp2p_identity::PeerId; - use libp2p_swarm::{ConnectionId, PollParameters, ToSwarm}; + use libp2p_swarm::{ConnectionId, ToSwarm}; use std::task::Context; use std::task::Poll; @@ -467,7 +548,7 @@ fn custom_out_event_no_type_parameters() { impl NetworkBehaviour for TemplatedBehaviour { type ConnectionHandler = dummy::ConnectionHandler; - type OutEvent = void::Void; + type ToSwarm = void::Void; fn 
handle_established_inbound_connection( &mut self, @@ -500,32 +581,16 @@ fn custom_out_event_no_type_parameters() { fn poll( &mut self, - _ctx: &mut Context, - _: &mut impl PollParameters, - ) -> Poll>> { + _: &mut Context<'_>, + ) -> Poll>> { Poll::Pending } - fn on_swarm_event(&mut self, event: FromSwarm) { - match event { - FromSwarm::ConnectionEstablished(_) - | FromSwarm::ConnectionClosed(_) - | FromSwarm::AddressChange(_) - | FromSwarm::DialFailure(_) - | FromSwarm::ListenFailure(_) - | FromSwarm::NewListener(_) - | FromSwarm::NewListenAddr(_) - | FromSwarm::ExpiredListenAddr(_) - | FromSwarm::ListenerError(_) - | FromSwarm::ListenerClosed(_) - | FromSwarm::NewExternalAddr(_) - | FromSwarm::ExpiredExternalAddr(_) => {} - } - } + fn on_swarm_event(&mut self, _event: FromSwarm) {} } #[derive(NetworkBehaviour)] - #[behaviour(out_event = "OutEvent", prelude = "libp2p_swarm::derive_prelude")] + #[behaviour(to_swarm = "OutEvent", prelude = "libp2p_swarm::derive_prelude")] struct Behaviour { custom: TemplatedBehaviour, } @@ -544,3 +609,9 @@ fn custom_out_event_no_type_parameters() { require_net_behaviour::>(); require_net_behaviour::>(); } + +#[test] +fn ui() { + let t = trybuild::TestCases::new(); + t.compile_fail("tests/ui/fail/*.rs"); +} diff --git a/swarm/tests/ui/fail/prelude_not_string.rs b/swarm/tests/ui/fail/prelude_not_string.rs new file mode 100644 index 00000000000..727f2439ec0 --- /dev/null +++ b/swarm/tests/ui/fail/prelude_not_string.rs @@ -0,0 +1,11 @@ +use libp2p_ping as ping; + +#[derive(libp2p_swarm::NetworkBehaviour)] +#[behaviour(prelude = libp2p_swarm::derive_prelude)] +struct Foo { + ping: ping::Behaviour, +} + +fn main() { + +} diff --git a/swarm/tests/ui/fail/prelude_not_string.stderr b/swarm/tests/ui/fail/prelude_not_string.stderr new file mode 100644 index 00000000000..2c2a79805d9 --- /dev/null +++ b/swarm/tests/ui/fail/prelude_not_string.stderr @@ -0,0 +1,5 @@ +error: expected a string literal + --> tests/ui/fail/prelude_not_string.rs:4:23 + 
| +4 | #[behaviour(prelude = libp2p_swarm::derive_prelude)] + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/swarm/tests/ui/fail/to_swarm_not_string.rs b/swarm/tests/ui/fail/to_swarm_not_string.rs new file mode 100644 index 00000000000..e0ff56e41a7 --- /dev/null +++ b/swarm/tests/ui/fail/to_swarm_not_string.rs @@ -0,0 +1,19 @@ +use libp2p_ping as ping; + +#[derive(libp2p_swarm::NetworkBehaviour)] +#[behaviour(out_event = FooEvent, prelude = "libp2p_swarm::derive_prelude")] +struct Foo { + ping: ping::Behaviour, +} + +struct FooEvent; + +impl From for FooEvent { + fn from(_: ping::Event) -> Self { + unimplemented!() + } +} + +fn main() { + +} diff --git a/swarm/tests/ui/fail/to_swarm_not_string.stderr b/swarm/tests/ui/fail/to_swarm_not_string.stderr new file mode 100644 index 00000000000..f2fbba685cb --- /dev/null +++ b/swarm/tests/ui/fail/to_swarm_not_string.stderr @@ -0,0 +1,5 @@ +error: expected a string literal + --> tests/ui/fail/to_swarm_not_string.rs:4:25 + | +4 | #[behaviour(out_event = FooEvent, prelude = "libp2p_swarm::derive_prelude")] + | ^^^^^^^^ diff --git a/transports/deflate/CHANGELOG.md b/transports/deflate/CHANGELOG.md deleted file mode 100644 index 2261d0aa92f..00000000000 --- a/transports/deflate/CHANGELOG.md +++ /dev/null @@ -1,106 +0,0 @@ -## 0.40.0 - unreleased - -- Raise MSRV to 1.65. - See [PR 3715]. - -[PR 3715]: https://github.com/libp2p/rust-libp2p/pull/3715 - -## 0.39.0 - -- Update to `libp2p-core` `v0.39.0`. - -## 0.38.0 - -- Update to `libp2p-core` `v0.38.0`. - -- Update `rust-version` to reflect the actual MSRV: 1.60.0. See [PR 3090]. - -[PR 3090]: https://github.com/libp2p/rust-libp2p/pull/3090 - -## 0.37.0 - -- Update to `libp2p-core` `v0.37.0`. - -## 0.36.0 - -- Update to `libp2p-core` `v0.36.0`. - -## 0.35.0 - -- Update to `libp2p-core` `v0.35.0`. - -## 0.34.0 - -- Update to `libp2p-core` `v0.34.0`. - -## 0.33.0 - -- Update to `libp2p-core` `v0.33.0`. - -## 0.32.0 [2022-02-22] - -- Update to `libp2p-core` `v0.32.0`. 
- -## 0.31.0 [2022-01-27] - -- Update dependencies. - -- Migrate to Rust edition 2021 (see [PR 2339]). - -[PR 2339]: https://github.com/libp2p/rust-libp2p/pull/2339 - -## 0.30.0 [2021-11-01] - -- Make default features of `libp2p-core` optional. - [PR 2181](https://github.com/libp2p/rust-libp2p/pull/2181) - -- Update dependencies. - -## 0.29.0 [2021-07-12] - -- Update dependencies. - -## 0.28.0 [2021-03-17] - -- Update `libp2p-core`. - -## 0.27.1 [2021-01-27] - -- Ensure read buffers are initialised. - [PR 1933](https://github.com/libp2p/rust-libp2p/pull/1933). - -## 0.27.0 [2021-01-12] - -- Update dependencies. - -## 0.26.0 [2020-12-17] - -- Update `libp2p-core`. - -## 0.25.0 [2020-11-25] - -- Update `libp2p-core`. - -## 0.24.0 [2020-11-09] - -- Update dependencies. - -## 0.23.0 [2020-10-16] - -- Bump `libp2p-core` dependency. - -## 0.22.0 [2020-09-09] - -- Bump `libp2p-core` dependency. - -## 0.21.0 [2020-08-18] - -- Bump `libp2p-core` dependency. - -## 0.20.0 [2020-07-01] - -- Updated dependencies. - -## 0.19.2 [2020-06-22] - -- Updated dependencies. diff --git a/transports/deflate/Cargo.toml b/transports/deflate/Cargo.toml deleted file mode 100644 index 3d46e77352f..00000000000 --- a/transports/deflate/Cargo.toml +++ /dev/null @@ -1,30 +0,0 @@ -[package] -name = "libp2p-deflate" -edition = "2021" -rust-version = { workspace = true } -description = "Deflate encryption protocol for libp2p" -version = "0.40.0" -authors = ["Parity Technologies "] -license = "MIT" -repository = "https://github.com/libp2p/rust-libp2p" -keywords = ["peer-to-peer", "libp2p", "networking"] -categories = ["network-programming", "asynchronous"] - -[dependencies] -futures = "0.3.28" -libp2p-core = { workspace = true } -flate2 = "1.0" - -[dev-dependencies] -async-std = "1.6.2" -libp2p-tcp = { workspace = true, features = ["async-io"] } -quickcheck = { workspace = true } -rand = "0.8" -futures_ringbuf = "0.3.1" - -# Passing arguments to the docsrs builder in order to properly document cfg's. 
-# More information: https://docs.rs/about/builds#cross-compiling -[package.metadata.docs.rs] -all-features = true -rustdoc-args = ["--cfg", "docsrs"] -rustc-args = ["--cfg", "docsrs"] diff --git a/transports/deflate/src/lib.rs b/transports/deflate/src/lib.rs deleted file mode 100644 index 0d83713888e..00000000000 --- a/transports/deflate/src/lib.rs +++ /dev/null @@ -1,275 +0,0 @@ -// Copyright 2019 Parity Technologies (UK) Ltd. -// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. 
- -#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] - -use futures::{prelude::*, ready}; -use libp2p_core::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}; -use std::{io, iter, pin::Pin, task::Context, task::Poll}; - -#[derive(Debug, Copy, Clone)] -pub struct DeflateConfig { - compression: flate2::Compression, -} - -impl Default for DeflateConfig { - fn default() -> Self { - DeflateConfig { - compression: flate2::Compression::fast(), - } - } -} - -impl UpgradeInfo for DeflateConfig { - type Info = &'static str; - type InfoIter = iter::Once; - - fn protocol_info(&self) -> Self::InfoIter { - iter::once("/deflate/1.0.0") - } -} - -impl InboundUpgrade for DeflateConfig -where - C: AsyncRead + AsyncWrite, -{ - type Output = DeflateOutput; - type Error = io::Error; - type Future = future::Ready>; - - fn upgrade_inbound(self, r: C, _: Self::Info) -> Self::Future { - future::ok(DeflateOutput::new(r, self.compression)) - } -} - -impl OutboundUpgrade for DeflateConfig -where - C: AsyncRead + AsyncWrite, -{ - type Output = DeflateOutput; - type Error = io::Error; - type Future = future::Ready>; - - fn upgrade_outbound(self, w: C, _: Self::Info) -> Self::Future { - future::ok(DeflateOutput::new(w, self.compression)) - } -} - -/// Decodes and encodes traffic using DEFLATE. -#[derive(Debug)] -pub struct DeflateOutput { - /// Inner stream where we read compressed data from and write compressed data to. - inner: S, - /// Internal object used to hold the state of the compression. - compress: flate2::Compress, - /// Internal object used to hold the state of the decompression. - decompress: flate2::Decompress, - /// Temporary buffer between `compress` and `inner`. Stores compressed bytes that need to be - /// sent out once `inner` is ready to accept more. - write_out: Vec, - /// Temporary buffer between `decompress` and `inner`. Stores compressed bytes that need to be - /// given to `decompress`. 
- read_interm: Vec, - /// When we read from `inner` and `Ok(0)` is returned, we set this to `true` so that we don't - /// read from it again. - inner_read_eof: bool, -} - -impl DeflateOutput { - fn new(inner: S, compression: flate2::Compression) -> Self { - DeflateOutput { - inner, - compress: flate2::Compress::new(compression, false), - decompress: flate2::Decompress::new(false), - write_out: Vec::with_capacity(256), - read_interm: Vec::with_capacity(256), - inner_read_eof: false, - } - } - - /// Tries to write the content of `self.write_out` to `self.inner`. - /// Returns `Ready(Ok(()))` if `self.write_out` is empty. - fn flush_write_out(&mut self, cx: &mut Context<'_>) -> Poll> - where - S: AsyncWrite + Unpin, - { - loop { - if self.write_out.is_empty() { - return Poll::Ready(Ok(())); - } - - match AsyncWrite::poll_write(Pin::new(&mut self.inner), cx, &self.write_out) { - Poll::Ready(Ok(0)) => return Poll::Ready(Err(io::ErrorKind::WriteZero.into())), - Poll::Ready(Ok(n)) => self.write_out = self.write_out.split_off(n), - Poll::Ready(Err(err)) => return Poll::Ready(Err(err)), - Poll::Pending => return Poll::Pending, - }; - } - } -} - -impl AsyncRead for DeflateOutput -where - S: AsyncRead + Unpin, -{ - fn poll_read( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut [u8], - ) -> Poll> { - // We use a `this` variable because the compiler doesn't allow multiple mutable borrows - // across a `Deref`. - let this = &mut *self; - - loop { - // Read from `self.inner` into `self.read_interm` if necessary. 
- if this.read_interm.is_empty() && !this.inner_read_eof { - this.read_interm - .resize(this.read_interm.capacity() + 256, 0); - - match AsyncRead::poll_read(Pin::new(&mut this.inner), cx, &mut this.read_interm) { - Poll::Ready(Ok(0)) => { - this.inner_read_eof = true; - this.read_interm.clear(); - } - Poll::Ready(Ok(n)) => this.read_interm.truncate(n), - Poll::Ready(Err(err)) => { - this.read_interm.clear(); - return Poll::Ready(Err(err)); - } - Poll::Pending => { - this.read_interm.clear(); - return Poll::Pending; - } - } - } - debug_assert!(!this.read_interm.is_empty() || this.inner_read_eof); - - let before_out = this.decompress.total_out(); - let before_in = this.decompress.total_in(); - let ret = this.decompress.decompress( - &this.read_interm, - buf, - if this.inner_read_eof { - flate2::FlushDecompress::Finish - } else { - flate2::FlushDecompress::None - }, - )?; - - // Remove from `self.read_interm` the bytes consumed by the decompressor. - let consumed = (this.decompress.total_in() - before_in) as usize; - this.read_interm = this.read_interm.split_off(consumed); - - let read = (this.decompress.total_out() - before_out) as usize; - if read != 0 || ret == flate2::Status::StreamEnd { - return Poll::Ready(Ok(read)); - } - } - } -} - -impl AsyncWrite for DeflateOutput -where - S: AsyncWrite + Unpin, -{ - fn poll_write( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - // We use a `this` variable because the compiler doesn't allow multiple mutable borrows - // across a `Deref`. - let this = &mut *self; - - // We don't want to accumulate too much data in `self.write_out`, so we only proceed if it - // is empty. - ready!(this.flush_write_out(cx))?; - - // We special-case this, otherwise an empty buffer would make the loop below infinite. - if buf.is_empty() { - return Poll::Ready(Ok(0)); - } - - // Unfortunately, the compressor might be in a "flushing mode", not accepting any input - // data. 
We don't want to return `Ok(0)` in that situation, as that would be wrong. - // Instead, we invoke the compressor in a loop until it accepts some of our data. - loop { - let before_in = this.compress.total_in(); - this.write_out.reserve(256); // compress_vec uses the Vec's capacity - let ret = this.compress.compress_vec( - buf, - &mut this.write_out, - flate2::FlushCompress::None, - )?; - let written = (this.compress.total_in() - before_in) as usize; - - if written != 0 || ret == flate2::Status::StreamEnd { - return Poll::Ready(Ok(written)); - } - } - } - - fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - // We use a `this` variable because the compiler doesn't allow multiple mutable borrows - // across a `Deref`. - let this = &mut *self; - - ready!(this.flush_write_out(cx))?; - this.compress - .compress_vec(&[], &mut this.write_out, flate2::FlushCompress::Sync)?; - - loop { - ready!(this.flush_write_out(cx))?; - - debug_assert!(this.write_out.is_empty()); - // We ask the compressor to flush everything into `self.write_out`. - this.write_out.reserve(256); // compress_vec uses the Vec's capacity - this.compress - .compress_vec(&[], &mut this.write_out, flate2::FlushCompress::None)?; - if this.write_out.is_empty() { - break; - } - } - - AsyncWrite::poll_flush(Pin::new(&mut this.inner), cx) - } - - fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - // We use a `this` variable because the compiler doesn't allow multiple mutable borrows - // across a `Deref`. - let this = &mut *self; - - loop { - ready!(this.flush_write_out(cx))?; - - // We ask the compressor to flush everything into `self.write_out`. 
- debug_assert!(this.write_out.is_empty()); - this.write_out.reserve(256); // compress_vec uses the Vec's capacity - this.compress - .compress_vec(&[], &mut this.write_out, flate2::FlushCompress::Finish)?; - if this.write_out.is_empty() { - break; - } - } - - AsyncWrite::poll_close(Pin::new(&mut this.inner), cx) - } -} diff --git a/transports/deflate/tests/test.rs b/transports/deflate/tests/test.rs deleted file mode 100644 index 504888a7eca..00000000000 --- a/transports/deflate/tests/test.rs +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2019 Parity Technologies (UK) Ltd. -// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. 
- -use futures::prelude::*; -use libp2p_core::OutboundUpgrade; -use libp2p_deflate::DeflateConfig; -use quickcheck::{QuickCheck, TestResult}; -use rand::RngCore; - -#[test] -fn deflate() { - fn prop(message: Vec) -> TestResult { - if message.is_empty() { - return TestResult::discard(); - } - futures::executor::block_on(run(message)); - TestResult::passed() - } - QuickCheck::new().quickcheck(prop as fn(Vec) -> TestResult) -} - -#[test] -fn lot_of_data() { - let mut v = vec![0; 2 * 1024 * 1024]; - rand::thread_rng().fill_bytes(&mut v); - futures::executor::block_on(run(v)); -} - -async fn run(message1: Vec) { - let (server, client) = futures_ringbuf::Endpoint::pair(100, 100); - - let message2 = message1.clone(); - - let client_task = async move { - let mut client = DeflateConfig::default() - .upgrade_outbound(client, "") - .await - .unwrap(); - - let mut buf = vec![0; message2.len()]; - client.read_exact(&mut buf).await.expect("read_exact"); - assert_eq!(&buf[..], &message2[..]); - - client.write_all(&message2).await.expect("write_all"); - client.close().await.expect("close") - }; - - let server_task = async move { - let mut server = DeflateConfig::default() - .upgrade_outbound(server, "") - .await - .unwrap(); - - server.write_all(&message1).await.expect("write_all"); - server.close().await.expect("close"); - - let mut buf = Vec::new(); - server.read_to_end(&mut buf).await.expect("read_to_end"); - assert_eq!(&buf[..], &message1[..]); - }; - - futures::future::join(server_task, client_task).await; -} diff --git a/transports/dns/CHANGELOG.md b/transports/dns/CHANGELOG.md index 9511b6d8cdd..91cfbc00883 100644 --- a/transports/dns/CHANGELOG.md +++ b/transports/dns/CHANGELOG.md @@ -1,4 +1,28 @@ -## 0.40.0 - unreleased +## 0.41.1 + +- Add hidden API that removes unnecessary async for `async-std`. + See [PR 4808](https://github.com/libp2p/rust-libp2p/pull/4808). + +## 0.41.0 + +- Make `tokio::Transport::custom` and `async_std::Transport::custom` constructors infallible. 
+ See [PR 4464](https://github.com/libp2p/rust-libp2p/pull/4464). +- Remove deprecated type-aliases. + See [PR 4739](https://github.com/libp2p/rust-libp2p/pull/4739). +- Migrate to the `hickory-dns` project which has rebranded from `trust-dns`. + We also remove the `tokio-dns-over-rustls` and `tokio-dns-over-https-rustls` features. + Users should activate these features themselves on `hickory-resolver` if so desired. + See [PR 4780](https://github.com/libp2p/rust-libp2p/pull/4780). + +## 0.40.1 + +- Remove `Dns` prefix from types like `TokioDnsConfig` and `DnsConfig` in favor of modules that describe the different variants. + Users are encouraged to import the `libp2p::dns` module and refer to types as `dns::tokio::Transport` and `dns::async_std::Transport`. + See [PR 4505]. + +[PR 4505]: https://github.com/libp2p/rust-libp2p/pull/4505 + +## 0.40.0 - Raise MSRV to 1.65. See [PR 3715]. diff --git a/transports/dns/Cargo.toml b/transports/dns/Cargo.toml index c7d51db6083..644db338ea9 100644 --- a/transports/dns/Cargo.toml +++ b/transports/dns/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-dns" edition = "2021" rust-version = { workspace = true } description = "DNS transport implementation for libp2p" -version = "0.40.0" +version = "0.41.1" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -11,32 +11,32 @@ keywords = ["peer-to-peer", "libp2p", "networking"] categories = ["network-programming", "asynchronous"] [dependencies] +async-std-resolver = { version = "0.24", optional = true } +async-trait = "0.1.75" +futures = "0.3.30" libp2p-core = { workspace = true } libp2p-identity = { workspace = true } -log = "0.4.1" -futures = "0.3.28" -async-std-resolver = { version = "0.22", optional = true } parking_lot = "0.12.0" -trust-dns-resolver = { version = "0.22", default-features = false, features = ["system-config"] } -smallvec = "1.6.1" +hickory-resolver = { version = "0.24.0", default-features = false, features = 
["system-config"] } +smallvec = "1.11.2" +tracing = "0.1.37" [dev-dependencies] -env_logger = "0.10" +libp2p-identity = { workspace = true, features = ["rand"] } tokio-crate = { package = "tokio", version = "1.0", default-features = false, features = ["rt", "time"] } async-std-crate = { package = "async-std", version = "1.6" } +tracing-subscriber = { version = "0.3", features = ["env-filter"] } [features] async-std = ["async-std-resolver"] -tokio = ["trust-dns-resolver/tokio-runtime"] -# The `tokio-` prefix and feature dependency is just to be explicit, -# since these features of `trust-dns-resolver` are currently only -# available for `tokio`. -tokio-dns-over-rustls = ["tokio", "trust-dns-resolver/dns-over-rustls"] -tokio-dns-over-https-rustls = ["tokio", "trust-dns-resolver/dns-over-https-rustls"] +tokio = ["hickory-resolver/tokio-runtime"] -# Passing arguments to the docsrs builder in order to properly document cfg's. +# Passing arguments to the docsrs builder in order to properly document cfg's. # More information: https://docs.rs/about/builds#cross-compiling [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs"] rustc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true diff --git a/transports/dns/src/lib.rs b/transports/dns/src/lib.rs index 7de86ba358c..3aeac4e4154 100644 --- a/transports/dns/src/lib.rs +++ b/transports/dns/src/lib.rs @@ -21,17 +21,17 @@ //! # [DNS name resolution](https://github.com/libp2p/specs/blob/master/addressing/README.md#ip-and-name-resolution) //! [`Transport`] for libp2p. //! -//! This crate provides the type [`GenDnsConfig`] with its instantiations -//! [`DnsConfig`] and `TokioDnsConfig` for use with `async-std` and `tokio`, +//! This crate provides the type [`async_std::Transport`] and [`tokio::Transport`] +//! for use with `async-std` and `tokio`, //! respectively. //! -//! A [`GenDnsConfig`] is an address-rewriting [`Transport`] wrapper around +//! 
A [`Transport`] is an address-rewriting [`libp2p_core::Transport`] wrapper around //! an inner `Transport`. The composed transport behaves like the inner -//! transport, except that [`Transport::dial`] resolves `/dns/...`, `/dns4/...`, +//! transport, except that [`libp2p_core::Transport::dial`] resolves `/dns/...`, `/dns4/...`, //! `/dns6/...` and `/dnsaddr/...` components of the given `Multiaddr` through //! a DNS, replacing them with the resolved protocols (typically TCP/IP). //! -//! The `async-std` feature and hence the `DnsConfig` are +//! The `async-std` feature and hence the [`async_std::Transport`] are //! enabled by default. Tokio users can furthermore opt-in //! to the `tokio-dns-over-rustls` and `tokio-dns-over-https-rustls` //! features. For more information about these features, please @@ -49,7 +49,7 @@ //! problematic on platforms like Android, where there's a lot of //! complexity hidden behind the system APIs. //! If the implementation requires different characteristics, one should -//! consider providing their own implementation of [`GenDnsConfig`] or use +//! consider providing their own implementation of [`Transport`] or use //! platform specific APIs to extract the host's DNS configuration (if possible) //! and provide a custom [`ResolverConfig`]. //! @@ -58,35 +58,120 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] #[cfg(feature = "async-std")] -use async_std_resolver::{AsyncStdConnection, AsyncStdConnectionProvider}; +pub mod async_std { + use async_std_resolver::AsyncStdResolver; + use futures::FutureExt; + use hickory_resolver::{ + config::{ResolverConfig, ResolverOpts}, + system_conf, + }; + use parking_lot::Mutex; + use std::{io, sync::Arc}; + + /// A `Transport` wrapper for performing DNS lookups when dialing `Multiaddr`esses + /// using `async-std` for all async I/O. + pub type Transport = crate::Transport; + + impl Transport { + /// Creates a new [`Transport`] from the OS's DNS configuration and defaults. 
+ pub async fn system(inner: T) -> Result, io::Error> { + let (cfg, opts) = system_conf::read_system_conf()?; + Ok(Self::custom(inner, cfg, opts).await) + } + + /// Creates a [`Transport`] with a custom resolver configuration and options. + pub async fn custom(inner: T, cfg: ResolverConfig, opts: ResolverOpts) -> Transport { + Transport { + inner: Arc::new(Mutex::new(inner)), + resolver: async_std_resolver::resolver(cfg, opts).await, + } + } + + // TODO: Replace `system` implementation with this + #[doc(hidden)] + pub fn system2(inner: T) -> Result, io::Error> { + Ok(Transport { + inner: Arc::new(Mutex::new(inner)), + resolver: async_std_resolver::resolver_from_system_conf() + .now_or_never() + .expect( + "async_std_resolver::resolver_from_system_conf did not resolve immediately", + )?, + }) + } + + // TODO: Replace `custom` implementation with this + #[doc(hidden)] + pub fn custom2(inner: T, cfg: ResolverConfig, opts: ResolverOpts) -> Transport { + Transport { + inner: Arc::new(Mutex::new(inner)), + resolver: async_std_resolver::resolver(cfg, opts) + .now_or_never() + .expect("async_std_resolver::resolver did not resolve immediately"), + } + } + } +} + +#[cfg(feature = "tokio")] +pub mod tokio { + use hickory_resolver::{system_conf, TokioAsyncResolver}; + use parking_lot::Mutex; + use std::sync::Arc; + + /// A `Transport` wrapper for performing DNS lookups when dialing `Multiaddr`esses + /// using `tokio` for all async I/O. + pub type Transport = crate::Transport; + + impl Transport { + /// Creates a new [`Transport`] from the OS's DNS configuration and defaults. + pub fn system(inner: T) -> Result, std::io::Error> { + let (cfg, opts) = system_conf::read_system_conf()?; + Ok(Self::custom(inner, cfg, opts)) + } + + /// Creates a [`Transport`] with a custom resolver configuration + /// and options. 
+ pub fn custom( + inner: T, + cfg: hickory_resolver::config::ResolverConfig, + opts: hickory_resolver::config::ResolverOpts, + ) -> Transport { + Transport { + inner: Arc::new(Mutex::new(inner)), + resolver: TokioAsyncResolver::tokio(cfg, opts), + } + } + } +} + +use async_trait::async_trait; use futures::{future::BoxFuture, prelude::*}; use libp2p_core::{ connection::Endpoint, multiaddr::{Multiaddr, Protocol}, transport::{ListenerId, TransportError, TransportEvent}, - Transport, }; use parking_lot::Mutex; use smallvec::SmallVec; use std::io; +use std::net::{Ipv4Addr, Ipv6Addr}; use std::{ convert::TryFrom, error, fmt, iter, - net::IpAddr, ops::DerefMut, pin::Pin, str, sync::Arc, task::{Context, Poll}, }; -#[cfg(any(feature = "async-std", feature = "tokio"))] -use trust_dns_resolver::system_conf; -use trust_dns_resolver::{proto::xfer::dns_handle::DnsHandle, AsyncResolver, ConnectionProvider}; -#[cfg(feature = "tokio")] -use trust_dns_resolver::{TokioAsyncResolver, TokioConnection, TokioConnectionProvider}; -pub use trust_dns_resolver::config::{ResolverConfig, ResolverOpts}; -pub use trust_dns_resolver::error::{ResolveError, ResolveErrorKind}; +pub use hickory_resolver::config::{ResolverConfig, ResolverOpts}; +pub use hickory_resolver::error::{ResolveError, ResolveErrorKind}; +use hickory_resolver::lookup::{Ipv4Lookup, Ipv6Lookup, TxtLookup}; +use hickory_resolver::lookup_ip::LookupIp; +use hickory_resolver::name_server::ConnectionProvider; +use hickory_resolver::AsyncResolver; /// The prefix for `dnsaddr` protocol TXT record lookups. const DNSADDR_PREFIX: &str = "_dnsaddr."; @@ -106,103 +191,40 @@ const MAX_DNS_LOOKUPS: usize = 32; /// result of a single `/dnsaddr` lookup. const MAX_TXT_RECORDS: usize = 16; -/// A `Transport` wrapper for performing DNS lookups when dialing `Multiaddr`esses -/// using `async-std` for all async I/O. 
-#[cfg(feature = "async-std")] -pub type DnsConfig = GenDnsConfig; - -/// A `Transport` wrapper for performing DNS lookups when dialing `Multiaddr`esses -/// using `tokio` for all async I/O. -#[cfg(feature = "tokio")] -pub type TokioDnsConfig = GenDnsConfig; - -/// A `Transport` wrapper for performing DNS lookups when dialing `Multiaddr`esses. -pub struct GenDnsConfig -where - C: DnsHandle, - P: ConnectionProvider, -{ +/// A [`Transport`] for performing DNS lookups when dialing `Multiaddr`esses. +/// You shouldn't need to use this type directly. Use [`tokio::Transport`] or [`async_std::Transport`] instead. +#[derive(Debug)] +pub struct Transport { /// The underlying transport. inner: Arc>, /// The DNS resolver used when dialing addresses with DNS components. - resolver: AsyncResolver, + resolver: R, } -#[cfg(feature = "async-std")] -impl DnsConfig { - /// Creates a new [`DnsConfig`] from the OS's DNS configuration and defaults. - pub async fn system(inner: T) -> Result, io::Error> { - let (cfg, opts) = system_conf::read_system_conf()?; - Self::custom(inner, cfg, opts).await - } - - /// Creates a [`DnsConfig`] with a custom resolver configuration and options. - pub async fn custom( - inner: T, - cfg: ResolverConfig, - opts: ResolverOpts, - ) -> Result, io::Error> { - Ok(DnsConfig { - inner: Arc::new(Mutex::new(inner)), - resolver: async_std_resolver::resolver(cfg, opts).await?, - }) - } -} - -#[cfg(feature = "tokio")] -impl TokioDnsConfig { - /// Creates a new [`TokioDnsConfig`] from the OS's DNS configuration and defaults. - pub fn system(inner: T) -> Result, io::Error> { - let (cfg, opts) = system_conf::read_system_conf()?; - Self::custom(inner, cfg, opts) - } - - /// Creates a [`TokioDnsConfig`] with a custom resolver configuration - /// and options. 
- pub fn custom( - inner: T, - cfg: ResolverConfig, - opts: ResolverOpts, - ) -> Result, io::Error> { - Ok(TokioDnsConfig { - inner: Arc::new(Mutex::new(inner)), - resolver: TokioAsyncResolver::tokio(cfg, opts)?, - }) - } -} - -impl fmt::Debug for GenDnsConfig -where - C: DnsHandle, - P: ConnectionProvider, - T: fmt::Debug, -{ - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_tuple("GenDnsConfig").field(&self.inner).finish() - } -} - -impl Transport for GenDnsConfig +impl libp2p_core::Transport for Transport where - T: Transport + Send + Unpin + 'static, + T: libp2p_core::Transport + Send + Unpin + 'static, T::Error: Send, T::Dial: Send, - C: DnsHandle, - P: ConnectionProvider, + R: Clone + Send + Sync + Resolver + 'static, { type Output = T::Output; - type Error = DnsErr; + type Error = Error; type ListenerUpgrade = future::MapErr Self::Error>; type Dial = future::Either< future::MapErr Self::Error>, BoxFuture<'static, Result>, >; - fn listen_on(&mut self, addr: Multiaddr) -> Result> { + fn listen_on( + &mut self, + id: ListenerId, + addr: Multiaddr, + ) -> Result<(), TransportError> { self.inner .lock() - .listen_on(addr) - .map_err(|e| e.map(DnsErr::Transport)) + .listen_on(id, addr) + .map_err(|e| e.map(Error::Transport)) } fn remove_listener(&mut self, id: ListenerId) -> bool { @@ -229,31 +251,33 @@ where cx: &mut Context<'_>, ) -> Poll> { let mut inner = self.inner.lock(); - Transport::poll(Pin::new(inner.deref_mut()), cx).map(|event| { + libp2p_core::Transport::poll(Pin::new(inner.deref_mut()), cx).map(|event| { event - .map_upgrade(|upgr| upgr.map_err::<_, fn(_) -> _>(DnsErr::Transport)) - .map_err(DnsErr::Transport) + .map_upgrade(|upgr| upgr.map_err::<_, fn(_) -> _>(Error::Transport)) + .map_err(Error::Transport) }) } } -impl GenDnsConfig +impl Transport where - T: Transport + Send + Unpin + 'static, + T: libp2p_core::Transport + Send + Unpin + 'static, T::Error: Send, T::Dial: Send, - C: DnsHandle, - P: ConnectionProvider, + R: 
Clone + Send + Sync + Resolver + 'static, { fn do_dial( &mut self, addr: Multiaddr, role_override: Endpoint, - ) -> Result<::Dial, TransportError<::Error>> { + ) -> Result< + ::Dial, + TransportError<::Error>, + > { let resolver = self.resolver.clone(); let inner = self.inner.clone(); - // Asynchronlously resolve all DNS names in the address before proceeding + // Asynchronously resolve all DNS names in the address before proceeding // with dialing on the underlying transport. Ok(async move { let mut last_err = None; @@ -278,8 +302,8 @@ where ) }) { if dns_lookups == MAX_DNS_LOOKUPS { - log::debug!("Too many DNS lookups. Dropping unresolved {}.", addr); - last_err = Some(DnsErr::TooManyLookups); + tracing::debug!(address=%addr, "Too many DNS lookups, dropping unresolved address"); + last_err = Some(Error::TooManyLookups); // There may still be fully resolved addresses in `unresolved`, // so keep going until `unresolved` is empty. continue; @@ -295,13 +319,13 @@ where last_err = Some(e); } Ok(Resolved::One(ip)) => { - log::trace!("Resolved {} -> {}", name, ip); + tracing::trace!(protocol=%name, resolved=%ip); let addr = addr.replace(i, |_| Some(ip)).expect("`i` is a valid index"); unresolved.push(addr); } Ok(Resolved::Many(ips)) => { for ip in ips { - log::trace!("Resolved {} -> {}", name, ip); + tracing::trace!(protocol=%name, resolved=%ip); let addr = addr.replace(i, |_| Some(ip)).expect("`i` is a valid index"); unresolved.push(addr); @@ -315,14 +339,14 @@ where if a.ends_with(&suffix) { if n < MAX_TXT_RECORDS { n += 1; - log::trace!("Resolved {} -> {}", name, a); + tracing::trace!(protocol=%name, resolved=%a); let addr = prefix.iter().chain(a.iter()).collect::(); unresolved.push(addr); } else { - log::debug!( - "Too many TXT records. Dropping resolved {}.", - a + tracing::debug!( + resolved=%a, + "Too many TXT records, dropping resolved" ); } } @@ -331,7 +355,7 @@ where } } else { // We have a fully resolved address, so try to dial it. 
- log::debug!("Dialing {}", addr); + tracing::debug!(address=%addr, "Dialing address"); let transport = inner.clone(); let dial = match role_override { @@ -344,23 +368,23 @@ where // actually accepted, i.e. for which it produced // a dialing future. dial_attempts += 1; - out.await.map_err(DnsErr::Transport) + out.await.map_err(Error::Transport) } Err(TransportError::MultiaddrNotSupported(a)) => { - Err(DnsErr::MultiaddrNotSupported(a)) + Err(Error::MultiaddrNotSupported(a)) } - Err(TransportError::Other(err)) => Err(DnsErr::Transport(err)), + Err(TransportError::Other(err)) => Err(Error::Transport(err)), }; match result { Ok(out) => return Ok(out), Err(err) => { - log::debug!("Dial error: {:?}.", err); + tracing::debug!("Dial error: {:?}.", err); if unresolved.is_empty() { return Err(err); } if dial_attempts == MAX_DIAL_ATTEMPTS { - log::debug!( + tracing::debug!( "Aborting dialing after {} attempts.", MAX_DIAL_ATTEMPTS ); @@ -377,7 +401,7 @@ where // for the given address to begin with (i.e. DNS lookups succeeded but // produced no records relevant for the given `addr`). Err(last_err.unwrap_or_else(|| { - DnsErr::ResolveError(ResolveErrorKind::Message("No matching records found.").into()) + Error::ResolveError(ResolveErrorKind::Message("No matching records found.").into()) })) } .boxed() @@ -385,10 +409,10 @@ where } } -/// The possible errors of a [`GenDnsConfig`] wrapped transport. +/// The possible errors of a [`Transport`] wrapped transport. #[derive(Debug)] #[allow(clippy::large_enum_variant)] -pub enum DnsErr { +pub enum Error { /// The underlying transport encountered an error. Transport(TErr), /// DNS resolution failed. 
@@ -404,30 +428,30 @@ pub enum DnsErr { TooManyLookups, } -impl fmt::Display for DnsErr +impl fmt::Display for Error where TErr: fmt::Display, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { - DnsErr::Transport(err) => write!(f, "{err}"), - DnsErr::ResolveError(err) => write!(f, "{err}"), - DnsErr::MultiaddrNotSupported(a) => write!(f, "Unsupported resolved address: {a}"), - DnsErr::TooManyLookups => write!(f, "Too many DNS lookups"), + Error::Transport(err) => write!(f, "{err}"), + Error::ResolveError(err) => write!(f, "{err}"), + Error::MultiaddrNotSupported(a) => write!(f, "Unsupported resolved address: {a}"), + Error::TooManyLookups => write!(f, "Too many DNS lookups"), } } } -impl error::Error for DnsErr +impl error::Error for Error where TErr: error::Error + 'static, { fn source(&self) -> Option<&(dyn error::Error + 'static)> { match self { - DnsErr::Transport(err) => Some(err), - DnsErr::ResolveError(err) => Some(err), - DnsErr::MultiaddrNotSupported(_) => None, - DnsErr::TooManyLookups => None, + Error::Transport(err) => Some(err), + Error::ResolveError(err) => Some(err), + Error::MultiaddrNotSupported(_) => None, + Error::TooManyLookups => None, } } } @@ -450,14 +474,10 @@ enum Resolved<'a> { /// Asynchronously resolves the domain name of a `Dns`, `Dns4`, `Dns6` or `Dnsaddr` protocol /// component. If the given protocol is of a different type, it is returned unchanged as a /// [`Resolved::One`]. 
-fn resolve<'a, E: 'a + Send, C, P>( +fn resolve<'a, E: 'a + Send, R: Resolver>( proto: &Protocol<'a>, - resolver: &'a AsyncResolver, -) -> BoxFuture<'a, Result, DnsErr>> -where - C: DnsHandle, - P: ConnectionProvider, -{ + resolver: &'a R, +) -> BoxFuture<'a, Result, Error>> { match proto { Protocol::Dns(ref name) => resolver .lookup_ip(name.clone().into_owned()) @@ -479,7 +499,7 @@ where Ok(Resolved::One(Protocol::from(one))) } } - Err(e) => Err(DnsErr::ResolveError(e)), + Err(e) => Err(Error::ResolveError(e)), }) .boxed(), Protocol::Dns4(ref name) => resolver @@ -495,15 +515,15 @@ where iter::once(one) .chain(iter::once(two)) .chain(ips) - .map(IpAddr::from) + .map(Ipv4Addr::from) .map(Protocol::from) .collect(), )) } else { - Ok(Resolved::One(Protocol::from(IpAddr::from(one)))) + Ok(Resolved::One(Protocol::from(Ipv4Addr::from(one)))) } } - Err(e) => Err(DnsErr::ResolveError(e)), + Err(e) => Err(Error::ResolveError(e)), }) .boxed(), Protocol::Dns6(ref name) => resolver @@ -519,15 +539,15 @@ where iter::once(one) .chain(iter::once(two)) .chain(ips) - .map(IpAddr::from) + .map(Ipv6Addr::from) .map(Protocol::from) .collect(), )) } else { - Ok(Resolved::One(Protocol::from(IpAddr::from(one)))) + Ok(Resolved::One(Protocol::from(Ipv6Addr::from(one)))) } } - Err(e) => Err(DnsErr::ResolveError(e)), + Err(e) => Err(Error::ResolveError(e)), }) .boxed(), Protocol::Dnsaddr(ref name) => { @@ -542,7 +562,7 @@ where match parse_dnsaddr_txt(chars) { Err(e) => { // Skip over seemingly invalid entries. 
- log::debug!("Invalid TXT record: {:?}", e); + tracing::debug!("Invalid TXT record: {:?}", e); } Ok(a) => { addrs.push(a); @@ -552,7 +572,7 @@ where } Ok(Resolved::Addrs(addrs)) } - Err(e) => Err(DnsErr::ResolveError(e)), + Err(e) => Err(Error::ResolveError(e)), }) .boxed() } @@ -573,6 +593,37 @@ fn invalid_data(e: impl Into>) -> io::E io::Error::new(io::ErrorKind::InvalidData, e) } +#[async_trait::async_trait] +#[doc(hidden)] +pub trait Resolver { + async fn lookup_ip(&self, name: String) -> Result; + async fn ipv4_lookup(&self, name: String) -> Result; + async fn ipv6_lookup(&self, name: String) -> Result; + async fn txt_lookup(&self, name: String) -> Result; +} + +#[async_trait] +impl Resolver for AsyncResolver +where + C: ConnectionProvider, +{ + async fn lookup_ip(&self, name: String) -> Result { + self.lookup_ip(name).await + } + + async fn ipv4_lookup(&self, name: String) -> Result { + self.ipv4_lookup(name).await + } + + async fn ipv6_lookup(&self, name: String) -> Result { + self.ipv6_lookup(name).await + } + + async fn txt_lookup(&self, name: String) -> Result { + self.txt_lookup(name).await + } +} + #[cfg(all(test, any(feature = "tokio", feature = "async-std")))] mod tests { use super::*; @@ -586,7 +637,9 @@ mod tests { #[test] fn basic_resolve() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) + .try_init(); #[derive(Clone)] struct CustomTransport; @@ -599,8 +652,9 @@ mod tests { fn listen_on( &mut self, + _: ListenerId, _: Multiaddr, - ) -> Result> { + ) -> Result<(), TransportError> { unreachable!() } @@ -636,13 +690,12 @@ mod tests { } } - async fn run(mut transport: GenDnsConfig) + async fn run(mut transport: super::Transport) where - C: DnsHandle, - P: ConnectionProvider, T: Transport + Clone + Send + Unpin + 'static, T::Error: Send, T::Dial: Send, + R: Clone + Send + Sync + Resolver + 'static, { // Success due to existing A record for example.com. 
let _ = transport @@ -692,7 +745,7 @@ mod tests { .unwrap() .await { - Err(DnsErr::ResolveError(_)) => {} + Err(Error::ResolveError(_)) => {} Err(e) => panic!("Unexpected error: {e:?}"), Ok(_) => panic!("Unexpected success."), } @@ -703,7 +756,7 @@ mod tests { .unwrap() .await { - Err(DnsErr::ResolveError(e)) => match e.kind() { + Err(Error::ResolveError(e)) => match e.kind() { ResolveErrorKind::NoRecordsFound { .. } => {} _ => panic!("Unexpected DNS error: {e:?}"), }, @@ -719,7 +772,7 @@ mod tests { let config = ResolverConfig::quad9(); let opts = ResolverOpts::default(); async_std_crate::task::block_on( - DnsConfig::custom(CustomTransport, config, opts).then(|dns| run(dns.unwrap())), + async_std::Transport::custom(CustomTransport, config, opts).then(run), ); } @@ -734,9 +787,8 @@ mod tests { .enable_time() .build() .unwrap(); - rt.block_on(run( - TokioDnsConfig::custom(CustomTransport, config, opts).unwrap() - )); + + rt.block_on(run(tokio::Transport::custom(CustomTransport, config, opts))); } } } diff --git a/transports/noise/CHANGELOG.md b/transports/noise/CHANGELOG.md index 3b3bce34dec..78effb673d2 100644 --- a/transports/noise/CHANGELOG.md +++ b/transports/noise/CHANGELOG.md @@ -1,9 +1,29 @@ -## 0.42.0 - unreleased +## 0.44.0 + +- Migrate to `{In,Out}boundConnectionUpgrade` traits. + See [PR 4695](https://github.com/libp2p/rust-libp2p/pull/4695). + +## 0.43.2 + +- Update x25519-dalek to 2.0.0. + +## 0.43.1 + +- Update dependencies. + +## 0.43.0 - Raise MSRV to 1.65. See [PR 3715]. +- Remove deprecated APIs. See [PR 3511]. + +- Add `Config::with_webtransport_certhashes`. See [PR 3991]. + This can be used by WebTransport implementers to send (responder) or verify (initiator) certhashes. 
+
+[PR 3511]: https://github.com/libp2p/rust-libp2p/pull/3511
 [PR 3715]: https://github.com/libp2p/rust-libp2p/pull/3715
+[PR 3991]: https://github.com/libp2p/rust-libp2p/pull/3991
 
 ## 0.42.2
 
diff --git a/transports/noise/Cargo.toml b/transports/noise/Cargo.toml
index b44c07633c7..70f262cf2d4 100644
--- a/transports/noise/Cargo.toml
+++ b/transports/noise/Cargo.toml
@@ -3,41 +3,48 @@ name = "libp2p-noise"
 edition = "2021"
 rust-version = { workspace = true }
 description = "Cryptographic handshake protocol using the noise framework."
-version = "0.43.0"
+version = "0.44.0"
 authors = ["Parity Technologies <admin@parity.io>"]
 license = "MIT"
 repository = "https://github.com/libp2p/rust-libp2p"
 
 [dependencies]
+asynchronous-codec = { workspace = true }
 bytes = "1"
-curve25519-dalek = "3.0.0"
-futures = "0.3.28"
+curve25519-dalek = "4.1.1"
+futures = "0.3.30"
 libp2p-core = { workspace = true }
 libp2p-identity = { workspace = true, features = ["ed25519"] }
-log = "0.4"
+multiaddr = { workspace = true }
+multihash = { workspace = true }
+once_cell = "1.19.0"
 quick-protobuf = "0.8"
-once_cell = "1.17.1"
 rand = "0.8.3"
-sha2 = "0.10.0"
+sha2 = "0.10.8"
 static_assertions = "1"
-thiserror = "1.0.40"
-x25519-dalek = "1.1.0"
+thiserror = "1.0.51"
+tracing = "0.1.37"
+x25519-dalek = "2"
 zeroize = "1"
 
 [target.'cfg(not(target_arch = "wasm32"))'.dependencies]
-snow = { version = "0.9.2", features = ["ring-resolver"], default-features = false }
+snow = { version = "0.9.4", features = ["ring-resolver"], default-features = false }
 
 [target.'cfg(target_arch = "wasm32")'.dependencies]
 snow = { version = "0.9.2", features = ["default-resolver"], default-features = false }
 
 [dev-dependencies]
-env_logger = "0.10.0"
-futures_ringbuf = "0.3.1"
+futures_ringbuf = "0.4.0"
 quickcheck = { workspace = true }
+tracing-subscriber = { version = "0.3", features = ["env-filter"] }
+libp2p-identity = { workspace = true, features = ["rand"] }
 
-# Passing arguments to the docsrs builder in order to properly document cfg's.
+# Passing arguments to the docsrs builder in order to properly document cfg's. # More information: https://docs.rs/about/builds#cross-compiling [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs"] rustc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true diff --git a/transports/noise/src/generated/payload.proto b/transports/noise/src/generated/payload.proto index 1893dc55037..68bd01edd75 100644 --- a/transports/noise/src/generated/payload.proto +++ b/transports/noise/src/generated/payload.proto @@ -1,11 +1,15 @@ syntax = "proto3"; - package payload.proto; // Payloads for Noise handshake messages. +message NoiseExtensions { + repeated bytes webtransport_certhashes = 1; + repeated string stream_muxers = 2; +} + message NoiseHandshakePayload { bytes identity_key = 1; bytes identity_sig = 2; - bytes data = 3; + optional NoiseExtensions extensions = 4; } diff --git a/transports/noise/src/generated/payload/proto.rs b/transports/noise/src/generated/payload/proto.rs index 7b17a58ef37..98808ed466a 100644 --- a/transports/noise/src/generated/payload/proto.rs +++ b/transports/noise/src/generated/payload/proto.rs @@ -13,12 +13,48 @@ use quick_protobuf::{MessageInfo, MessageRead, MessageWrite, BytesReader, Writer use quick_protobuf::sizeofs::*; use super::super::*; +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Debug, Default, PartialEq, Clone)] +pub struct NoiseExtensions { + pub webtransport_certhashes: Vec>, + pub stream_muxers: Vec, +} + +impl<'a> MessageRead<'a> for NoiseExtensions { + fn from_reader(r: &mut BytesReader, bytes: &'a [u8]) -> Result { + let mut msg = Self::default(); + while !r.is_eof() { + match r.next_tag(bytes) { + Ok(10) => msg.webtransport_certhashes.push(r.read_bytes(bytes)?.to_owned()), + Ok(18) => msg.stream_muxers.push(r.read_string(bytes)?.to_owned()), + Ok(t) => { r.read_unknown(bytes, t)?; } + Err(e) => return Err(e), + } + } + Ok(msg) + } +} + +impl MessageWrite for NoiseExtensions { + fn 
get_size(&self) -> usize { + 0 + + self.webtransport_certhashes.iter().map(|s| 1 + sizeof_len((s).len())).sum::() + + self.stream_muxers.iter().map(|s| 1 + sizeof_len((s).len())).sum::() + } + + fn write_message(&self, w: &mut Writer) -> Result<()> { + for s in &self.webtransport_certhashes { w.write_with_tag(10, |w| w.write_bytes(&**s))?; } + for s in &self.stream_muxers { w.write_with_tag(18, |w| w.write_string(&**s))?; } + Ok(()) + } +} + #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Debug, Default, PartialEq, Clone)] pub struct NoiseHandshakePayload { pub identity_key: Vec, pub identity_sig: Vec, - pub data: Vec, + pub extensions: Option, } impl<'a> MessageRead<'a> for NoiseHandshakePayload { @@ -28,7 +64,7 @@ impl<'a> MessageRead<'a> for NoiseHandshakePayload { match r.next_tag(bytes) { Ok(10) => msg.identity_key = r.read_bytes(bytes)?.to_owned(), Ok(18) => msg.identity_sig = r.read_bytes(bytes)?.to_owned(), - Ok(26) => msg.data = r.read_bytes(bytes)?.to_owned(), + Ok(34) => msg.extensions = Some(r.read_message::(bytes)?), Ok(t) => { r.read_unknown(bytes, t)?; } Err(e) => return Err(e), } @@ -42,13 +78,13 @@ impl MessageWrite for NoiseHandshakePayload { 0 + if self.identity_key.is_empty() { 0 } else { 1 + sizeof_len((&self.identity_key).len()) } + if self.identity_sig.is_empty() { 0 } else { 1 + sizeof_len((&self.identity_sig).len()) } - + if self.data.is_empty() { 0 } else { 1 + sizeof_len((&self.data).len()) } + + self.extensions.as_ref().map_or(0, |m| 1 + sizeof_len((m).get_size())) } fn write_message(&self, w: &mut Writer) -> Result<()> { if !self.identity_key.is_empty() { w.write_with_tag(10, |w| w.write_bytes(&**&self.identity_key))?; } if !self.identity_sig.is_empty() { w.write_with_tag(18, |w| w.write_bytes(&**&self.identity_sig))?; } - if !self.data.is_empty() { w.write_with_tag(26, |w| w.write_bytes(&**&self.data))?; } + if let Some(ref s) = self.extensions { w.write_with_tag(34, |w| w.write_message(s))?; } Ok(()) } } diff --git 
a/transports/noise/src/io.rs b/transports/noise/src/io.rs index ee184695696..9cd4cfed52a 100644 --- a/transports/noise/src/io.rs +++ b/transports/noise/src/io.rs @@ -22,11 +22,11 @@ mod framed; pub(crate) mod handshake; +use asynchronous_codec::Framed; use bytes::Bytes; -use framed::{NoiseFramed, MAX_FRAME_LEN}; +use framed::{Codec, MAX_FRAME_LEN}; use futures::prelude::*; use futures::ready; -use log::trace; use std::{ cmp::min, fmt, io, @@ -38,7 +38,7 @@ use std::{ /// /// `T` is the type of the underlying I/O resource. pub struct Output { - io: NoiseFramed, + io: Framed>, recv_buffer: Bytes, recv_offset: usize, send_buffer: Vec, @@ -47,12 +47,12 @@ pub struct Output { impl fmt::Debug for Output { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("NoiseOutput").field("io", &self.io).finish() + f.debug_struct("NoiseOutput").finish() } } impl Output { - fn new(io: NoiseFramed) -> Self { + fn new(io: Framed>) -> Self { Output { io, recv_buffer: Bytes::new(), @@ -75,10 +75,10 @@ impl AsyncRead for Output { if len > 0 { let n = min(len - off, buf.len()); buf[..n].copy_from_slice(&self.recv_buffer[off..off + n]); - trace!("read: copied {}/{} bytes", off + n, len); + tracing::trace!(copied_bytes=%(off + n), total_bytes=%len, "read: copied"); self.recv_offset += n; if len == self.recv_offset { - trace!("read: frame consumed"); + tracing::trace!("read: frame consumed"); // Drop the existing view so `NoiseFramed` can reuse // the buffer when polling for the next frame below. self.recv_buffer = Bytes::new(); @@ -111,7 +111,7 @@ impl AsyncWrite for Output { // The MAX_FRAME_LEN is the maximum buffer size before a frame must be sent. 
if this.send_offset == MAX_FRAME_LEN { - trace!("write: sending {} bytes", MAX_FRAME_LEN); + tracing::trace!(bytes=%MAX_FRAME_LEN, "write: sending"); ready!(io.as_mut().poll_ready(cx))?; io.as_mut().start_send(frame_buf)?; this.send_offset = 0; @@ -123,7 +123,7 @@ impl AsyncWrite for Output { let n = min(MAX_FRAME_LEN - off, buf.len()); this.send_buffer[off..off + n].copy_from_slice(&buf[..n]); this.send_offset += n; - trace!("write: buffered {} bytes", this.send_offset); + tracing::trace!(bytes=%this.send_offset, "write: buffered"); Poll::Ready(Ok(n)) } @@ -136,7 +136,7 @@ impl AsyncWrite for Output { // Check if there is still one more frame to send. if this.send_offset > 0 { ready!(io.as_mut().poll_ready(cx))?; - trace!("flush: sending {} bytes", this.send_offset); + tracing::trace!(bytes= %this.send_offset, "flush: sending"); io.as_mut().start_send(frame_buf)?; this.send_offset = 0; } diff --git a/transports/noise/src/io/framed.rs b/transports/noise/src/io/framed.rs index 01e1f9dbca1..9ed6045cf38 100644 --- a/transports/noise/src/io/framed.rs +++ b/transports/noise/src/io/framed.rs @@ -18,20 +18,18 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -//! This module provides a `Sink` and `Stream` for length-delimited -//! Noise protocol messages in form of [`NoiseFramed`]. - -use crate::io::Output; -use crate::{Error, Protocol, PublicKey}; -use bytes::{Bytes, BytesMut}; -use futures::prelude::*; -use futures::ready; -use log::{debug, trace}; -use std::{ - fmt, io, - pin::Pin, - task::{Context, Poll}, -}; +//! Provides a [`Codec`] type implementing the [`Encoder`] and [`Decoder`] traits. +//! +//! Alongside a [`asynchronous_codec::Framed`] this provides a [Sink](futures::Sink) +//! and [Stream](futures::Stream) for length-delimited Noise protocol messages. 
+ +use super::handshake::proto; +use crate::{protocol::PublicKey, Error}; +use asynchronous_codec::{Decoder, Encoder}; +use bytes::{Buf, Bytes, BytesMut}; +use quick_protobuf::{BytesReader, MessageRead, MessageWrite, Writer}; +use std::io; +use std::mem::size_of; /// Max. size of a noise message. const MAX_NOISE_MSG_LEN: usize = 65535; @@ -43,410 +41,202 @@ static_assertions::const_assert! { MAX_FRAME_LEN + EXTRA_ENCRYPT_SPACE <= MAX_NOISE_MSG_LEN } -/// A `NoiseFramed` is a `Sink` and `Stream` for length-delimited -/// Noise protocol messages. -/// -/// `T` is the type of the underlying I/O resource and `S` the -/// type of the Noise session state. -pub(crate) struct NoiseFramed { - io: T, +/// Codec holds the noise session state `S` and acts as a medium for +/// encoding and decoding length-delimited session messages. +pub(crate) struct Codec { session: S, - read_state: ReadState, - write_state: WriteState, - read_buffer: Vec, - write_buffer: Vec, - decrypt_buffer: BytesMut, -} -impl fmt::Debug for NoiseFramed { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("NoiseFramed") - .field("read_state", &self.read_state) - .field("write_state", &self.write_state) - .finish() - } + // We reuse write and encryption buffers across multiple messages to avoid reallocations. + // We cannot reuse read and decryption buffers because we cannot return borrowed data. + write_buffer: BytesMut, + encrypt_buffer: BytesMut, } -impl NoiseFramed { - /// Creates a nwe `NoiseFramed` for beginning a Noise protocol handshake. 
- pub(crate) fn new(io: T, state: snow::HandshakeState) -> Self { - NoiseFramed { - io, - session: state, - read_state: ReadState::Ready, - write_state: WriteState::Ready, - read_buffer: Vec::new(), - write_buffer: Vec::new(), - decrypt_buffer: BytesMut::new(), +impl Codec { + pub(crate) fn new(session: S) -> Self { + Codec { + session, + write_buffer: BytesMut::default(), + encrypt_buffer: BytesMut::default(), } } +} - /// Converts the `NoiseFramed` into a `NoiseOutput` encrypted data stream - /// once the handshake is complete, including the static DH [`PublicKey`] - /// of the remote, if received. - /// - /// If the underlying Noise protocol session state does not permit - /// transitioning to transport mode because the handshake is incomplete, - /// an error is returned. Similarly if the remote's static DH key, if - /// present, cannot be parsed. - pub(crate) fn into_transport(self) -> Result<(Option>, Output), Error> - where - C: Protocol + AsRef<[u8]>, - { - let dh_remote_pubkey = self - .session - .get_remote_static() - .map(C::public_from_bytes) - .transpose()?; - - let io = NoiseFramed { - session: self.session.into_transport_mode()?, - io: self.io, - read_state: ReadState::Ready, - write_state: WriteState::Ready, - read_buffer: self.read_buffer, - write_buffer: self.write_buffer, - decrypt_buffer: self.decrypt_buffer, - }; +impl Codec { + /// Checks if the session was started in the `initiator` role. + pub(crate) fn is_initiator(&self) -> bool { + self.session.is_initiator() + } - Ok((dh_remote_pubkey, Output::new(io))) + /// Checks if the session was started in the `responder` role. + pub(crate) fn is_responder(&self) -> bool { + !self.session.is_initiator() } -} -/// The states for reading Noise protocol frames. -#[derive(Debug)] -enum ReadState { - /// Ready to read another frame. - Ready, - /// Reading frame length. - ReadLen { buf: [u8; 2], off: usize }, - /// Reading frame data. 
- ReadData { len: usize, off: usize }, - /// EOF has been reached (terminal state). + /// Converts the underlying Noise session from the [`snow::HandshakeState`] to a + /// [`snow::TransportState`] once the handshake is complete, including the static + /// DH [`PublicKey`] of the remote if received. + /// + /// If the Noise protocol session state does not permit transitioning to + /// transport mode because the handshake is incomplete, an error is returned. /// - /// The associated result signals if the EOF was unexpected or not. - Eof(Result<(), ()>), - /// A decryption error occurred (terminal state). - DecErr, + /// An error is also returned if the remote's static DH key is not present or + /// cannot be parsed, as that indicates a fatal handshake error for the noise + /// `XX` pattern, which is the only handshake protocol libp2p currently supports. + pub(crate) fn into_transport(self) -> Result<(PublicKey, Codec), Error> { + let dh_remote_pubkey = self.session.get_remote_static().ok_or_else(|| { + Error::Io(io::Error::new( + io::ErrorKind::Other, + "expect key to always be present at end of XX session", + )) + })?; + + let dh_remote_pubkey = PublicKey::from_slice(dh_remote_pubkey)?; + let codec = Codec::new(self.session.into_transport_mode()?); + + Ok((dh_remote_pubkey, codec)) + } } -/// The states for writing Noise protocol frames. -#[derive(Debug)] -enum WriteState { - /// Ready to write another frame. - Ready, - /// Writing the frame length. - WriteLen { - len: usize, - buf: [u8; 2], - off: usize, - }, - /// Writing the frame data. - WriteData { len: usize, off: usize }, - /// EOF has been reached unexpectedly (terminal state). - Eof, - /// An encryption error occurred (terminal state). 
- EncErr, -} +impl Encoder for Codec { + type Error = io::Error; + type Item<'a> = &'a proto::NoiseHandshakePayload; -impl WriteState { - fn is_ready(&self) -> bool { - if let WriteState::Ready = self { - return true; - } - false - } -} + fn encode(&mut self, item: Self::Item<'_>, dst: &mut BytesMut) -> Result<(), Self::Error> { + let item_size = item.get_size(); -impl futures::stream::Stream for NoiseFramed -where - T: AsyncRead + Unpin, - S: SessionState + Unpin, -{ - type Item = io::Result; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut this = Pin::into_inner(self); - loop { - trace!("read state: {:?}", this.read_state); - match this.read_state { - ReadState::Ready => { - this.read_state = ReadState::ReadLen { - buf: [0, 0], - off: 0, - }; - } - ReadState::ReadLen { mut buf, mut off } => { - let n = match read_frame_len(&mut this.io, cx, &mut buf, &mut off) { - Poll::Ready(Ok(Some(n))) => n, - Poll::Ready(Ok(None)) => { - trace!("read: eof"); - this.read_state = ReadState::Eof(Ok(())); - return Poll::Ready(None); - } - Poll::Ready(Err(e)) => return Poll::Ready(Some(Err(e))), - Poll::Pending => { - this.read_state = ReadState::ReadLen { buf, off }; - return Poll::Pending; - } - }; - trace!("read: frame len = {}", n); - if n == 0 { - trace!("read: empty frame"); - this.read_state = ReadState::Ready; - continue; - } - this.read_buffer.resize(usize::from(n), 0u8); - this.read_state = ReadState::ReadData { - len: usize::from(n), - off: 0, - } - } - ReadState::ReadData { len, ref mut off } => { - let n = { - let f = - Pin::new(&mut this.io).poll_read(cx, &mut this.read_buffer[*off..len]); - match ready!(f) { - Ok(n) => n, - Err(e) => return Poll::Ready(Some(Err(e))), - } - }; - trace!("read: {}/{} bytes", *off + n, len); - if n == 0 { - trace!("read: eof"); - this.read_state = ReadState::Eof(Err(())); - return Poll::Ready(Some(Err(io::ErrorKind::UnexpectedEof.into()))); - } - *off += n; - if len == *off { - trace!("read: decrypting 
{} bytes", len); - this.decrypt_buffer.resize(len, 0); - if let Ok(n) = this - .session - .read_message(&this.read_buffer, &mut this.decrypt_buffer) - { - this.decrypt_buffer.truncate(n); - trace!("read: payload len = {} bytes", n); - this.read_state = ReadState::Ready; - // Return an immutable view into the current buffer. - // If the view is dropped before the next frame is - // read, the `BytesMut` will reuse the same buffer - // for the next frame. - let view = this.decrypt_buffer.split().freeze(); - return Poll::Ready(Some(Ok(view))); - } else { - debug!("read: decryption error"); - this.read_state = ReadState::DecErr; - return Poll::Ready(Some(Err(io::ErrorKind::InvalidData.into()))); - } - } - } - ReadState::Eof(Ok(())) => { - trace!("read: eof"); - return Poll::Ready(None); - } - ReadState::Eof(Err(())) => { - trace!("read: eof (unexpected)"); - return Poll::Ready(Some(Err(io::ErrorKind::UnexpectedEof.into()))); - } - ReadState::DecErr => { - return Poll::Ready(Some(Err(io::ErrorKind::InvalidData.into()))) - } - } - } + self.write_buffer.resize(item_size, 0); + let mut writer = Writer::new(&mut self.write_buffer[..item_size]); + item.write_message(&mut writer) + .expect("Protobuf encoding to succeed"); + + encrypt( + &self.write_buffer[..item_size], + dst, + &mut self.encrypt_buffer, + |item, buffer| self.session.write_message(item, buffer), + )?; + + Ok(()) } } -impl futures::sink::Sink<&Vec> for NoiseFramed -where - T: AsyncWrite + Unpin, - S: SessionState + Unpin, -{ +impl Decoder for Codec { type Error = io::Error; + type Item = proto::NoiseHandshakePayload; + + fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { + let cleartext = match decrypt(src, |ciphertext, decrypt_buffer| { + self.session.read_message(ciphertext, decrypt_buffer) + })? 
{ + None => return Ok(None), + Some(cleartext) => cleartext, + }; - fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut this = Pin::into_inner(self); - loop { - trace!("write state {:?}", this.write_state); - match this.write_state { - WriteState::Ready => { - return Poll::Ready(Ok(())); - } - WriteState::WriteLen { len, buf, mut off } => { - trace!("write: frame len ({}, {:?}, {}/2)", len, buf, off); - match write_frame_len(&mut this.io, cx, &buf, &mut off) { - Poll::Ready(Ok(true)) => (), - Poll::Ready(Ok(false)) => { - trace!("write: eof"); - this.write_state = WriteState::Eof; - return Poll::Ready(Err(io::ErrorKind::WriteZero.into())); - } - Poll::Ready(Err(e)) => return Poll::Ready(Err(e)), - Poll::Pending => { - this.write_state = WriteState::WriteLen { len, buf, off }; - return Poll::Pending; - } - } - this.write_state = WriteState::WriteData { len, off: 0 } - } - WriteState::WriteData { len, ref mut off } => { - let n = { - let f = - Pin::new(&mut this.io).poll_write(cx, &this.write_buffer[*off..len]); - match ready!(f) { - Ok(n) => n, - Err(e) => return Poll::Ready(Err(e)), - } - }; - if n == 0 { - trace!("write: eof"); - this.write_state = WriteState::Eof; - return Poll::Ready(Err(io::ErrorKind::WriteZero.into())); - } - *off += n; - trace!("write: {}/{} bytes written", *off, len); - if len == *off { - trace!("write: finished with {} bytes", len); - this.write_state = WriteState::Ready; - } - } - WriteState::Eof => { - trace!("write: eof"); - return Poll::Ready(Err(io::ErrorKind::WriteZero.into())); - } - WriteState::EncErr => return Poll::Ready(Err(io::ErrorKind::InvalidData.into())), - } - } - } + let mut reader = BytesReader::from_bytes(&cleartext[..]); + let pb = + proto::NoiseHandshakePayload::from_reader(&mut reader, &cleartext).map_err(|_| { + io::Error::new( + io::ErrorKind::InvalidData, + "Failed decoding handshake payload", + ) + })?; - fn start_send(self: Pin<&mut Self>, frame: &Vec) -> Result<(), Self::Error> { - 
assert!(frame.len() <= MAX_FRAME_LEN); - let mut this = Pin::into_inner(self); - assert!(this.write_state.is_ready()); - - this.write_buffer - .resize(frame.len() + EXTRA_ENCRYPT_SPACE, 0u8); - match this - .session - .write_message(frame, &mut this.write_buffer[..]) - { - Ok(n) => { - trace!("write: cipher text len = {} bytes", n); - this.write_buffer.truncate(n); - this.write_state = WriteState::WriteLen { - len: n, - buf: u16::to_be_bytes(n as u16), - off: 0, - }; - Ok(()) - } - Err(e) => { - log::error!("encryption error: {:?}", e); - this.write_state = WriteState::EncErr; - Err(io::ErrorKind::InvalidData.into()) - } - } + Ok(Some(pb)) } +} + +impl Encoder for Codec { + type Error = io::Error; + type Item<'a> = &'a [u8]; - fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - ready!(self.as_mut().poll_ready(cx))?; - Pin::new(&mut self.io).poll_flush(cx) + fn encode(&mut self, item: Self::Item<'_>, dst: &mut BytesMut) -> Result<(), Self::Error> { + encrypt(item, dst, &mut self.encrypt_buffer, |item, buffer| { + self.session.write_message(item, buffer) + }) } +} - fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - ready!(self.as_mut().poll_flush(cx))?; - Pin::new(&mut self.io).poll_close(cx) +impl Decoder for Codec { + type Error = io::Error; + type Item = Bytes; + + fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { + decrypt(src, |ciphertext, decrypt_buffer| { + self.session.read_message(ciphertext, decrypt_buffer) + }) } } -/// A stateful context in which Noise protocol messages can be read and written. -pub(crate) trait SessionState { - fn read_message(&mut self, msg: &[u8], buf: &mut [u8]) -> Result; - fn write_message(&mut self, msg: &[u8], buf: &mut [u8]) -> Result; +/// Encrypts the given cleartext to `dst`. +/// +/// This is a standalone function to allow us reusing the `encrypt_buffer` and to use to across different session states of the noise protocol. 
+fn encrypt(
+    cleartext: &[u8],
+    dst: &mut BytesMut,
+    encrypt_buffer: &mut BytesMut,
+    encrypt_fn: impl FnOnce(&[u8], &mut [u8]) -> Result<usize, snow::Error>,
+) -> io::Result<()> {
+    tracing::trace!("Encrypting {} bytes", cleartext.len());
+
+    encrypt_buffer.resize(cleartext.len() + EXTRA_ENCRYPT_SPACE, 0);
+    let n = encrypt_fn(cleartext, encrypt_buffer).map_err(into_io_error)?;
+
+    tracing::trace!("Outgoing ciphertext has {n} bytes");
+
+    encode_length_prefixed(&encrypt_buffer[..n], dst);
+
+    Ok(())
 }
 
-impl SessionState for snow::HandshakeState {
-    fn read_message(&mut self, msg: &[u8], buf: &mut [u8]) -> Result<usize, snow::Error> {
-        self.read_message(msg, buf)
-    }
+/// Decrypts the given ciphertext.
+///
+/// This is a standalone function so we can use it across different session states of the noise protocol.
+/// In case `ciphertext` does not contain enough bytes to decrypt the entire frame, `Ok(None)` is returned.
+fn decrypt(
+    ciphertext: &mut BytesMut,
+    decrypt_fn: impl FnOnce(&[u8], &mut [u8]) -> Result<usize, snow::Error>,
+) -> io::Result<Option<Bytes>> {
+    let Some(ciphertext) = decode_length_prefixed(ciphertext)? 
else { + return Ok(None); + }; - fn write_message(&mut self, msg: &[u8], buf: &mut [u8]) -> Result { - self.write_message(msg, buf) - } + tracing::trace!("Incoming ciphertext has {} bytes", ciphertext.len()); + + let mut decrypt_buffer = BytesMut::zeroed(ciphertext.len()); + let n = decrypt_fn(&ciphertext, &mut decrypt_buffer).map_err(into_io_error)?; + + tracing::trace!("Decrypted cleartext has {n} bytes"); + + Ok(Some(decrypt_buffer.split_to(n).freeze())) } -impl SessionState for snow::TransportState { - fn read_message(&mut self, msg: &[u8], buf: &mut [u8]) -> Result { - self.read_message(msg, buf) - } +fn into_io_error(err: snow::Error) -> io::Error { + io::Error::new(io::ErrorKind::InvalidData, err) +} - fn write_message(&mut self, msg: &[u8], buf: &mut [u8]) -> Result { - self.write_message(msg, buf) - } +const U16_LENGTH: usize = size_of::(); + +fn encode_length_prefixed(src: &[u8], dst: &mut BytesMut) { + dst.reserve(U16_LENGTH + src.len()); + dst.extend_from_slice(&(src.len() as u16).to_be_bytes()); + dst.extend_from_slice(src); } -/// Read 2 bytes as frame length from the given source into the given buffer. -/// -/// Panics if `off >= 2`. -/// -/// When [`Poll::Pending`] is returned, the given buffer and offset -/// may have been updated (i.e. a byte may have been read) and must be preserved -/// for the next invocation. -/// -/// Returns `None` if EOF has been encountered. 
-fn read_frame_len( - mut io: &mut R, - cx: &mut Context<'_>, - buf: &mut [u8; 2], - off: &mut usize, -) -> Poll>> { - loop { - match ready!(Pin::new(&mut io).poll_read(cx, &mut buf[*off..])) { - Ok(n) => { - if n == 0 { - return Poll::Ready(Ok(None)); - } - *off += n; - if *off == 2 { - return Poll::Ready(Ok(Some(u16::from_be_bytes(*buf)))); - } - } - Err(e) => { - return Poll::Ready(Err(e)); - } - } +fn decode_length_prefixed(src: &mut BytesMut) -> Result, io::Error> { + if src.len() < size_of::() { + return Ok(None); } -} -/// Write 2 bytes as frame length from the given buffer into the given sink. -/// -/// Panics if `off >= 2`. -/// -/// When [`Poll::Pending`] is returned, the given offset -/// may have been updated (i.e. a byte may have been written) and must -/// be preserved for the next invocation. -/// -/// Returns `false` if EOF has been encountered. -fn write_frame_len( - mut io: &mut W, - cx: &mut Context<'_>, - buf: &[u8; 2], - off: &mut usize, -) -> Poll> { - loop { - match ready!(Pin::new(&mut io).poll_write(cx, &buf[*off..])) { - Ok(n) => { - if n == 0 { - return Poll::Ready(Ok(false)); - } - *off += n; - if *off == 2 { - return Poll::Ready(Ok(true)); - } - } - Err(e) => { - return Poll::Ready(Err(e)); - } - } + let mut len_bytes = [0u8; U16_LENGTH]; + len_bytes.copy_from_slice(&src[..U16_LENGTH]); + let len = u16::from_be_bytes(len_bytes) as usize; + + if src.len() - U16_LENGTH >= len { + // Skip the length header we already read. + src.advance(U16_LENGTH); + Ok(Some(src.split_to(len).freeze())) + } else { + Ok(None) } } diff --git a/transports/noise/src/io/handshake.rs b/transports/noise/src/io/handshake.rs index ea3331d5e68..7cc0f859e6e 100644 --- a/transports/noise/src/io/handshake.rs +++ b/transports/noise/src/io/handshake.rs @@ -20,52 +20,24 @@ //! Noise protocol handshake I/O. 
-mod proto { +pub(super) mod proto { #![allow(unreachable_pub)] include!("../generated/mod.rs"); + pub use self::payload::proto::NoiseExtensions; pub use self::payload::proto::NoiseHandshakePayload; } -use crate::io::{framed::NoiseFramed, Output}; -use crate::protocol::{KeypairIdentity, Protocol, PublicKey}; - +use super::framed::Codec; +use crate::io::Output; +use crate::protocol::{KeypairIdentity, PublicKey, STATIC_KEY_DOMAIN}; use crate::Error; -use crate::LegacyConfig; -use bytes::Bytes; +use asynchronous_codec::Framed; use futures::prelude::*; use libp2p_identity as identity; -use quick_protobuf::{BytesReader, MessageRead, MessageWrite, Writer}; -use std::io; - -/// The identity of the remote established during a handshake. -#[deprecated( - note = "This type will be made private in the future. Use `libp2p_noise::Config::new` instead to use the noise protocol." -)] -pub enum RemoteIdentity { - /// The remote provided no identifying information. - /// - /// The identity of the remote is unknown and must be obtained through - /// a different, out-of-band channel. - Unknown, - - /// The remote provided a static DH public key. - /// - /// The static DH public key is authentic in the sense that a successful - /// handshake implies that the remote possesses a corresponding secret key. - /// - /// > **Note**: To rule out active attacks like a MITM, trust in the public key must - /// > still be established, e.g. by comparing the key against an expected or - /// > otherwise known public key. - StaticDhKey(PublicKey), - - /// The remote provided a public identity key in addition to a static DH - /// public key and the latter is authentic w.r.t. the former. - /// - /// > **Note**: To rule out active attacks like a MITM, trust in the public key must - /// > still be established, e.g. by comparing the key against an expected or - /// > otherwise known public key. 
- IdentityKey(identity::PublicKey), -} +use multihash::Multihash; +use quick_protobuf::MessageWrite; +use std::collections::HashSet; +use std::{io, mem}; ////////////////////////////////////////////////////////////////////////////// // Internal @@ -73,7 +45,7 @@ pub enum RemoteIdentity { /// Handshake state. pub(crate) struct State { /// The underlying I/O resource. - io: NoiseFramed, + io: Framed>, /// The associated public identity of the local node's static DH keypair, /// which can be sent to the remote as part of an authenticated handshake. identity: KeypairIdentity, @@ -81,11 +53,21 @@ pub(crate) struct State { dh_remote_pubkey_sig: Option>, /// The known or received public identity key of the remote, if any. id_remote_pubkey: Option, - /// Legacy configuration parameters. - legacy: LegacyConfig, + /// The WebTransport certhashes of the responder, if any. + responder_webtransport_certhashes: Option>>, + /// The received extensions of the remote, if any. + remote_extensions: Option, +} + +/// Extensions +struct Extensions { + webtransport_certhashes: HashSet>, } -impl State { +impl State +where + T: AsyncRead + AsyncWrite, +{ /// Initializes the state for a new Noise handshake, using the given local /// identity keypair and local DH static public key. 
The handshake messages /// will be sent and received on the given I/O resource and using the @@ -97,38 +79,103 @@ impl State { session: snow::HandshakeState, identity: KeypairIdentity, expected_remote_key: Option, - legacy: LegacyConfig, + responder_webtransport_certhashes: Option>>, ) -> Self { Self { identity, - io: NoiseFramed::new(io, session), + io: Framed::new(io, Codec::new(session)), dh_remote_pubkey_sig: None, id_remote_pubkey: expected_remote_key, - legacy, + responder_webtransport_certhashes, + remote_extensions: None, } } } -impl State { +impl State +where + T: AsyncRead + AsyncWrite, +{ /// Finish a handshake, yielding the established remote identity and the /// [`Output`] for communicating on the encrypted channel. - pub(crate) fn finish(self) -> Result<(RemoteIdentity, Output), Error> - where - C: Protocol + AsRef<[u8]>, - { - let (pubkey, io) = self.io.into_transport()?; - let remote = match (self.id_remote_pubkey, pubkey) { - (_, None) => RemoteIdentity::Unknown, - (None, Some(dh_pk)) => RemoteIdentity::StaticDhKey(dh_pk), - (Some(id_pk), Some(dh_pk)) => { - if C::verify(&id_pk, &dh_pk, &self.dh_remote_pubkey_sig) { - RemoteIdentity::IdentityKey(id_pk) - } else { - return Err(Error::BadSignature); + pub(crate) fn finish(self) -> Result<(identity::PublicKey, Output), Error> { + let is_initiator = self.io.codec().is_initiator(); + + let (pubkey, framed) = map_into_transport(self.io)?; + + let id_pk = self + .id_remote_pubkey + .ok_or_else(|| Error::AuthenticationFailed)?; + + let is_valid_signature = self.dh_remote_pubkey_sig.as_ref().map_or(false, |s| { + id_pk.verify(&[STATIC_KEY_DOMAIN.as_bytes(), pubkey.as_ref()].concat(), s) + }); + + if !is_valid_signature { + return Err(Error::BadSignature); + } + + // Check WebTransport certhashes that responder reported back to us. + if is_initiator { + // We check only if we care (i.e. Config::with_webtransport_certhashes was used). 
+ if let Some(expected_certhashes) = self.responder_webtransport_certhashes { + let ext = self.remote_extensions.ok_or_else(|| { + Error::UnknownWebTransportCerthashes( + expected_certhashes.to_owned(), + HashSet::new(), + ) + })?; + + let received_certhashes = ext.webtransport_certhashes; + + // Expected WebTransport certhashes must be a strict subset + // of the reported ones. + if !expected_certhashes.is_subset(&received_certhashes) { + return Err(Error::UnknownWebTransportCerthashes( + expected_certhashes, + received_certhashes, + )); } } - }; - Ok((remote, io)) + } + + Ok((id_pk, Output::new(framed))) + } +} + +/// Maps the provided [`Framed`] from the [`snow::HandshakeState`] into the [`snow::TransportState`]. +/// +/// This is a bit tricky because [`Framed`] cannot just be de-composed but only into its [`FramedParts`](asynchronous_codec::FramedParts). +/// However, we need to retain the original [`FramedParts`](asynchronous_codec::FramedParts) because they contain the active read & write buffers. +/// +/// Those are likely **not** empty because the remote may directly write to the stream again after the noise handshake finishes. +fn map_into_transport( + framed: Framed>, +) -> Result<(PublicKey, Framed>), Error> +where + T: AsyncRead + AsyncWrite, +{ + let mut parts = framed.into_parts().map_codec(Some); + + let (pubkey, codec) = mem::take(&mut parts.codec) + .expect("We just set it to `Some`") + .into_transport()?; + + let parts = parts.map_codec(|_| codec); + let framed = Framed::from_parts(parts); + + Ok((pubkey, framed)) +} + +impl From for Extensions { + fn from(value: proto::NoiseExtensions) -> Self { + Extensions { + webtransport_certhashes: value + .webtransport_certhashes + .into_iter() + .filter_map(|bytes| Multihash::read(&bytes[..]).ok()) + .collect(), + } } } @@ -136,14 +183,14 @@ impl State { // Handshake Message Futures /// A future for receiving a Noise handshake message. 
-async fn recv(state: &mut State) -> Result +async fn recv(state: &mut State) -> Result where T: AsyncRead + Unpin, { match state.io.next().await { None => Err(io::Error::new(io::ErrorKind::UnexpectedEof, "eof").into()), Some(Err(e)) => Err(e.into()), - Some(Ok(m)) => Ok(m), + Some(Ok(p)) => Ok(p), } } @@ -152,12 +199,11 @@ pub(crate) async fn recv_empty(state: &mut State) -> Result<(), Error> where T: AsyncRead + Unpin, { - let msg = recv(state).await?; - if !msg.is_empty() { - return Err( - io::Error::new(io::ErrorKind::InvalidData, "Unexpected handshake payload.").into(), - ); + let payload = recv(state).await?; + if payload.get_size() != 0 { + return Err(io::Error::new(io::ErrorKind::InvalidData, "Expected empty payload.").into()); } + Ok(()) } @@ -166,123 +212,56 @@ pub(crate) async fn send_empty(state: &mut State) -> Result<(), Error> where T: AsyncWrite + Unpin, { - state.io.send(&Vec::new()).await?; + state + .io + .send(&proto::NoiseHandshakePayload::default()) + .await?; Ok(()) } -/// A future for receiving a Noise handshake message with a payload -/// identifying the remote. -/// -/// In case `expected_key` is passed, this function will fail if the received key does not match the expected key. -/// In case the remote does not send us a key, the expected key is assumed to be the remote's key. +/// A future for receiving a Noise handshake message with a payload identifying the remote. pub(crate) async fn recv_identity(state: &mut State) -> Result<(), Error> where T: AsyncRead + Unpin, { - let msg = recv(state).await?; - - let mut reader = BytesReader::from_bytes(&msg[..]); - let mut pb_result = proto::NoiseHandshakePayload::from_reader(&mut reader, &msg[..]); - - if pb_result.is_err() && state.legacy.recv_legacy_handshake { - // NOTE: This is support for legacy handshake payloads. 
As long as - // the frame length is less than 256 bytes, which is the case for - // all protobuf payloads not containing RSA keys, there is no room - // for misinterpretation, since if a two-bytes length prefix is present - // the first byte will be 0, which is always an unexpected protobuf tag - // value because the fields in the .proto file start with 1 and decoding - // thus expects a non-zero first byte. We will therefore always correctly - // fall back to the legacy protobuf parsing in these cases (again, not - // considering RSA keys, for which there may be a probabilistically - // very small chance of misinterpretation). - pb_result = pb_result.or_else(|e| { - if msg.len() > 2 { - let mut buf = [0, 0]; - buf.copy_from_slice(&msg[..2]); - // If there is a second length it must be 2 bytes shorter than the - // frame length, because each length is encoded as a `u16`. - if usize::from(u16::from_be_bytes(buf)) + 2 == msg.len() { - log::debug!("Attempting fallback legacy protobuf decoding."); - let mut reader = BytesReader::from_bytes(&msg[2..]); - proto::NoiseHandshakePayload::from_reader(&mut reader, &msg[2..]) - } else { - Err(e) - } - } else { - Err(e) - } - }); - } - let pb = pb_result?; - - if !pb.identity_key.is_empty() { - let pk = identity::PublicKey::try_decode_protobuf(&pb.identity_key)?; - if let Some(ref k) = state.id_remote_pubkey { - if k != &pk { - return Err(Error::UnexpectedKey); - } - } - state.id_remote_pubkey = Some(pk); - } + let pb = recv(state).await?; + state.id_remote_pubkey = Some(identity::PublicKey::try_decode_protobuf(&pb.identity_key)?); if !pb.identity_sig.is_empty() { state.dh_remote_pubkey_sig = Some(pb.identity_sig); } + if let Some(extensions) = pb.extensions { + state.remote_extensions = Some(extensions.into()); + } + Ok(()) } /// Send a Noise handshake message with a payload identifying the local node to the remote. 
pub(crate) async fn send_identity(state: &mut State) -> Result<(), Error> where - T: AsyncWrite + Unpin, + T: AsyncRead + AsyncWrite + Unpin, { let mut pb = proto::NoiseHandshakePayload { identity_key: state.identity.public.encode_protobuf(), ..Default::default() }; - if let Some(ref sig) = state.identity.signature { - pb.identity_sig = sig.clone() - } - - let mut msg = if state.legacy.send_legacy_handshake { - let mut msg = Vec::with_capacity(2 + pb.get_size()); - msg.extend_from_slice(&(pb.get_size() as u16).to_be_bytes()); - msg - } else { - Vec::with_capacity(pb.get_size()) - }; - - let mut writer = Writer::new(&mut msg); - pb.write_message(&mut writer).expect("Encoding to succeed"); - state.io.send(&msg).await?; - - Ok(()) -} + pb.identity_sig = state.identity.signature.clone(); -/// Send a Noise handshake message with a payload identifying the local node to the remote. -pub(crate) async fn send_signature_only(state: &mut State) -> Result<(), Error> -where - T: AsyncWrite + Unpin, -{ - let mut pb = proto::NoiseHandshakePayload::default(); + // If this is the responder then send WebTransport certhashes to initiator, if any. 
+ if state.io.codec().is_responder() { + if let Some(ref certhashes) = state.responder_webtransport_certhashes { + let ext = pb + .extensions + .get_or_insert_with(proto::NoiseExtensions::default); - if let Some(ref sig) = state.identity.signature { - pb.identity_sig = sig.clone() + ext.webtransport_certhashes = certhashes.iter().map(|hash| hash.to_bytes()).collect(); + } } - let mut msg = if state.legacy.send_legacy_handshake { - let mut msg = Vec::with_capacity(2 + pb.get_size()); - msg.extend_from_slice(&(pb.get_size() as u16).to_be_bytes()); - msg - } else { - Vec::with_capacity(pb.get_size()) - }; - - let mut writer = Writer::new(&mut msg); - pb.write_message(&mut writer).expect("Encoding to succeed"); - state.io.send(&msg).await?; + state.io.send(&pb).await?; Ok(()) } diff --git a/transports/noise/src/lib.rs b/transports/noise/src/lib.rs index 8e180482780..70fae9d7ee6 100644 --- a/transports/noise/src/lib.rs +++ b/transports/noise/src/lib.rs @@ -54,164 +54,33 @@ //! [noise]: http://noiseprotocol.org/ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -#![allow(deprecated)] // Temporarily until we remove deprecated items. mod io; mod protocol; -pub use io::handshake::RemoteIdentity; pub use io::Output; -pub use protocol::Protocol; - -#[deprecated( - note = "This type will be made private in the future. Use `libp2p_noise::Config::new` instead to use the noise protocol." -)] -pub type X25519Spec = protocol::x25519_spec::X25519Spec; - -#[deprecated( - note = "This type will be made private in the future. Use `libp2p_noise::Config::new` instead to use the noise protocol." -)] -pub type X25519 = protocol::x25519::X25519; - -#[deprecated( - note = "This type will be made private in the future. Use `libp2p_noise::Config::new` instead to use the noise protocol." -)] -pub type AuthenticKeypair = protocol::AuthenticKeypair; - -#[deprecated( - note = "This type will be made private in the future. Use `libp2p_noise::Config::new` instead to use the noise protocol." 
-)] -pub type Keypair = protocol::Keypair; - -#[deprecated( - note = "This type will be made private in the future. Use `libp2p_noise::Config::new` instead to use the noise protocol." -)] -pub type KeypairIdentity = protocol::KeypairIdentity; - -#[deprecated( - note = "This type will be made private in the future. Use `libp2p_noise::Config::new` instead to use the noise protocol." -)] -pub type PublicKey = protocol::PublicKey; - -#[deprecated( - note = "This type will be made private in the future. Use `libp2p_noise::Config::new` instead to use the noise protocol." -)] -pub type SecretKey = protocol::SecretKey; - -#[deprecated( - note = "This type will be made private in the future. Use `libp2p_noise::Config::new` instead to use the noise protocol." -)] -pub type ProtocolParams = protocol::ProtocolParams; - -#[deprecated( - note = "This type will be made private in the future. Use `libp2p_noise::Config::new` instead to use the noise protocol." -)] -pub type IK = protocol::IK; - -#[deprecated( - note = "This type will be made private in the future. Use `libp2p_noise::Config::new` instead to use the noise protocol." -)] -pub type IX = protocol::IX; - -#[deprecated( - note = "This type will be made private in the future. Use `libp2p_noise::Config::new` instead to use the noise protocol." -)] -pub type XX = protocol::XX; - -#[deprecated( - note = "This type has been renamed to drop the `Noise` prefix, refer to it as `noise::Error` instead." -)] -pub type NoiseError = Error; - -#[deprecated( - note = "This type has been renamed to drop the `Noise` prefix, refer to it as `noise::Output` instead." 
-)] -pub type NoiseOutput = Output; - use crate::handshake::State; use crate::io::handshake; -use futures::future::BoxFuture; +use crate::protocol::{noise_params_into_builder, AuthenticKeypair, Keypair, PARAMS_XX}; use futures::prelude::*; -use libp2p_core::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}; +use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}; +use libp2p_core::UpgradeInfo; use libp2p_identity as identity; use libp2p_identity::PeerId; -use std::fmt; -use std::fmt::Formatter; +use multiaddr::Protocol; +use multihash::Multihash; +use snow::params::NoiseParams; +use std::collections::HashSet; +use std::fmt::Write; use std::pin::Pin; -use zeroize::Zeroize; /// The configuration for the noise handshake. #[derive(Clone)] pub struct Config { - inner: NoiseAuthenticated, -} - -impl Config { - /// Construct a new configuration for the noise handshake using the XX handshake pattern. - - pub fn new(identity: &identity::Keypair) -> Result { - Ok(Config { - inner: NoiseAuthenticated::xx(identity)?, - }) - } - - /// Set the noise prologue. 
- - pub fn with_prologue(mut self, prologue: Vec) -> Self { - self.inner.config.prologue = prologue; - - self - } -} - -impl UpgradeInfo for Config { - type Info = &'static str; - type InfoIter = std::iter::Once; - - fn protocol_info(&self) -> Self::InfoIter { - std::iter::once("/noise") - } -} - -impl InboundUpgrade for Config -where - T: AsyncRead + AsyncWrite + Unpin + Send + 'static, -{ - type Output = (PeerId, Output); - type Error = Error; - type Future = Pin> + Send>>; - - fn upgrade_inbound(self, socket: T, info: Self::Info) -> Self::Future { - self.inner.upgrade_inbound(socket, info) - } -} - -impl OutboundUpgrade for Config -where - T: AsyncRead + AsyncWrite + Unpin + Send + 'static, -{ - type Output = (PeerId, Output); - type Error = Error; - type Future = Pin> + Send>>; - - fn upgrade_outbound(self, socket: T, info: Self::Info) -> Self::Future { - self.inner.upgrade_outbound(socket, info) - } -} - -/// The protocol upgrade configuration. -#[deprecated( - note = "Use `libp2p_noise::Config` instead. All other handshake patterns are deprecated and will be removed." -)] -#[derive(Clone)] -pub struct NoiseConfig { - dh_keys: AuthenticKeypair, - params: ProtocolParams, - - legacy: LegacyConfig, - remote: R, - _marker: std::marker::PhantomData