// Copyright 2019 Parity Technologies (UK) Ltd.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
2022-11-23 06:16:40 +04:00
//! High-level network manager.
2019-07-04 14:47:59 +02:00
//!
//! A [`Swarm`] contains the state of the network as a whole. The entire
//! behaviour of a libp2p network can be controlled through the `Swarm`.
//! The `Swarm` struct contains all active and pending connections to
//! remotes and manages the state of all the substreams that have been
//! opened, and all the upgrades that were built upon these substreams.
//!
//! # Initializing a Swarm
//!
//! Creating a `Swarm` requires three things:
//!
//! 1. A network identity of the local node in form of a [`PeerId`].
//! 2. An implementation of the [`Transport`] trait. This is the type that
//! will be used in order to reach nodes on the network based on their
//! address. See the `transport` module for more information.
//! 3. An implementation of the [`NetworkBehaviour`] trait. This is a state
//! machine that defines how the swarm should behave once it is connected
//! to a node.
//!
//! # Network Behaviour
//!
//! The [`NetworkBehaviour`] trait is implemented on types that indicate to
//! the swarm how it should behave. This includes which protocols are supported
//! and which nodes to try to connect to. It is the `NetworkBehaviour` that
//! controls what happens on the network. Multiple types that implement
//! `NetworkBehaviour` can be composed into a single behaviour.
//!
//! # Protocols Handler
//!
2022-02-21 13:32:24 +01:00
//! The [`ConnectionHandler`] trait defines how each active connection to a
2019-07-04 14:47:59 +02:00
//! remote should behave: how to handle incoming substreams, which protocols
//! are supported, when to open a new outbound substream, etc.
//!
2022-10-24 04:00:20 +02:00
#![ cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg)) ]
2022-02-13 21:57:38 +01:00
mod connection ;
2023-05-04 05:47:11 +01:00
mod executor ;
2023-05-12 08:19:23 +02:00
mod stream ;
2023-05-04 05:47:11 +01:00
mod stream_protocol ;
2020-08-04 11:30:09 +02:00
#[ cfg(test) ]
mod test ;
2020-02-07 16:29:30 +01:00
mod upgrade ;
2019-07-04 14:47:59 +02:00
2021-12-09 03:00:47 -08:00
pub mod behaviour ;
2021-11-15 14:17:23 +01:00
pub mod dial_opts ;
2022-10-06 03:50:11 +11:00
pub mod dummy ;
2022-02-21 13:32:24 +01:00
pub mod handler ;
2022-10-06 03:50:11 +11:00
pub mod keep_alive ;
2023-06-08 21:34:33 -04:00
mod listen_opts ;
2019-07-04 14:47:59 +02:00
2022-11-13 10:59:14 +11:00
/// Bundles all symbols required for the [`libp2p_swarm_derive::NetworkBehaviour`] macro.
#[ doc(hidden) ]
pub mod derive_prelude {
2022-11-17 09:28:40 +00:00
pub use crate ::behaviour ::AddressChange ;
pub use crate ::behaviour ::ConnectionClosed ;
pub use crate ::behaviour ::ConnectionEstablished ;
pub use crate ::behaviour ::DialFailure ;
pub use crate ::behaviour ::ExpiredListenAddr ;
2023-05-24 09:52:16 +02:00
pub use crate ::behaviour ::ExternalAddrConfirmed ;
pub use crate ::behaviour ::ExternalAddrExpired ;
2022-11-17 09:28:40 +00:00
pub use crate ::behaviour ::FromSwarm ;
pub use crate ::behaviour ::ListenFailure ;
pub use crate ::behaviour ::ListenerClosed ;
pub use crate ::behaviour ::ListenerError ;
2023-05-24 09:52:16 +02:00
pub use crate ::behaviour ::NewExternalAddrCandidate ;
2022-11-17 09:28:40 +00:00
pub use crate ::behaviour ::NewListenAddr ;
pub use crate ::behaviour ::NewListener ;
2023-01-18 19:56:32 +11:00
pub use crate ::connection ::ConnectionId ;
2023-02-24 10:43:33 +11:00
pub use crate ::ConnectionDenied ;
2022-11-13 10:59:14 +11:00
pub use crate ::ConnectionHandler ;
2023-02-24 10:43:33 +11:00
pub use crate ::ConnectionHandlerSelect ;
2022-11-13 10:59:14 +11:00
pub use crate ::DialError ;
pub use crate ::NetworkBehaviour ;
pub use crate ::PollParameters ;
2023-02-24 10:43:33 +11:00
pub use crate ::THandler ;
2023-02-14 14:09:29 +13:00
pub use crate ::THandlerInEvent ;
2023-02-24 10:43:33 +11:00
pub use crate ::THandlerOutEvent ;
2023-03-24 14:43:49 +01:00
pub use crate ::ToSwarm ;
2023-01-23 23:31:30 +11:00
pub use either ::Either ;
2022-11-13 10:59:14 +11:00
pub use futures ::prelude as futures ;
pub use libp2p_core ::transport ::ListenerId ;
pub use libp2p_core ::ConnectedPoint ;
2023-02-24 10:43:33 +11:00
pub use libp2p_core ::Endpoint ;
2022-11-13 10:59:14 +11:00
pub use libp2p_core ::Multiaddr ;
2023-03-13 01:46:58 +11:00
pub use libp2p_identity ::PeerId ;
2022-11-13 10:59:14 +11:00
}
2019-07-04 14:47:59 +02:00
pub use behaviour ::{
2023-05-24 09:52:16 +02:00
AddressChange , CloseConnection , ConnectionClosed , DialFailure , ExpiredListenAddr ,
ExternalAddrExpired , ExternalAddresses , FromSwarm , ListenAddresses , ListenFailure ,
ListenerClosed , ListenerError , NetworkBehaviour , NewExternalAddrCandidate , NewListenAddr ,
NotifyHandler , PollParameters , ToSwarm ,
2019-07-04 14:47:59 +02:00
} ;
2023-05-08 12:54:53 +02:00
pub use connection ::pool ::ConnectionCounters ;
2023-05-08 16:36:30 +02:00
pub use connection ::{ ConnectionError , ConnectionId , SupportedProtocols } ;
2022-11-15 15:26:03 +01:00
pub use executor ::Executor ;
2022-02-21 13:32:24 +01:00
pub use handler ::{
2023-05-08 10:55:17 +02:00
ConnectionHandler , ConnectionHandlerEvent , ConnectionHandlerSelect , KeepAlive , OneShotHandler ,
OneShotHandlerConfig , StreamUpgradeError , SubstreamProtocol ,
2019-07-04 14:47:59 +02:00
} ;
2022-11-13 10:59:14 +11:00
#[ cfg(feature = " macros " ) ]
pub use libp2p_swarm_derive ::NetworkBehaviour ;
2023-06-08 21:34:33 -04:00
pub use listen_opts ::ListenOpts ;
2023-05-12 08:19:23 +02:00
pub use stream ::Stream ;
2023-05-04 05:47:11 +01:00
pub use stream_protocol ::{ InvalidProtocol , StreamProtocol } ;
2021-08-11 13:12:12 +02:00
2023-05-24 09:52:16 +02:00
use crate ::behaviour ::ExternalAddrConfirmed ;
2023-02-24 10:43:33 +11:00
use crate ::handler ::UpgradeInfoSend ;
2022-11-03 05:47:00 +11:00
use connection ::pool ::{ EstablishedConnection , Pool , PoolConfig , PoolEvent } ;
use connection ::IncomingInfo ;
2023-02-24 09:25:27 +11:00
use connection ::{
PendingConnectionError , PendingInboundConnectionError , PendingOutboundConnectionError ,
} ;
2021-11-15 14:17:23 +01:00
use dial_opts ::{ DialOpts , PeerCondition } ;
2023-05-24 16:32:59 +02:00
use futures ::{ prelude ::* , stream ::FusedStream } ;
2021-07-31 06:21:21 +10:00
use libp2p_core ::{
2022-07-04 04:16:57 +02:00
connection ::ConnectedPoint ,
2023-05-04 05:47:11 +01:00
multiaddr ,
2021-07-31 06:21:21 +10:00
muxing ::StreamMuxerBox ,
2022-07-04 04:16:57 +02:00
transport ::{ self , ListenerId , TransportError , TransportEvent } ,
2023-05-12 08:19:23 +02:00
Endpoint , Multiaddr , Transport ,
2021-08-11 13:12:12 +02:00
} ;
2023-03-13 01:46:58 +11:00
use libp2p_identity ::PeerId ;
2019-04-04 12:25:42 -03:00
use smallvec ::SmallVec ;
2022-07-04 04:16:57 +02:00
use std ::collections ::{ HashMap , HashSet } ;
2021-10-14 18:05:07 +02:00
use std ::num ::{ NonZeroU32 , NonZeroU8 , NonZeroUsize } ;
2021-03-18 14:55:33 +01:00
use std ::{
2021-11-26 10:48:12 -05:00
convert ::TryFrom ,
2021-03-18 14:55:33 +01:00
error , fmt , io ,
pin ::Pin ,
task ::{ Context , Poll } ,
2021-08-11 13:12:12 +02:00
} ;
2019-04-04 12:25:42 -03:00
Multiple connections per peer (#1440)
* Allow multiple connections per peer in libp2p-core.
Instead of trying to enforce a single connection per peer,
which involves quite a bit of additional complexity e.g.
to prioritise simultaneously opened connections and can
have other undesirable consequences [1], we now
make multiple connections per peer a feature.
The gist of these changes is as follows:
The concept of a "node" with an implicit 1-1 correspondence
to a connection has been replaced with the "first-class"
concept of a "connection". The code from `src/nodes` has moved
(with varying degrees of modification) to `src/connection`.
A `HandledNode` has become a `Connection`, a `NodeHandler` a
`ConnectionHandler`, the `CollectionStream` was the basis for
the new `connection::Pool`, and so forth.
Conceptually, a `Network` contains a `connection::Pool` which
in turn internally employs the `connection::Manager` for
handling the background `connection::manager::Task`s, one
per connection, as before. These are all considered implementation
details. On the public API, `Peer`s are managed as before through
the `Network`, except now the API has changed with the shift of focus
to (potentially multiple) connections per peer. The `NetworkEvent`s have
accordingly also undergone changes.
The Swarm APIs remain largely unchanged, except for the fact that
`inject_replaced` is no longer called. It may now practically happen
that multiple `ProtocolsHandler`s are associated with a single
`NetworkBehaviour`, one per connection. If implementations of
`NetworkBehaviour` rely somehow on communicating with exactly
one `ProtocolsHandler`, this may cause issues, but it is unlikely.
[1]: https://github.com/paritytech/substrate/issues/4272
* Fix intra-rustdoc links.
* Update core/src/connection/pool.rs
Co-Authored-By: Max Inden <mail@max-inden.de>
* Address some review feedback and fix doc links.
* Allow responses to be sent on the same connection.
* Remove unnecessary remainders of inject_replaced.
* Update swarm/src/behaviour.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Update swarm/src/lib.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Update core/src/connection/manager.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Update core/src/connection/manager.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Update core/src/connection/pool.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Incorporate more review feedback.
* Move module declaration below imports.
* Update core/src/connection/manager.rs
Co-Authored-By: Toralf Wittner <tw@dtex.org>
* Update core/src/connection/manager.rs
Co-Authored-By: Toralf Wittner <tw@dtex.org>
* Simplify as per review.
* Fix rustoc link.
* Add try_notify_handler and simplify.
* Relocate DialingConnection and DialingAttempt.
For better visibility constraints.
* Small cleanup.
* Small cleanup. More robust EstablishedConnectionIter.
* Clarify semantics of `DialingPeer::connect`.
* Don't call inject_disconnected on InvalidPeerId.
To preserve the previous behavior and ensure calls to
`inject_disconnected` are always paired with calls to
`inject_connected`.
* Provide public ConnectionId constructor.
Mainly needed for testing purposes, e.g. in substrate.
* Move the established connection limit check to the right place.
* Clean up connection error handling.
Separate connection errors into those occuring during
connection setup or upon rejecting a newly established
connection (the `PendingConnectionError`) and those
errors occurring on previously established connections,
i.e. for which a `ConnectionEstablished` event has
been emitted by the connection pool earlier.
* Revert change in log level and clarify an invariant.
* Remove inject_replaced entirely.
* Allow notifying all connection handlers.
Thereby simplify by introducing a new enum `NotifyHandler`,
used with a single constructor `NetworkBehaviourAction::NotifyHandler`.
* Finishing touches.
Small API simplifications and code deduplication.
Some more useful debug logging.
Co-authored-by: Max Inden <mail@max-inden.de>
Co-authored-by: Pierre Krieger <pierre.krieger1708@gmail.com>
Co-authored-by: Toralf Wittner <tw@dtex.org>
2020-03-04 13:49:25 +01:00
/// Substream for which a protocol has been chosen.
///
/// Implements the [`AsyncRead`](futures::io::AsyncRead) and
/// [`AsyncWrite`](futures::io::AsyncWrite) traits.
2023-05-12 08:19:23 +02:00
#[ deprecated(note = " The 'substream' terminology is deprecated. Use 'Stream' instead " ) ]
pub type NegotiatedSubstream = Stream ;
Multiple connections per peer (#1440)
* Allow multiple connections per peer in libp2p-core.
Instead of trying to enforce a single connection per peer,
which involves quite a bit of additional complexity e.g.
to prioritise simultaneously opened connections and can
have other undesirable consequences [1], we now
make multiple connections per peer a feature.
The gist of these changes is as follows:
The concept of a "node" with an implicit 1-1 correspondence
to a connection has been replaced with the "first-class"
concept of a "connection". The code from `src/nodes` has moved
(with varying degrees of modification) to `src/connection`.
A `HandledNode` has become a `Connection`, a `NodeHandler` a
`ConnectionHandler`, the `CollectionStream` was the basis for
the new `connection::Pool`, and so forth.
Conceptually, a `Network` contains a `connection::Pool` which
in turn internally employs the `connection::Manager` for
handling the background `connection::manager::Task`s, one
per connection, as before. These are all considered implementation
details. On the public API, `Peer`s are managed as before through
the `Network`, except now the API has changed with the shift of focus
to (potentially multiple) connections per peer. The `NetworkEvent`s have
accordingly also undergone changes.
The Swarm APIs remain largely unchanged, except for the fact that
`inject_replaced` is no longer called. It may now practically happen
that multiple `ProtocolsHandler`s are associated with a single
`NetworkBehaviour`, one per connection. If implementations of
`NetworkBehaviour` rely somehow on communicating with exactly
one `ProtocolsHandler`, this may cause issues, but it is unlikely.
[1]: https://github.com/paritytech/substrate/issues/4272
* Fix intra-rustdoc links.
* Update core/src/connection/pool.rs
Co-Authored-By: Max Inden <mail@max-inden.de>
* Address some review feedback and fix doc links.
* Allow responses to be sent on the same connection.
* Remove unnecessary remainders of inject_replaced.
* Update swarm/src/behaviour.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Update swarm/src/lib.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Update core/src/connection/manager.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Update core/src/connection/manager.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Update core/src/connection/pool.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Incorporate more review feedback.
* Move module declaration below imports.
* Update core/src/connection/manager.rs
Co-Authored-By: Toralf Wittner <tw@dtex.org>
* Update core/src/connection/manager.rs
Co-Authored-By: Toralf Wittner <tw@dtex.org>
* Simplify as per review.
* Fix rustoc link.
* Add try_notify_handler and simplify.
* Relocate DialingConnection and DialingAttempt.
For better visibility constraints.
* Small cleanup.
* Small cleanup. More robust EstablishedConnectionIter.
* Clarify semantics of `DialingPeer::connect`.
* Don't call inject_disconnected on InvalidPeerId.
To preserve the previous behavior and ensure calls to
`inject_disconnected` are always paired with calls to
`inject_connected`.
* Provide public ConnectionId constructor.
Mainly needed for testing purposes, e.g. in substrate.
* Move the established connection limit check to the right place.
* Clean up connection error handling.
Separate connection errors into those occuring during
connection setup or upon rejecting a newly established
connection (the `PendingConnectionError`) and those
errors occurring on previously established connections,
i.e. for which a `ConnectionEstablished` event has
been emitted by the connection pool earlier.
* Revert change in log level and clarify an invariant.
* Remove inject_replaced entirely.
* Allow notifying all connection handlers.
Thereby simplify by introducing a new enum `NotifyHandler`,
used with a single constructor `NetworkBehaviourAction::NotifyHandler`.
* Finishing touches.
Small API simplifications and code deduplication.
Some more useful debug logging.
Co-authored-by: Max Inden <mail@max-inden.de>
Co-authored-by: Pierre Krieger <pierre.krieger1708@gmail.com>
Co-authored-by: Toralf Wittner <tw@dtex.org>
2020-03-04 13:49:25 +01:00
2021-08-09 15:29:58 +02:00
/// Event generated by the [`NetworkBehaviour`] that the swarm will report back.
2023-05-14 12:58:08 +02:00
type TBehaviourOutEvent < TBehaviour > = < TBehaviour as NetworkBehaviour > ::ToSwarm ;
2021-08-09 15:29:58 +02:00
2022-02-21 13:32:24 +01:00
/// [`ConnectionHandler`] of the [`NetworkBehaviour`] for all the protocols the [`NetworkBehaviour`]
2021-08-09 15:29:58 +02:00
/// supports.
2023-05-08 10:30:29 +02:00
pub type THandler < TBehaviour > = < TBehaviour as NetworkBehaviour > ::ConnectionHandler ;
2021-08-09 15:29:58 +02:00
2022-02-21 13:32:24 +01:00
/// Custom event that can be received by the [`ConnectionHandler`] of the
2021-08-09 15:29:58 +02:00
/// [`NetworkBehaviour`].
2023-05-14 12:58:08 +02:00
pub type THandlerInEvent < TBehaviour > = < THandler < TBehaviour > as ConnectionHandler > ::FromBehaviour ;
2021-08-09 15:29:58 +02:00
2022-02-21 13:32:24 +01:00
/// Custom event that can be produced by the [`ConnectionHandler`] of the [`NetworkBehaviour`].
2023-05-14 12:58:08 +02:00
pub type THandlerOutEvent < TBehaviour > = < THandler < TBehaviour > as ConnectionHandler > ::ToBehaviour ;
2021-08-09 15:29:58 +02:00
2022-02-21 13:32:24 +01:00
/// Custom error that can be produced by the [`ConnectionHandler`] of the [`NetworkBehaviour`].
2023-02-24 10:43:33 +11:00
pub type THandlerErr < TBehaviour > = < THandler < TBehaviour > as ConnectionHandler > ::Error ;
2021-08-09 15:29:58 +02:00
2020-01-07 11:57:00 +01:00
/// Event generated by the `Swarm`.
#[ derive(Debug) ]
2021-08-09 15:29:58 +02:00
pub enum SwarmEvent < TBehaviourOutEvent , THandlerErr > {
2020-01-07 11:57:00 +01:00
/// Event generated by the `NetworkBehaviour`.
2021-08-09 15:29:58 +02:00
Behaviour ( TBehaviourOutEvent ) ,
2020-03-26 18:02:37 +01:00
/// A connection to the given peer has been opened.
ConnectionEstablished {
/// Identity of the peer that we have connected to.
peer_id : PeerId ,
2023-05-17 07:19:53 +02:00
/// Identifier of the connection.
connection_id : ConnectionId ,
2020-03-26 18:02:37 +01:00
/// Endpoint of the connection that has been opened.
endpoint : ConnectedPoint ,
/// Number of established connections to this peer, including the one that has just been
/// opened.
num_established : NonZeroU32 ,
2021-10-14 18:05:07 +02:00
/// [`Some`] when the new connection is an outgoing connection.
/// Addresses are dialed concurrently. Contains the addresses and errors
/// of dial attempts that failed before the one successful dial.
concurrent_dial_errors : Option < Vec < ( Multiaddr , TransportError < io ::Error > ) > > ,
time to establish connection (#3134)
Implementing #2745 , adding a metric to break down time from connection pending to connection established, per protocol stack.
````
$curl -s http://127.0.0.1:42183/metrics | grep nt_duration
# HELP libp2p_swarm_connection_establishment_duration Time it took (locally) to finish establishing connections.
# TYPE libp2p_swarm_connection_establishment_duration histogram
libp2p_swarm_connection_establishment_duration_sum{role="Listener",protocols="/ip4/tcp"} 0.007
libp2p_swarm_connection_establishment_duration_count{role="Listener",protocols="/ip4/tcp"} 1
libp2p_swarm_connection_establishment_duration_bucket{role="Listener",protocols="/ip4/tcp",le="0.001"} 0
libp2p_swarm_connection_establishment_duration_bucket{role="Listener",protocols="/ip4/tcp",le="0.002"} 0
libp2p_swarm_connection_establishment_duration_bucket{role="Listener",protocols="/ip4/tcp",le="0.004"} 0
libp2p_swarm_connection_establishment_duration_bucket{role="Listener",protocols="/ip4/tcp",le="0.008"} 1
libp2p_swarm_connection_establishment_duration_bucket{role="Listener",protocols="/ip4/tcp",le="0.016"} 1
libp2p_swarm_connection_establishment_duration_bucket{role="Listener",protocols="/ip4/tcp",le="0.032"} 1
libp2p_swarm_connection_establishment_duration_bucket{role="Listener",protocols="/ip4/tcp",le="0.064"} 1
libp2p_swarm_connection_establishment_duration_bucket{role="Listener",protocols="/ip4/tcp",le="0.128"} 1
libp2p_swarm_connection_establishment_duration_bucket{role="Listener",protocols="/ip4/tcp",le="0.256"} 1
libp2p_swarm_connection_establishment_duration_bucket{role="Listener",protocols="/ip4/tcp",le="0.512"} 1
libp2p_swarm_connection_establishment_duration_bucket{role="Listener",protocols="/ip4/tcp",le="+Inf"} 1
lbl@chomp:~lbl
$curl -s http://127.0.0.1:34283/metrics | grep nt_duration
# HELP libp2p_swarm_connection_establishment_duration Time it took (locally) to finish establishing connections.
# TYPE libp2p_swarm_connection_establishment_duration histogram
libp2p_swarm_connection_establishment_duration_sum{role="Dialer",protocols="/ip4/tcp"} 0.009
libp2p_swarm_connection_establishment_duration_count{role="Dialer",protocols="/ip4/tcp"} 1
libp2p_swarm_connection_establishment_duration_bucket{role="Dialer",protocols="/ip4/tcp",le="0.001"} 0
libp2p_swarm_connection_establishment_duration_bucket{role="Dialer",protocols="/ip4/tcp",le="0.002"} 0
libp2p_swarm_connection_establishment_duration_bucket{role="Dialer",protocols="/ip4/tcp",le="0.004"} 0
libp2p_swarm_connection_establishment_duration_bucket{role="Dialer",protocols="/ip4/tcp",le="0.008"} 0
libp2p_swarm_connection_establishment_duration_bucket{role="Dialer",protocols="/ip4/tcp",le="0.016"} 1
libp2p_swarm_connection_establishment_duration_bucket{role="Dialer",protocols="/ip4/tcp",le="0.032"} 1
libp2p_swarm_connection_establishment_duration_bucket{role="Dialer",protocols="/ip4/tcp",le="0.064"} 1
libp2p_swarm_connection_establishment_duration_bucket{role="Dialer",protocols="/ip4/tcp",le="0.128"} 1
libp2p_swarm_connection_establishment_duration_bucket{role="Dialer",protocols="/ip4/tcp",le="0.256"} 1
libp2p_swarm_connection_establishment_duration_bucket{role="Dialer",protocols="/ip4/tcp",le="0.512"} 1
libp2p_swarm_connection_establishment_duration_bucket{role="Dialer",protocols="/ip4/tcp",le="+Inf"} 1
````
2022-12-12 09:40:36 -05:00
/// How long it took to establish this connection
established_in : std ::time ::Duration ,
2020-03-26 18:02:37 +01:00
} ,
2020-08-04 11:30:09 +02:00
/// A connection with the given peer has been closed,
/// possibly as a result of an error.
2020-03-26 18:02:37 +01:00
ConnectionClosed {
/// Identity of the peer that we have connected to.
peer_id : PeerId ,
2023-05-17 07:19:53 +02:00
/// Identifier of the connection.
connection_id : ConnectionId ,
2020-03-26 18:02:37 +01:00
/// Endpoint of the connection that has been closed.
endpoint : ConnectedPoint ,
/// Number of other remaining connections to this same peer.
num_established : u32 ,
2020-08-04 11:30:09 +02:00
/// Reason for the disconnection, if it was not a successful
/// active close.
2022-02-18 11:32:58 +01:00
cause : Option < ConnectionError < THandlerErr > > ,
2020-03-26 18:02:37 +01:00
} ,
/// A new connection arrived on a listener and is in the process of protocol negotiation.
///
2023-05-08 07:50:27 +02:00
/// A corresponding [`ConnectionEstablished`](SwarmEvent::ConnectionEstablished) or
2020-03-26 18:02:37 +01:00
/// [`IncomingConnectionError`](SwarmEvent::IncomingConnectionError) event will later be
/// generated for this connection.
IncomingConnection {
2023-05-17 07:19:53 +02:00
/// Identifier of the connection.
connection_id : ConnectionId ,
2020-03-26 18:02:37 +01:00
/// Local connection address.
/// This address has been earlier reported with a [`NewListenAddr`](SwarmEvent::NewListenAddr)
/// event.
local_addr : Multiaddr ,
/// Address used to send back data to the remote.
send_back_addr : Multiaddr ,
} ,
2023-01-27 10:23:55 +11:00
/// An error happened on an inbound connection during its initial handshake.
2020-03-26 18:02:37 +01:00
///
/// This can include, for example, an error during the handshake of the encryption layer, or
/// the connection unexpectedly closed.
IncomingConnectionError {
2023-05-17 07:19:53 +02:00
/// Identifier of the connection.
connection_id : ConnectionId ,
2020-03-26 18:02:37 +01:00
/// Local connection address.
/// This address has been earlier reported with a [`NewListenAddr`](SwarmEvent::NewListenAddr)
/// event.
local_addr : Multiaddr ,
/// Address used to send back data to the remote.
send_back_addr : Multiaddr ,
/// The error that happened.
2023-01-27 10:23:55 +11:00
error : ListenError ,
2021-10-14 18:05:07 +02:00
} ,
2023-01-27 10:23:55 +11:00
/// An error happened on an outbound connection.
2021-10-14 18:05:07 +02:00
OutgoingConnectionError {
2023-05-17 07:19:53 +02:00
/// Identifier of the connection.
connection_id : ConnectionId ,
2021-10-14 18:05:07 +02:00
/// If known, [`PeerId`] of the peer we tried to reach.
peer_id : Option < PeerId > ,
/// Error that has been encountered.
error : DialError ,
2020-03-26 18:02:37 +01:00
} ,
/// One of our listeners has reported a new local listening address.
2021-07-08 11:41:33 +02:00
NewListenAddr {
/// The listener that is listening on the new address.
listener_id : ListenerId ,
/// The new address that is being listened on.
address : Multiaddr ,
} ,
2020-03-26 18:02:37 +01:00
/// One of our listeners has reported the expiration of a listening address.
2021-07-08 11:41:33 +02:00
ExpiredListenAddr {
/// The listener that is no longer listening on the address.
listener_id : ListenerId ,
/// The expired address.
address : Multiaddr ,
} ,
2020-03-26 18:02:37 +01:00
/// One of the listeners gracefully closed.
ListenerClosed {
2021-07-08 11:41:33 +02:00
/// The listener that closed.
listener_id : ListenerId ,
2020-03-26 18:02:37 +01:00
/// The addresses that the listener was listening on. These addresses are now considered
/// expired, similar to if a [`ExpiredListenAddr`](SwarmEvent::ExpiredListenAddr) event
/// has been generated for each of them.
addresses : Vec < Multiaddr > ,
/// Reason for the closure. Contains `Ok(())` if the stream produced `None`, or `Err`
/// if the stream produced an error.
reason : Result < ( ) , io ::Error > ,
} ,
/// One of the listeners reported a non-fatal error.
ListenerError {
2021-07-08 11:41:33 +02:00
/// The listener that errored.
listener_id : ListenerId ,
2020-03-26 18:02:37 +01:00
/// The listener error.
error : io ::Error ,
2020-01-07 11:57:00 +01:00
} ,
2022-01-14 13:17:45 +01:00
/// A new dialing attempt has been initiated by the [`NetworkBehaviour`]
/// implementation.
2020-03-31 15:41:13 +02:00
///
2021-10-14 18:05:07 +02:00
/// A [`ConnectionEstablished`](SwarmEvent::ConnectionEstablished) event is
/// reported if the dialing attempt succeeds, otherwise a
/// [`OutgoingConnectionError`](SwarmEvent::OutgoingConnectionError) event
2022-01-14 13:17:45 +01:00
/// is reported.
2023-05-17 07:19:53 +02:00
Dialing {
/// Identity of the peer that we are connecting to.
peer_id : Option < PeerId > ,
/// Identifier of the connection.
connection_id : ConnectionId ,
} ,
2020-01-07 11:57:00 +01:00
}
2023-03-08 20:36:35 +11:00
impl < TBehaviourOutEvent , THandlerErr > SwarmEvent < TBehaviourOutEvent , THandlerErr > {
/// Extract the `TBehaviourOutEvent` from this [`SwarmEvent`] in case it is the `Behaviour` variant, otherwise fail.
#[ allow(clippy::result_large_err) ]
pub fn try_into_behaviour_event ( self ) -> Result < TBehaviourOutEvent , Self > {
match self {
SwarmEvent ::Behaviour ( inner ) = > Ok ( inner ) ,
other = > Err ( other ) ,
}
}
}
2019-04-07 18:34:14 -03:00
/// Contains the state of the network, plus the way it should behave.
2021-06-14 20:41:44 +02:00
///
2021-08-09 15:29:58 +02:00
/// Note: Needs to be polled via `<Swarm as Stream>` in order to make
2021-06-14 20:41:44 +02:00
/// progress.
2021-08-09 15:29:58 +02:00
pub struct Swarm < TBehaviour >
Multiple connections per peer (#1440)
* Allow multiple connections per peer in libp2p-core.
Instead of trying to enforce a single connection per peer,
which involves quite a bit of additional complexity e.g.
to prioritise simultaneously opened connections and can
have other undesirable consequences [1], we now
make multiple connections per peer a feature.
The gist of these changes is as follows:
The concept of a "node" with an implicit 1-1 correspondence
to a connection has been replaced with the "first-class"
concept of a "connection". The code from `src/nodes` has moved
(with varying degrees of modification) to `src/connection`.
A `HandledNode` has become a `Connection`, a `NodeHandler` a
`ConnectionHandler`, the `CollectionStream` was the basis for
the new `connection::Pool`, and so forth.
Conceptually, a `Network` contains a `connection::Pool` which
in turn internally employs the `connection::Manager` for
handling the background `connection::manager::Task`s, one
per connection, as before. These are all considered implementation
details. On the public API, `Peer`s are managed as before through
the `Network`, except now the API has changed with the shift of focus
to (potentially multiple) connections per peer. The `NetworkEvent`s have
accordingly also undergone changes.
The Swarm APIs remain largely unchanged, except for the fact that
`inject_replaced` is no longer called. It may now practically happen
that multiple `ProtocolsHandler`s are associated with a single
`NetworkBehaviour`, one per connection. If implementations of
`NetworkBehaviour` rely somehow on communicating with exactly
one `ProtocolsHandler`, this may cause issues, but it is unlikely.
[1]: https://github.com/paritytech/substrate/issues/4272
* Fix intra-rustdoc links.
* Update core/src/connection/pool.rs
Co-Authored-By: Max Inden <mail@max-inden.de>
* Address some review feedback and fix doc links.
* Allow responses to be sent on the same connection.
* Remove unnecessary remainders of inject_replaced.
* Update swarm/src/behaviour.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Update swarm/src/lib.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Update core/src/connection/manager.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Update core/src/connection/manager.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Update core/src/connection/pool.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Incorporate more review feedback.
* Move module declaration below imports.
* Update core/src/connection/manager.rs
Co-Authored-By: Toralf Wittner <tw@dtex.org>
* Update core/src/connection/manager.rs
Co-Authored-By: Toralf Wittner <tw@dtex.org>
* Simplify as per review.
* Fix rustoc link.
* Add try_notify_handler and simplify.
* Relocate DialingConnection and DialingAttempt.
For better visibility constraints.
* Small cleanup.
* Small cleanup. More robust EstablishedConnectionIter.
* Clarify semantics of `DialingPeer::connect`.
* Don't call inject_disconnected on InvalidPeerId.
To preserve the previous behavior and ensure calls to
`inject_disconnected` are always paired with calls to
`inject_connected`.
* Provide public ConnectionId constructor.
Mainly needed for testing purposes, e.g. in substrate.
* Move the established connection limit check to the right place.
* Clean up connection error handling.
Separate connection errors into those occuring during
connection setup or upon rejecting a newly established
connection (the `PendingConnectionError`) and those
errors occurring on previously established connections,
i.e. for which a `ConnectionEstablished` event has
been emitted by the connection pool earlier.
* Revert change in log level and clarify an invariant.
* Remove inject_replaced entirely.
* Allow notifying all connection handlers.
Thereby simplify by introducing a new enum `NotifyHandler`,
used with a single constructor `NetworkBehaviourAction::NotifyHandler`.
* Finishing touches.
Small API simplifications and code deduplication.
Some more useful debug logging.
Co-authored-by: Max Inden <mail@max-inden.de>
Co-authored-by: Pierre Krieger <pierre.krieger1708@gmail.com>
Co-authored-by: Toralf Wittner <tw@dtex.org>
2020-03-04 13:49:25 +01:00
where
2021-08-09 15:29:58 +02:00
TBehaviour : NetworkBehaviour ,
Multiple connections per peer (#1440)
* Allow multiple connections per peer in libp2p-core.
Instead of trying to enforce a single connection per peer,
which involves quite a bit of additional complexity e.g.
to prioritise simultaneously opened connections and can
have other undesirable consequences [1], we now
make multiple connections per peer a feature.
The gist of these changes is as follows:
The concept of a "node" with an implicit 1-1 correspondence
to a connection has been replaced with the "first-class"
concept of a "connection". The code from `src/nodes` has moved
(with varying degrees of modification) to `src/connection`.
A `HandledNode` has become a `Connection`, a `NodeHandler` a
`ConnectionHandler`, the `CollectionStream` was the basis for
the new `connection::Pool`, and so forth.
Conceptually, a `Network` contains a `connection::Pool` which
in turn internally employs the `connection::Manager` for
handling the background `connection::manager::Task`s, one
per connection, as before. These are all considered implementation
details. On the public API, `Peer`s are managed as before through
the `Network`, except now the API has changed with the shift of focus
to (potentially multiple) connections per peer. The `NetworkEvent`s have
accordingly also undergone changes.
The Swarm APIs remain largely unchanged, except for the fact that
`inject_replaced` is no longer called. It may now practically happen
that multiple `ProtocolsHandler`s are associated with a single
`NetworkBehaviour`, one per connection. If implementations of
`NetworkBehaviour` rely somehow on communicating with exactly
one `ProtocolsHandler`, this may cause issues, but it is unlikely.
[1]: https://github.com/paritytech/substrate/issues/4272
* Fix intra-rustdoc links.
* Update core/src/connection/pool.rs
Co-Authored-By: Max Inden <mail@max-inden.de>
* Address some review feedback and fix doc links.
* Allow responses to be sent on the same connection.
* Remove unnecessary remainders of inject_replaced.
* Update swarm/src/behaviour.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Update swarm/src/lib.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Update core/src/connection/manager.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Update core/src/connection/manager.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Update core/src/connection/pool.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Incorporate more review feedback.
* Move module declaration below imports.
* Update core/src/connection/manager.rs
Co-Authored-By: Toralf Wittner <tw@dtex.org>
* Update core/src/connection/manager.rs
Co-Authored-By: Toralf Wittner <tw@dtex.org>
* Simplify as per review.
* Fix rustoc link.
* Add try_notify_handler and simplify.
* Relocate DialingConnection and DialingAttempt.
For better visibility constraints.
* Small cleanup.
* Small cleanup. More robust EstablishedConnectionIter.
* Clarify semantics of `DialingPeer::connect`.
* Don't call inject_disconnected on InvalidPeerId.
To preserve the previous behavior and ensure calls to
`inject_disconnected` are always paired with calls to
`inject_connected`.
* Provide public ConnectionId constructor.
Mainly needed for testing purposes, e.g. in substrate.
* Move the established connection limit check to the right place.
* Clean up connection error handling.
Separate connection errors into those occuring during
connection setup or upon rejecting a newly established
connection (the `PendingConnectionError`) and those
errors occurring on previously established connections,
i.e. for which a `ConnectionEstablished` event has
been emitted by the connection pool earlier.
* Revert change in log level and clarify an invariant.
* Remove inject_replaced entirely.
* Allow notifying all connection handlers.
Thereby simplify by introducing a new enum `NotifyHandler`,
used with a single constructor `NetworkBehaviourAction::NotifyHandler`.
* Finishing touches.
Small API simplifications and code deduplication.
Some more useful debug logging.
Co-authored-by: Max Inden <mail@max-inden.de>
Co-authored-by: Pierre Krieger <pierre.krieger1708@gmail.com>
Co-authored-by: Toralf Wittner <tw@dtex.org>
2020-03-04 13:49:25 +01:00
{
2022-07-04 04:16:57 +02:00
/// [`Transport`] for dialing remote peers and listening for incoming connection.
transport : transport ::Boxed < ( PeerId , StreamMuxerBox ) > ,
2022-02-13 21:57:38 +01:00
/// The nodes currently active.
2022-12-23 11:13:34 +11:00
pool : Pool < THandler < TBehaviour > > ,
2019-04-04 12:25:42 -03:00
2022-02-13 21:57:38 +01:00
/// The local peer ID.
local_peer_id : PeerId ,
2019-04-04 12:25:42 -03:00
/// Handles which nodes to connect to and how to handle the events sent back by the protocol
/// handlers.
behaviour : TBehaviour ,
/// List of protocols that the behaviour says it supports.
supported_protocols : SmallVec < [ Vec < u8 > ; 16 ] > ,
2023-05-24 09:52:16 +02:00
confirmed_external_addr : HashSet < Multiaddr > ,
2022-07-04 04:16:57 +02:00
/// Multiaddresses that our listeners are listening on,
listened_addrs : HashMap < ListenerId , SmallVec < [ Multiaddr ; 1 ] > > ,
2019-04-04 12:25:42 -03:00
Multiple connections per peer (#1440)
* Allow multiple connections per peer in libp2p-core.
Instead of trying to enforce a single connection per peer,
which involves quite a bit of additional complexity e.g.
to prioritise simultaneously opened connections and can
have other undesirable consequences [1], we now
make multiple connections per peer a feature.
The gist of these changes is as follows:
The concept of a "node" with an implicit 1-1 correspondence
to a connection has been replaced with the "first-class"
concept of a "connection". The code from `src/nodes` has moved
(with varying degrees of modification) to `src/connection`.
A `HandledNode` has become a `Connection`, a `NodeHandler` a
`ConnectionHandler`, the `CollectionStream` was the basis for
the new `connection::Pool`, and so forth.
Conceptually, a `Network` contains a `connection::Pool` which
in turn internally employs the `connection::Manager` for
handling the background `connection::manager::Task`s, one
per connection, as before. These are all considered implementation
details. On the public API, `Peer`s are managed as before through
the `Network`, except now the API has changed with the shift of focus
to (potentially multiple) connections per peer. The `NetworkEvent`s have
accordingly also undergone changes.
The Swarm APIs remain largely unchanged, except for the fact that
`inject_replaced` is no longer called. It may now practically happen
that multiple `ProtocolsHandler`s are associated with a single
`NetworkBehaviour`, one per connection. If implementations of
`NetworkBehaviour` rely somehow on communicating with exactly
one `ProtocolsHandler`, this may cause issues, but it is unlikely.
[1]: https://github.com/paritytech/substrate/issues/4272
* Fix intra-rustdoc links.
* Update core/src/connection/pool.rs
Co-Authored-By: Max Inden <mail@max-inden.de>
* Address some review feedback and fix doc links.
* Allow responses to be sent on the same connection.
* Remove unnecessary remainders of inject_replaced.
* Update swarm/src/behaviour.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Update swarm/src/lib.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Update core/src/connection/manager.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Update core/src/connection/manager.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Update core/src/connection/pool.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Incorporate more review feedback.
* Move module declaration below imports.
* Update core/src/connection/manager.rs
Co-Authored-By: Toralf Wittner <tw@dtex.org>
* Update core/src/connection/manager.rs
Co-Authored-By: Toralf Wittner <tw@dtex.org>
* Simplify as per review.
* Fix rustoc link.
* Add try_notify_handler and simplify.
* Relocate DialingConnection and DialingAttempt.
For better visibility constraints.
* Small cleanup.
* Small cleanup. More robust EstablishedConnectionIter.
* Clarify semantics of `DialingPeer::connect`.
* Don't call inject_disconnected on InvalidPeerId.
To preserve the previous behavior and ensure calls to
`inject_disconnected` are always paired with calls to
`inject_connected`.
* Provide public ConnectionId constructor.
Mainly needed for testing purposes, e.g. in substrate.
* Move the established connection limit check to the right place.
* Clean up connection error handling.
Separate connection errors into those occuring during
connection setup or upon rejecting a newly established
connection (the `PendingConnectionError`) and those
errors occurring on previously established connections,
i.e. for which a `ConnectionEstablished` event has
been emitted by the connection pool earlier.
* Revert change in log level and clarify an invariant.
* Remove inject_replaced entirely.
* Allow notifying all connection handlers.
Thereby simplify by introducing a new enum `NotifyHandler`,
used with a single constructor `NetworkBehaviourAction::NotifyHandler`.
* Finishing touches.
Small API simplifications and code deduplication.
Some more useful debug logging.
Co-authored-by: Max Inden <mail@max-inden.de>
Co-authored-by: Pierre Krieger <pierre.krieger1708@gmail.com>
Co-authored-by: Toralf Wittner <tw@dtex.org>
2020-03-04 13:49:25 +01:00
/// Pending event to be delivered to connection handlers
/// (or dropped if the peer disconnected) before the `behaviour`
/// can be polled again.
2021-08-09 15:29:58 +02:00
pending_event : Option < ( PeerId , PendingNotifyHandler , THandlerInEvent < TBehaviour > ) > ,
2019-04-04 12:25:42 -03:00
}
2021-08-09 15:29:58 +02:00
impl < TBehaviour > Unpin for Swarm < TBehaviour > where TBehaviour : NetworkBehaviour { }
2019-09-16 11:08:44 +02:00
2021-08-09 15:29:58 +02:00
impl < TBehaviour > Swarm < TBehaviour >
where
TBehaviour : NetworkBehaviour ,
2019-04-04 12:25:42 -03:00
{
2022-02-13 21:57:38 +01:00
/// Returns information about the connections underlying the [`Swarm`].
2021-03-18 14:55:33 +01:00
pub fn network_info ( & self ) -> NetworkInfo {
2022-02-13 21:57:38 +01:00
let num_peers = self . pool . num_peers ( ) ;
let connection_counters = self . pool . counters ( ) . clone ( ) ;
NetworkInfo {
num_peers ,
connection_counters ,
}
Multiple connections per peer (#1440)
* Allow multiple connections per peer in libp2p-core.
Instead of trying to enforce a single connection per peer,
which involves quite a bit of additional complexity e.g.
to prioritise simultaneously opened connections and can
have other undesirable consequences [1], we now
make multiple connections per peer a feature.
The gist of these changes is as follows:
The concept of a "node" with an implicit 1-1 correspondence
to a connection has been replaced with the "first-class"
concept of a "connection". The code from `src/nodes` has moved
(with varying degrees of modification) to `src/connection`.
A `HandledNode` has become a `Connection`, a `NodeHandler` a
`ConnectionHandler`, the `CollectionStream` was the basis for
the new `connection::Pool`, and so forth.
Conceptually, a `Network` contains a `connection::Pool` which
in turn internally employs the `connection::Manager` for
handling the background `connection::manager::Task`s, one
per connection, as before. These are all considered implementation
details. On the public API, `Peer`s are managed as before through
the `Network`, except now the API has changed with the shift of focus
to (potentially multiple) connections per peer. The `NetworkEvent`s have
accordingly also undergone changes.
The Swarm APIs remain largely unchanged, except for the fact that
`inject_replaced` is no longer called. It may now practically happen
that multiple `ProtocolsHandler`s are associated with a single
`NetworkBehaviour`, one per connection. If implementations of
`NetworkBehaviour` rely somehow on communicating with exactly
one `ProtocolsHandler`, this may cause issues, but it is unlikely.
[1]: https://github.com/paritytech/substrate/issues/4272
* Fix intra-rustdoc links.
* Update core/src/connection/pool.rs
Co-Authored-By: Max Inden <mail@max-inden.de>
* Address some review feedback and fix doc links.
* Allow responses to be sent on the same connection.
* Remove unnecessary remainders of inject_replaced.
* Update swarm/src/behaviour.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Update swarm/src/lib.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Update core/src/connection/manager.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Update core/src/connection/manager.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Update core/src/connection/pool.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Incorporate more review feedback.
* Move module declaration below imports.
* Update core/src/connection/manager.rs
Co-Authored-By: Toralf Wittner <tw@dtex.org>
* Update core/src/connection/manager.rs
Co-Authored-By: Toralf Wittner <tw@dtex.org>
* Simplify as per review.
* Fix rustoc link.
* Add try_notify_handler and simplify.
* Relocate DialingConnection and DialingAttempt.
For better visibility constraints.
* Small cleanup.
* Small cleanup. More robust EstablishedConnectionIter.
* Clarify semantics of `DialingPeer::connect`.
* Don't call inject_disconnected on InvalidPeerId.
To preserve the previous behavior and ensure calls to
`inject_disconnected` are always paired with calls to
`inject_connected`.
* Provide public ConnectionId constructor.
Mainly needed for testing purposes, e.g. in substrate.
* Move the established connection limit check to the right place.
* Clean up connection error handling.
Separate connection errors into those occuring during
connection setup or upon rejecting a newly established
connection (the `PendingConnectionError`) and those
errors occurring on previously established connections,
i.e. for which a `ConnectionEstablished` event has
been emitted by the connection pool earlier.
* Revert change in log level and clarify an invariant.
* Remove inject_replaced entirely.
* Allow notifying all connection handlers.
Thereby simplify by introducing a new enum `NotifyHandler`,
used with a single constructor `NetworkBehaviourAction::NotifyHandler`.
* Finishing touches.
Small API simplifications and code deduplication.
Some more useful debug logging.
Co-authored-by: Max Inden <mail@max-inden.de>
Co-authored-by: Pierre Krieger <pierre.krieger1708@gmail.com>
Co-authored-by: Toralf Wittner <tw@dtex.org>
2020-03-04 13:49:25 +01:00
}
2019-04-04 12:25:42 -03:00
/// Starts listening on the given address.
/// Returns an error if the address is not supported.
2021-07-08 11:41:33 +02:00
///
/// Listeners report their new listening addresses as [`SwarmEvent::NewListenAddr`].
/// Depending on the underlying transport, one listener may have multiple listening addresses.
2021-03-18 14:55:33 +01:00
pub fn listen_on ( & mut self , addr : Multiaddr ) -> Result < ListenerId , TransportError < io ::Error > > {
2023-06-08 21:34:33 -04:00
let opts = ListenOpts ::new ( addr ) ;
let id = opts . listener_id ( ) ;
self . add_listener ( opts ) ? ;
2021-03-24 17:21:53 +01:00
Ok ( id )
2019-04-04 12:25:42 -03:00
}
2019-08-13 15:41:12 +02:00
/// Remove some listener.
2020-01-14 12:03:10 +01:00
///
2021-10-11 22:38:55 +02:00
/// Returns `true` if there was a listener with this ID, `false`
/// otherwise.
2022-07-04 04:16:57 +02:00
pub fn remove_listener ( & mut self , listener_id : ListenerId ) -> bool {
self . transport . remove_listener ( listener_id )
2019-08-13 15:41:12 +02:00
}
2021-11-15 14:17:23 +01:00
/// Dial a known or unknown peer.
///
/// See also [`DialOpts`].
///
/// ```
2023-03-13 20:53:14 +01:00
/// # use libp2p_swarm::SwarmBuilder;
2021-11-15 14:17:23 +01:00
/// # use libp2p_swarm::dial_opts::{DialOpts, PeerCondition};
2023-06-06 21:22:50 +02:00
/// # use libp2p_core::{Multiaddr, Transport};
2021-11-15 14:17:23 +01:00
/// # use libp2p_core::transport::dummy::DummyTransport;
2022-10-06 03:50:11 +11:00
/// # use libp2p_swarm::dummy;
2023-06-06 21:22:50 +02:00
/// # use libp2p_identity::PeerId;
2021-11-15 14:17:23 +01:00
/// #
2023-03-13 20:53:14 +01:00
/// let mut swarm = SwarmBuilder::without_executor(
2023-01-12 11:21:02 +00:00
/// DummyTransport::new().boxed(),
/// dummy::Behaviour,
/// PeerId::random(),
2023-03-13 20:53:14 +01:00
/// ).build();
2021-11-15 14:17:23 +01:00
///
/// // Dial a known peer.
/// swarm.dial(PeerId::random());
///
/// // Dial an unknown peer.
/// swarm.dial("/ip6/::1/tcp/12345".parse::<Multiaddr>().unwrap());
/// ```
pub fn dial ( & mut self , opts : impl Into < DialOpts > ) -> Result < ( ) , DialError > {
2023-02-14 14:09:29 +13:00
let dial_opts = opts . into ( ) ;
2021-08-31 17:00:51 +02:00
2023-06-08 03:38:18 +02:00
let peer_id = dial_opts . get_peer_id ( ) ;
2022-12-23 03:44:58 +11:00
let condition = dial_opts . peer_condition ( ) ;
2023-02-14 14:09:29 +13:00
let connection_id = dial_opts . connection_id ( ) ;
2022-12-23 03:44:58 +11:00
let should_dial = match ( condition , peer_id ) {
( PeerCondition ::Always , _ ) = > true ,
( PeerCondition ::Disconnected , None ) = > true ,
( PeerCondition ::NotDialing , None ) = > true ,
( PeerCondition ::Disconnected , Some ( peer_id ) ) = > ! self . pool . is_connected ( peer_id ) ,
( PeerCondition ::NotDialing , Some ( peer_id ) ) = > ! self . pool . is_dialing ( peer_id ) ,
} ;
2021-11-15 14:17:23 +01:00
2022-12-23 03:44:58 +11:00
if ! should_dial {
let e = DialError ::DialPeerConditionFalse ( condition ) ;
2021-11-15 14:17:23 +01:00
2023-01-12 11:21:02 +00:00
self . behaviour
. on_swarm_event ( FromSwarm ::DialFailure ( DialFailure {
peer_id ,
error : & e ,
2023-02-14 14:09:29 +13:00
connection_id ,
2023-01-12 11:21:02 +00:00
} ) ) ;
2022-02-13 21:57:38 +01:00
2022-12-23 03:44:58 +11:00
return Err ( e ) ;
}
2021-11-15 14:17:23 +01:00
2022-12-23 03:44:58 +11:00
let addresses = {
2023-02-24 10:43:33 +11:00
let mut addresses_from_opts = dial_opts . get_addresses ( ) ;
2022-02-13 21:57:38 +01:00
2023-02-24 10:43:33 +11:00
match self . behaviour . handle_pending_outbound_connection (
connection_id ,
peer_id ,
addresses_from_opts . as_slice ( ) ,
dial_opts . role_override ( ) ,
) {
Ok ( addresses ) = > {
if dial_opts . extend_addresses_through_behaviour ( ) {
addresses_from_opts . extend ( addresses )
} else {
let num_addresses = addresses . len ( ) ;
if num_addresses > 0 {
log ::debug! ( " discarding {num_addresses} addresses from `NetworkBehaviour` because `DialOpts::extend_addresses_through_behaviour is `false` for connection {connection_id:?} " )
}
}
}
Err ( cause ) = > {
let error = DialError ::Denied { cause } ;
self . behaviour
. on_swarm_event ( FromSwarm ::DialFailure ( DialFailure {
peer_id ,
error : & error ,
connection_id ,
} ) ) ;
return Err ( error ) ;
2021-11-15 14:17:23 +01:00
}
2022-12-23 03:44:58 +11:00
}
2022-01-13 18:07:07 +01:00
2022-12-23 03:44:58 +11:00
let mut unique_addresses = HashSet ::new ( ) ;
2023-02-24 10:43:33 +11:00
addresses_from_opts . retain ( | addr | {
2022-12-23 03:44:58 +11:00
! self . listened_addrs . values ( ) . flatten ( ) . any ( | a | a = = addr )
& & unique_addresses . insert ( addr . clone ( ) )
} ) ;
2023-02-24 10:43:33 +11:00
if addresses_from_opts . is_empty ( ) {
2022-12-23 03:44:58 +11:00
let error = DialError ::NoAddresses ;
2023-01-12 11:21:02 +00:00
self . behaviour
. on_swarm_event ( FromSwarm ::DialFailure ( DialFailure {
peer_id ,
error : & error ,
2023-02-14 14:09:29 +13:00
connection_id ,
2023-01-12 11:21:02 +00:00
} ) ) ;
2022-12-23 03:44:58 +11:00
return Err ( error ) ;
2022-02-13 21:57:38 +01:00
} ;
2020-05-12 13:10:18 +02:00
2023-02-24 10:43:33 +11:00
addresses_from_opts
2022-12-23 03:44:58 +11:00
} ;
2022-04-06 20:23:16 +02:00
let dials = addresses
2022-12-23 03:44:58 +11:00
. into_iter ( )
2022-04-06 20:23:16 +02:00
. map ( | a | match p2p_addr ( peer_id , a ) {
Ok ( address ) = > {
2022-12-23 03:44:58 +11:00
let dial = match dial_opts . role_override ( ) {
2022-07-04 04:16:57 +02:00
Endpoint ::Dialer = > self . transport . dial ( address . clone ( ) ) ,
Endpoint ::Listener = > self . transport . dial_as_listener ( address . clone ( ) ) ,
2022-04-06 20:23:16 +02:00
} ;
match dial {
Ok ( fut ) = > fut
2022-04-19 12:13:45 +02:00
. map ( | r | ( address , r . map_err ( TransportError ::Other ) ) )
2022-04-06 20:23:16 +02:00
. boxed ( ) ,
Err ( err ) = > futures ::future ::ready ( ( address , Err ( err ) ) ) . boxed ( ) ,
}
}
Err ( address ) = > futures ::future ::ready ( (
address . clone ( ) ,
Err ( TransportError ::MultiaddrNotSupported ( address ) ) ,
) )
. boxed ( ) ,
} )
. collect ( ) ;
2023-05-08 12:54:53 +02:00
self . pool . add_outgoing (
2022-04-06 20:23:16 +02:00
dials ,
2022-02-13 21:57:38 +01:00
peer_id ,
2022-12-23 03:44:58 +11:00
dial_opts . role_override ( ) ,
dial_opts . dial_concurrency_override ( ) ,
2023-02-14 14:09:29 +13:00
connection_id ,
2023-05-08 12:54:53 +02:00
) ;
2023-01-12 11:21:02 +00:00
2023-05-08 12:54:53 +02:00
Ok ( ( ) )
2019-04-04 12:25:42 -03:00
}
/// Returns an iterator that produces the list of addresses we're listening on.
2021-03-18 14:55:33 +01:00
pub fn listeners ( & self ) -> impl Iterator < Item = & Multiaddr > {
2022-07-04 04:16:57 +02:00
self . listened_addrs . values ( ) . flatten ( )
2019-04-04 12:25:42 -03:00
}
/// Returns the peer ID of the swarm passed as parameter.
2021-03-18 14:55:33 +01:00
pub fn local_peer_id ( & self ) -> & PeerId {
2022-02-13 21:57:38 +01:00
& self . local_peer_id
2019-04-04 12:25:42 -03:00
}
2023-07-07 04:38:54 +02:00
/// List all **confirmed** external address for the local node.
2023-05-24 09:52:16 +02:00
pub fn external_addresses ( & self ) -> impl Iterator < Item = & Multiaddr > {
self . confirmed_external_addr . iter ( )
2020-11-18 15:52:33 +01:00
}
2023-06-08 21:34:33 -04:00
fn add_listener ( & mut self , opts : ListenOpts ) -> Result < ( ) , TransportError < io ::Error > > {
let addr = opts . address ( ) ;
let listener_id = opts . listener_id ( ) ;
if let Err ( e ) = self . transport . listen_on ( listener_id , addr . clone ( ) ) {
self . behaviour
. on_swarm_event ( FromSwarm ::ListenerError ( behaviour ::ListenerError {
listener_id ,
err : & e ,
} ) ) ;
return Err ( e ) ;
}
self . behaviour
. on_swarm_event ( FromSwarm ::NewListener ( behaviour ::NewListener {
listener_id ,
} ) ) ;
Ok ( ( ) )
}
2023-05-24 09:52:16 +02:00
/// Add a **confirmed** external address for the local node.
2020-11-18 15:52:33 +01:00
///
2023-05-24 09:52:16 +02:00
/// This function should only be called with addresses that are guaranteed to be reachable.
/// The address is broadcast to all [`NetworkBehaviour`]s via [`FromSwarm::ExternalAddrConfirmed`].
pub fn add_external_address ( & mut self , a : Multiaddr ) {
self . behaviour
. on_swarm_event ( FromSwarm ::ExternalAddrConfirmed ( ExternalAddrConfirmed {
addr : & a ,
} ) ) ;
self . confirmed_external_addr . insert ( a ) ;
2019-04-04 12:25:42 -03:00
}
2019-04-18 19:17:14 +03:00
2023-05-24 09:52:16 +02:00
/// Remove an external address for the local node.
2020-11-18 15:52:33 +01:00
///
2023-05-24 09:52:16 +02:00
/// The address is broadcast to all [`NetworkBehaviour`]s via [`FromSwarm::ExternalAddrExpired`].
pub fn remove_external_address ( & mut self , addr : & Multiaddr ) {
self . behaviour
. on_swarm_event ( FromSwarm ::ExternalAddrExpired ( ExternalAddrExpired { addr } ) ) ;
self . confirmed_external_addr . remove ( addr ) ;
2019-06-12 16:21:39 +02:00
}
2021-07-03 00:35:51 +07:00
/// Disconnects a peer by its peer ID, closing all connections to said peer.
///
/// Returns `Ok(())` if there was one or more established connections to the peer.
///
2021-08-09 15:29:58 +02:00
/// Note: Closing a connection via [`Swarm::disconnect_peer_id`] does
2022-02-21 13:32:24 +01:00
/// not inform the corresponding [`ConnectionHandler`].
/// Closing a connection via a [`ConnectionHandler`] can be done either in a
/// collaborative manner across [`ConnectionHandler`]s
/// with [`ConnectionHandler::connection_keep_alive`] or directly with
/// [`ConnectionHandlerEvent::Close`].
2022-05-03 13:11:48 +02:00
#[ allow(clippy::result_unit_err) ]
2021-07-03 00:35:51 +07:00
pub fn disconnect_peer_id ( & mut self , peer_id : PeerId ) -> Result < ( ) , ( ) > {
2022-02-15 10:19:55 +01:00
let was_connected = self . pool . is_connected ( peer_id ) ;
self . pool . disconnect ( peer_id ) ;
2021-07-03 00:35:51 +07:00
2022-02-15 10:19:55 +01:00
if was_connected {
Ok ( ( ) )
} else {
Err ( ( ) )
}
2021-07-03 00:35:51 +07:00
}
2023-05-24 14:33:18 +02:00
/// Attempt to gracefully close a connection.
///
/// Closing a connection is asynchronous but this function will return immediately.
/// A [`SwarmEvent::ConnectionClosed`] event will be emitted once the connection is actually closed.
///
/// # Returns
///
/// - `true` if the connection was established and is now being closed.
/// - `false` if the connection was not found or is no longer established.
pub fn close_connection ( & mut self , connection_id : ConnectionId ) -> bool {
if let Some ( established ) = self . pool . get_established ( connection_id ) {
established . start_close ( ) ;
return true ;
}
false
}
2022-02-13 21:57:38 +01:00
/// Checks whether there is an established connection to a peer.
2021-03-18 14:55:33 +01:00
pub fn is_connected ( & self , peer_id : & PeerId ) -> bool {
2022-02-13 21:57:38 +01:00
self . pool . is_connected ( * peer_id )
2021-03-18 14:55:33 +01:00
}
2021-12-13 17:17:12 +01:00
/// Returns the currently connected peers.
pub fn connected_peers ( & self ) -> impl Iterator < Item = & PeerId > {
2022-02-13 21:57:38 +01:00
self . pool . iter_connected ( )
2021-12-13 17:17:12 +01:00
}
2021-03-18 14:55:33 +01:00
/// Returns a reference to the provided [`NetworkBehaviour`].
pub fn behaviour ( & self ) -> & TBehaviour {
& self . behaviour
}
/// Returns a mutable reference to the provided [`NetworkBehaviour`].
pub fn behaviour_mut ( & mut self ) -> & mut TBehaviour {
& mut self . behaviour
2020-11-26 21:01:38 +01:00
}
2022-05-05 20:15:24 +02:00
fn handle_pool_event (
& mut self ,
2022-12-23 11:13:34 +11:00
event : PoolEvent < THandler < TBehaviour > > ,
2023-05-14 12:58:08 +02:00
) -> Option < SwarmEvent < TBehaviour ::ToSwarm , THandlerErr < TBehaviour > > > {
2022-05-05 20:15:24 +02:00
match event {
PoolEvent ::ConnectionEstablished {
peer_id ,
id ,
endpoint ,
2023-02-14 14:09:29 +13:00
connection ,
2022-05-05 20:15:24 +02:00
concurrent_dial_errors ,
time to establish connection (#3134)
Implementing #2745 , adding a metric to break down time from connection pending to connection established, per protocol stack.
````
$curl -s http://127.0.0.1:42183/metrics | grep nt_duration
# HELP libp2p_swarm_connection_establishment_duration Time it took (locally) to finish establishing connections.
# TYPE libp2p_swarm_connection_establishment_duration histogram
libp2p_swarm_connection_establishment_duration_sum{role="Listener",protocols="/ip4/tcp"} 0.007
libp2p_swarm_connection_establishment_duration_count{role="Listener",protocols="/ip4/tcp"} 1
libp2p_swarm_connection_establishment_duration_bucket{role="Listener",protocols="/ip4/tcp",le="0.001"} 0
libp2p_swarm_connection_establishment_duration_bucket{role="Listener",protocols="/ip4/tcp",le="0.002"} 0
libp2p_swarm_connection_establishment_duration_bucket{role="Listener",protocols="/ip4/tcp",le="0.004"} 0
libp2p_swarm_connection_establishment_duration_bucket{role="Listener",protocols="/ip4/tcp",le="0.008"} 1
libp2p_swarm_connection_establishment_duration_bucket{role="Listener",protocols="/ip4/tcp",le="0.016"} 1
libp2p_swarm_connection_establishment_duration_bucket{role="Listener",protocols="/ip4/tcp",le="0.032"} 1
libp2p_swarm_connection_establishment_duration_bucket{role="Listener",protocols="/ip4/tcp",le="0.064"} 1
libp2p_swarm_connection_establishment_duration_bucket{role="Listener",protocols="/ip4/tcp",le="0.128"} 1
libp2p_swarm_connection_establishment_duration_bucket{role="Listener",protocols="/ip4/tcp",le="0.256"} 1
libp2p_swarm_connection_establishment_duration_bucket{role="Listener",protocols="/ip4/tcp",le="0.512"} 1
libp2p_swarm_connection_establishment_duration_bucket{role="Listener",protocols="/ip4/tcp",le="+Inf"} 1
lbl@chomp:~lbl
$curl -s http://127.0.0.1:34283/metrics | grep nt_duration
# HELP libp2p_swarm_connection_establishment_duration Time it took (locally) to finish establishing connections.
# TYPE libp2p_swarm_connection_establishment_duration histogram
libp2p_swarm_connection_establishment_duration_sum{role="Dialer",protocols="/ip4/tcp"} 0.009
libp2p_swarm_connection_establishment_duration_count{role="Dialer",protocols="/ip4/tcp"} 1
libp2p_swarm_connection_establishment_duration_bucket{role="Dialer",protocols="/ip4/tcp",le="0.001"} 0
libp2p_swarm_connection_establishment_duration_bucket{role="Dialer",protocols="/ip4/tcp",le="0.002"} 0
libp2p_swarm_connection_establishment_duration_bucket{role="Dialer",protocols="/ip4/tcp",le="0.004"} 0
libp2p_swarm_connection_establishment_duration_bucket{role="Dialer",protocols="/ip4/tcp",le="0.008"} 0
libp2p_swarm_connection_establishment_duration_bucket{role="Dialer",protocols="/ip4/tcp",le="0.016"} 1
libp2p_swarm_connection_establishment_duration_bucket{role="Dialer",protocols="/ip4/tcp",le="0.032"} 1
libp2p_swarm_connection_establishment_duration_bucket{role="Dialer",protocols="/ip4/tcp",le="0.064"} 1
libp2p_swarm_connection_establishment_duration_bucket{role="Dialer",protocols="/ip4/tcp",le="0.128"} 1
libp2p_swarm_connection_establishment_duration_bucket{role="Dialer",protocols="/ip4/tcp",le="0.256"} 1
libp2p_swarm_connection_establishment_duration_bucket{role="Dialer",protocols="/ip4/tcp",le="0.512"} 1
libp2p_swarm_connection_establishment_duration_bucket{role="Dialer",protocols="/ip4/tcp",le="+Inf"} 1
````
2022-12-12 09:40:36 -05:00
established_in ,
2022-05-05 20:15:24 +02:00
} = > {
2023-02-24 10:43:33 +11:00
let handler = match endpoint . clone ( ) {
ConnectedPoint ::Dialer {
address ,
role_override ,
} = > {
match self . behaviour . handle_established_outbound_connection (
id ,
peer_id ,
& address ,
role_override ,
) {
Ok ( handler ) = > handler ,
Err ( cause ) = > {
let dial_error = DialError ::Denied { cause } ;
self . behaviour . on_swarm_event ( FromSwarm ::DialFailure (
DialFailure {
connection_id : id ,
error : & dial_error ,
peer_id : Some ( peer_id ) ,
} ,
) ) ;
return Some ( SwarmEvent ::OutgoingConnectionError {
peer_id : Some ( peer_id ) ,
2023-05-17 07:19:53 +02:00
connection_id : id ,
2023-02-24 10:43:33 +11:00
error : dial_error ,
} ) ;
}
}
}
ConnectedPoint ::Listener {
local_addr ,
send_back_addr ,
} = > {
match self . behaviour . handle_established_inbound_connection (
id ,
peer_id ,
& local_addr ,
& send_back_addr ,
) {
Ok ( handler ) = > handler ,
Err ( cause ) = > {
let listen_error = ListenError ::Denied { cause } ;
self . behaviour . on_swarm_event ( FromSwarm ::ListenFailure (
ListenFailure {
local_addr : & local_addr ,
send_back_addr : & send_back_addr ,
error : & listen_error ,
connection_id : id ,
} ,
) ) ;
return Some ( SwarmEvent ::IncomingConnectionError {
2023-05-17 07:19:53 +02:00
connection_id : id ,
2023-02-24 10:43:33 +11:00
send_back_addr ,
local_addr ,
error : listen_error ,
} ) ;
}
}
}
} ;
2023-02-14 14:09:29 +13:00
2023-02-24 10:43:33 +11:00
let supported_protocols = handler
. listen_protocol ( )
. upgrade ( )
. protocol_info ( )
2023-05-04 05:47:11 +01:00
. map ( | p | p . as_ref ( ) . as_bytes ( ) . to_vec ( ) )
2023-02-24 10:43:33 +11:00
. collect ( ) ;
2023-02-14 14:09:29 +13:00
let other_established_connection_ids = self
. pool
. iter_established_connections_of_peer ( & peer_id )
. collect ::< Vec < _ > > ( ) ;
let num_established = NonZeroU32 ::new (
u32 ::try_from ( other_established_connection_ids . len ( ) + 1 ) . unwrap ( ) ,
)
. expect ( " n + 1 is always non-zero; qed " ) ;
self . pool
. spawn_connection ( id , peer_id , & endpoint , connection , handler ) ;
log ::debug! (
" Connection established: {:?} {:?}; Total (peer): {}. " ,
peer_id ,
endpoint ,
num_established ,
) ;
let failed_addresses = concurrent_dial_errors
. as_ref ( )
. map ( | es | {
es . iter ( )
. map ( | ( a , _ ) | a )
. cloned ( )
. collect ::< Vec < Multiaddr > > ( )
} )
. unwrap_or_default ( ) ;
self . behaviour
. on_swarm_event ( FromSwarm ::ConnectionEstablished (
behaviour ::ConnectionEstablished {
2022-05-05 20:15:24 +02:00
peer_id ,
2023-02-14 14:09:29 +13:00
connection_id : id ,
endpoint : & endpoint ,
failed_addresses : & failed_addresses ,
other_established : other_established_connection_ids . len ( ) ,
} ,
) ) ;
2023-02-24 10:43:33 +11:00
self . supported_protocols = supported_protocols ;
2023-02-14 14:09:29 +13:00
return Some ( SwarmEvent ::ConnectionEstablished {
peer_id ,
2023-05-17 07:19:53 +02:00
connection_id : id ,
2023-02-14 14:09:29 +13:00
num_established ,
endpoint ,
concurrent_dial_errors ,
established_in ,
} ) ;
2022-05-05 20:15:24 +02:00
}
PoolEvent ::PendingOutboundConnectionError {
2023-02-14 14:09:29 +13:00
id : connection_id ,
2022-05-05 20:15:24 +02:00
error ,
peer ,
} = > {
let error = error . into ( ) ;
2023-01-12 11:21:02 +00:00
self . behaviour
. on_swarm_event ( FromSwarm ::DialFailure ( DialFailure {
peer_id : peer ,
error : & error ,
2023-02-14 14:09:29 +13:00
connection_id ,
2023-01-12 11:21:02 +00:00
} ) ) ;
2022-05-05 20:15:24 +02:00
if let Some ( peer ) = peer {
log ::debug! ( " Connection attempt to {:?} failed with {:?}. " , peer , error , ) ;
} else {
log ::debug! ( " Connection attempt to unknown peer failed with {:?} " , error ) ;
}
return Some ( SwarmEvent ::OutgoingConnectionError {
peer_id : peer ,
2023-05-17 07:19:53 +02:00
connection_id ,
2022-05-05 20:15:24 +02:00
error ,
} ) ;
}
PoolEvent ::PendingInboundConnectionError {
2023-02-14 14:09:29 +13:00
id ,
2022-05-05 20:15:24 +02:00
send_back_addr ,
local_addr ,
error ,
} = > {
2023-01-27 10:23:55 +11:00
let error = error . into ( ) ;
2022-05-05 20:15:24 +02:00
log ::debug! ( " Incoming connection failed: {:?} " , error ) ;
self . behaviour
2023-01-12 11:21:02 +00:00
. on_swarm_event ( FromSwarm ::ListenFailure ( ListenFailure {
local_addr : & local_addr ,
send_back_addr : & send_back_addr ,
2023-01-27 10:23:55 +11:00
error : & error ,
2023-02-14 14:09:29 +13:00
connection_id : id ,
2023-01-12 11:21:02 +00:00
} ) ) ;
2022-05-05 20:15:24 +02:00
return Some ( SwarmEvent ::IncomingConnectionError {
2023-05-17 07:19:53 +02:00
connection_id : id ,
2022-02-13 21:57:38 +01:00
local_addr ,
send_back_addr ,
2022-05-05 20:15:24 +02:00
error ,
} ) ;
}
PoolEvent ::ConnectionClosed {
id ,
connected ,
error ,
remaining_established_connection_ids ,
handler ,
..
} = > {
if let Some ( error ) = error . as_ref ( ) {
2022-02-13 21:57:38 +01:00
log ::debug! (
2022-05-05 20:15:24 +02:00
" Connection closed with error {:?}: {:?}; Total (peer): {}. " ,
error ,
connected ,
remaining_established_connection_ids . len ( )
2022-02-13 21:57:38 +01:00
) ;
2022-05-05 20:15:24 +02:00
} else {
log ::debug! (
" Connection closed: {:?}; Total (peer): {}. " ,
connected ,
remaining_established_connection_ids . len ( )
2022-02-13 21:57:38 +01:00
) ;
}
2022-05-05 20:15:24 +02:00
let peer_id = connected . peer_id ;
let endpoint = connected . endpoint ;
let num_established =
u32 ::try_from ( remaining_established_connection_ids . len ( ) ) . unwrap ( ) ;
2023-02-14 14:09:29 +13:00
self . behaviour
. on_swarm_event ( FromSwarm ::ConnectionClosed ( ConnectionClosed {
peer_id ,
connection_id : id ,
endpoint : & endpoint ,
handler ,
remaining_established : num_established as usize ,
} ) ) ;
2022-05-05 20:15:24 +02:00
return Some ( SwarmEvent ::ConnectionClosed {
2022-05-04 10:33:40 +02:00
peer_id ,
2023-05-17 07:19:53 +02:00
connection_id : id ,
2022-05-04 10:33:40 +02:00
endpoint ,
2022-05-05 20:15:24 +02:00
cause : error ,
num_established ,
} ) ;
}
PoolEvent ::ConnectionEvent { peer_id , id , event } = > {
2023-02-14 14:09:29 +13:00
self . behaviour
. on_connection_handler_event ( peer_id , id , event ) ;
2022-05-05 20:15:24 +02:00
}
PoolEvent ::AddressChange {
peer_id ,
id ,
new_endpoint ,
old_endpoint ,
} = > {
2023-02-14 14:09:29 +13:00
self . behaviour
. on_swarm_event ( FromSwarm ::AddressChange ( AddressChange {
peer_id ,
connection_id : id ,
old : & old_endpoint ,
new : & new_endpoint ,
} ) ) ;
2022-05-05 20:15:24 +02:00
}
}
2022-02-13 21:57:38 +01:00
2022-05-05 20:15:24 +02:00
None
}
2022-02-13 21:57:38 +01:00
2022-07-04 04:16:57 +02:00
fn handle_transport_event (
2022-05-05 20:15:24 +02:00
& mut self ,
2022-07-04 04:16:57 +02:00
event : TransportEvent <
< transport ::Boxed < ( PeerId , StreamMuxerBox ) > as Transport > ::ListenerUpgrade ,
io ::Error ,
> ,
2023-05-14 12:58:08 +02:00
) -> Option < SwarmEvent < TBehaviour ::ToSwarm , THandlerErr < TBehaviour > > > {
2022-05-05 20:15:24 +02:00
match event {
2022-07-04 04:16:57 +02:00
TransportEvent ::Incoming {
2022-05-05 20:15:24 +02:00
listener_id : _ ,
upgrade ,
local_addr ,
send_back_addr ,
} = > {
2023-02-14 14:09:29 +13:00
let connection_id = ConnectionId ::next ( ) ;
2023-02-24 10:43:33 +11:00
match self . behaviour . handle_pending_inbound_connection (
connection_id ,
& local_addr ,
& send_back_addr ,
) {
Ok ( ( ) ) = > { }
Err ( cause ) = > {
let listen_error = ListenError ::Denied { cause } ;
self . behaviour
. on_swarm_event ( FromSwarm ::ListenFailure ( ListenFailure {
local_addr : & local_addr ,
send_back_addr : & send_back_addr ,
error : & listen_error ,
connection_id ,
} ) ) ;
return Some ( SwarmEvent ::IncomingConnectionError {
2023-05-17 07:19:53 +02:00
connection_id ,
2023-02-24 10:43:33 +11:00
local_addr ,
send_back_addr ,
error : listen_error ,
} ) ;
}
}
2023-05-08 12:54:53 +02:00
self . pool . add_incoming (
2022-05-05 20:15:24 +02:00
upgrade ,
IncomingInfo {
local_addr : & local_addr ,
send_back_addr : & send_back_addr ,
} ,
2023-02-14 14:09:29 +13:00
connection_id ,
2023-05-08 12:54:53 +02:00
) ;
Some ( SwarmEvent ::IncomingConnection {
2023-05-17 07:19:53 +02:00
connection_id ,
2023-05-08 12:54:53 +02:00
local_addr ,
send_back_addr ,
} )
2022-05-05 20:15:24 +02:00
}
2022-07-04 04:16:57 +02:00
TransportEvent ::NewAddress {
2022-05-05 20:15:24 +02:00
listener_id ,
listen_addr ,
} = > {
log ::debug! ( " Listener {:?}; New address: {:?} " , listener_id , listen_addr ) ;
2022-07-04 04:16:57 +02:00
let addrs = self . listened_addrs . entry ( listener_id ) . or_default ( ) ;
if ! addrs . contains ( & listen_addr ) {
addrs . push ( listen_addr . clone ( ) )
2022-02-13 21:57:38 +01:00
}
2022-05-05 20:15:24 +02:00
self . behaviour
2023-01-12 11:21:02 +00:00
. on_swarm_event ( FromSwarm ::NewListenAddr ( NewListenAddr {
listener_id ,
addr : & listen_addr ,
} ) ) ;
2023-05-08 12:54:53 +02:00
Some ( SwarmEvent ::NewListenAddr {
2022-05-05 20:15:24 +02:00
listener_id ,
address : listen_addr ,
2023-05-08 12:54:53 +02:00
} )
2022-05-05 20:15:24 +02:00
}
2022-07-04 04:16:57 +02:00
TransportEvent ::AddressExpired {
2022-05-05 20:15:24 +02:00
listener_id ,
listen_addr ,
} = > {
log ::debug! (
" Listener {:?}; Expired address {:?}. " ,
listener_id ,
listen_addr
) ;
2022-07-04 04:16:57 +02:00
if let Some ( addrs ) = self . listened_addrs . get_mut ( & listener_id ) {
addrs . retain ( | a | a ! = & listen_addr ) ;
}
2022-05-05 20:15:24 +02:00
self . behaviour
2023-01-12 11:21:02 +00:00
. on_swarm_event ( FromSwarm ::ExpiredListenAddr ( ExpiredListenAddr {
listener_id ,
addr : & listen_addr ,
} ) ) ;
2023-05-08 12:54:53 +02:00
Some ( SwarmEvent ::ExpiredListenAddr {
2022-05-05 20:15:24 +02:00
listener_id ,
address : listen_addr ,
2023-05-08 12:54:53 +02:00
} )
2022-05-05 20:15:24 +02:00
}
2022-07-04 04:16:57 +02:00
TransportEvent ::ListenerClosed {
2022-05-05 20:15:24 +02:00
listener_id ,
reason ,
} = > {
log ::debug! ( " Listener {:?}; Closed by {:?}. " , listener_id , reason ) ;
2022-07-04 04:16:57 +02:00
let addrs = self . listened_addrs . remove ( & listener_id ) . unwrap_or_default ( ) ;
for addr in addrs . iter ( ) {
2023-01-12 11:21:02 +00:00
self . behaviour . on_swarm_event ( FromSwarm ::ExpiredListenAddr (
ExpiredListenAddr { listener_id , addr } ,
) ) ;
2022-02-13 21:57:38 +01:00
}
2023-01-12 11:21:02 +00:00
self . behaviour
. on_swarm_event ( FromSwarm ::ListenerClosed ( ListenerClosed {
listener_id ,
reason : reason . as_ref ( ) . copied ( ) ,
} ) ) ;
2023-05-08 12:54:53 +02:00
Some ( SwarmEvent ::ListenerClosed {
2022-05-05 20:15:24 +02:00
listener_id ,
2022-07-04 04:16:57 +02:00
addresses : addrs . to_vec ( ) ,
2022-05-05 20:15:24 +02:00
reason ,
2023-05-08 12:54:53 +02:00
} )
2022-05-05 20:15:24 +02:00
}
2022-07-04 04:16:57 +02:00
TransportEvent ::ListenerError { listener_id , error } = > {
2023-01-12 11:21:02 +00:00
self . behaviour
. on_swarm_event ( FromSwarm ::ListenerError ( ListenerError {
listener_id ,
err : & error ,
} ) ) ;
2023-05-08 12:54:53 +02:00
Some ( SwarmEvent ::ListenerError { listener_id , error } )
2022-05-05 20:15:24 +02:00
}
}
}
fn handle_behaviour_event (
& mut self ,
2023-05-14 12:58:08 +02:00
event : ToSwarm < TBehaviour ::ToSwarm , THandlerInEvent < TBehaviour > > ,
) -> Option < SwarmEvent < TBehaviour ::ToSwarm , THandlerErr < TBehaviour > > > {
2022-05-05 20:15:24 +02:00
match event {
2023-03-24 14:43:49 +01:00
ToSwarm ::GenerateEvent ( event ) = > return Some ( SwarmEvent ::Behaviour ( event ) ) ,
ToSwarm ::Dial { opts } = > {
2023-06-08 03:38:18 +02:00
let peer_id = opts . get_peer_id ( ) ;
2023-05-17 07:19:53 +02:00
let connection_id = opts . connection_id ( ) ;
2023-02-14 14:09:29 +13:00
if let Ok ( ( ) ) = self . dial ( opts ) {
2023-05-17 07:19:53 +02:00
return Some ( SwarmEvent ::Dialing {
2023-06-08 03:38:18 +02:00
peer_id ,
2023-05-17 07:19:53 +02:00
connection_id ,
} ) ;
2019-04-04 12:25:42 -03:00
}
2022-05-05 20:15:24 +02:00
}
2023-06-08 21:34:33 -04:00
ToSwarm ::ListenOn { opts } = > {
// Error is dispatched internally, safe to ignore.
let _ = self . add_listener ( opts ) ;
}
ToSwarm ::RemoveListener { id } = > {
self . remove_listener ( id ) ;
}
2023-03-24 14:43:49 +01:00
ToSwarm ::NotifyHandler {
2022-05-05 20:15:24 +02:00
peer_id ,
handler ,
event ,
} = > {
assert! ( self . pending_event . is_none ( ) ) ;
let handler = match handler {
NotifyHandler ::One ( connection ) = > PendingNotifyHandler ::One ( connection ) ,
NotifyHandler ::Any = > {
let ids = self
. pool
. iter_established_connections_of_peer ( & peer_id )
. collect ( ) ;
PendingNotifyHandler ::Any ( ids )
2019-04-10 10:29:21 +02:00
}
2022-05-05 20:15:24 +02:00
} ;
self . pending_event = Some ( ( peer_id , handler , event ) ) ;
}
2023-05-24 09:52:16 +02:00
ToSwarm ::NewExternalAddrCandidate ( addr ) = > {
self . behaviour
. on_swarm_event ( FromSwarm ::NewExternalAddrCandidate (
NewExternalAddrCandidate { addr : & addr } ,
) ) ;
// Generate more candidates based on address translation.
// For TCP without port-reuse, the observed address contains an ephemeral port which needs to be replaced by the port of a listen address.
2022-05-05 20:15:24 +02:00
let translated_addresses = {
let mut addrs : Vec < _ > = self
2022-07-04 04:16:57 +02:00
. listened_addrs
. values ( )
. flatten ( )
2023-05-24 09:52:16 +02:00
. filter_map ( | server | self . transport . address_translation ( server , & addr ) )
2022-05-05 20:15:24 +02:00
. collect ( ) ;
// remove duplicates
addrs . sort_unstable ( ) ;
addrs . dedup ( ) ;
addrs
} ;
for addr in translated_addresses {
2023-05-24 09:52:16 +02:00
self . behaviour
. on_swarm_event ( FromSwarm ::NewExternalAddrCandidate (
NewExternalAddrCandidate { addr : & addr } ,
) ) ;
2019-04-10 10:29:21 +02:00
}
2022-05-05 20:15:24 +02:00
}
2023-05-24 09:52:16 +02:00
ToSwarm ::ExternalAddrConfirmed ( addr ) = > {
self . add_external_address ( addr ) ;
}
ToSwarm ::ExternalAddrExpired ( addr ) = > {
self . remove_external_address ( & addr ) ;
}
2023-03-24 14:43:49 +01:00
ToSwarm ::CloseConnection {
2022-05-05 20:15:24 +02:00
peer_id ,
connection ,
} = > match connection {
CloseConnection ::One ( connection_id ) = > {
if let Some ( conn ) = self . pool . get_established ( connection_id ) {
conn . start_close ( ) ;
2020-03-23 20:31:38 +10:00
}
2019-04-04 12:25:42 -03:00
}
2022-05-05 20:15:24 +02:00
CloseConnection ::All = > {
self . pool . disconnect ( peer_id ) ;
}
} ,
}
None
}
/// Internal function used by everything event-related.
///
/// Polls the `Swarm` for the next event.
fn poll_next_event (
mut self : Pin < & mut Self > ,
cx : & mut Context < '_ > ,
2023-05-14 12:58:08 +02:00
) -> Poll < SwarmEvent < TBehaviour ::ToSwarm , THandlerErr < TBehaviour > > > {
2022-05-05 20:15:24 +02:00
// We use a `this` variable because the compiler can't mutably borrow multiple times
// across a `Deref`.
let this = & mut * self ;
2019-04-04 12:25:42 -03:00
2022-05-05 20:15:24 +02:00
// This loop polls the components below in a prioritized order.
//
// 1. [`NetworkBehaviour`]
// 2. Connection [`Pool`]
// 3. [`ListenersStream`]
//
// (1) is polled before (2) to prioritize local work over work coming from a remote.
//
// (2) is polled before (3) to prioritize existing connections over upgrading new incoming connections.
loop {
match this . pending_event . take ( ) {
// Try to deliver the pending event emitted by the [`NetworkBehaviour`] in the previous
// iteration to the connection handler(s).
Some ( ( peer_id , handler , event ) ) = > match handler {
2022-02-13 21:57:38 +01:00
PendingNotifyHandler ::One ( conn_id ) = > {
2022-05-05 20:15:24 +02:00
match this . pool . get_established ( conn_id ) {
2022-11-03 05:47:00 +11:00
Some ( conn ) = > match notify_one ( conn , event , cx ) {
2022-05-05 20:15:24 +02:00
None = > continue ,
Some ( event ) = > {
this . pending_event = Some ( ( peer_id , handler , event ) ) ;
2021-10-26 22:23:55 +02:00
}
2022-05-05 20:15:24 +02:00
} ,
None = > continue ,
Multiple connections per peer (#1440)
* Allow multiple connections per peer in libp2p-core.
Instead of trying to enforce a single connection per peer,
which involves quite a bit of additional complexity e.g.
to prioritise simultaneously opened connections and can
have other undesirable consequences [1], we now
make multiple connections per peer a feature.
The gist of these changes is as follows:
The concept of a "node" with an implicit 1-1 correspondence
to a connection has been replaced with the "first-class"
concept of a "connection". The code from `src/nodes` has moved
(with varying degrees of modification) to `src/connection`.
A `HandledNode` has become a `Connection`, a `NodeHandler` a
`ConnectionHandler`, the `CollectionStream` was the basis for
the new `connection::Pool`, and so forth.
Conceptually, a `Network` contains a `connection::Pool` which
in turn internally employs the `connection::Manager` for
handling the background `connection::manager::Task`s, one
per connection, as before. These are all considered implementation
details. On the public API, `Peer`s are managed as before through
the `Network`, except now the API has changed with the shift of focus
to (potentially multiple) connections per peer. The `NetworkEvent`s have
accordingly also undergone changes.
The Swarm APIs remain largely unchanged, except for the fact that
`inject_replaced` is no longer called. It may now practically happen
that multiple `ProtocolsHandler`s are associated with a single
`NetworkBehaviour`, one per connection. If implementations of
`NetworkBehaviour` rely somehow on communicating with exactly
one `ProtocolsHandler`, this may cause issues, but it is unlikely.
[1]: https://github.com/paritytech/substrate/issues/4272
* Fix intra-rustdoc links.
* Update core/src/connection/pool.rs
Co-Authored-By: Max Inden <mail@max-inden.de>
* Address some review feedback and fix doc links.
* Allow responses to be sent on the same connection.
* Remove unnecessary remainders of inject_replaced.
* Update swarm/src/behaviour.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Update swarm/src/lib.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Update core/src/connection/manager.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Update core/src/connection/manager.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Update core/src/connection/pool.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Incorporate more review feedback.
* Move module declaration below imports.
* Update core/src/connection/manager.rs
Co-Authored-By: Toralf Wittner <tw@dtex.org>
* Update core/src/connection/manager.rs
Co-Authored-By: Toralf Wittner <tw@dtex.org>
* Simplify as per review.
* Fix rustoc link.
* Add try_notify_handler and simplify.
* Relocate DialingConnection and DialingAttempt.
For better visibility constraints.
* Small cleanup.
* Small cleanup. More robust EstablishedConnectionIter.
* Clarify semantics of `DialingPeer::connect`.
* Don't call inject_disconnected on InvalidPeerId.
To preserve the previous behavior and ensure calls to
`inject_disconnected` are always paired with calls to
`inject_connected`.
* Provide public ConnectionId constructor.
Mainly needed for testing purposes, e.g. in substrate.
* Move the established connection limit check to the right place.
* Clean up connection error handling.
Separate connection errors into those occuring during
connection setup or upon rejecting a newly established
connection (the `PendingConnectionError`) and those
errors occurring on previously established connections,
i.e. for which a `ConnectionEstablished` event has
been emitted by the connection pool earlier.
* Revert change in log level and clarify an invariant.
* Remove inject_replaced entirely.
* Allow notifying all connection handlers.
Thereby simplify by introducing a new enum `NotifyHandler`,
used with a single constructor `NetworkBehaviourAction::NotifyHandler`.
* Finishing touches.
Small API simplifications and code deduplication.
Some more useful debug logging.
Co-authored-by: Max Inden <mail@max-inden.de>
Co-authored-by: Pierre Krieger <pierre.krieger1708@gmail.com>
Co-authored-by: Toralf Wittner <tw@dtex.org>
2020-03-04 13:49:25 +01:00
}
2019-07-09 16:47:24 +02:00
}
2022-02-13 21:57:38 +01:00
PendingNotifyHandler ::Any ( ids ) = > {
2022-12-23 11:13:34 +11:00
match notify_any ::< _ , TBehaviour > ( ids , & mut this . pool , event , cx ) {
2022-05-05 20:15:24 +02:00
None = > continue ,
Some ( ( event , ids ) ) = > {
let handler = PendingNotifyHandler ::Any ( ids ) ;
this . pending_event = Some ( ( peer_id , handler , event ) ) ;
2022-02-13 21:57:38 +01:00
}
}
}
2022-05-05 20:15:24 +02:00
} ,
// No pending event. Allow the [`NetworkBehaviour`] to make progress.
None = > {
let behaviour_poll = {
let mut parameters = SwarmPollParameters {
supported_protocols : & this . supported_protocols ,
} ;
this . behaviour . poll ( cx , & mut parameters )
} ;
Multiple connections per peer (#1440)
* Allow multiple connections per peer in libp2p-core.
Instead of trying to enforce a single connection per peer,
which involves quite a bit of additional complexity e.g.
to prioritise simultaneously opened connections and can
have other undesirable consequences [1], we now
make multiple connections per peer a feature.
The gist of these changes is as follows:
The concept of a "node" with an implicit 1-1 correspondence
to a connection has been replaced with the "first-class"
concept of a "connection". The code from `src/nodes` has moved
(with varying degrees of modification) to `src/connection`.
A `HandledNode` has become a `Connection`, a `NodeHandler` a
`ConnectionHandler`, the `CollectionStream` was the basis for
the new `connection::Pool`, and so forth.
Conceptually, a `Network` contains a `connection::Pool` which
in turn internally employs the `connection::Manager` for
handling the background `connection::manager::Task`s, one
per connection, as before. These are all considered implementation
details. On the public API, `Peer`s are managed as before through
the `Network`, except now the API has changed with the shift of focus
to (potentially multiple) connections per peer. The `NetworkEvent`s have
accordingly also undergone changes.
The Swarm APIs remain largely unchanged, except for the fact that
`inject_replaced` is no longer called. It may now practically happen
that multiple `ProtocolsHandler`s are associated with a single
`NetworkBehaviour`, one per connection. If implementations of
`NetworkBehaviour` rely somehow on communicating with exactly
one `ProtocolsHandler`, this may cause issues, but it is unlikely.
[1]: https://github.com/paritytech/substrate/issues/4272
* Fix intra-rustdoc links.
* Update core/src/connection/pool.rs
Co-Authored-By: Max Inden <mail@max-inden.de>
* Address some review feedback and fix doc links.
* Allow responses to be sent on the same connection.
* Remove unnecessary remainders of inject_replaced.
* Update swarm/src/behaviour.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Update swarm/src/lib.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Update core/src/connection/manager.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Update core/src/connection/manager.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Update core/src/connection/pool.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Incorporate more review feedback.
* Move module declaration below imports.
* Update core/src/connection/manager.rs
Co-Authored-By: Toralf Wittner <tw@dtex.org>
* Update core/src/connection/manager.rs
Co-Authored-By: Toralf Wittner <tw@dtex.org>
* Simplify as per review.
* Fix rustoc link.
* Add try_notify_handler and simplify.
* Relocate DialingConnection and DialingAttempt.
For better visibility constraints.
* Small cleanup.
* Small cleanup. More robust EstablishedConnectionIter.
* Clarify semantics of `DialingPeer::connect`.
* Don't call inject_disconnected on InvalidPeerId.
To preserve the previous behavior and ensure calls to
`inject_disconnected` are always paired with calls to
`inject_connected`.
* Provide public ConnectionId constructor.
Mainly needed for testing purposes, e.g. in substrate.
* Move the established connection limit check to the right place.
* Clean up connection error handling.
Separate connection errors into those occuring during
connection setup or upon rejecting a newly established
connection (the `PendingConnectionError`) and those
errors occurring on previously established connections,
i.e. for which a `ConnectionEstablished` event has
been emitted by the connection pool earlier.
* Revert change in log level and clarify an invariant.
* Remove inject_replaced entirely.
* Allow notifying all connection handlers.
Thereby simplify by introducing a new enum `NotifyHandler`,
used with a single constructor `NetworkBehaviourAction::NotifyHandler`.
* Finishing touches.
Small API simplifications and code deduplication.
Some more useful debug logging.
Co-authored-by: Max Inden <mail@max-inden.de>
Co-authored-by: Pierre Krieger <pierre.krieger1708@gmail.com>
Co-authored-by: Toralf Wittner <tw@dtex.org>
2020-03-04 13:49:25 +01:00
2022-05-05 20:15:24 +02:00
match behaviour_poll {
Poll ::Pending = > { }
Poll ::Ready ( behaviour_event ) = > {
if let Some ( swarm_event ) = this . handle_behaviour_event ( behaviour_event )
{
return Poll ::Ready ( swarm_event ) ;
}
2019-04-04 12:25:42 -03:00
2022-05-05 20:15:24 +02:00
continue ;
2021-08-31 17:00:51 +02:00
}
2019-04-18 19:17:14 +03:00
}
2019-04-04 12:25:42 -03:00
}
2022-05-05 20:15:24 +02:00
}
2022-02-13 21:57:38 +01:00
2022-05-05 20:15:24 +02:00
// Poll the known peers.
match this . pool . poll ( cx ) {
Poll ::Pending = > { }
Poll ::Ready ( pool_event ) = > {
if let Some ( swarm_event ) = this . handle_pool_event ( pool_event ) {
return Poll ::Ready ( swarm_event ) ;
2019-04-04 12:25:42 -03:00
}
2022-05-05 20:15:24 +02:00
continue ;
2019-04-04 12:25:42 -03:00
}
2022-05-05 20:15:24 +02:00
} ;
// Poll the listener(s) for new connections.
2022-07-04 04:16:57 +02:00
match Pin ::new ( & mut this . transport ) . poll ( cx ) {
2022-05-05 20:15:24 +02:00
Poll ::Pending = > { }
2022-07-04 04:16:57 +02:00
Poll ::Ready ( transport_event ) = > {
if let Some ( swarm_event ) = this . handle_transport_event ( transport_event ) {
2022-05-05 20:15:24 +02:00
return Poll ::Ready ( swarm_event ) ;
2022-02-13 21:57:38 +01:00
}
2022-05-05 20:15:24 +02:00
continue ;
}
2019-04-04 12:25:42 -03:00
}
2022-05-05 20:15:24 +02:00
return Poll ::Pending ;
2019-04-04 12:25:42 -03:00
}
}
}
2020-12-17 11:01:45 +01:00
/// Connection to notify of a pending event.
Multiple connections per peer (#1440)
* Allow multiple connections per peer in libp2p-core.
Instead of trying to enforce a single connection per peer,
which involves quite a bit of additional complexity e.g.
to prioritise simultaneously opened connections and can
have other undesirable consequences [1], we now
make multiple connections per peer a feature.
The gist of these changes is as follows:
The concept of a "node" with an implicit 1-1 correspondence
to a connection has been replaced with the "first-class"
concept of a "connection". The code from `src/nodes` has moved
(with varying degrees of modification) to `src/connection`.
A `HandledNode` has become a `Connection`, a `NodeHandler` a
`ConnectionHandler`, the `CollectionStream` was the basis for
the new `connection::Pool`, and so forth.
Conceptually, a `Network` contains a `connection::Pool` which
in turn internally employs the `connection::Manager` for
handling the background `connection::manager::Task`s, one
per connection, as before. These are all considered implementation
details. On the public API, `Peer`s are managed as before through
the `Network`, except now the API has changed with the shift of focus
to (potentially multiple) connections per peer. The `NetworkEvent`s have
accordingly also undergone changes.
The Swarm APIs remain largely unchanged, except for the fact that
`inject_replaced` is no longer called. It may now practically happen
that multiple `ProtocolsHandler`s are associated with a single
`NetworkBehaviour`, one per connection. If implementations of
`NetworkBehaviour` rely somehow on communicating with exactly
one `ProtocolsHandler`, this may cause issues, but it is unlikely.
[1]: https://github.com/paritytech/substrate/issues/4272
* Fix intra-rustdoc links.
* Update core/src/connection/pool.rs
Co-Authored-By: Max Inden <mail@max-inden.de>
* Address some review feedback and fix doc links.
* Allow responses to be sent on the same connection.
* Remove unnecessary remainders of inject_replaced.
* Update swarm/src/behaviour.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Update swarm/src/lib.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Update core/src/connection/manager.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Update core/src/connection/manager.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Update core/src/connection/pool.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Incorporate more review feedback.
* Move module declaration below imports.
* Update core/src/connection/manager.rs
Co-Authored-By: Toralf Wittner <tw@dtex.org>
* Update core/src/connection/manager.rs
Co-Authored-By: Toralf Wittner <tw@dtex.org>
* Simplify as per review.
* Fix rustoc link.
* Add try_notify_handler and simplify.
* Relocate DialingConnection and DialingAttempt.
For better visibility constraints.
* Small cleanup.
* Small cleanup. More robust EstablishedConnectionIter.
* Clarify semantics of `DialingPeer::connect`.
* Don't call inject_disconnected on InvalidPeerId.
To preserve the previous behavior and ensure calls to
`inject_disconnected` are always paired with calls to
`inject_connected`.
* Provide public ConnectionId constructor.
Mainly needed for testing purposes, e.g. in substrate.
* Move the established connection limit check to the right place.
* Clean up connection error handling.
Separate connection errors into those occuring during
connection setup or upon rejecting a newly established
connection (the `PendingConnectionError`) and those
errors occurring on previously established connections,
i.e. for which a `ConnectionEstablished` event has
been emitted by the connection pool earlier.
* Revert change in log level and clarify an invariant.
* Remove inject_replaced entirely.
* Allow notifying all connection handlers.
Thereby simplify by introducing a new enum `NotifyHandler`,
used with a single constructor `NetworkBehaviourAction::NotifyHandler`.
* Finishing touches.
Small API simplifications and code deduplication.
Some more useful debug logging.
Co-authored-by: Max Inden <mail@max-inden.de>
Co-authored-by: Pierre Krieger <pierre.krieger1708@gmail.com>
Co-authored-by: Toralf Wittner <tw@dtex.org>
2020-03-04 13:49:25 +01:00
///
2020-12-17 11:01:45 +01:00
/// The connection IDs out of which to notify one of an event are captured at
/// the time the behaviour emits the event, in order not to forward the event to
/// a new connection which the behaviour may not have been aware of at the time
/// it issued the request for sending it.
Multiple connections per peer (#1440)
* Allow multiple connections per peer in libp2p-core.
Instead of trying to enforce a single connection per peer,
which involves quite a bit of additional complexity e.g.
to prioritise simultaneously opened connections and can
have other undesirable consequences [1], we now
make multiple connections per peer a feature.
The gist of these changes is as follows:
The concept of a "node" with an implicit 1-1 correspondence
to a connection has been replaced with the "first-class"
concept of a "connection". The code from `src/nodes` has moved
(with varying degrees of modification) to `src/connection`.
A `HandledNode` has become a `Connection`, a `NodeHandler` a
`ConnectionHandler`, the `CollectionStream` was the basis for
the new `connection::Pool`, and so forth.
Conceptually, a `Network` contains a `connection::Pool` which
in turn internally employs the `connection::Manager` for
handling the background `connection::manager::Task`s, one
per connection, as before. These are all considered implementation
details. On the public API, `Peer`s are managed as before through
the `Network`, except now the API has changed with the shift of focus
to (potentially multiple) connections per peer. The `NetworkEvent`s have
accordingly also undergone changes.
The Swarm APIs remain largely unchanged, except for the fact that
`inject_replaced` is no longer called. It may now practically happen
that multiple `ProtocolsHandler`s are associated with a single
`NetworkBehaviour`, one per connection. If implementations of
`NetworkBehaviour` rely somehow on communicating with exactly
one `ProtocolsHandler`, this may cause issues, but it is unlikely.
[1]: https://github.com/paritytech/substrate/issues/4272
* Fix intra-rustdoc links.
* Update core/src/connection/pool.rs
Co-Authored-By: Max Inden <mail@max-inden.de>
* Address some review feedback and fix doc links.
* Allow responses to be sent on the same connection.
* Remove unnecessary remainders of inject_replaced.
* Update swarm/src/behaviour.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Update swarm/src/lib.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Update core/src/connection/manager.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Update core/src/connection/manager.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Update core/src/connection/pool.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Incorporate more review feedback.
* Move module declaration below imports.
* Update core/src/connection/manager.rs
Co-Authored-By: Toralf Wittner <tw@dtex.org>
* Update core/src/connection/manager.rs
Co-Authored-By: Toralf Wittner <tw@dtex.org>
* Simplify as per review.
* Fix rustoc link.
* Add try_notify_handler and simplify.
* Relocate DialingConnection and DialingAttempt.
For better visibility constraints.
* Small cleanup.
* Small cleanup. More robust EstablishedConnectionIter.
* Clarify semantics of `DialingPeer::connect`.
* Don't call inject_disconnected on InvalidPeerId.
To preserve the previous behavior and ensure calls to
`inject_disconnected` are always paired with calls to
`inject_connected`.
* Provide public ConnectionId constructor.
Mainly needed for testing purposes, e.g. in substrate.
* Move the established connection limit check to the right place.
* Clean up connection error handling.
Separate connection errors into those occuring during
connection setup or upon rejecting a newly established
connection (the `PendingConnectionError`) and those
errors occurring on previously established connections,
i.e. for which a `ConnectionEstablished` event has
been emitted by the connection pool earlier.
* Revert change in log level and clarify an invariant.
* Remove inject_replaced entirely.
* Allow notifying all connection handlers.
Thereby simplify by introducing a new enum `NotifyHandler`,
used with a single constructor `NetworkBehaviourAction::NotifyHandler`.
* Finishing touches.
Small API simplifications and code deduplication.
Some more useful debug logging.
Co-authored-by: Max Inden <mail@max-inden.de>
Co-authored-by: Pierre Krieger <pierre.krieger1708@gmail.com>
Co-authored-by: Toralf Wittner <tw@dtex.org>
2020-03-04 13:49:25 +01:00
enum PendingNotifyHandler {
One ( ConnectionId ) ,
Any ( SmallVec < [ ConnectionId ; 10 ] > ) ,
}
/// Notify a single connection of an event.
///
/// Returns `Some` with the given event if the connection is not currently
/// ready to receive another event, in which case the current task is
/// scheduled to be woken up.
///
/// Returns `None` if the connection is closing or the event has been
/// successfully sent, in either case the event is consumed.
2022-11-03 05:47:00 +11:00
fn notify_one < THandlerInEvent > (
conn : & mut EstablishedConnection < THandlerInEvent > ,
2021-08-09 15:29:58 +02:00
event : THandlerInEvent ,
2020-07-27 20:27:33 +00:00
cx : & mut Context < '_ > ,
2021-08-09 15:29:58 +02:00
) -> Option < THandlerInEvent > {
Multiple connections per peer (#1440)
* Allow multiple connections per peer in libp2p-core.
Instead of trying to enforce a single connection per peer,
which involves quite a bit of additional complexity e.g.
to prioritise simultaneously opened connections and can
have other undesirable consequences [1], we now
make multiple connections per peer a feature.
The gist of these changes is as follows:
The concept of a "node" with an implicit 1-1 correspondence
to a connection has been replaced with the "first-class"
concept of a "connection". The code from `src/nodes` has moved
(with varying degrees of modification) to `src/connection`.
A `HandledNode` has become a `Connection`, a `NodeHandler` a
`ConnectionHandler`, the `CollectionStream` was the basis for
the new `connection::Pool`, and so forth.
Conceptually, a `Network` contains a `connection::Pool` which
in turn internally employs the `connection::Manager` for
handling the background `connection::manager::Task`s, one
per connection, as before. These are all considered implementation
details. On the public API, `Peer`s are managed as before through
the `Network`, except now the API has changed with the shift of focus
to (potentially multiple) connections per peer. The `NetworkEvent`s have
accordingly also undergone changes.
The Swarm APIs remain largely unchanged, except for the fact that
`inject_replaced` is no longer called. It may now practically happen
that multiple `ProtocolsHandler`s are associated with a single
`NetworkBehaviour`, one per connection. If implementations of
`NetworkBehaviour` rely somehow on communicating with exactly
one `ProtocolsHandler`, this may cause issues, but it is unlikely.
[1]: https://github.com/paritytech/substrate/issues/4272
* Fix intra-rustdoc links.
* Update core/src/connection/pool.rs
Co-Authored-By: Max Inden <mail@max-inden.de>
* Address some review feedback and fix doc links.
* Allow responses to be sent on the same connection.
* Remove unnecessary remainders of inject_replaced.
* Update swarm/src/behaviour.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Update swarm/src/lib.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Update core/src/connection/manager.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Update core/src/connection/manager.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Update core/src/connection/pool.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Incorporate more review feedback.
* Move module declaration below imports.
* Update core/src/connection/manager.rs
Co-Authored-By: Toralf Wittner <tw@dtex.org>
* Update core/src/connection/manager.rs
Co-Authored-By: Toralf Wittner <tw@dtex.org>
* Simplify as per review.
* Fix rustoc link.
* Add try_notify_handler and simplify.
* Relocate DialingConnection and DialingAttempt.
For better visibility constraints.
* Small cleanup.
* Small cleanup. More robust EstablishedConnectionIter.
* Clarify semantics of `DialingPeer::connect`.
* Don't call inject_disconnected on InvalidPeerId.
To preserve the previous behavior and ensure calls to
`inject_disconnected` are always paired with calls to
`inject_connected`.
* Provide public ConnectionId constructor.
Mainly needed for testing purposes, e.g. in substrate.
* Move the established connection limit check to the right place.
* Clean up connection error handling.
Separate connection errors into those occuring during
connection setup or upon rejecting a newly established
connection (the `PendingConnectionError`) and those
errors occurring on previously established connections,
i.e. for which a `ConnectionEstablished` event has
been emitted by the connection pool earlier.
* Revert change in log level and clarify an invariant.
* Remove inject_replaced entirely.
* Allow notifying all connection handlers.
Thereby simplify by introducing a new enum `NotifyHandler`,
used with a single constructor `NetworkBehaviourAction::NotifyHandler`.
* Finishing touches.
Small API simplifications and code deduplication.
Some more useful debug logging.
Co-authored-by: Max Inden <mail@max-inden.de>
Co-authored-by: Pierre Krieger <pierre.krieger1708@gmail.com>
Co-authored-by: Toralf Wittner <tw@dtex.org>
2020-03-04 13:49:25 +01:00
match conn . poll_ready_notify_handler ( cx ) {
Poll ::Pending = > Some ( event ) ,
Poll ::Ready ( Err ( ( ) ) ) = > None , // connection is closing
Poll ::Ready ( Ok ( ( ) ) ) = > {
// Can now only fail if connection is closing.
let _ = conn . notify_handler ( event ) ;
None
}
}
}
/// Notify any one of a given list of connections of a peer of an event.
///
/// Returns `Some` with the given event and a new list of connections if
/// none of the given connections was able to receive the event but at
/// least one of them is not closing, in which case the current task
/// is scheduled to be woken up. The returned connections are those which
/// may still become ready to receive another event.
///
/// Returns `None` if either all connections are closing or the event
/// was successfully sent to a handler, in either case the event is consumed.
2022-12-23 11:13:34 +11:00
fn notify_any < THandler , TBehaviour > (
Multiple connections per peer (#1440)
* Allow multiple connections per peer in libp2p-core.
Instead of trying to enforce a single connection per peer,
which involves quite a bit of additional complexity e.g.
to prioritise simultaneously opened connections and can
have other undesirable consequences [1], we now
make multiple connections per peer a feature.
The gist of these changes is as follows:
The concept of a "node" with an implicit 1-1 correspondence
to a connection has been replaced with the "first-class"
concept of a "connection". The code from `src/nodes` has moved
(with varying degrees of modification) to `src/connection`.
A `HandledNode` has become a `Connection`, a `NodeHandler` a
`ConnectionHandler`, the `CollectionStream` was the basis for
the new `connection::Pool`, and so forth.
Conceptually, a `Network` contains a `connection::Pool` which
in turn internally employs the `connection::Manager` for
handling the background `connection::manager::Task`s, one
per connection, as before. These are all considered implementation
details. On the public API, `Peer`s are managed as before through
the `Network`, except now the API has changed with the shift of focus
to (potentially multiple) connections per peer. The `NetworkEvent`s have
accordingly also undergone changes.
The Swarm APIs remain largely unchanged, except for the fact that
`inject_replaced` is no longer called. It may now practically happen
that multiple `ProtocolsHandler`s are associated with a single
`NetworkBehaviour`, one per connection. If implementations of
`NetworkBehaviour` rely somehow on communicating with exactly
one `ProtocolsHandler`, this may cause issues, but it is unlikely.
[1]: https://github.com/paritytech/substrate/issues/4272
* Fix intra-rustdoc links.
* Update core/src/connection/pool.rs
Co-Authored-By: Max Inden <mail@max-inden.de>
* Address some review feedback and fix doc links.
* Allow responses to be sent on the same connection.
* Remove unnecessary remainders of inject_replaced.
* Update swarm/src/behaviour.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Update swarm/src/lib.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Update core/src/connection/manager.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Update core/src/connection/manager.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Update core/src/connection/pool.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Incorporate more review feedback.
* Move module declaration below imports.
* Update core/src/connection/manager.rs
Co-Authored-By: Toralf Wittner <tw@dtex.org>
* Update core/src/connection/manager.rs
Co-Authored-By: Toralf Wittner <tw@dtex.org>
* Simplify as per review.
* Fix rustoc link.
* Add try_notify_handler and simplify.
* Relocate DialingConnection and DialingAttempt.
For better visibility constraints.
* Small cleanup.
* Small cleanup. More robust EstablishedConnectionIter.
* Clarify semantics of `DialingPeer::connect`.
* Don't call inject_disconnected on InvalidPeerId.
To preserve the previous behavior and ensure calls to
`inject_disconnected` are always paired with calls to
`inject_connected`.
* Provide public ConnectionId constructor.
Mainly needed for testing purposes, e.g. in substrate.
* Move the established connection limit check to the right place.
* Clean up connection error handling.
Separate connection errors into those occuring during
connection setup or upon rejecting a newly established
connection (the `PendingConnectionError`) and those
errors occurring on previously established connections,
i.e. for which a `ConnectionEstablished` event has
been emitted by the connection pool earlier.
* Revert change in log level and clarify an invariant.
* Remove inject_replaced entirely.
* Allow notifying all connection handlers.
Thereby simplify by introducing a new enum `NotifyHandler`,
used with a single constructor `NetworkBehaviourAction::NotifyHandler`.
* Finishing touches.
Small API simplifications and code deduplication.
Some more useful debug logging.
Co-authored-by: Max Inden <mail@max-inden.de>
Co-authored-by: Pierre Krieger <pierre.krieger1708@gmail.com>
Co-authored-by: Toralf Wittner <tw@dtex.org>
2020-03-04 13:49:25 +01:00
ids : SmallVec < [ ConnectionId ; 10 ] > ,
2022-12-23 11:13:34 +11:00
pool : & mut Pool < THandler > ,
2021-08-11 12:41:28 +02:00
event : THandlerInEvent < TBehaviour > ,
2020-07-27 20:27:33 +00:00
cx : & mut Context < '_ > ,
2021-08-11 12:41:28 +02:00
) -> Option < ( THandlerInEvent < TBehaviour > , SmallVec < [ ConnectionId ; 10 ] > ) >
Multiple connections per peer (#1440)
* Allow multiple connections per peer in libp2p-core.
Instead of trying to enforce a single connection per peer,
which involves quite a bit of additional complexity e.g.
to prioritise simultaneously opened connections and can
have other undesirable consequences [1], we now
make multiple connections per peer a feature.
The gist of these changes is as follows:
The concept of a "node" with an implicit 1-1 correspondence
to a connection has been replaced with the "first-class"
concept of a "connection". The code from `src/nodes` has moved
(with varying degrees of modification) to `src/connection`.
A `HandledNode` has become a `Connection`, a `NodeHandler` a
`ConnectionHandler`, the `CollectionStream` was the basis for
the new `connection::Pool`, and so forth.
Conceptually, a `Network` contains a `connection::Pool` which
in turn internally employs the `connection::Manager` for
handling the background `connection::manager::Task`s, one
per connection, as before. These are all considered implementation
details. On the public API, `Peer`s are managed as before through
the `Network`, except now the API has changed with the shift of focus
to (potentially multiple) connections per peer. The `NetworkEvent`s have
accordingly also undergone changes.
The Swarm APIs remain largely unchanged, except for the fact that
`inject_replaced` is no longer called. It may now practically happen
that multiple `ProtocolsHandler`s are associated with a single
`NetworkBehaviour`, one per connection. If implementations of
`NetworkBehaviour` rely somehow on communicating with exactly
one `ProtocolsHandler`, this may cause issues, but it is unlikely.
[1]: https://github.com/paritytech/substrate/issues/4272
* Fix intra-rustdoc links.
* Update core/src/connection/pool.rs
Co-Authored-By: Max Inden <mail@max-inden.de>
* Address some review feedback and fix doc links.
* Allow responses to be sent on the same connection.
* Remove unnecessary remainders of inject_replaced.
* Update swarm/src/behaviour.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Update swarm/src/lib.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Update core/src/connection/manager.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Update core/src/connection/manager.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Update core/src/connection/pool.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Incorporate more review feedback.
* Move module declaration below imports.
* Update core/src/connection/manager.rs
Co-Authored-By: Toralf Wittner <tw@dtex.org>
* Update core/src/connection/manager.rs
Co-Authored-By: Toralf Wittner <tw@dtex.org>
* Simplify as per review.
* Fix rustoc link.
* Add try_notify_handler and simplify.
* Relocate DialingConnection and DialingAttempt.
For better visibility constraints.
* Small cleanup.
* Small cleanup. More robust EstablishedConnectionIter.
* Clarify semantics of `DialingPeer::connect`.
* Don't call inject_disconnected on InvalidPeerId.
To preserve the previous behavior and ensure calls to
`inject_disconnected` are always paired with calls to
`inject_connected`.
* Provide public ConnectionId constructor.
Mainly needed for testing purposes, e.g. in substrate.
* Move the established connection limit check to the right place.
* Clean up connection error handling.
Separate connection errors into those occuring during
connection setup or upon rejecting a newly established
connection (the `PendingConnectionError`) and those
errors occurring on previously established connections,
i.e. for which a `ConnectionEstablished` event has
been emitted by the connection pool earlier.
* Revert change in log level and clarify an invariant.
* Remove inject_replaced entirely.
* Allow notifying all connection handlers.
Thereby simplify by introducing a new enum `NotifyHandler`,
used with a single constructor `NetworkBehaviourAction::NotifyHandler`.
* Finishing touches.
Small API simplifications and code deduplication.
Some more useful debug logging.
Co-authored-by: Max Inden <mail@max-inden.de>
Co-authored-by: Pierre Krieger <pierre.krieger1708@gmail.com>
Co-authored-by: Toralf Wittner <tw@dtex.org>
2020-03-04 13:49:25 +01:00
where
2021-08-11 12:41:28 +02:00
TBehaviour : NetworkBehaviour ,
2023-02-24 10:43:33 +11:00
THandler : ConnectionHandler <
2023-05-14 12:58:08 +02:00
FromBehaviour = THandlerInEvent < TBehaviour > ,
ToBehaviour = THandlerOutEvent < TBehaviour > ,
2021-08-11 13:12:12 +02:00
> ,
Multiple connections per peer (#1440)
* Allow multiple connections per peer in libp2p-core.
Instead of trying to enforce a single connection per peer,
which involves quite a bit of additional complexity e.g.
to prioritise simultaneously opened connections and can
have other undesirable consequences [1], we now
make multiple connections per peer a feature.
The gist of these changes is as follows:
The concept of a "node" with an implicit 1-1 correspondence
to a connection has been replaced with the "first-class"
concept of a "connection". The code from `src/nodes` has moved
(with varying degrees of modification) to `src/connection`.
A `HandledNode` has become a `Connection`, a `NodeHandler` a
`ConnectionHandler`, the `CollectionStream` was the basis for
the new `connection::Pool`, and so forth.
Conceptually, a `Network` contains a `connection::Pool` which
in turn internally employs the `connection::Manager` for
handling the background `connection::manager::Task`s, one
per connection, as before. These are all considered implementation
details. On the public API, `Peer`s are managed as before through
the `Network`, except now the API has changed with the shift of focus
to (potentially multiple) connections per peer. The `NetworkEvent`s have
accordingly also undergone changes.
The Swarm APIs remain largely unchanged, except for the fact that
`inject_replaced` is no longer called. It may now practically happen
that multiple `ProtocolsHandler`s are associated with a single
`NetworkBehaviour`, one per connection. If implementations of
`NetworkBehaviour` rely somehow on communicating with exactly
one `ProtocolsHandler`, this may cause issues, but it is unlikely.
[1]: https://github.com/paritytech/substrate/issues/4272
* Fix intra-rustdoc links.
* Update core/src/connection/pool.rs
Co-Authored-By: Max Inden <mail@max-inden.de>
* Address some review feedback and fix doc links.
* Allow responses to be sent on the same connection.
* Remove unnecessary remainders of inject_replaced.
* Update swarm/src/behaviour.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Update swarm/src/lib.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Update core/src/connection/manager.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Update core/src/connection/manager.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Update core/src/connection/pool.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Incorporate more review feedback.
* Move module declaration below imports.
* Update core/src/connection/manager.rs
Co-Authored-By: Toralf Wittner <tw@dtex.org>
* Update core/src/connection/manager.rs
Co-Authored-By: Toralf Wittner <tw@dtex.org>
* Simplify as per review.
* Fix rustoc link.
* Add try_notify_handler and simplify.
* Relocate DialingConnection and DialingAttempt.
For better visibility constraints.
* Small cleanup.
* Small cleanup. More robust EstablishedConnectionIter.
* Clarify semantics of `DialingPeer::connect`.
* Don't call inject_disconnected on InvalidPeerId.
To preserve the previous behavior and ensure calls to
`inject_disconnected` are always paired with calls to
`inject_connected`.
* Provide public ConnectionId constructor.
Mainly needed for testing purposes, e.g. in substrate.
* Move the established connection limit check to the right place.
* Clean up connection error handling.
Separate connection errors into those occuring during
connection setup or upon rejecting a newly established
connection (the `PendingConnectionError`) and those
errors occurring on previously established connections,
i.e. for which a `ConnectionEstablished` event has
been emitted by the connection pool earlier.
* Revert change in log level and clarify an invariant.
* Remove inject_replaced entirely.
* Allow notifying all connection handlers.
Thereby simplify by introducing a new enum `NotifyHandler`,
used with a single constructor `NetworkBehaviourAction::NotifyHandler`.
* Finishing touches.
Small API simplifications and code deduplication.
Some more useful debug logging.
Co-authored-by: Max Inden <mail@max-inden.de>
Co-authored-by: Pierre Krieger <pierre.krieger1708@gmail.com>
Co-authored-by: Toralf Wittner <tw@dtex.org>
2020-03-04 13:49:25 +01:00
{
let mut pending = SmallVec ::new ( ) ;
let mut event = Some ( event ) ; // (1)
for id in ids . into_iter ( ) {
2022-11-03 05:47:00 +11:00
if let Some ( conn ) = pool . get_established ( id ) {
Multiple connections per peer (#1440)
* Allow multiple connections per peer in libp2p-core.
Instead of trying to enforce a single connection per peer,
which involves quite a bit of additional complexity e.g.
to prioritise simultaneously opened connections and can
have other undesirable consequences [1], we now
make multiple connections per peer a feature.
The gist of these changes is as follows:
The concept of a "node" with an implicit 1-1 correspondence
to a connection has been replaced with the "first-class"
concept of a "connection". The code from `src/nodes` has moved
(with varying degrees of modification) to `src/connection`.
A `HandledNode` has become a `Connection`, a `NodeHandler` a
`ConnectionHandler`, the `CollectionStream` was the basis for
the new `connection::Pool`, and so forth.
Conceptually, a `Network` contains a `connection::Pool` which
in turn internally employs the `connection::Manager` for
handling the background `connection::manager::Task`s, one
per connection, as before. These are all considered implementation
details. On the public API, `Peer`s are managed as before through
the `Network`, except now the API has changed with the shift of focus
to (potentially multiple) connections per peer. The `NetworkEvent`s have
accordingly also undergone changes.
The Swarm APIs remain largely unchanged, except for the fact that
`inject_replaced` is no longer called. It may now practically happen
that multiple `ProtocolsHandler`s are associated with a single
`NetworkBehaviour`, one per connection. If implementations of
`NetworkBehaviour` rely somehow on communicating with exactly
one `ProtocolsHandler`, this may cause issues, but it is unlikely.
[1]: https://github.com/paritytech/substrate/issues/4272
* Fix intra-rustdoc links.
* Update core/src/connection/pool.rs
Co-Authored-By: Max Inden <mail@max-inden.de>
* Address some review feedback and fix doc links.
* Allow responses to be sent on the same connection.
* Remove unnecessary remainders of inject_replaced.
* Update swarm/src/behaviour.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Update swarm/src/lib.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Update core/src/connection/manager.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Update core/src/connection/manager.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Update core/src/connection/pool.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Incorporate more review feedback.
* Move module declaration below imports.
* Update core/src/connection/manager.rs
Co-Authored-By: Toralf Wittner <tw@dtex.org>
* Update core/src/connection/manager.rs
Co-Authored-By: Toralf Wittner <tw@dtex.org>
* Simplify as per review.
* Fix rustoc link.
* Add try_notify_handler and simplify.
* Relocate DialingConnection and DialingAttempt.
For better visibility constraints.
* Small cleanup.
* Small cleanup. More robust EstablishedConnectionIter.
* Clarify semantics of `DialingPeer::connect`.
* Don't call inject_disconnected on InvalidPeerId.
To preserve the previous behavior and ensure calls to
`inject_disconnected` are always paired with calls to
`inject_connected`.
* Provide public ConnectionId constructor.
Mainly needed for testing purposes, e.g. in substrate.
* Move the established connection limit check to the right place.
* Clean up connection error handling.
Separate connection errors into those occuring during
connection setup or upon rejecting a newly established
connection (the `PendingConnectionError`) and those
errors occurring on previously established connections,
i.e. for which a `ConnectionEstablished` event has
been emitted by the connection pool earlier.
* Revert change in log level and clarify an invariant.
* Remove inject_replaced entirely.
* Allow notifying all connection handlers.
Thereby simplify by introducing a new enum `NotifyHandler`,
used with a single constructor `NetworkBehaviourAction::NotifyHandler`.
* Finishing touches.
Small API simplifications and code deduplication.
Some more useful debug logging.
Co-authored-by: Max Inden <mail@max-inden.de>
Co-authored-by: Pierre Krieger <pierre.krieger1708@gmail.com>
Co-authored-by: Toralf Wittner <tw@dtex.org>
2020-03-04 13:49:25 +01:00
match conn . poll_ready_notify_handler ( cx ) {
Poll ::Pending = > pending . push ( id ) ,
Poll ::Ready ( Err ( ( ) ) ) = > { } // connection is closing
Poll ::Ready ( Ok ( ( ) ) ) = > {
let e = event . take ( ) . expect ( " by (1),(2) " ) ;
if let Err ( e ) = conn . notify_handler ( e ) {
event = Some ( e ) // (2)
} else {
break ;
}
}
}
}
}
event . and_then ( | e | {
if ! pending . is_empty ( ) {
Some ( ( e , pending ) )
} else {
None
2021-08-11 13:12:12 +02:00
}
Multiple connections per peer (#1440)
* Allow multiple connections per peer in libp2p-core.
Instead of trying to enforce a single connection per peer,
which involves quite a bit of additional complexity e.g.
to prioritise simultaneously opened connections and can
have other undesirable consequences [1], we now
make multiple connections per peer a feature.
The gist of these changes is as follows:
The concept of a "node" with an implicit 1-1 correspondence
to a connection has been replaced with the "first-class"
concept of a "connection". The code from `src/nodes` has moved
(with varying degrees of modification) to `src/connection`.
A `HandledNode` has become a `Connection`, a `NodeHandler` a
`ConnectionHandler`, the `CollectionStream` was the basis for
the new `connection::Pool`, and so forth.
Conceptually, a `Network` contains a `connection::Pool` which
in turn internally employs the `connection::Manager` for
handling the background `connection::manager::Task`s, one
per connection, as before. These are all considered implementation
details. On the public API, `Peer`s are managed as before through
the `Network`, except now the API has changed with the shift of focus
to (potentially multiple) connections per peer. The `NetworkEvent`s have
accordingly also undergone changes.
The Swarm APIs remain largely unchanged, except for the fact that
`inject_replaced` is no longer called. It may now practically happen
that multiple `ProtocolsHandler`s are associated with a single
`NetworkBehaviour`, one per connection. If implementations of
`NetworkBehaviour` rely somehow on communicating with exactly
one `ProtocolsHandler`, this may cause issues, but it is unlikely.
[1]: https://github.com/paritytech/substrate/issues/4272
* Fix intra-rustdoc links.
* Update core/src/connection/pool.rs
Co-Authored-By: Max Inden <mail@max-inden.de>
* Address some review feedback and fix doc links.
* Allow responses to be sent on the same connection.
* Remove unnecessary remainders of inject_replaced.
* Update swarm/src/behaviour.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Update swarm/src/lib.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Update core/src/connection/manager.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Update core/src/connection/manager.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Update core/src/connection/pool.rs
Co-Authored-By: Pierre Krieger <pierre.krieger1708@gmail.com>
* Incorporate more review feedback.
* Move module declaration below imports.
* Update core/src/connection/manager.rs
Co-Authored-By: Toralf Wittner <tw@dtex.org>
* Update core/src/connection/manager.rs
Co-Authored-By: Toralf Wittner <tw@dtex.org>
* Simplify as per review.
* Fix rustdoc link.
* Add try_notify_handler and simplify.
* Relocate DialingConnection and DialingAttempt.
For better visibility constraints.
* Small cleanup.
* Small cleanup. More robust EstablishedConnectionIter.
* Clarify semantics of `DialingPeer::connect`.
* Don't call inject_disconnected on InvalidPeerId.
To preserve the previous behavior and ensure calls to
`inject_disconnected` are always paired with calls to
`inject_connected`.
* Provide public ConnectionId constructor.
Mainly needed for testing purposes, e.g. in substrate.
* Move the established connection limit check to the right place.
* Clean up connection error handling.
Separate connection errors into those occurring during
connection setup or upon rejecting a newly established
connection (the `PendingConnectionError`) and those
errors occurring on previously established connections,
i.e. for which a `ConnectionEstablished` event has
been emitted by the connection pool earlier.
* Revert change in log level and clarify an invariant.
* Remove inject_replaced entirely.
* Allow notifying all connection handlers.
Thereby simplify by introducing a new enum `NotifyHandler`,
used with a single constructor `NetworkBehaviourAction::NotifyHandler`.
* Finishing touches.
Small API simplifications and code deduplication.
Some more useful debug logging.
Co-authored-by: Max Inden <mail@max-inden.de>
Co-authored-by: Pierre Krieger <pierre.krieger1708@gmail.com>
Co-authored-by: Toralf Wittner <tw@dtex.org>
2020-03-04 13:49:25 +01:00
} )
}
2021-08-09 15:29:58 +02:00
/// Stream of events returned by [`Swarm`].
2021-06-14 20:41:44 +02:00
///
/// Includes events from the [`NetworkBehaviour`] as well as events about
/// connection and listener status. See [`SwarmEvent`] for details.
///
/// Note: This stream is infinite and it is guaranteed that
2023-05-12 08:19:23 +02:00
/// [`futures::Stream::poll_next`] will never return `Poll::Ready(None)`.
impl < TBehaviour > futures ::Stream for Swarm < TBehaviour >
2021-08-09 15:29:58 +02:00
where
TBehaviour : NetworkBehaviour ,
2020-01-07 11:57:00 +01:00
{
2021-08-09 15:29:58 +02:00
type Item = SwarmEvent < TBehaviourOutEvent < TBehaviour > , THandlerErr < TBehaviour > > ;
2020-01-07 11:57:00 +01:00
2020-07-27 20:27:33 +00:00
fn poll_next ( mut self : Pin < & mut Self > , cx : & mut Context < '_ > ) -> Poll < Option < Self ::Item > > {
2021-06-14 20:41:44 +02:00
self . as_mut ( ) . poll_next_event ( cx ) . map ( Some )
2020-01-07 11:57:00 +01:00
}
}
2021-06-14 20:41:44 +02:00
/// The stream of swarm events never terminates, so we can implement fused for it.
2021-08-09 15:29:58 +02:00
impl < TBehaviour > FusedStream for Swarm < TBehaviour >
where
TBehaviour : NetworkBehaviour ,
2020-02-18 10:33:01 +01:00
{
fn is_terminated ( & self ) -> bool {
false
}
}
2019-04-04 12:25:42 -03:00
/// Parameters passed to `poll()`, that the `NetworkBehaviour` has access to.
// TODO: #[derive(Debug)]
2019-06-18 10:23:26 +02:00
pub struct SwarmPollParameters < ' a > {
2019-04-04 12:25:42 -03:00
supported_protocols : & ' a [ Vec < u8 > ] ,
}
2019-06-18 10:23:26 +02:00
impl < ' a > PollParameters for SwarmPollParameters < ' a > {
2022-02-28 10:05:17 +01:00
type SupportedProtocolsIter = std ::iter ::Cloned < std ::slice ::Iter < ' a , std ::vec ::Vec < u8 > > > ;
2019-06-18 10:23:26 +02:00
fn supported_protocols ( & self ) -> Self ::SupportedProtocolsIter {
2022-02-28 10:05:17 +01:00
self . supported_protocols . iter ( ) . cloned ( )
2019-04-04 12:25:42 -03:00
}
}
2022-02-13 21:57:38 +01:00
/// A [`SwarmBuilder`] provides an API for configuring and constructing a [`Swarm`].
2020-10-31 01:51:27 +11:00
pub struct SwarmBuilder < TBehaviour > {
2019-04-04 12:25:42 -03:00
local_peer_id : PeerId ,
2020-10-31 01:51:27 +11:00
transport : transport ::Boxed < ( PeerId , StreamMuxerBox ) > ,
2019-04-04 12:25:42 -03:00
behaviour : TBehaviour ,
2022-02-13 21:57:38 +01:00
pool_config : PoolConfig ,
2019-04-04 12:25:42 -03:00
}
2020-10-31 01:51:27 +11:00
impl < TBehaviour > SwarmBuilder < TBehaviour >
2021-08-09 15:29:58 +02:00
where
TBehaviour : NetworkBehaviour ,
2019-04-04 12:25:42 -03:00
{
2022-11-15 15:26:03 +01:00
/// Creates a new [`SwarmBuilder`] from the given transport, behaviour, local peer ID and
/// executor. The `Swarm` with its underlying `Network` is obtained via
/// [`SwarmBuilder::build`].
pub fn with_executor (
transport : transport ::Boxed < ( PeerId , StreamMuxerBox ) > ,
behaviour : TBehaviour ,
local_peer_id : PeerId ,
executor : impl Executor + Send + 'static ,
) -> Self {
Self {
local_peer_id ,
transport ,
behaviour ,
pool_config : PoolConfig ::new ( Some ( Box ::new ( executor ) ) ) ,
}
}
2023-03-13 20:53:14 +01:00
/// Sets executor to the `wasm` executor.
/// Background tasks will be executed by the browser on the next micro-tick.
///
/// Spawning a task is similar too:
/// ```typescript
/// function spawn(task: () => Promise<void>) {
/// task()
/// }
/// ```
#[ cfg(feature = " wasm-bindgen " ) ]
pub fn with_wasm_executor (
transport : transport ::Boxed < ( PeerId , StreamMuxerBox ) > ,
behaviour : TBehaviour ,
local_peer_id : PeerId ,
) -> Self {
Self ::with_executor (
transport ,
behaviour ,
local_peer_id ,
crate ::executor ::WasmBindgenExecutor ,
)
}
2022-11-23 19:19:22 +01:00
/// Builds a new [`SwarmBuilder`] from the given transport, behaviour, local peer ID and a
/// `tokio` executor.
#[ cfg(all(
feature = " tokio " ,
not ( any ( target_os = " emscripten " , target_os = " wasi " , target_os = " unknown " ) )
) ) ]
pub fn with_tokio_executor (
transport : transport ::Boxed < ( PeerId , StreamMuxerBox ) > ,
behaviour : TBehaviour ,
local_peer_id : PeerId ,
) -> Self {
Self ::with_executor (
transport ,
behaviour ,
local_peer_id ,
crate ::executor ::TokioExecutor ,
)
}
/// Builds a new [`SwarmBuilder`] from the given transport, behaviour, local peer ID and a
/// `async-std` executor.
#[ cfg(all(
feature = " async-std " ,
not ( any ( target_os = " emscripten " , target_os = " wasi " , target_os = " unknown " ) )
) ) ]
pub fn with_async_std_executor (
transport : transport ::Boxed < ( PeerId , StreamMuxerBox ) > ,
behaviour : TBehaviour ,
local_peer_id : PeerId ,
) -> Self {
Self ::with_executor (
transport ,
behaviour ,
local_peer_id ,
crate ::executor ::AsyncStdExecutor ,
)
}
2022-11-15 15:26:03 +01:00
/// Creates a new [`SwarmBuilder`] from the given transport, behaviour and local peer ID. The
/// `Swarm` with its underlying `Network` is obtained via [`SwarmBuilder::build`].
///
/// ## ⚠️ Performance warning
/// All connections will be polled on the current task, thus quite bad performance
/// characteristics should be expected. Whenever possible use an executor and
/// [`SwarmBuilder::with_executor`].
pub fn without_executor (
transport : transport ::Boxed < ( PeerId , StreamMuxerBox ) > ,
behaviour : TBehaviour ,
local_peer_id : PeerId ,
) -> Self {
Self {
local_peer_id ,
transport ,
behaviour ,
pool_config : PoolConfig ::new ( None ) ,
2019-04-04 12:25:42 -03:00
}
}
2020-05-15 14:40:10 +02:00
/// Configures the number of events from the [`NetworkBehaviour`] in
2022-02-21 13:32:24 +01:00
/// destination to the [`ConnectionHandler`] that can be buffered before
2020-05-15 14:40:10 +02:00
/// the [`Swarm`] has to wait. An individual buffer with this number of
/// events exists for each individual connection.
///
/// The ideal value depends on the executor used, the CPU speed, and the
/// volume of events. If this value is too low, then the [`Swarm`] will
/// be sleeping more often than necessary. Increasing this value increases
/// the overall memory usage.
pub fn notify_handler_buffer_size ( mut self , n : NonZeroUsize ) -> Self {
2022-02-13 21:57:38 +01:00
self . pool_config = self . pool_config . with_notify_handler_buffer_size ( n ) ;
2020-05-15 14:40:10 +02:00
self
}
2023-01-20 09:49:11 +11:00
/// Configures the size of the buffer for events sent by a [`ConnectionHandler`] to the
/// [`NetworkBehaviour`].
2020-05-15 14:40:10 +02:00
///
2023-01-20 09:49:11 +11:00
/// Each connection has its own buffer.
2020-05-15 14:40:10 +02:00
///
2023-01-20 09:49:11 +11:00
/// The ideal value depends on the executor used, the CPU speed and the volume of events.
/// If this value is too low, then the [`ConnectionHandler`]s will be sleeping more often
2020-05-15 14:40:10 +02:00
/// than necessary. Increasing this value increases the overall memory
/// usage, and more importantly the latency between the moment when an
/// event is emitted and the moment when it is received by the
/// [`NetworkBehaviour`].
2023-01-20 09:49:11 +11:00
pub fn per_connection_event_buffer_size ( mut self , n : usize ) -> Self {
self . pool_config = self . pool_config . with_per_connection_event_buffer_size ( n ) ;
2020-05-15 14:40:10 +02:00
self
}
2021-10-14 18:05:07 +02:00
/// Number of addresses concurrently dialed for a single outbound connection attempt.
pub fn dial_concurrency_factor ( mut self , factor : NonZeroU8 ) -> Self {
2022-02-13 21:57:38 +01:00
self . pool_config = self . pool_config . with_dial_concurrency_factor ( factor ) ;
2021-10-14 18:05:07 +02:00
self
}
2020-11-25 14:26:49 +01:00
/// Configures an override for the substream upgrade protocol to use.
///
/// The subtream upgrade protocol is the multistream-select protocol
/// used for protocol negotiation on substreams. Since a listener
/// supports all existing versions, the choice of upgrade protocol
/// only effects the "dialer", i.e. the peer opening a substream.
///
/// > **Note**: If configured, specific upgrade protocols for
/// > individual [`SubstreamProtocol`]s emitted by the `NetworkBehaviour`
/// > are ignored.
pub fn substream_upgrade_protocol_override ( mut self , v : libp2p_core ::upgrade ::Version ) -> Self {
2022-02-18 11:32:58 +01:00
self . pool_config = self . pool_config . with_substream_upgrade_protocol_override ( v ) ;
2020-11-25 14:26:49 +01:00
self
}
2022-10-03 19:01:45 -06:00
/// The maximum number of inbound streams concurrently negotiating on a
/// connection. New inbound streams exceeding the limit are dropped and thus
/// reset.
2022-06-08 11:48:46 +02:00
///
2022-10-03 19:01:45 -06:00
/// Note: This only enforces a limit on the number of concurrently
/// negotiating inbound streams. The total number of inbound streams on a
/// connection is the sum of negotiating and negotiated streams. A limit on
/// the total number of streams can be enforced at the
/// [`StreamMuxerBox`](libp2p_core::muxing::StreamMuxerBox) level.
2022-06-08 11:48:46 +02:00
pub fn max_negotiating_inbound_streams ( mut self , v : usize ) -> Self {
self . pool_config = self . pool_config . with_max_negotiating_inbound_streams ( v ) ;
self
}
2020-03-31 15:41:13 +02:00
/// Builds a `Swarm` with the current configuration.
2023-02-24 10:43:33 +11:00
pub fn build ( self ) -> Swarm < TBehaviour > {
2021-08-09 15:29:58 +02:00
Swarm {
2022-02-13 21:57:38 +01:00
local_peer_id : self . local_peer_id ,
2022-07-04 04:16:57 +02:00
transport : self . transport ,
2023-05-08 12:54:53 +02:00
pool : Pool ::new ( self . local_peer_id , self . pool_config ) ,
2019-04-04 12:25:42 -03:00
behaviour : self . behaviour ,
2023-02-24 10:43:33 +11:00
supported_protocols : Default ::default ( ) ,
2023-05-24 09:52:16 +02:00
confirmed_external_addr : Default ::default ( ) ,
2022-07-04 04:16:57 +02:00
listened_addrs : HashMap ::new ( ) ,
2020-11-25 14:26:49 +01:00
pending_event : None ,
2019-04-04 12:25:42 -03:00
}
}
}
2023-01-27 10:23:55 +11:00
/// Possible errors when trying to establish or upgrade an outbound connection.
2021-10-14 18:05:07 +02:00
#[ derive(Debug) ]
2020-05-12 13:10:18 +02:00
pub enum DialError {
2023-01-26 11:20:23 +04:00
/// The peer identity obtained on the connection matches the local peer.
2023-02-24 10:43:33 +11:00
LocalPeerId {
endpoint : ConnectedPoint ,
} ,
2023-05-08 10:30:29 +02:00
/// No addresses have been provided by [`NetworkBehaviour::handle_pending_outbound_connection`] and [`DialOpts`].
2020-05-12 13:10:18 +02:00
NoAddresses ,
2021-11-15 14:17:23 +01:00
/// The provided [`dial_opts::PeerCondition`] evaluated to false and thus
/// the dial was aborted.
DialPeerConditionFalse ( dial_opts ::PeerCondition ) ,
2021-10-14 18:05:07 +02:00
/// Pending connection attempt has been aborted.
Aborted ,
2022-01-18 21:21:11 +01:00
/// The peer identity obtained on the connection did not match the one that was expected.
WrongPeerId {
obtained : PeerId ,
endpoint : ConnectedPoint ,
} ,
2023-02-24 10:43:33 +11:00
Denied {
cause : ConnectionDenied ,
} ,
2021-10-14 18:05:07 +02:00
/// An error occurred while negotiating the transport protocol(s) on a connection.
Transport ( Vec < ( Multiaddr , TransportError < io ::Error > ) > ) ,
2020-05-12 13:10:18 +02:00
}
2022-12-23 11:13:34 +11:00
impl From < PendingOutboundConnectionError > for DialError {
fn from ( error : PendingOutboundConnectionError ) -> Self {
2021-10-14 18:05:07 +02:00
match error {
PendingConnectionError ::Aborted = > DialError ::Aborted ,
2022-01-18 21:21:11 +01:00
PendingConnectionError ::WrongPeerId { obtained , endpoint } = > {
DialError ::WrongPeerId { obtained , endpoint }
}
2023-01-26 11:20:23 +04:00
PendingConnectionError ::LocalPeerId { endpoint } = > DialError ::LocalPeerId { endpoint } ,
2021-10-14 18:05:07 +02:00
PendingConnectionError ::Transport ( e ) = > DialError ::Transport ( e ) ,
}
}
}
2020-05-12 13:10:18 +02:00
impl fmt ::Display for DialError {
fn fmt ( & self , f : & mut fmt ::Formatter < '_ > ) -> fmt ::Result {
match self {
2020-08-04 11:30:09 +02:00
DialError ::NoAddresses = > write! ( f , " Dial error: no addresses for peer. " ) ,
2023-01-26 11:20:23 +04:00
DialError ::LocalPeerId { endpoint } = > write! (
f ,
" Dial error: tried to dial local peer id at {endpoint:?}. "
) ,
2021-08-31 17:00:51 +02:00
DialError ::DialPeerConditionFalse ( c ) = > {
2022-12-14 16:45:04 +01:00
write! ( f , " Dial error: condition {c:?} for dialing peer was false. " )
2021-08-31 17:00:51 +02:00
}
2021-10-14 18:05:07 +02:00
DialError ::Aborted = > write! (
f ,
" Dial error: Pending connection attempt has been aborted. "
) ,
2022-11-23 11:51:47 +11:00
DialError ::WrongPeerId { obtained , endpoint } = > write! (
f ,
2022-12-14 16:45:04 +01:00
" Dial error: Unexpected peer ID {obtained} at {endpoint:?}. "
2022-11-23 11:51:47 +11:00
) ,
DialError ::Transport ( errors ) = > {
write! ( f , " Failed to negotiate transport protocol(s): [ " ) ? ;
for ( addr , error ) in errors {
write! ( f , " ({addr} " ) ? ;
print_error_chain ( f , error ) ? ;
write! ( f , " ) " ) ? ;
}
write! ( f , " ] " ) ? ;
Ok ( ( ) )
}
2023-02-24 10:43:33 +11:00
DialError ::Denied { .. } = > {
write! ( f , " Dial error " )
}
2020-05-12 13:10:18 +02:00
}
}
}
2022-11-23 11:51:47 +11:00
fn print_error_chain ( f : & mut fmt ::Formatter < '_ > , e : & dyn error ::Error ) -> fmt ::Result {
write! ( f , " : {e} " ) ? ;
if let Some ( source ) = e . source ( ) {
print_error_chain ( f , source ) ? ;
}
Ok ( ( ) )
}
2020-05-12 13:10:18 +02:00
impl error ::Error for DialError {
fn source ( & self ) -> Option < & ( dyn error ::Error + 'static ) > {
match self {
2023-01-26 11:20:23 +04:00
DialError ::LocalPeerId { .. } = > None ,
2020-08-04 11:30:09 +02:00
DialError ::NoAddresses = > None ,
2021-08-31 17:00:51 +02:00
DialError ::DialPeerConditionFalse ( _ ) = > None ,
2021-10-14 18:05:07 +02:00
DialError ::Aborted = > None ,
2022-01-18 21:21:11 +01:00
DialError ::WrongPeerId { .. } = > None ,
2021-10-14 18:05:07 +02:00
DialError ::Transport ( _ ) = > None ,
2023-02-24 10:43:33 +11:00
DialError ::Denied { cause } = > Some ( cause ) ,
2020-05-12 13:10:18 +02:00
}
}
}
2023-01-27 10:23:55 +11:00
/// Possible errors when upgrading an inbound connection.
#[ derive(Debug) ]
pub enum ListenError {
/// Pending connection attempt has been aborted.
Aborted ,
/// The peer identity obtained on the connection did not match the one that was expected.
WrongPeerId {
obtained : PeerId ,
endpoint : ConnectedPoint ,
} ,
2023-02-24 10:43:33 +11:00
/// The connection was dropped because it resolved to our own [`PeerId`].
LocalPeerId {
endpoint : ConnectedPoint ,
} ,
Denied {
cause : ConnectionDenied ,
} ,
2023-01-27 10:23:55 +11:00
/// An error occurred while negotiating the transport protocol(s) on a connection.
Transport ( TransportError < io ::Error > ) ,
}
impl From < PendingInboundConnectionError > for ListenError {
fn from ( error : PendingInboundConnectionError ) -> Self {
match error {
PendingInboundConnectionError ::Transport ( inner ) = > ListenError ::Transport ( inner ) ,
PendingInboundConnectionError ::Aborted = > ListenError ::Aborted ,
PendingInboundConnectionError ::WrongPeerId { obtained , endpoint } = > {
ListenError ::WrongPeerId { obtained , endpoint }
}
PendingInboundConnectionError ::LocalPeerId { endpoint } = > {
ListenError ::LocalPeerId { endpoint }
}
}
}
}
impl fmt ::Display for ListenError {
fn fmt ( & self , f : & mut fmt ::Formatter < '_ > ) -> fmt ::Result {
match self {
ListenError ::Aborted = > write! (
f ,
" Listen error: Pending connection attempt has been aborted. "
) ,
ListenError ::WrongPeerId { obtained , endpoint } = > write! (
f ,
" Listen error: Unexpected peer ID {obtained} at {endpoint:?}. "
) ,
ListenError ::Transport ( _ ) = > {
write! ( f , " Listen error: Failed to negotiate transport protocol(s) " )
}
2023-02-24 10:43:33 +11:00
ListenError ::Denied { .. } = > {
write! ( f , " Listen error " )
}
2023-01-27 10:23:55 +11:00
ListenError ::LocalPeerId { endpoint } = > {
2023-02-24 10:43:33 +11:00
write! ( f , " Listen error: Local peer ID at {endpoint:?}. " )
2023-01-27 10:23:55 +11:00
}
}
}
}
impl error ::Error for ListenError {
fn source ( & self ) -> Option < & ( dyn error ::Error + 'static ) > {
match self {
ListenError ::WrongPeerId { .. } = > None ,
ListenError ::Transport ( err ) = > Some ( err ) ,
ListenError ::Aborted = > None ,
2023-02-24 10:43:33 +11:00
ListenError ::Denied { cause } = > Some ( cause ) ,
2023-01-27 10:23:55 +11:00
ListenError ::LocalPeerId { .. } = > None ,
}
}
}
2023-03-21 21:58:09 +01:00
/// A connection was denied.
///
/// To figure out which [`NetworkBehaviour`] denied the connection, use [`ConnectionDenied::downcast`].
2023-02-24 10:43:33 +11:00
#[ derive(Debug) ]
pub struct ConnectionDenied {
inner : Box < dyn error ::Error + Send + Sync + 'static > ,
}
impl ConnectionDenied {
pub fn new ( cause : impl error ::Error + Send + Sync + 'static ) -> Self {
Self {
inner : Box ::new ( cause ) ,
}
}
2023-03-21 16:04:53 +01:00
/// Attempt to downcast to a particular reason for why the connection was denied.
pub fn downcast < E > ( self ) -> Result < E , Self >
where
E : error ::Error + Send + Sync + 'static ,
{
let inner = self
. inner
. downcast ::< E > ( )
. map_err ( | inner | ConnectionDenied { inner } ) ? ;
Ok ( * inner )
}
2023-06-12 09:26:09 -06:00
/// Attempt to downcast to a particular reason for why the connection was denied.
pub fn downcast_ref < E > ( & self ) -> Option < & E >
where
E : error ::Error + Send + Sync + 'static ,
{
self . inner . downcast_ref ::< E > ( )
}
2023-02-24 10:43:33 +11:00
}
impl fmt ::Display for ConnectionDenied {
fn fmt ( & self , f : & mut fmt ::Formatter < '_ > ) -> fmt ::Result {
write! ( f , " connection denied " )
}
}
impl error ::Error for ConnectionDenied {
fn source ( & self ) -> Option < & ( dyn error ::Error + 'static ) > {
Some ( self . inner . as_ref ( ) )
}
}
2022-02-13 21:57:38 +01:00
/// Information about the connections obtained by [`Swarm::network_info()`].
#[ derive(Clone, Debug) ]
pub struct NetworkInfo {
/// The total number of connected peers.
num_peers : usize ,
/// Counters of ongoing network connections.
connection_counters : ConnectionCounters ,
}
impl NetworkInfo {
/// The number of connected peers, i.e. peers with whom at least
/// one established connection exists.
pub fn num_peers ( & self ) -> usize {
self . num_peers
}
/// Gets counters for ongoing network connections.
pub fn connection_counters ( & self ) -> & ConnectionCounters {
& self . connection_counters
}
}
2022-04-06 20:23:16 +02:00
/// Ensures a given `Multiaddr` is a `/p2p/...` address for the given peer.
///
/// If the given address is already a `p2p` address for the given peer,
/// i.e. the last encapsulated protocol is `/p2p/<peer-id>`, this is a no-op.
///
/// If the given address is already a `p2p` address for a different peer
/// than the one given, the given `Multiaddr` is returned as an `Err`.
///
/// If the given address is not yet a `p2p` address for the given peer,
/// the `/p2p/<peer-id>` protocol is appended to the returned address.
fn p2p_addr ( peer : Option < PeerId > , addr : Multiaddr ) -> Result < Multiaddr , Multiaddr > {
let peer = match peer {
Some ( p ) = > p ,
None = > return Ok ( addr ) ,
} ;
2023-06-08 03:38:18 +02:00
if let Some ( multiaddr ::Protocol ::P2p ( peer_id ) ) = addr . iter ( ) . last ( ) {
if peer_id ! = peer {
2022-04-06 20:23:16 +02:00
return Err ( addr ) ;
}
2023-06-08 03:38:18 +02:00
return Ok ( addr ) ;
2022-04-06 20:23:16 +02:00
}
2023-06-08 03:38:18 +02:00
Ok ( addr . with ( multiaddr ::Protocol ::P2p ( peer ) ) )
2022-04-06 20:23:16 +02:00
}
2020-02-07 16:29:30 +01:00
#[ cfg(test) ]
mod tests {
2020-08-04 11:30:09 +02:00
use super ::* ;
use crate ::test ::{ CallTraceBehaviour , MockBehaviour } ;
2022-02-13 21:57:38 +01:00
use futures ::executor ::block_on ;
2022-11-15 15:26:03 +01:00
use futures ::executor ::ThreadPool ;
2023-05-08 12:54:53 +02:00
use futures ::{ executor , future } ;
2022-02-13 21:57:38 +01:00
use libp2p_core ::multiaddr ::multiaddr ;
2022-11-23 11:51:47 +11:00
use libp2p_core ::transport ::memory ::MemoryTransportError ;
2022-07-04 04:16:57 +02:00
use libp2p_core ::transport ::TransportEvent ;
2023-06-05 17:16:04 +02:00
use libp2p_core ::Endpoint ;
2023-03-13 01:46:58 +11:00
use libp2p_core ::{ multiaddr , transport , upgrade } ;
use libp2p_identity as identity ;
2022-11-13 10:59:14 +11:00
use libp2p_plaintext as plaintext ;
use libp2p_yamux as yamux ;
2022-09-22 12:48:32 +04:00
use quickcheck ::* ;
2019-04-04 12:25:42 -03:00
2021-07-03 00:35:51 +07:00
// Test execution state.
// Connection => Disconnecting => Connecting.
enum State {
Connecting ,
Disconnecting ,
}
2022-02-13 21:57:38 +01:00
fn new_test_swarm < T , O > (
handler_proto : T ,
) -> SwarmBuilder < CallTraceBehaviour < MockBehaviour < T , O > > >
2020-08-04 11:30:09 +02:00
where
2022-02-21 13:32:24 +01:00
T : ConnectionHandler + Clone ,
2023-05-14 12:58:08 +02:00
T ::ToBehaviour : Clone ,
2020-08-04 11:30:09 +02:00
O : Send + 'static ,
{
2020-09-07 12:13:10 +02:00
let id_keys = identity ::Keypair ::generate_ed25519 ( ) ;
2021-08-31 17:00:51 +02:00
let local_public_key = id_keys . public ( ) ;
2020-09-07 12:13:10 +02:00
let transport = transport ::MemoryTransport ::default ( )
2020-08-04 11:30:09 +02:00
. upgrade ( upgrade ::Version ::V1 )
2021-08-31 17:00:51 +02:00
. authenticate ( plaintext ::PlainText2Config {
local_public_key : local_public_key . clone ( ) ,
} )
2023-05-01 04:25:52 +02:00
. multiplex ( yamux ::Config ::default ( ) )
2020-10-16 16:53:02 +02:00
. boxed ( ) ;
2020-09-07 12:13:10 +02:00
let behaviour = CallTraceBehaviour ::new ( MockBehaviour ::new ( handler_proto ) ) ;
2022-11-15 15:26:03 +01:00
match ThreadPool ::new ( ) . ok ( ) {
Some ( tp ) = > {
SwarmBuilder ::with_executor ( transport , behaviour , local_public_key . into ( ) , tp )
}
None = > SwarmBuilder ::without_executor ( transport , behaviour , local_public_key . into ( ) ) ,
}
2020-08-04 11:30:09 +02:00
}
2021-07-03 00:35:51 +07:00
fn swarms_connected < TBehaviour > (
swarm1 : & Swarm < CallTraceBehaviour < TBehaviour > > ,
swarm2 : & Swarm < CallTraceBehaviour < TBehaviour > > ,
num_connections : usize ,
) -> bool
where
TBehaviour : NetworkBehaviour ,
2023-02-24 10:43:33 +11:00
THandlerOutEvent < TBehaviour > : Clone ,
2021-07-03 00:35:51 +07:00
{
2022-03-02 12:10:57 +01:00
swarm1
. behaviour ( )
. num_connections_to_peer ( * swarm2 . local_peer_id ( ) )
= = num_connections
& & swarm2
. behaviour ( )
. num_connections_to_peer ( * swarm1 . local_peer_id ( ) )
= = num_connections
& & swarm1 . is_connected ( swarm2 . local_peer_id ( ) )
& & swarm2 . is_connected ( swarm1 . local_peer_id ( ) )
2021-07-03 00:35:51 +07:00
}
fn swarms_disconnected < TBehaviour : NetworkBehaviour > (
swarm1 : & Swarm < CallTraceBehaviour < TBehaviour > > ,
swarm2 : & Swarm < CallTraceBehaviour < TBehaviour > > ,
) -> bool
where
TBehaviour : NetworkBehaviour ,
2023-02-24 10:43:33 +11:00
THandlerOutEvent < TBehaviour > : Clone ,
2021-07-03 00:35:51 +07:00
{
2022-03-02 12:10:57 +01:00
swarm1
. behaviour ( )
. num_connections_to_peer ( * swarm2 . local_peer_id ( ) )
= = 0
& & swarm2
. behaviour ( )
. num_connections_to_peer ( * swarm1 . local_peer_id ( ) )
= = 0
& & ! swarm1 . is_connected ( swarm2 . local_peer_id ( ) )
& & ! swarm2 . is_connected ( swarm1 . local_peer_id ( ) )
2021-07-03 00:35:51 +07:00
}
/// Establishes multiple connections between two peers,
2021-08-09 15:29:58 +02:00
/// after which one peer disconnects the other using [`Swarm::disconnect_peer_id`].
2021-07-03 00:35:51 +07:00
///
2023-01-12 11:21:02 +00:00
/// The test expects both behaviours to be notified via calls to [`NetworkBehaviour::on_swarm_event`]
/// with pairs of [`FromSwarm::ConnectionEstablished`] / [`FromSwarm::ConnectionClosed`]
2021-07-03 00:35:51 +07:00
#[ test ]
fn test_swarm_disconnect ( ) {
// Since the test does not try to open any substreams, we can
// use the dummy protocols handler.
2022-10-06 03:50:11 +11:00
let handler_proto = keep_alive ::ConnectionHandler ;
2021-07-03 00:35:51 +07:00
2022-02-13 21:57:38 +01:00
let mut swarm1 = new_test_swarm ::< _ , ( ) > ( handler_proto . clone ( ) ) . build ( ) ;
let mut swarm2 = new_test_swarm ::< _ , ( ) > ( handler_proto ) . build ( ) ;
2021-07-03 00:35:51 +07:00
let addr1 : Multiaddr = multiaddr ::Protocol ::Memory ( rand ::random ::< u64 > ( ) ) . into ( ) ;
let addr2 : Multiaddr = multiaddr ::Protocol ::Memory ( rand ::random ::< u64 > ( ) ) . into ( ) ;
2021-11-26 10:48:12 -05:00
swarm1 . listen_on ( addr1 . clone ( ) ) . unwrap ( ) ;
swarm2 . listen_on ( addr2 . clone ( ) ) . unwrap ( ) ;
2021-07-03 00:35:51 +07:00
let swarm1_id = * swarm1 . local_peer_id ( ) ;
let mut reconnected = false ;
let num_connections = 10 ;
for _ in 0 .. num_connections {
2021-11-15 14:17:23 +01:00
swarm1 . dial ( addr2 . clone ( ) ) . unwrap ( ) ;
2021-07-03 00:35:51 +07:00
}
let mut state = State ::Connecting ;
executor ::block_on ( future ::poll_fn ( move | cx | loop {
let poll1 = Swarm ::poll_next_event ( Pin ::new ( & mut swarm1 ) , cx ) ;
let poll2 = Swarm ::poll_next_event ( Pin ::new ( & mut swarm2 ) , cx ) ;
match state {
State ::Connecting = > {
if swarms_connected ( & swarm1 , & swarm2 , num_connections ) {
if reconnected {
return Poll ::Ready ( ( ) ) ;
}
2021-08-11 13:12:12 +02:00
swarm2
2021-11-26 10:48:12 -05:00
. disconnect_peer_id ( swarm1_id )
2021-07-03 00:35:51 +07:00
. expect ( " Error disconnecting " ) ;
state = State ::Disconnecting ;
}
2021-08-11 13:12:12 +02:00
}
2021-07-03 00:35:51 +07:00
State ::Disconnecting = > {
2022-03-02 12:10:57 +01:00
if swarms_disconnected ( & swarm1 , & swarm2 ) {
2021-07-03 00:35:51 +07:00
if reconnected {
return Poll ::Ready ( ( ) ) ;
}
reconnected = true ;
for _ in 0 .. num_connections {
2021-11-15 14:17:23 +01:00
swarm2 . dial ( addr1 . clone ( ) ) . unwrap ( ) ;
2020-08-04 11:30:09 +02:00
}
2021-07-03 00:35:51 +07:00
state = State ::Connecting ;
2020-08-04 11:30:09 +02:00
}
}
2021-08-11 13:12:12 +02:00
}
2020-08-04 11:30:09 +02:00
if poll1 . is_pending ( ) & & poll2 . is_pending ( ) {
return Poll ::Pending ;
}
} ) )
}
2021-07-03 00:35:51 +07:00
/// Establishes multiple connections between two peers,
/// after which one peer disconnects the other
/// using [`ToSwarm::CloseConnection`] returned by a [`NetworkBehaviour`].
///
/// The test expects both behaviours to be notified via calls to [`NetworkBehaviour::on_swarm_event`]
/// with pairs of [`FromSwarm::ConnectionEstablished`] / [`FromSwarm::ConnectionClosed`]
#[test]
fn test_behaviour_disconnect_all() {
    // Since the test does not try to open any substreams, we can
    // use the dummy protocols handler.
    let handler_proto = keep_alive::ConnectionHandler;

    let mut swarm1 = new_test_swarm::<_, ()>(handler_proto.clone()).build();
    let mut swarm2 = new_test_swarm::<_, ()>(handler_proto).build();

    let addr1: Multiaddr = multiaddr::Protocol::Memory(rand::random::<u64>()).into();
    let addr2: Multiaddr = multiaddr::Protocol::Memory(rand::random::<u64>()).into();

    swarm1.listen_on(addr1.clone()).unwrap();
    swarm2.listen_on(addr2.clone()).unwrap();

    let swarm1_id = *swarm1.local_peer_id();

    // `reconnected` distinguishes the first pass through `Connecting`
    // (initial dials) from the second one (re-dials after the forced
    // disconnect); the test finishes on the second pass.
    let mut reconnected = false;
    let num_connections = 10;

    for _ in 0..num_connections {
        swarm1.dial(addr2.clone()).unwrap();
    }
    let mut state = State::Connecting;

    executor::block_on(future::poll_fn(move |cx| loop {
        // Poll both swarms every iteration, regardless of `state`, so that
        // connection setup and teardown keep making progress on each side.
        let poll1 = Swarm::poll_next_event(Pin::new(&mut swarm1), cx);
        let poll2 = Swarm::poll_next_event(Pin::new(&mut swarm2), cx);
        match state {
            State::Connecting => {
                if swarms_connected(&swarm1, &swarm2, num_connections) {
                    if reconnected {
                        // All connections were re-established after the
                        // behaviour-initiated disconnect — test passed.
                        return Poll::Ready(());
                    }
                    // Instruct swarm2's behaviour to close *all* connections
                    // to swarm1 on its next poll.
                    swarm2
                        .behaviour
                        .inner()
                        .next_action
                        .replace(ToSwarm::CloseConnection {
                            peer_id: swarm1_id,
                            connection: CloseConnection::All,
                        });
                    state = State::Disconnecting;
                    // Re-poll immediately so the injected action is executed.
                    continue;
                }
            }
            State::Disconnecting => {
                if swarms_disconnected(&swarm1, &swarm2) {
                    reconnected = true;
                    // Dial in the opposite direction this time (swarm2 -> swarm1).
                    for _ in 0..num_connections {
                        swarm2.dial(addr1.clone()).unwrap();
                    }
                    state = State::Connecting;
                    continue;
                }
            }
        }
        // Only yield once both swarms have no more progress to make.
        if poll1.is_pending() && poll2.is_pending() {
            return Poll::Pending;
        }
    }))
}
/// Establishes multiple connections between two peers,
/// after which one peer closes a single connection
/// using [`ToSwarm::CloseConnection`] returned by a [`NetworkBehaviour`].
///
/// The test expects both behaviours to be notified via calls to [`NetworkBehaviour::on_swarm_event`]
/// with pairs of [`FromSwarm::ConnectionEstablished`] / [`FromSwarm::ConnectionClosed`]
#[test]
fn test_behaviour_disconnect_one() {
    // Since the test does not try to open any substreams, we can
    // use the dummy protocols handler.
    let handler_proto = keep_alive::ConnectionHandler;

    let mut swarm1 = new_test_swarm::<_, ()>(handler_proto.clone()).build();
    let mut swarm2 = new_test_swarm::<_, ()>(handler_proto).build();

    let addr1: Multiaddr = multiaddr::Protocol::Memory(rand::random::<u64>()).into();
    let addr2: Multiaddr = multiaddr::Protocol::Memory(rand::random::<u64>()).into();

    swarm1.listen_on(addr1).unwrap();
    swarm2.listen_on(addr2.clone()).unwrap();

    let swarm1_id = *swarm1.local_peer_id();

    let num_connections = 10;

    for _ in 0..num_connections {
        swarm1.dial(addr2.clone()).unwrap();
    }
    let mut state = State::Connecting;
    // Remembers which connection id we asked to be closed, so we can verify
    // the closed one is exactly that one.
    let mut disconnected_conn_id = None;

    executor::block_on(future::poll_fn(move |cx| loop {
        // Poll both swarms every iteration so connection setup/teardown
        // progresses on both sides.
        let poll1 = Swarm::poll_next_event(Pin::new(&mut swarm1), cx);
        let poll2 = Swarm::poll_next_event(Pin::new(&mut swarm2), cx);
        match state {
            State::Connecting => {
                if swarms_connected(&swarm1, &swarm2, num_connections) {
                    disconnected_conn_id = {
                        // Pick an arbitrary established connection (the middle
                        // one) and tell swarm2's behaviour to close it.
                        let conn_id =
                            swarm2.behaviour.on_connection_established[num_connections / 2].1;
                        swarm2.behaviour.inner().next_action.replace(
                            ToSwarm::CloseConnection {
                                peer_id: swarm1_id,
                                connection: CloseConnection::One(conn_id),
                            },
                        );
                        Some(conn_id)
                    };
                    state = State::Disconnecting;
                }
            }
            State::Disconnecting => {
                for s in &[&swarm1, &swarm2] {
                    // No behaviour should ever see the connection count drop
                    // to zero: only one of the ten connections is closed.
                    assert!(s
                        .behaviour
                        .on_connection_closed
                        .iter()
                        .all(|(.., remaining_conns)| *remaining_conns > 0));
                    assert_eq!(s.behaviour.on_connection_established.len(), num_connections);
                    s.behaviour.assert_connected(num_connections, 1);
                }
                // Both sides observed exactly one closed connection.
                if [&swarm1, &swarm2]
                    .iter()
                    .all(|s| s.behaviour.on_connection_closed.len() == 1)
                {
                    // ... and it is the very connection we asked to close.
                    let conn_id = swarm2.behaviour.on_connection_closed[0].1;
                    assert_eq!(Some(conn_id), disconnected_conn_id);
                    return Poll::Ready(());
                }
            }
        }
        // Only yield once both swarms have no more progress to make.
        if poll1.is_pending() && poll2.is_pending() {
            return Poll::Pending;
        }
    }))
}
2022-02-13 21:57:38 +01:00
#[test]
fn concurrent_dialing() {
    // Quickcheck-generated dial concurrency factor in `1..=10`.
    #[derive(Clone, Debug)]
    struct DialConcurrencyFactor(NonZeroU8);

    impl Arbitrary for DialConcurrencyFactor {
        fn arbitrary(g: &mut Gen) -> Self {
            Self(NonZeroU8::new(g.gen_range(1..11)).unwrap())
        }
    }

    // Property: a swarm configured with dial concurrency factor `N` dials at
    // most `N` of the provided addresses concurrently, and only tries further
    // addresses as earlier attempts fail.
    fn prop(concurrency_factor: DialConcurrencyFactor) {
        block_on(async {
            let mut swarm = new_test_swarm::<_, ()>(keep_alive::ConnectionHandler)
                .dial_concurrency_factor(concurrency_factor.0)
                .build();

            // Listen on `concurrency_factor + 2` addresses.
            //
            // `+ 2` to ensure a subset of addresses is dialed by network_2.
            let num_listen_addrs = concurrency_factor.0.get() + 2;
            let mut listen_addresses = Vec::new();
            let mut transports = Vec::new();
            for _ in 0..num_listen_addrs {
                // Each listener is a standalone memory transport, so incoming
                // connection attempts can be observed one listener at a time.
                let mut transport = transport::MemoryTransport::default().boxed();
                transport
                    .listen_on(ListenerId::next(), "/memory/0".parse().unwrap())
                    .unwrap();

                match transport.select_next_some().await {
                    TransportEvent::NewAddress { listen_addr, .. } => {
                        listen_addresses.push(listen_addr);
                    }
                    _ => panic!("Expected `NewListenAddr` event."),
                }

                transports.push(transport);
            }

            // Have swarm dial each listener and wait for each listener to receive the incoming
            // connections.
            swarm
                .dial(
                    DialOpts::peer_id(PeerId::random())
                        .addresses(listen_addresses)
                        .build(),
                )
                .unwrap();

            for mut transport in transports.into_iter() {
                loop {
                    // Drive the swarm and this listener concurrently; the
                    // swarm must stay silent while attempts are in flight.
                    match futures::future::select(transport.select_next_some(), swarm.next())
                        .await
                    {
                        future::Either::Left((TransportEvent::Incoming { .. }, _)) => {
                            break;
                        }
                        future::Either::Left(_) => {
                            panic!("Unexpected transport event.")
                        }
                        future::Either::Right((e, _)) => {
                            panic!("Expect swarm to not emit any event {e:?}")
                        }
                    }
                }
            }

            // No listener ever upgrades the connection, so the dial as a
            // whole must eventually fail.
            match swarm.next().await.unwrap() {
                SwarmEvent::OutgoingConnectionError { .. } => {}
                e => panic!("Unexpected swarm event {e:?}"),
            }
        })
    }

    QuickCheck::new().tests(10).quickcheck(prop as fn(_) -> _);
}
#[test]
fn invalid_peer_id() {
    // Checks whether dialing an address containing the wrong peer id raises an error
    // for the expected peer id instead of the obtained peer id.

    let mut swarm1 = new_test_swarm::<_, ()>(dummy::ConnectionHandler).build();
    let mut swarm2 = new_test_swarm::<_, ()>(dummy::ConnectionHandler).build();

    swarm1.listen_on("/memory/0".parse().unwrap()).unwrap();

    // Wait for swarm1 to report its actual listen address.
    let address =
        futures::executor::block_on(future::poll_fn(|cx| match swarm1.poll_next_unpin(cx) {
            Poll::Ready(Some(SwarmEvent::NewListenAddr { address, .. })) => {
                Poll::Ready(address)
            }
            Poll::Pending => Poll::Pending,
            _ => panic!("Was expecting the listen address to be reported"),
        }));

    // Append a random (i.e. wrong) peer id to swarm1's address and dial it.
    let other_id = PeerId::random();
    let other_addr = address.with(multiaddr::Protocol::P2p(other_id));

    swarm2.dial(other_addr.clone()).unwrap();

    let (peer_id, error) = futures::executor::block_on(future::poll_fn(|cx| {
        // Drive swarm1 as well so the incoming connection is accepted and
        // negotiated; its events are otherwise irrelevant here.
        if let Poll::Ready(Some(SwarmEvent::IncomingConnection { .. })) =
            swarm1.poll_next_unpin(cx)
        {}
        match swarm2.poll_next_unpin(cx) {
            Poll::Ready(Some(SwarmEvent::OutgoingConnectionError {
                peer_id, error, ..
            })) => Poll::Ready((peer_id, error)),
            Poll::Ready(x) => panic!("unexpected {x:?}"),
            Poll::Pending => Poll::Pending,
        }
    }));

    // The error must be reported against the peer id we *asked* for ...
    assert_eq!(peer_id.unwrap(), other_id);
    match error {
        DialError::WrongPeerId { obtained, endpoint } => {
            // ... while `obtained` carries the peer id actually negotiated.
            assert_eq!(obtained, *swarm1.local_peer_id());
            assert_eq!(
                endpoint,
                ConnectedPoint::Dialer {
                    address: other_addr,
                    role_override: Endpoint::Dialer,
                }
            );
        }
        x => panic!("wrong error {x:?}"),
    }
}
#[test]
fn dial_self() {
    // Check whether dialing ourselves correctly fails.
    //
    // Dialing the same address we're listening should result in three events:
    //
    // - The incoming connection notification (before we know the incoming peer ID).
    // - The connection error for the dialing endpoint (once we've determined that it's our own ID).
    // - The connection error for the listening endpoint (once we've determined that it's our own ID).
    //
    // The last two can happen in any order.

    let mut swarm = new_test_swarm::<_, ()>(dummy::ConnectionHandler).build();
    swarm.listen_on("/memory/0".parse().unwrap()).unwrap();

    // Wait for the actual listen address to be reported.
    let local_address =
        futures::executor::block_on(future::poll_fn(|cx| match swarm.poll_next_unpin(cx) {
            Poll::Ready(Some(SwarmEvent::NewListenAddr { address, .. })) => {
                Poll::Ready(address)
            }
            Poll::Pending => Poll::Pending,
            _ => panic!("Was expecting the listen address to be reported"),
        }));

    // This is a hack to actually execute the dial to ourselves which would otherwise be filtered.
    swarm.listened_addrs.clear();

    swarm.dial(local_address.clone()).unwrap();

    // Track both expected errors; they may arrive in either order.
    let mut got_dial_err = false;
    let mut got_inc_err = false;
    futures::executor::block_on(future::poll_fn(|cx| -> Poll<Result<(), io::Error>> {
        loop {
            match swarm.poll_next_unpin(cx) {
                Poll::Ready(Some(SwarmEvent::OutgoingConnectionError {
                    peer_id,
                    error: DialError::LocalPeerId { .. },
                    ..
                })) => {
                    assert_eq!(&peer_id.unwrap(), swarm.local_peer_id());
                    assert!(!got_dial_err);
                    got_dial_err = true;
                    if got_inc_err {
                        return Poll::Ready(Ok(()));
                    }
                }
                Poll::Ready(Some(SwarmEvent::IncomingConnectionError {
                    local_addr, ..
                })) => {
                    assert!(!got_inc_err);
                    assert_eq!(local_addr, local_address);
                    got_inc_err = true;
                    if got_dial_err {
                        return Poll::Ready(Ok(()));
                    }
                }
                Poll::Ready(Some(SwarmEvent::IncomingConnection { local_addr, .. })) => {
                    // The pre-negotiation notification; not an error yet.
                    assert_eq!(local_addr, local_address);
                }
                Poll::Ready(ev) => {
                    panic!("Unexpected event: {ev:?}")
                }
                Poll::Pending => break Poll::Pending,
            }
        }
    }))
    .unwrap();
}
#[test]
fn dial_self_by_id() {
    // Dialing ourselves by passing our own `PeerId` must not be possible in
    // the first place: a fresh swarm reports no connection to its local id.
    let swarm = new_test_swarm::<_, ()>(dummy::ConnectionHandler).build();
    let local_id = *swarm.local_peer_id();
    assert!(!swarm.is_connected(&local_id));
}
2022-09-11 16:55:26 +10:00
#[async_std::test]
async fn multiple_addresses_err() {
    // Tries dialing multiple addresses, and makes sure there's one dialing error per address.
    let target = PeerId::random();

    let mut swarm = new_test_swarm::<_, ()>(dummy::ConnectionHandler).build();

    // A mix of TCP and UDP addresses on random ports; none has a listener,
    // so every dial attempt must fail.
    let addresses = HashSet::from([
        multiaddr![Ip4([0, 0, 0, 0]), Tcp(rand::random::<u16>())],
        multiaddr![Ip4([0, 0, 0, 0]), Tcp(rand::random::<u16>())],
        multiaddr![Ip4([0, 0, 0, 0]), Tcp(rand::random::<u16>())],
        multiaddr![Udp(rand::random::<u16>())],
        multiaddr![Udp(rand::random::<u16>())],
        multiaddr![Udp(rand::random::<u16>())],
        multiaddr![Udp(rand::random::<u16>())],
        multiaddr![Udp(rand::random::<u16>())],
    ]);

    swarm
        .dial(
            DialOpts::peer_id(target)
                .addresses(addresses.iter().cloned().collect())
                .build(),
        )
        .unwrap();

    match swarm.next().await.unwrap() {
        SwarmEvent::OutgoingConnectionError {
            peer_id,
            error: DialError::Transport(errors),
            ..
        } => {
            assert_eq!(target, peer_id.unwrap());

            let failed_addresses = errors.into_iter().map(|(addr, _)| addr).collect::<Vec<_>>();
            // NOTE(review): comparing `Vec`s here relies on the two traversals
            // of the same, unmodified `HashSet` instance (`iter()` above and
            // `into_iter()` here) yielding the same order — which holds, since
            // the set is not mutated in between.
            let expected_addresses = addresses
                .into_iter()
                // The swarm appends the expected `/p2p/<target>` suffix before dialing.
                .map(|addr| addr.with(multiaddr::Protocol::P2p(target)))
                .collect::<Vec<_>>();

            assert_eq!(expected_addresses, failed_addresses);
        }
        e => panic!("Unexpected event: {e:?}"),
    }
}
2022-02-15 10:19:55 +01:00
#[test]
fn aborting_pending_connection_surfaces_error() {
    let _ = env_logger::try_init();

    let mut dialer = new_test_swarm::<_, ()>(dummy::ConnectionHandler).build();
    let mut listener = new_test_swarm::<_, ()>(dummy::ConnectionHandler).build();

    let remote_id = *listener.local_peer_id();
    listener.listen_on(multiaddr![Memory(0u64)]).unwrap();

    // Wait until the listener reports the address it is reachable under.
    let remote_addr = match block_on(listener.next()).unwrap() {
        SwarmEvent::NewListenAddr { address, .. } => address,
        e => panic!("Unexpected network event: {e:?}"),
    };

    // Start an outbound connection attempt towards the listener ...
    let opts = DialOpts::peer_id(remote_id)
        .addresses(vec![remote_addr])
        .build();
    dialer.dial(opts).unwrap();

    // ... and abort it while it is still pending. `disconnect_peer_id` fails
    // because no connection is established yet, but it aborts the attempt.
    dialer
        .disconnect_peer_id(remote_id)
        .expect_err("Expect peer to not yet be connected.");

    // The aborted attempt must surface as `DialError::Aborted`.
    let event = block_on(dialer.next()).unwrap();
    match event {
        SwarmEvent::OutgoingConnectionError {
            error: DialError::Aborted,
            ..
        } => {}
        e => panic!("Unexpected swarm event {e:?}."),
    }
}
2022-11-23 11:51:47 +11:00
#[test]
fn dial_error_prints_sources() {
    // A fairly typical failure for chained transports: a single candidate
    // address whose transport error wraps a memory-transport error.
    let addr = "/ip4/127.0.0.1/tcp/80".parse().unwrap();
    let inner = io::Error::new(io::ErrorKind::Other, MemoryTransportError::Unreachable);
    let error = DialError::Transport(vec![(addr, TransportError::Other(inner))]);

    let rendered = error.to_string();

    // Unfortunately, we have some "empty" errors that lead to multiple colons without text but that is the best we can do.
    assert_eq!("Failed to negotiate transport protocol(s): [(/ip4/127.0.0.1/tcp/80: : No listener on the given port.)]", rendered)
}
2019-04-04 12:25:42 -03:00
}