// Copyright 2019 Parity Technologies (UK) Ltd.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//! High level manager of the network.
//!
//! A [`Swarm`] contains the state of the network as a whole. The entire
//! behaviour of a libp2p network can be controlled through the `Swarm`.
//! The `Swarm` struct contains all active and pending connections to
//! remotes and manages the state of all the substreams that have been
//! opened, and all the upgrades that were built upon these substreams.
//!
//! # Initializing a Swarm
//!
//! Creating a `Swarm` requires three things:
//!
//! 1. A network identity of the local node in the form of a [`PeerId`].
//! 2. An implementation of the [`Transport`] trait. This is the type that
//!    will be used in order to reach nodes on the network based on their
//!    address. See the `transport` module for more information.
//! 3. An implementation of the [`NetworkBehaviour`] trait. This is a state
//!    machine that defines how the swarm should behave once it is connected
//!    to a node.
//!
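//! For example, a minimal swarm can be assembled from the dummy transport and
//! the dummy behaviour shipped with `libp2p-core` and this crate (a sketch for
//! illustration; a real application substitutes a functional transport and
//! behaviour):
//!
//! ```
//! use libp2p_core::{transport::dummy::DummyTransport, PeerId, Transport};
//! use libp2p_swarm::{dummy, Swarm};
//!
//! // 1. The local network identity.
//! let local_peer_id = PeerId::random();
//! // 2. A transport (here a dummy one that never connects anywhere).
//! let transport = DummyTransport::new().boxed();
//! // 3. A behaviour (here the no-op dummy behaviour).
//! let mut swarm = Swarm::without_executor(transport, dummy::Behaviour, local_peer_id);
//! ```
//!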
//! # Network Behaviour
//!
//! The [`NetworkBehaviour`] trait is implemented on types that indicate to
//! the swarm how it should behave. This includes which protocols are supported
//! and which nodes to try to connect to. It is the `NetworkBehaviour` that
//! controls what happens on the network. Multiple types that implement
//! `NetworkBehaviour` can be composed into a single behaviour.
//!
//! # Connection Handler
//!
//! The [`ConnectionHandler`] trait defines how each active connection to a
//! remote should behave: how to handle incoming substreams, which protocols
//! are supported, when to open a new outbound substream, etc.

#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]

mod connection;
mod registry;
#[cfg(test)]
mod test;
mod upgrade;

pub mod behaviour;
pub mod dial_opts;
pub mod dummy;
mod executor;
pub mod handler;
pub mod keep_alive;

/// Bundles all symbols required for the [`libp2p_swarm_derive::NetworkBehaviour`] macro.
#[doc(hidden)]
pub mod derive_prelude {
    pub use crate::behaviour::AddressChange;
    pub use crate::behaviour::ConnectionClosed;
    pub use crate::behaviour::ConnectionEstablished;
    pub use crate::behaviour::DialFailure;
    pub use crate::behaviour::ExpiredExternalAddr;
    pub use crate::behaviour::ExpiredListenAddr;
    pub use crate::behaviour::FromSwarm;
    pub use crate::behaviour::ListenFailure;
    pub use crate::behaviour::ListenerClosed;
    pub use crate::behaviour::ListenerError;
    pub use crate::behaviour::NewExternalAddr;
    pub use crate::behaviour::NewListenAddr;
    pub use crate::behaviour::NewListener;
    pub use crate::ConnectionHandler;
    pub use crate::DialError;
    pub use crate::IntoConnectionHandler;
    pub use crate::IntoConnectionHandlerSelect;
    pub use crate::NetworkBehaviour;
    pub use crate::NetworkBehaviourAction;
    pub use crate::PollParameters;
    pub use futures::prelude as futures;
    pub use libp2p_core::connection::ConnectionId;
    pub use libp2p_core::either::EitherOutput;
    pub use libp2p_core::transport::ListenerId;
    pub use libp2p_core::ConnectedPoint;
    pub use libp2p_core::Multiaddr;
    pub use libp2p_core::PeerId;
}

pub use behaviour::{
    CloseConnection, NetworkBehaviour, NetworkBehaviourAction, NotifyHandler, PollParameters,
};
pub use connection::pool::{ConnectionCounters, ConnectionLimits};
pub use connection::{
    ConnectionError, ConnectionLimit, PendingConnectionError, PendingInboundConnectionError,
    PendingOutboundConnectionError,
};
pub use executor::Executor;
pub use handler::{
    ConnectionHandler, ConnectionHandlerEvent, ConnectionHandlerSelect, ConnectionHandlerUpgrErr,
    IntoConnectionHandler, IntoConnectionHandlerSelect, KeepAlive, OneShotHandler,
    OneShotHandlerConfig, SubstreamProtocol,
};
#[cfg(feature = "macros")]
pub use libp2p_swarm_derive::NetworkBehaviour;
pub use registry::{AddAddressResult, AddressRecord, AddressScore};

use connection::pool::{EstablishedConnection, Pool, PoolConfig, PoolEvent};
use connection::IncomingInfo;
use dial_opts::{DialOpts, PeerCondition};
use either::Either;
use futures::{executor::ThreadPoolBuilder, prelude::*, stream::FusedStream};
use libp2p_core::connection::ConnectionId;
use libp2p_core::muxing::SubstreamBox;
use libp2p_core::{
    connection::ConnectedPoint,
    multiaddr::Protocol,
    multihash::Multihash,
    muxing::StreamMuxerBox,
    transport::{self, ListenerId, TransportError, TransportEvent},
    upgrade::ProtocolName,
    Endpoint, Multiaddr, Negotiated, PeerId, Transport,
};
use registry::{AddressIntoIter, Addresses};
use smallvec::SmallVec;
use std::collections::{HashMap, HashSet};
use std::iter;
use std::num::{NonZeroU32, NonZeroU8, NonZeroUsize};
use std::{
    convert::TryFrom,
    error, fmt, io,
    pin::Pin,
    task::{Context, Poll},
};
use upgrade::UpgradeInfoSend as _;

/// Substream for which a protocol has been chosen.
///
/// Implements the [`AsyncRead`](futures::io::AsyncRead) and
/// [`AsyncWrite`](futures::io::AsyncWrite) traits.
pub type NegotiatedSubstream = Negotiated<SubstreamBox>;

/// Event generated by the [`NetworkBehaviour`] that the swarm will report back.
type TBehaviourOutEvent<TBehaviour> = <TBehaviour as NetworkBehaviour>::OutEvent;

/// [`ConnectionHandler`] of the [`NetworkBehaviour`] for all the protocols the [`NetworkBehaviour`]
/// supports.
type THandler<TBehaviour> = <TBehaviour as NetworkBehaviour>::ConnectionHandler;

/// Custom event that can be received by the [`ConnectionHandler`] of the
/// [`NetworkBehaviour`].
type THandlerInEvent<TBehaviour> =
    <<THandler<TBehaviour> as IntoConnectionHandler>::Handler as ConnectionHandler>::InEvent;

/// Custom event that can be produced by the [`ConnectionHandler`] of the [`NetworkBehaviour`].
type THandlerOutEvent<TBehaviour> =
    <<THandler<TBehaviour> as IntoConnectionHandler>::Handler as ConnectionHandler>::OutEvent;

/// Custom error that can be produced by the [`ConnectionHandler`] of the [`NetworkBehaviour`].
type THandlerErr<TBehaviour> =
    <<THandler<TBehaviour> as IntoConnectionHandler>::Handler as ConnectionHandler>::Error;

/// Event generated by the `Swarm`.
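///
/// A sketch of consuming events by polling the [`Swarm`] as a stream (an
/// `ignore`d example: it assumes an async context and an already constructed
/// `swarm`; the reactions shown are illustrative):
///
/// ```ignore
/// use futures::StreamExt;
///
/// loop {
///     match swarm.select_next_some().await {
///         SwarmEvent::NewListenAddr { address, .. } => {
///             println!("Listening on {}", address);
///         }
///         SwarmEvent::ConnectionEstablished { peer_id, .. } => {
///             println!("Connected to {}", peer_id);
///         }
///         SwarmEvent::Behaviour(event) => println!("{:?}", event),
///         _ => {}
///     }
/// }
/// ```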
#[derive(Debug)]
pub enum SwarmEvent<TBehaviourOutEvent, THandlerErr> {
    /// Event generated by the `NetworkBehaviour`.
    Behaviour(TBehaviourOutEvent),
    /// A connection to the given peer has been opened.
    ConnectionEstablished {
        /// Identity of the peer that we have connected to.
        peer_id: PeerId,
        /// Endpoint of the connection that has been opened.
        endpoint: ConnectedPoint,
        /// Number of established connections to this peer, including the one that has just been
        /// opened.
        num_established: NonZeroU32,
        /// [`Some`] when the new connection is an outgoing connection.
        /// Addresses are dialed concurrently. Contains the addresses and errors
        /// of dial attempts that failed before the one successful dial.
        concurrent_dial_errors: Option<Vec<(Multiaddr, TransportError<io::Error>)>>,
    },
    /// A connection with the given peer has been closed,
    /// possibly as a result of an error.
    ConnectionClosed {
        /// Identity of the peer that we have connected to.
        peer_id: PeerId,
        /// Endpoint of the connection that has been closed.
        endpoint: ConnectedPoint,
        /// Number of other remaining connections to this same peer.
        num_established: u32,
        /// Reason for the disconnection, if it was not a successful
        /// active close.
        cause: Option<ConnectionError<THandlerErr>>,
    },
    /// A new connection arrived on a listener and is in the process of protocol negotiation.
    ///
    /// A corresponding [`ConnectionEstablished`](SwarmEvent::ConnectionEstablished),
    /// [`BannedPeer`](SwarmEvent::BannedPeer), or
    /// [`IncomingConnectionError`](SwarmEvent::IncomingConnectionError) event will later be
    /// generated for this connection.
    IncomingConnection {
        /// Local connection address.
        /// This address has been earlier reported with a [`NewListenAddr`](SwarmEvent::NewListenAddr)
        /// event.
        local_addr: Multiaddr,
        /// Address used to send back data to the remote.
        send_back_addr: Multiaddr,
    },
    /// An error happened on a connection during its initial handshake.
    ///
    /// This can include, for example, an error during the handshake of the encryption layer, or
    /// the connection unexpectedly closed.
    IncomingConnectionError {
        /// Local connection address.
        /// This address has been earlier reported with a [`NewListenAddr`](SwarmEvent::NewListenAddr)
        /// event.
        local_addr: Multiaddr,
        /// Address used to send back data to the remote.
        send_back_addr: Multiaddr,
        /// The error that happened.
        error: PendingInboundConnectionError<io::Error>,
    },
    /// Outgoing connection attempt failed.
    OutgoingConnectionError {
        /// If known, [`PeerId`] of the peer we tried to reach.
        peer_id: Option<PeerId>,
        /// Error that has been encountered.
        error: DialError,
    },
    /// We connected to a peer, but we immediately closed the connection because that peer is banned.
    BannedPeer {
        /// Identity of the banned peer.
        peer_id: PeerId,
        /// Endpoint of the connection that has been closed.
        endpoint: ConnectedPoint,
    },
    /// One of our listeners has reported a new local listening address.
    NewListenAddr {
        /// The listener that is listening on the new address.
        listener_id: ListenerId,
        /// The new address that is being listened on.
        address: Multiaddr,
    },
    /// One of our listeners has reported the expiration of a listening address.
    ExpiredListenAddr {
        /// The listener that is no longer listening on the address.
        listener_id: ListenerId,
        /// The expired address.
        address: Multiaddr,
    },
    /// One of the listeners gracefully closed.
    ListenerClosed {
        /// The listener that closed.
        listener_id: ListenerId,
        /// The addresses that the listener was listening on. These addresses are now considered
        /// expired, similar to if an [`ExpiredListenAddr`](SwarmEvent::ExpiredListenAddr) event
        /// has been generated for each of them.
        addresses: Vec<Multiaddr>,
        /// Reason for the closure. Contains `Ok(())` if the stream produced `None`, or `Err`
        /// if the stream produced an error.
        reason: Result<(), io::Error>,
    },
    /// One of the listeners reported a non-fatal error.
    ListenerError {
        /// The listener that errored.
        listener_id: ListenerId,
        /// The listener error.
        error: io::Error,
    },
    /// A new dialing attempt has been initiated by the [`NetworkBehaviour`]
    /// implementation.
    ///
    /// A [`ConnectionEstablished`](SwarmEvent::ConnectionEstablished) event is
    /// reported if the dialing attempt succeeds, otherwise an
    /// [`OutgoingConnectionError`](SwarmEvent::OutgoingConnectionError) event
    /// is reported.
    Dialing(PeerId),
}

/// Contains the state of the network, plus the way it should behave.
///
/// Note: Needs to be polled via `<Swarm as Stream>` in order to make
/// progress.
pub struct Swarm<TBehaviour>
where
    TBehaviour: NetworkBehaviour,
{
    /// [`Transport`] for dialing remote peers and listening for incoming connections.
    transport: transport::Boxed<(PeerId, StreamMuxerBox)>,

    /// The nodes currently active.
    pool: Pool<THandler<TBehaviour>, transport::Boxed<(PeerId, StreamMuxerBox)>>,

    /// The local peer ID.
    local_peer_id: PeerId,

    /// Handles which nodes to connect to and how to handle the events sent back by the protocol
    /// handlers.
    behaviour: TBehaviour,

    /// List of protocols that the behaviour says it supports.
    supported_protocols: SmallVec<[Vec<u8>; 16]>,

    /// Multiaddresses that our listeners are listening on.
    listened_addrs: HashMap<ListenerId, SmallVec<[Multiaddr; 1]>>,

    /// List of multiaddresses we're listening on, after accounting for external IP addresses and
    /// similar mechanisms.
    external_addrs: Addresses,

    /// List of nodes for which we deny any incoming connection.
    banned_peers: HashSet<PeerId>,

    /// Connections for which we withhold any reporting. These belong to banned peers.
    ///
    /// Note: Connections to a peer that are established at the time of banning that peer
    /// are not added here. Instead they are simply closed.
    banned_peer_connections: HashSet<ConnectionId>,

    /// Pending event to be delivered to connection handlers
    /// (or dropped if the peer disconnected) before the `behaviour`
    /// can be polled again.
    pending_event: Option<(PeerId, PendingNotifyHandler, THandlerInEvent<TBehaviour>)>,
}

impl<TBehaviour> Unpin for Swarm<TBehaviour> where TBehaviour: NetworkBehaviour {}

impl<TBehaviour> Swarm<TBehaviour>
where
    TBehaviour: NetworkBehaviour,
{
    /// Builds a new `Swarm`.
    #[deprecated(
        since = "0.41.0",
        note = "This constructor is considered ambiguous regarding the executor. Use one of the new, executor-specific constructors or `Swarm::with_threadpool_executor` for the same behaviour."
    )]
    pub fn new(
        transport: transport::Boxed<(PeerId, StreamMuxerBox)>,
        behaviour: TBehaviour,
        local_peer_id: PeerId,
    ) -> Self {
        Self::with_threadpool_executor(transport, behaviour, local_peer_id)
    }

    /// Builds a new `Swarm` with a provided executor.
    pub fn with_executor(
        transport: transport::Boxed<(PeerId, StreamMuxerBox)>,
        behaviour: TBehaviour,
        local_peer_id: PeerId,
        executor: impl Executor + Send + 'static,
    ) -> Self {
        SwarmBuilder::with_executor(transport, behaviour, local_peer_id, executor).build()
    }

    /// Builds a new `Swarm` with a tokio executor.
    #[cfg(all(
        feature = "tokio",
        not(any(target_os = "emscripten", target_os = "wasi", target_os = "unknown"))
    ))]
    pub fn with_tokio_executor(
        transport: transport::Boxed<(PeerId, StreamMuxerBox)>,
        behaviour: TBehaviour,
        local_peer_id: PeerId,
    ) -> Self {
        Self::with_executor(
            transport,
            behaviour,
            local_peer_id,
            crate::executor::TokioExecutor,
        )
    }

    /// Builds a new `Swarm` with an async-std executor.
    #[cfg(all(
        feature = "async-std",
        not(any(target_os = "emscripten", target_os = "wasi", target_os = "unknown"))
    ))]
    pub fn with_async_std_executor(
        transport: transport::Boxed<(PeerId, StreamMuxerBox)>,
        behaviour: TBehaviour,
        local_peer_id: PeerId,
    ) -> Self {
        Self::with_executor(
            transport,
            behaviour,
            local_peer_id,
            crate::executor::AsyncStdExecutor,
        )
    }

    /// Builds a new `Swarm` with a threadpool executor.
    pub fn with_threadpool_executor(
        transport: transport::Boxed<(PeerId, StreamMuxerBox)>,
        behaviour: TBehaviour,
        local_peer_id: PeerId,
    ) -> Self {
        let builder = match ThreadPoolBuilder::new()
            .name_prefix("libp2p-swarm-task-")
            .create()
        {
            Ok(tp) => SwarmBuilder::with_executor(transport, behaviour, local_peer_id, tp),
            Err(err) => {
                log::warn!("Failed to create executor thread pool: {:?}", err);
                SwarmBuilder::without_executor(transport, behaviour, local_peer_id)
            }
        };
        builder.build()
    }

    /// Builds a new `Swarm` without an executor, instead using the current task.
    ///
    /// ## ⚠️ Performance warning
    /// All connections will be polled on the current task, thus quite bad performance
    /// characteristics should be expected. Whenever possible use an executor and
    /// [`Swarm::with_executor`].
    pub fn without_executor(
        transport: transport::Boxed<(PeerId, StreamMuxerBox)>,
        behaviour: TBehaviour,
        local_peer_id: PeerId,
    ) -> Self {
        SwarmBuilder::without_executor(transport, behaviour, local_peer_id).build()
    }

    /// Returns information about the connections underlying the [`Swarm`].
    pub fn network_info(&self) -> NetworkInfo {
        let num_peers = self.pool.num_peers();
        let connection_counters = self.pool.counters().clone();
        NetworkInfo {
            num_peers,
            connection_counters,
        }
    }

    /// Starts listening on the given address.
    /// Returns an error if the address is not supported.
    ///
    /// Listeners report their new listening addresses as [`SwarmEvent::NewListenAddr`].
    /// Depending on the underlying transport, one listener may have multiple listening addresses.
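    ///
    /// A usage sketch (an `ignore`d example: it assumes an already constructed `swarm`
    /// whose transport supports TCP; the address is illustrative):
    ///
    /// ```ignore
    /// // Listen on an ephemeral TCP port on all IPv4 interfaces.
    /// let listener_id = swarm.listen_on("/ip4/0.0.0.0/tcp/0".parse()?)?;
    /// ```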
    pub fn listen_on(&mut self, addr: Multiaddr) -> Result<ListenerId, TransportError<io::Error>> {
        let id = self.transport.listen_on(addr)?;
        #[allow(deprecated)]
        self.behaviour.inject_new_listener(id);
        Ok(id)
    }

    /// Remove some listener.
    ///
    /// Returns `true` if there was a listener with this ID, `false`
    /// otherwise.
    pub fn remove_listener(&mut self, listener_id: ListenerId) -> bool {
        self.transport.remove_listener(listener_id)
    }

    /// Dial a known or unknown peer.
    ///
    /// See also [`DialOpts`].
    ///
    /// ```
    /// # use libp2p_swarm::Swarm;
    /// # use libp2p_swarm::dial_opts::{DialOpts, PeerCondition};
    /// # use libp2p_core::{Multiaddr, PeerId, Transport};
    /// # use libp2p_core::transport::dummy::DummyTransport;
    /// # use libp2p_swarm::dummy;
    /// #
    /// let mut swarm = Swarm::without_executor(
    ///     DummyTransport::new().boxed(),
    ///     dummy::Behaviour,
    ///     PeerId::random(),
    /// );
    ///
    /// // Dial a known peer.
    /// swarm.dial(PeerId::random());
    ///
    /// // Dial an unknown peer.
    /// swarm.dial("/ip6/::1/tcp/12345".parse::<Multiaddr>().unwrap());
    /// ```
    pub fn dial(&mut self, opts: impl Into<DialOpts>) -> Result<(), DialError> {
        let handler = self.behaviour.new_handler();
        self.dial_with_handler(opts.into(), handler)
    }

    fn dial_with_handler(
        &mut self,
        swarm_dial_opts: DialOpts,
        handler: <TBehaviour as NetworkBehaviour>::ConnectionHandler,
    ) -> Result<(), DialError> {
        let (peer_id, addresses, dial_concurrency_factor_override, role_override) =
            match swarm_dial_opts.0 {
                // Dial a known peer.
                dial_opts::Opts::WithPeerId(dial_opts::WithPeerId {
                    peer_id,
                    condition,
                    role_override,
                    dial_concurrency_factor_override,
                })
                | dial_opts::Opts::WithPeerIdWithAddresses(dial_opts::WithPeerIdWithAddresses {
                    peer_id,
                    condition,
                    role_override,
                    dial_concurrency_factor_override,
                    ..
                }) => {
                    // Check [`PeerCondition`] if provided.
                    let condition_matched = match condition {
                        PeerCondition::Disconnected => !self.is_connected(&peer_id),
                        PeerCondition::NotDialing => !self.pool.is_dialing(peer_id),
                        PeerCondition::Always => true,
                    };
                    if !condition_matched {
                        #[allow(deprecated)]
                        self.behaviour.inject_dial_failure(
                            Some(peer_id),
                            handler,
                            &DialError::DialPeerConditionFalse(condition),
                        );

                        return Err(DialError::DialPeerConditionFalse(condition));
                    }

                    // Check if peer is banned.
                    if self.banned_peers.contains(&peer_id) {
                        let error = DialError::Banned;
                        #[allow(deprecated)]
                        self.behaviour
                            .inject_dial_failure(Some(peer_id), handler, &error);
                        return Err(error);
                    }

                    // Retrieve the addresses to dial.
                    let addresses = {
                        let mut addresses = match swarm_dial_opts.0 {
                            dial_opts::Opts::WithPeerId(dial_opts::WithPeerId { .. }) => {
                                self.behaviour.addresses_of_peer(&peer_id)
                            }
                            dial_opts::Opts::WithPeerIdWithAddresses(
                                dial_opts::WithPeerIdWithAddresses {
                                    peer_id,
                                    mut addresses,
                                    extend_addresses_through_behaviour,
                                    ..
                                },
                            ) => {
                                if extend_addresses_through_behaviour {
                                    addresses.extend(self.behaviour.addresses_of_peer(&peer_id))
                                }
                                addresses
                            }
                            dial_opts::Opts::WithoutPeerIdWithAddress { .. } => {
                                unreachable!("Due to outer match.")
                            }
                        };
                        let mut unique_addresses = HashSet::new();
                        addresses.retain(|addr| {
                            !self.listened_addrs.values().flatten().any(|a| a == addr)
                                && unique_addresses.insert(addr.clone())
                        });

                        if addresses.is_empty() {
                            let error = DialError::NoAddresses;
                            #[allow(deprecated)]
                            self.behaviour
                                .inject_dial_failure(Some(peer_id), handler, &error);
                            return Err(error);
                        };

                        addresses
                    };

                    (
                        Some(peer_id),
                        Either::Left(addresses.into_iter()),
                        dial_concurrency_factor_override,
                        role_override,
                    )
                }
                // Dial an unknown peer.
                dial_opts::Opts::WithoutPeerIdWithAddress(
                    dial_opts::WithoutPeerIdWithAddress {
                        address,
                        role_override,
                    },
                ) => {
                    // If the address ultimately encapsulates an expected peer ID, dial that peer
                    // such that any mismatch is detected. We do not "pop off" the `P2p` protocol
                    // from the address, because it may be used by the `Transport`, i.e. `P2p`
                    // is a protocol component that can influence any transport, like `libp2p-dns`.
                    let peer_id = match address
                        .iter()
                        .last()
                        .and_then(|p| {
                            if let Protocol::P2p(ma) = p {
                                Some(PeerId::try_from(ma))
                            } else {
                                None
                            }
                        })
                        .transpose()
                    {
                        Ok(peer_id) => peer_id,
                        Err(multihash) => return Err(DialError::InvalidPeerId(multihash)),
                    };

                    (
                        peer_id,
                        Either::Right(iter::once(address)),
                        None,
                        role_override,
                    )
                }
            };

        let dials = addresses
            .map(|a| match p2p_addr(peer_id, a) {
                Ok(address) => {
                    let dial = match role_override {
                        Endpoint::Dialer => self.transport.dial(address.clone()),
                        Endpoint::Listener => self.transport.dial_as_listener(address.clone()),
                    };
                    match dial {
                        Ok(fut) => fut
                            .map(|r| (address, r.map_err(TransportError::Other)))
                            .boxed(),
                        Err(err) => futures::future::ready((address, Err(err))).boxed(),
                    }
                }
                Err(address) => futures::future::ready((
                    address.clone(),
                    Err(TransportError::MultiaddrNotSupported(address)),
                ))
                .boxed(),
            })
            .collect();

        match self.pool.add_outgoing(
            dials,
            peer_id,
            handler,
            role_override,
            dial_concurrency_factor_override,
        ) {
            Ok(_connection_id) => Ok(()),
            Err((connection_limit, handler)) => {
                let error = DialError::ConnectionLimit(connection_limit);
                #[allow(deprecated)]
                self.behaviour.inject_dial_failure(peer_id, handler, &error);
                Err(error)
            }
        }
    }

    /// Returns an iterator that produces the list of addresses we're listening on.
    pub fn listeners(&self) -> impl Iterator<Item = &Multiaddr> {
        self.listened_addrs.values().flatten()
    }

    /// Returns the peer ID of the local node.
    pub fn local_peer_id(&self) -> &PeerId {
        &self.local_peer_id
    }

    /// Returns an iterator for [`AddressRecord`]s of external addresses
    /// of the local node, in decreasing order of their current
    /// [score](AddressScore).
    pub fn external_addresses(&self) -> impl Iterator<Item = &AddressRecord> {
        self.external_addrs.iter()
    }

    /// Adds an external address record for the local node.
    ///
    /// An external address is an address of the local node known to
    /// be (likely) reachable for other nodes, possibly taking into
    /// account NAT. The external addresses of the local node may be
    /// shared with other nodes by the `NetworkBehaviour`.
    ///
    /// The associated score determines both the position of the address
    /// in the list of external addresses (which can determine the
    /// order in which addresses are used to connect to) as well as
    /// how long the address is retained in the list, depending on
    /// how frequently it is reported by the `NetworkBehaviour` via
    /// [`NetworkBehaviourAction::ReportObservedAddr`] or explicitly
    /// through this method.
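    ///
    /// A usage sketch (an `ignore`d example: it assumes an already constructed `swarm`;
    /// the address is illustrative):
    ///
    /// ```ignore
    /// use libp2p_swarm::AddressScore;
    ///
    /// // Record a publicly reachable address that should never expire.
    /// let addr = "/ip4/203.0.113.1/tcp/4001".parse().unwrap();
    /// swarm.add_external_address(addr, AddressScore::Infinite);
    /// ```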
    pub fn add_external_address(&mut self, a: Multiaddr, s: AddressScore) -> AddAddressResult {
        let result = self.external_addrs.add(a.clone(), s);
        let expired = match &result {
            AddAddressResult::Inserted { expired } => {
                #[allow(deprecated)]
                self.behaviour.inject_new_external_addr(&a);
                expired
            }
            AddAddressResult::Updated { expired } => expired,
        };
        for a in expired {
            #[allow(deprecated)]
            self.behaviour.inject_expired_external_addr(&a.addr);
        }
        result
    }

    /// Removes an external address of the local node, regardless of
    /// its current score. See [`Swarm::add_external_address`]
    /// for details.
    ///
    /// Returns `true` if the address existed and was removed, `false`
    /// otherwise.
    pub fn remove_external_address(&mut self, addr: &Multiaddr) -> bool {
        if self.external_addrs.remove(addr) {
            #[allow(deprecated)]
            self.behaviour.inject_expired_external_addr(addr);
            true
        } else {
            false
        }
    }
    /// Bans a peer by its peer ID.
    ///
    /// Any incoming connection and any dialing attempt will immediately be rejected.
    /// This function has no effect if the peer is already banned.
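    ///
    /// A minimal usage sketch (`remote_peer_id` stands in for a real [`PeerId`]):
    ///
    /// ```ignore
    /// swarm.ban_peer_id(remote_peer_id);
    /// // Established connections are closed and further attempts are rejected
    /// // until the ban is lifted again:
    /// swarm.unban_peer_id(remote_peer_id);
    /// ```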
    pub fn ban_peer_id(&mut self, peer_id: PeerId) {
        if self.banned_peers.insert(peer_id) {
            // Note that established connections to the now banned peer are closed but not
            // added to [`Swarm::banned_peer_connections`]. They have been previously reported
            // as open to the behaviour and need to be reported as closed once closing the
            // connection finishes.
            self.pool.disconnect(peer_id);
        }
    }
    /// Unbans a peer.
    pub fn unban_peer_id(&mut self, peer_id: PeerId) {
        self.banned_peers.remove(&peer_id);
    }
    /// Disconnects a peer by its peer ID, closing all connections to said peer.
    ///
    /// Returns `Ok(())` if there were one or more established connections to the peer.
    ///
    /// Note: Closing a connection via [`Swarm::disconnect_peer_id`] does
    /// not inform the corresponding [`ConnectionHandler`].
    /// Closing a connection via a [`ConnectionHandler`] can be done either in a
    /// collaborative manner across [`ConnectionHandler`]s
    /// with [`ConnectionHandler::connection_keep_alive`] or directly with
    /// [`ConnectionHandlerEvent::Close`].
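    ///
    /// A minimal usage sketch (`remote_peer_id` stands in for a real [`PeerId`]):
    ///
    /// ```ignore
    /// match swarm.disconnect_peer_id(remote_peer_id) {
    ///     Ok(()) => { /* all connections to the peer are being closed */ }
    ///     Err(()) => { /* there was no established connection */ }
    /// }
    /// ```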
    #[allow(clippy::result_unit_err)]
    pub fn disconnect_peer_id(&mut self, peer_id: PeerId) -> Result<(), ()> {
        let was_connected = self.pool.is_connected(peer_id);
        self.pool.disconnect(peer_id);

        if was_connected {
            Ok(())
        } else {
            Err(())
        }
    }
    /// Checks whether there is an established connection to a peer.
    pub fn is_connected(&self, peer_id: &PeerId) -> bool {
        self.pool.is_connected(*peer_id)
    }
/// Returns the currently connected peers.
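    ///
    /// A minimal usage sketch (assuming a fully constructed `swarm`; not
    /// compiled here):
    ///
    /// ```ignore
    /// for peer in swarm.connected_peers() {
    ///     println!("Connected to {}", peer);
    /// }
    /// ```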
    pub fn connected_peers(&self) -> impl Iterator<Item = &PeerId> {
        self.pool.iter_connected()
    }
    /// Returns a reference to the provided [`NetworkBehaviour`].
    pub fn behaviour(&self) -> &TBehaviour {
        &self.behaviour
    }

    /// Returns a mutable reference to the provided [`NetworkBehaviour`].
    pub fn behaviour_mut(&mut self) -> &mut TBehaviour {
        &mut self.behaviour
    }
    fn handle_pool_event(
        &mut self,
        event: PoolEvent<THandler<TBehaviour>, transport::Boxed<(PeerId, StreamMuxerBox)>>,
    ) -> Option<SwarmEvent<TBehaviour::OutEvent, THandlerErr<TBehaviour>>> {
        match event {
            PoolEvent::ConnectionEstablished {
                peer_id,
                id,
                endpoint,
                other_established_connection_ids,
                concurrent_dial_errors,
            } => {
                if self.banned_peers.contains(&peer_id) {
                    // Mark the connection for the banned peer as banned, thus withholding any
                    // future events from the connection to the behaviour.
                    self.banned_peer_connections.insert(id);
                    self.pool.disconnect(peer_id);
                    return Some(SwarmEvent::BannedPeer { peer_id, endpoint });
                } else {
                    let num_established = NonZeroU32::new(
                        u32::try_from(other_established_connection_ids.len() + 1).unwrap(),
                    )
                    .expect("n + 1 is always non-zero; qed");
                    let non_banned_established = other_established_connection_ids
                        .into_iter()
                        .filter(|conn_id| !self.banned_peer_connections.contains(conn_id))
                        .count();

                    log::debug!(
                        "Connection established: {:?} {:?}; Total (peer): {}. Total non-banned (peer): {}",
                        peer_id,
                        endpoint,
                        num_established,
                        non_banned_established + 1,
                    );
                    let failed_addresses = concurrent_dial_errors
                        .as_ref()
                        .map(|es| es.iter().map(|(a, _)| a).cloned().collect());
                    #[allow(deprecated)]
                    self.behaviour.inject_connection_established(
                        &peer_id,
                        &id,
                        &endpoint,
                        failed_addresses.as_ref(),
                        non_banned_established,
                    );
                    return Some(SwarmEvent::ConnectionEstablished {
                        peer_id,
                        num_established,
                        endpoint,
                        concurrent_dial_errors,
                    });
                }
            }
            PoolEvent::PendingOutboundConnectionError {
                id: _,
                error,
                handler,
                peer,
            } => {
                let error = error.into();
                #[allow(deprecated)]
                self.behaviour.inject_dial_failure(peer, handler, &error);
                if let Some(peer) = peer {
                    log::debug!("Connection attempt to {:?} failed with {:?}.", peer, error);
                } else {
                    log::debug!("Connection attempt to unknown peer failed with {:?}", error);
                }
                return Some(SwarmEvent::OutgoingConnectionError {
                    peer_id: peer,
                    error,
                });
            }
            PoolEvent::PendingInboundConnectionError {
                id: _,
                send_back_addr,
                local_addr,
                error,
                handler,
            } => {
                log::debug!("Incoming connection failed: {:?}", error);
                #[allow(deprecated)]
                self.behaviour
                    .inject_listen_failure(&local_addr, &send_back_addr, handler);
                return Some(SwarmEvent::IncomingConnectionError {
                    local_addr,
                    send_back_addr,
                    error,
                });
            }
            PoolEvent::ConnectionClosed {
                id,
                connected,
                error,
                remaining_established_connection_ids,
                handler,
                ..
            } => {
                if let Some(error) = error.as_ref() {
                    log::debug!(
                        "Connection closed with error {:?}: {:?}; Total (peer): {}.",
                        error,
                        connected,
                        remaining_established_connection_ids.len()
                    );
                } else {
                    log::debug!(
                        "Connection closed: {:?}; Total (peer): {}.",
                        connected,
                        remaining_established_connection_ids.len()
                    );
                }
                let peer_id = connected.peer_id;
                let endpoint = connected.endpoint;
                let num_established =
                    u32::try_from(remaining_established_connection_ids.len()).unwrap();
                let conn_was_reported = !self.banned_peer_connections.remove(&id);
                if conn_was_reported {
                    let remaining_non_banned = remaining_established_connection_ids
                        .into_iter()
                        .filter(|conn_id| !self.banned_peer_connections.contains(conn_id))
                        .count();
                    #[allow(deprecated)]
                    self.behaviour.inject_connection_closed(
                        &peer_id,
                        &id,
                        &endpoint,
                        handler,
                        remaining_non_banned,
                    );
                }
                return Some(SwarmEvent::ConnectionClosed {
                    peer_id,
                    endpoint,
                    cause: error,
                    num_established,
                });
            }
            PoolEvent::ConnectionEvent { peer_id, id, event } => {
                if self.banned_peer_connections.contains(&id) {
                    log::debug!("Ignoring event from banned peer: {} {:?}.", peer_id, id);
                } else {
                    #[allow(deprecated)]
                    self.behaviour.inject_event(peer_id, id, event);
                }
            }
            PoolEvent::AddressChange {
                peer_id,
                id,
                new_endpoint,
                old_endpoint,
            } => {
                if !self.banned_peer_connections.contains(&id) {
                    #[allow(deprecated)]
                    self.behaviour.inject_address_change(
                        &peer_id,
                        &id,
                        &old_endpoint,
                        &new_endpoint,
                    );
                }
            }
        }

        None
    }
    fn handle_transport_event(
        &mut self,
        event: TransportEvent<
            <transport::Boxed<(PeerId, StreamMuxerBox)> as Transport>::ListenerUpgrade,
            io::Error,
        >,
    ) -> Option<SwarmEvent<TBehaviour::OutEvent, THandlerErr<TBehaviour>>> {
        match event {
            TransportEvent::Incoming {
                listener_id: _,
                upgrade,
                local_addr,
                send_back_addr,
            } => {
                let handler = self.behaviour.new_handler();
                match self.pool.add_incoming(
                    upgrade,
                    handler,
                    IncomingInfo {
                        local_addr: &local_addr,
                        send_back_addr: &send_back_addr,
                    },
                ) {
                    Ok(_connection_id) => {
                        return Some(SwarmEvent::IncomingConnection {
                            local_addr,
                            send_back_addr,
                        });
                    }
                    Err((connection_limit, handler)) => {
                        #[allow(deprecated)]
                        self.behaviour
                            .inject_listen_failure(&local_addr, &send_back_addr, handler);
                        log::warn!("Incoming connection rejected: {:?}", connection_limit);
                    }
                };
            }
            TransportEvent::NewAddress {
                listener_id,
                listen_addr,
            } => {
                log::debug!("Listener {:?}; New address: {:?}", listener_id, listen_addr);
                let addrs = self.listened_addrs.entry(listener_id).or_default();
                if !addrs.contains(&listen_addr) {
                    addrs.push(listen_addr.clone())
                }
                #[allow(deprecated)]
                self.behaviour
                    .inject_new_listen_addr(listener_id, &listen_addr);
                return Some(SwarmEvent::NewListenAddr {
                    listener_id,
                    address: listen_addr,
                });
            }
            TransportEvent::AddressExpired {
                listener_id,
                listen_addr,
            } => {
                log::debug!(
                    "Listener {:?}; Expired address {:?}.",
                    listener_id,
                    listen_addr
                );
                if let Some(addrs) = self.listened_addrs.get_mut(&listener_id) {
                    addrs.retain(|a| a != &listen_addr);
                }
                #[allow(deprecated)]
                self.behaviour
                    .inject_expired_listen_addr(listener_id, &listen_addr);
                return Some(SwarmEvent::ExpiredListenAddr {
                    listener_id,
                    address: listen_addr,
                });
            }
            TransportEvent::ListenerClosed {
                listener_id,
                reason,
            } => {
                log::debug!("Listener {:?}; Closed by {:?}.", listener_id, reason);
                let addrs = self.listened_addrs.remove(&listener_id).unwrap_or_default();
                for addr in addrs.iter() {
                    #[allow(deprecated)]
                    self.behaviour.inject_expired_listen_addr(listener_id, addr);
                }
                #[allow(deprecated)]
                self.behaviour.inject_listener_closed(
                    listener_id,
                    match &reason {
                        Ok(()) => Ok(()),
                        Err(err) => Err(err),
                    },
                );
                return Some(SwarmEvent::ListenerClosed {
                    listener_id,
                    addresses: addrs.to_vec(),
                    reason,
                });
            }
            TransportEvent::ListenerError { listener_id, error } => {
                #[allow(deprecated)]
                self.behaviour.inject_listener_error(listener_id, &error);
                return Some(SwarmEvent::ListenerError { listener_id, error });
            }
        }
        None
    }
    fn handle_behaviour_event(
        &mut self,
        event: NetworkBehaviourAction<TBehaviour::OutEvent, TBehaviour::ConnectionHandler>,
    ) -> Option<SwarmEvent<TBehaviour::OutEvent, THandlerErr<TBehaviour>>> {
        match event {
            NetworkBehaviourAction::GenerateEvent(event) => {
                return Some(SwarmEvent::Behaviour(event))
            }
            NetworkBehaviourAction::Dial { opts, handler } => {
                let peer_id = opts.get_peer_id();
                if let Ok(()) = self.dial_with_handler(opts, handler) {
                    if let Some(peer_id) = peer_id {
                        return Some(SwarmEvent::Dialing(peer_id));
                    }
                }
            }
            NetworkBehaviourAction::NotifyHandler {
                peer_id,
                handler,
                event,
            } => {
                assert!(self.pending_event.is_none());
                let handler = match handler {
                    NotifyHandler::One(connection) => PendingNotifyHandler::One(connection),
                    NotifyHandler::Any => {
                        let ids = self
                            .pool
                            .iter_established_connections_of_peer(&peer_id)
                            .collect();
                        PendingNotifyHandler::Any(ids)
                    }
                };
                self.pending_event = Some((peer_id, handler, event));
            }
            NetworkBehaviourAction::ReportObservedAddr { address, score } => {
                // Maps the given `observed_addr`, representing an address of the local
                // node observed by a remote peer, onto the locally known listen addresses
                // to yield one or more addresses of the local node that may be publicly
                // reachable.
                //
                // I.e. this method incorporates the view of other peers into the listen
                // addresses seen by the local node to account for possible IP and port
                // mappings performed by intermediate network devices in an effort to
                // obtain addresses for the local peer that are also reachable for peers
                // other than the peer who reported the `observed_addr`.
                //
                // The translation is transport-specific. See [`Transport::address_translation`].
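                //
                // For example (illustrative addresses only): an observed address of
                // `/ip4/203.0.113.7/tcp/40001` combined with a listen address of
                // `/ip4/0.0.0.0/tcp/4001` may be translated by a TCP transport to
                // `/ip4/203.0.113.7/tcp/4001`, i.e. the observed IP with the local
                // listen port.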
                let translated_addresses = {
                    let mut addrs: Vec<_> = self
                        .listened_addrs
                        .values()
                        .flatten()
                        .filter_map(|server| self.transport.address_translation(server, &address))
                        .collect();

                    // remove duplicates
                    addrs.sort_unstable();
                    addrs.dedup();
                    addrs
                };
                for addr in translated_addresses {
                    self.add_external_address(addr, score);
                }
            }
            NetworkBehaviourAction::CloseConnection {
                peer_id,
                connection,
            } => match connection {
                CloseConnection::One(connection_id) => {
                    if let Some(conn) = self.pool.get_established(connection_id) {
                        conn.start_close();
                    }
                }
                CloseConnection::All => {
                    self.pool.disconnect(peer_id);
                }
            },
        }
        None
    }
/// Internal function used by everything event-related.
///
/// Polls the `Swarm` for the next event.
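    ///
    /// Externally, these events are typically obtained by driving the `Swarm`
    /// as a stream; a minimal sketch of such an event loop (assuming an async
    /// context and `futures::StreamExt` in scope; not compiled here):
    ///
    /// ```ignore
    /// while let Some(event) = swarm.next().await {
    ///     match event {
    ///         SwarmEvent::NewListenAddr { address, .. } => println!("Listening on {}", address),
    ///         SwarmEvent::Behaviour(event) => println!("Behaviour event: {:?}", event),
    ///         _ => {}
    ///     }
    /// }
    /// ```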
    fn poll_next_event(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll<SwarmEvent<TBehaviour::OutEvent, THandlerErr<TBehaviour>>> {
        // We use a `this` variable because the compiler can't mutably borrow multiple times
        // across a `Deref`.
        let this = &mut *self;

        // This loop polls the components below in a prioritized order.
        //
        // 1. [`NetworkBehaviour`]
        // 2. Connection [`Pool`]
        // 3. [`ListenersStream`]
        //
        // (1) is polled before (2) to prioritize local work over work coming from a remote.
        //
        // (2) is polled before (3) to prioritize existing connections over upgrading new incoming connections.
        loop {
            match this.pending_event.take() {
                // Try to deliver the pending event emitted by the [`NetworkBehaviour`] in the
                // previous iteration to the connection handler(s).
                Some((peer_id, handler, event)) => match handler {
                    PendingNotifyHandler::One(conn_id) => {
                        match this.pool.get_established(conn_id) {
                            Some(conn) => match notify_one(conn, event, cx) {
                                None => continue,
                                Some(event) => {
                                    this.pending_event = Some((peer_id, handler, event));
                                }
                            },
                            None => continue,
                        }
                    }
                    PendingNotifyHandler::Any(ids) => {
                        match notify_any::<_, _, TBehaviour>(ids, &mut this.pool, event, cx) {
                            None => continue,
                            Some((event, ids)) => {
                                let handler = PendingNotifyHandler::Any(ids);
                                this.pending_event = Some((peer_id, handler, event));
                            }
                        }
                    }
                },
                // No pending event. Allow the [`NetworkBehaviour`] to make progress.
                None => {
                    let behaviour_poll = {
                        let mut parameters = SwarmPollParameters {
                            local_peer_id: &this.local_peer_id,
                            supported_protocols: &this.supported_protocols,
                            listened_addrs: this.listened_addrs.values().flatten().collect(),
                            external_addrs: &this.external_addrs,
                        };
                        this.behaviour.poll(cx, &mut parameters)
                    };
                    match behaviour_poll {
                        Poll::Pending => {}
                        Poll::Ready(behaviour_event) => {
                            if let Some(swarm_event) = this.handle_behaviour_event(behaviour_event)
                            {
                                return Poll::Ready(swarm_event);
                            }

                            continue;
                        }
                    }
                }
            }

            // Poll the known peers.
            match this.pool.poll(cx) {
                Poll::Pending => {}
                Poll::Ready(pool_event) => {
                    if let Some(swarm_event) = this.handle_pool_event(pool_event) {
                        return Poll::Ready(swarm_event);
                    }

                    continue;
                }
            };

            // Poll the listener(s) for new connections.
            match Pin::new(&mut this.transport).poll(cx) {
                Poll::Pending => {}
                Poll::Ready(transport_event) => {
                    if let Some(swarm_event) = this.handle_transport_event(transport_event) {
                        return Poll::Ready(swarm_event);
                    }

                    continue;
                }
            }

            return Poll::Pending;
        }
    }
}
/// Connection to notify of a pending event.
///
/// The connection IDs from which one connection is chosen to be notified of
/// an event are captured at the time the behaviour emits the event, so that
/// the event is not forwarded to a new connection which the behaviour may not
/// have been aware of at the time it issued the request for sending it.
enum PendingNotifyHandler {
    One(ConnectionId),
    Any(SmallVec<[ConnectionId; 10]>),
}
/// Notify a single connection of an event.
///
/// Returns `Some` with the given event if the connection is not currently
/// ready to receive another event, in which case the current task is
/// scheduled to be woken up.
///
/// Returns `None` if the connection is closing or the event has been
/// successfully sent; in either case the event is consumed.
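///
/// A hypothetical call-site sketch (names are illustrative):
///
/// ```ignore
/// if let Some(unsent) = notify_one(conn, event, cx) {
///     // The handler was not ready; keep `unsent` and retry on the next poll.
/// }
/// ```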
fn notify_one<THandlerInEvent>(
    conn: &mut EstablishedConnection<THandlerInEvent>,
    event: THandlerInEvent,
    cx: &mut Context<'_>,
) -> Option<THandlerInEvent> {
    match conn.poll_ready_notify_handler(cx) {
        Poll::Pending => Some(event),
        Poll::Ready(Err(())) => None, // connection is closing
        Poll::Ready(Ok(())) => {
            // Can now only fail if connection is closing.
            let _ = conn.notify_handler(event);
            None
        }
    }
}
/// Notify any one of a given list of connections of a peer of an event.
///
/// Returns `Some` with the given event and a new list of connections if
/// none of the given connections was able to receive the event but at
/// least one of them is not closing, in which case the current task
/// is scheduled to be woken up. The returned connections are those which
/// may still become ready to receive another event.
///
/// Returns `None` if either all connections are closing or the event
/// was successfully sent to a handler; in either case the event is consumed.
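///
/// A hypothetical call-site sketch (names are illustrative):
///
/// ```ignore
/// if let Some((unsent, remaining)) = notify_any::<_, _, TBehaviour>(ids, pool, event, cx) {
///     // No handler accepted the event; retry `unsent` on `remaining` later.
/// }
/// ```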
fn notify_any<TTrans, THandler, TBehaviour>(
    ids: SmallVec<[ConnectionId; 10]>,
    pool: &mut Pool<THandler, TTrans>,
    event: THandlerInEvent<TBehaviour>,
    cx: &mut Context<'_>,
) -> Option<(THandlerInEvent<TBehaviour>, SmallVec<[ConnectionId; 10]>)>
where
    TTrans: Transport,
    TTrans::Error: Send + 'static,
    TBehaviour: NetworkBehaviour,
    THandler: IntoConnectionHandler,
    THandler::Handler: ConnectionHandler<
        InEvent = THandlerInEvent<TBehaviour>,
        OutEvent = THandlerOutEvent<TBehaviour>,
    >,
{
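// The event is wrapped in an `Option` (1) so that ownership can be moved
// into the first handler that is ready to receive it; if a handler turns
// out not to accept it after all, the event is recovered (2). Connections
// whose handlers are not ready are collected in `pending` so that the
// caller can retry delivery later.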
let mut pending = SmallVec ::new ( ) ;
let mut event = Some ( event ) ; // (1)
for id in ids . into_iter ( ) {
2022-11-03 05:47:00 +11:00
if let Some ( conn ) = pool . get_established ( id ) {
2020-03-04 13:49:25 +01:00
match conn . poll_ready_notify_handler ( cx ) {
Poll ::Pending = > pending . push ( id ) ,
Poll ::Ready ( Err ( ( ) ) ) = > { } // connection is closing
Poll ::Ready ( Ok ( ( ) ) ) = > {
let e = event . take ( ) . expect ( " by (1),(2) " ) ;
if let Err ( e ) = conn . notify_handler ( e ) {
event = Some ( e ) // (2)
} else {
break ;
}
}
}
}
}
event . and_then ( | e | {
if ! pending . is_empty ( ) {
Some ( ( e , pending ) )
} else {
None
2021-08-11 13:12:12 +02:00
}
2020-03-04 13:49:25 +01:00
} )
}
2021-08-09 15:29:58 +02:00
/// Stream of events returned by [`Swarm`].
2021-06-14 20:41:44 +02:00
///
/// Includes events from the [`NetworkBehaviour`] as well as events about
/// connection and listener status. See [`SwarmEvent`] for details.
///
/// Note: This stream is infinite and it is guaranteed that
/// [`Stream::poll_next`] will never return `Poll::Ready(None)`.
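///
/// A minimal consumption sketch (assuming an already constructed `swarm` and
/// use of [`StreamExt::next`](futures::StreamExt) in an async context):
///
/// ```ignore
/// while let Some(event) = swarm.next().await {
///     match event {
///         SwarmEvent::NewListenAddr { address, .. } => println!("listening on {}", address),
///         SwarmEvent::Behaviour(event) => println!("behaviour event: {:?}", event),
///         _ => {}
///     }
/// }
/// ```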
2021-08-09 15:29:58 +02:00
impl < TBehaviour > Stream for Swarm < TBehaviour >
where
TBehaviour : NetworkBehaviour ,
2020-01-07 11:57:00 +01:00
{
2021-08-09 15:29:58 +02:00
type Item = SwarmEvent < TBehaviourOutEvent < TBehaviour > , THandlerErr < TBehaviour > > ;
2020-01-07 11:57:00 +01:00
2020-07-27 20:27:33 +00:00
fn poll_next ( mut self : Pin < & mut Self > , cx : & mut Context < '_ > ) -> Poll < Option < Self ::Item > > {
2021-06-14 20:41:44 +02:00
self . as_mut ( ) . poll_next_event ( cx ) . map ( Some )
2020-01-07 11:57:00 +01:00
}
}
2021-06-14 20:41:44 +02:00
/// The stream of swarm events never terminates, so we can implement [`FusedStream`] for it.
2021-08-09 15:29:58 +02:00
impl < TBehaviour > FusedStream for Swarm < TBehaviour >
where
TBehaviour : NetworkBehaviour ,
2020-02-18 10:33:01 +01:00
{
fn is_terminated ( & self ) -> bool {
false
}
}
2019-04-04 12:25:42 -03:00
/// Parameters passed to `poll()` that the `NetworkBehaviour` has access to.
// TODO: #[derive(Debug)]
2019-06-18 10:23:26 +02:00
pub struct SwarmPollParameters < ' a > {
2019-04-04 12:25:42 -03:00
local_peer_id : & ' a PeerId ,
supported_protocols : & ' a [ Vec < u8 > ] ,
2022-07-04 04:16:57 +02:00
listened_addrs : Vec < & ' a Multiaddr > ,
2019-06-18 10:23:26 +02:00
external_addrs : & ' a Addresses ,
2019-04-04 12:25:42 -03:00
}
2019-06-18 10:23:26 +02:00
impl < ' a > PollParameters for SwarmPollParameters < ' a > {
2022-02-28 10:05:17 +01:00
type SupportedProtocolsIter = std ::iter ::Cloned < std ::slice ::Iter < ' a , std ::vec ::Vec < u8 > > > ;
2022-07-04 04:16:57 +02:00
type ListenedAddressesIter = std ::iter ::Cloned < std ::vec ::IntoIter < & ' a Multiaddr > > ;
2019-06-18 10:23:26 +02:00
type ExternalAddressesIter = AddressIntoIter ;
fn supported_protocols ( & self ) -> Self ::SupportedProtocolsIter {
2022-02-28 10:05:17 +01:00
self . supported_protocols . iter ( ) . cloned ( )
2019-04-04 12:25:42 -03:00
}
2019-06-18 10:23:26 +02:00
fn listened_addresses ( & self ) -> Self ::ListenedAddressesIter {
2022-07-04 04:16:57 +02:00
self . listened_addrs . clone ( ) . into_iter ( ) . cloned ( )
2019-04-04 12:25:42 -03:00
}
2019-06-18 10:23:26 +02:00
fn external_addresses ( & self ) -> Self ::ExternalAddressesIter {
self . external_addrs . clone ( ) . into_iter ( )
2019-04-04 12:25:42 -03:00
}
2019-06-18 10:23:26 +02:00
fn local_peer_id ( & self ) -> & PeerId {
2021-09-14 16:00:05 +03:00
self . local_peer_id
2019-04-04 12:25:42 -03:00
}
}
2022-02-13 21:57:38 +01:00
/// A [`SwarmBuilder`] provides an API for configuring and constructing a [`Swarm`].
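///
/// A minimal sketch of the intended usage, assuming `transport`, `behaviour`,
/// `local_peer_id` and an `executor` have been set up as described in the
/// module documentation (the buffer sizes are illustrative):
///
/// ```ignore
/// let mut swarm = SwarmBuilder::with_executor(transport, behaviour, local_peer_id, executor)
///     .notify_handler_buffer_size(NonZeroUsize::new(32).unwrap())
///     .connection_event_buffer_size(64)
///     .build();
/// ```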
2020-10-31 01:51:27 +11:00
pub struct SwarmBuilder < TBehaviour > {
2019-04-04 12:25:42 -03:00
local_peer_id : PeerId ,
2020-10-31 01:51:27 +11:00
transport : transport ::Boxed < ( PeerId , StreamMuxerBox ) > ,
2019-04-04 12:25:42 -03:00
behaviour : TBehaviour ,
2022-02-13 21:57:38 +01:00
pool_config : PoolConfig ,
connection_limits : ConnectionLimits ,
2019-04-04 12:25:42 -03:00
}
2020-10-31 01:51:27 +11:00
impl < TBehaviour > SwarmBuilder < TBehaviour >
2021-08-09 15:29:58 +02:00
where
TBehaviour : NetworkBehaviour ,
2019-04-04 12:25:42 -03:00
{
2020-03-31 15:41:13 +02:00
/// Creates a new `SwarmBuilder` from the given transport, behaviour and
/// local peer ID. The `Swarm` with its underlying `Network` is obtained
/// via [`SwarmBuilder::build`].
2022-11-15 15:26:03 +01:00
#[ deprecated(
since = " 0.41.0 " ,
note = " Use `SwarmBuilder::with_executor` or `SwarmBuilder::without_executor` instead. "
) ]
2020-10-16 16:53:02 +02:00
pub fn new (
2020-10-31 01:51:27 +11:00
transport : transport ::Boxed < ( PeerId , StreamMuxerBox ) > ,
2020-10-16 16:53:02 +02:00
behaviour : TBehaviour ,
local_peer_id : PeerId ,
) -> Self {
2022-11-15 15:26:03 +01:00
let executor : Option < Box < dyn Executor + Send > > = match ThreadPoolBuilder ::new ( )
. name_prefix ( " libp2p-swarm-task- " )
. create ( )
. ok ( )
{
Some ( tp ) = > Some ( Box ::new ( tp ) ) ,
None = > None ,
} ;
2019-04-04 12:25:42 -03:00
SwarmBuilder {
local_peer_id ,
2021-02-15 11:59:51 +01:00
transport ,
2019-04-04 12:25:42 -03:00
behaviour ,
2022-11-15 15:26:03 +01:00
pool_config : PoolConfig ::new ( executor ) ,
connection_limits : Default ::default ( ) ,
}
}
/// Creates a new [`SwarmBuilder`] from the given transport, behaviour, local peer ID and
/// executor. The `Swarm` with its underlying `Network` is obtained via
/// [`SwarmBuilder::build`].
pub fn with_executor (
transport : transport ::Boxed < ( PeerId , StreamMuxerBox ) > ,
behaviour : TBehaviour ,
local_peer_id : PeerId ,
executor : impl Executor + Send + 'static ,
) -> Self {
Self {
local_peer_id ,
transport ,
behaviour ,
pool_config : PoolConfig ::new ( Some ( Box ::new ( executor ) ) ) ,
connection_limits : Default ::default ( ) ,
}
}
/// Creates a new [`SwarmBuilder`] from the given transport, behaviour and local peer ID. The
/// `Swarm` with its underlying `Network` is obtained via [`SwarmBuilder::build`].
///
/// ## ⚠️ Performance warning
/// All connections will be polled on the current task, thus quite bad performance
/// characteristics should be expected. Whenever possible use an executor and
/// [`SwarmBuilder::with_executor`].
pub fn without_executor (
transport : transport ::Boxed < ( PeerId , StreamMuxerBox ) > ,
behaviour : TBehaviour ,
local_peer_id : PeerId ,
) -> Self {
Self {
local_peer_id ,
transport ,
behaviour ,
pool_config : PoolConfig ::new ( None ) ,
2022-02-13 21:57:38 +01:00
connection_limits : Default ::default ( ) ,
2019-04-04 12:25:42 -03:00
}
}
2020-03-31 15:41:13 +02:00
/// Configures the `Executor` to use for spawning background tasks.
///
/// By default, unless another executor has been configured,
2022-11-02 15:09:55 +00:00
/// [`SwarmBuilder::build`] will try to set up a
/// [`ThreadPool`](futures::executor::ThreadPool).
2022-11-15 15:26:03 +01:00
#[ deprecated(since = " 0.41.0 " , note = " Use `SwarmBuilder::with_executor` instead. " ) ]
pub fn executor ( mut self , executor : Box < dyn Executor + Send > ) -> Self {
self . pool_config = self . pool_config . with_executor ( executor ) ;
2019-04-04 12:25:42 -03:00
self
}
2020-05-15 14:40:10 +02:00
/// Configures the number of events from the [`NetworkBehaviour`]
2022-02-21 13:32:24 +01:00
/// destined for the [`ConnectionHandler`] that can be buffered before
2020-05-15 14:40:10 +02:00
/// the [`Swarm`] has to wait. An individual buffer with this number of
/// events exists for each individual connection.
///
/// The ideal value depends on the executor used, the CPU speed, and the
/// volume of events. If this value is too low, then the [`Swarm`] will
/// be sleeping more often than necessary. Increasing this value increases
/// the overall memory usage.
pub fn notify_handler_buffer_size ( mut self , n : NonZeroUsize ) -> Self {
2022-02-13 21:57:38 +01:00
self . pool_config = self . pool_config . with_notify_handler_buffer_size ( n ) ;
2020-05-15 14:40:10 +02:00
self
}
2022-02-21 13:32:24 +01:00
/// Configures the number of extra events from the [`ConnectionHandler`]
2020-05-15 14:40:10 +02:00
/// destined for the [`NetworkBehaviour`] that can be buffered before
2022-02-21 13:32:24 +01:00
/// the [`ConnectionHandler`] has to go to sleep.
2020-05-15 14:40:10 +02:00
///
2022-02-21 13:32:24 +01:00
/// There exists a buffer of events received from [`ConnectionHandler`]s
2020-05-15 14:40:10 +02:00
/// that the [`NetworkBehaviour`] has yet to process. This buffer is
2022-02-21 13:32:24 +01:00
/// shared between all instances of [`ConnectionHandler`]. Each instance of
/// [`ConnectionHandler`] is guaranteed one slot in this buffer, meaning
2020-05-15 14:40:10 +02:00
/// that delivering an event for the first time is guaranteed to be
/// instantaneous. Any extra event delivery, however, must wait for that
/// first event to be delivered or for an "extra slot" to be available.
///
/// This option configures the number of such "extra slots" in this
/// shared buffer. These extra slots are assigned in a first-come,
/// first-served basis.
///
/// The ideal value depends on the executor used, the CPU speed, the
/// average number of connections, and the volume of events. If this value
2022-02-21 13:32:24 +01:00
/// is too low, then the [`ConnectionHandler`]s will be sleeping more often
2020-05-15 14:40:10 +02:00
/// than necessary. Increasing this value increases the overall memory
/// usage, and more importantly the latency between the moment when an
/// event is emitted and the moment when it is received by the
/// [`NetworkBehaviour`].
pub fn connection_event_buffer_size ( mut self , n : usize ) -> Self {
2022-02-13 21:57:38 +01:00
self . pool_config = self . pool_config . with_connection_event_buffer_size ( n ) ;
2020-05-15 14:40:10 +02:00
self
}
2021-10-14 18:05:07 +02:00
/// Number of addresses concurrently dialed for a single outbound connection attempt.
pub fn dial_concurrency_factor ( mut self , factor : NonZeroU8 ) -> Self {
2022-02-13 21:57:38 +01:00
self . pool_config = self . pool_config . with_dial_concurrency_factor ( factor ) ;
2021-10-14 18:05:07 +02:00
self
}
2020-11-23 17:22:15 +01:00
/// Configures the connection limits.
pub fn connection_limits ( mut self , limits : ConnectionLimits ) -> Self {
2022-02-13 21:57:38 +01:00
self . connection_limits = limits ;
2020-01-20 14:18:35 +01:00
self
}
2020-11-25 14:26:49 +01:00
/// Configures an override for the substream upgrade protocol to use.
///
/// The substream upgrade protocol is the multistream-select protocol
/// used for protocol negotiation on substreams. Since a listener
/// supports all existing versions, the choice of upgrade protocol
/// only affects the "dialer", i.e. the peer opening a substream.
///
/// > **Note**: If configured, specific upgrade protocols for
/// > individual [`SubstreamProtocol`]s emitted by the `NetworkBehaviour`
/// > are ignored.
pub fn substream_upgrade_protocol_override ( mut self , v : libp2p_core ::upgrade ::Version ) -> Self {
2022-02-18 11:32:58 +01:00
self . pool_config = self . pool_config . with_substream_upgrade_protocol_override ( v ) ;
2020-11-25 14:26:49 +01:00
self
}
2022-10-03 19:01:45 -06:00
/// The maximum number of inbound streams concurrently negotiating on a
/// connection. New inbound streams exceeding the limit are dropped and thus
/// reset.
2022-06-08 11:48:46 +02:00
///
2022-10-03 19:01:45 -06:00
/// Note: This only enforces a limit on the number of concurrently
/// negotiating inbound streams. The total number of inbound streams on a
/// connection is the sum of negotiating and negotiated streams. A limit on
/// the total number of streams can be enforced at the
/// [`StreamMuxerBox`](libp2p_core::muxing::StreamMuxerBox) level.
2022-06-08 11:48:46 +02:00
pub fn max_negotiating_inbound_streams ( mut self , v : usize ) -> Self {
self . pool_config = self . pool_config . with_max_negotiating_inbound_streams ( v ) ;
self
}
2020-03-31 15:41:13 +02:00
/// Builds a `Swarm` with the current configuration.
2020-10-31 01:51:27 +11:00
pub fn build ( mut self ) -> Swarm < TBehaviour > {
2019-04-04 12:25:42 -03:00
let supported_protocols = self
. behaviour
. new_handler ( )
2019-05-08 20:23:28 +02:00
. inbound_protocol ( )
2019-04-04 12:25:42 -03:00
. protocol_info ( )
. into_iter ( )
. map ( | info | info . protocol_name ( ) . to_vec ( ) )
. collect ( ) ;
2021-08-09 15:29:58 +02:00
Swarm {
2022-02-13 21:57:38 +01:00
local_peer_id : self . local_peer_id ,
2022-07-04 04:16:57 +02:00
transport : self . transport ,
2022-11-15 15:26:03 +01:00
pool : Pool ::new ( self . local_peer_id , self . pool_config , self . connection_limits ) ,
2019-04-04 12:25:42 -03:00
behaviour : self . behaviour ,
supported_protocols ,
2022-07-04 04:16:57 +02:00
listened_addrs : HashMap ::new ( ) ,
2019-05-02 19:46:27 +02:00
external_addrs : Addresses ::default ( ) ,
2019-04-18 19:17:14 +03:00
banned_peers : HashSet ::new ( ) ,
2021-11-26 10:48:12 -05:00
banned_peer_connections : HashSet ::new ( ) ,
2020-11-25 14:26:49 +01:00
pending_event : None ,
2019-04-04 12:25:42 -03:00
}
}
}
2021-08-31 17:00:51 +02:00
/// The possible failures of dialing.
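///
/// A dial can fail synchronously, in which case [`Swarm::dial`] returns the
/// error directly. A minimal sketch (assuming a built `swarm` and a target
/// `addr: Multiaddr`):
///
/// ```ignore
/// match swarm.dial(addr) {
///     Ok(()) => println!("dial in progress"),
///     Err(DialError::Banned) => println!("peer is banned"),
///     Err(e) => println!("dial failed immediately: {}", e),
/// }
/// ```
///
/// Failures of the connection attempt itself are reported asynchronously via
/// [`SwarmEvent::OutgoingConnectionError`].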
2021-10-14 18:05:07 +02:00
#[ derive(Debug) ]
2020-05-12 13:10:18 +02:00
pub enum DialError {
2020-08-04 11:30:09 +02:00
/// The peer is currently banned.
Banned ,
2020-05-12 13:10:18 +02:00
/// The configured limit for simultaneous outgoing connections
/// has been reached.
ConnectionLimit ( ConnectionLimit ) ,
2021-08-31 17:00:51 +02:00
/// The peer being dialed is the local peer and thus the dial was aborted.
LocalPeerId ,
2020-05-12 13:10:18 +02:00
/// [`NetworkBehaviour::addresses_of_peer`] returned no addresses
/// for the peer to dial.
NoAddresses ,
2021-11-15 14:17:23 +01:00
/// The provided [`dial_opts::PeerCondition`] evaluated to false and thus
/// the dial was aborted.
DialPeerConditionFalse ( dial_opts ::PeerCondition ) ,
2021-10-14 18:05:07 +02:00
/// Pending connection attempt has been aborted.
Aborted ,
2022-01-18 21:21:11 +01:00
/// The provided peer identity is invalid.
InvalidPeerId ( Multihash ) ,
/// The peer identity obtained on the connection did not match the one that was expected.
WrongPeerId {
obtained : PeerId ,
endpoint : ConnectedPoint ,
} ,
2021-10-14 18:05:07 +02:00
/// An I/O error occurred on the connection.
ConnectionIo ( io ::Error ) ,
/// An error occurred while negotiating the transport protocol(s) on a connection.
Transport ( Vec < ( Multiaddr , TransportError < io ::Error > ) > ) ,
2020-05-12 13:10:18 +02:00
}
2021-10-14 18:05:07 +02:00
impl From < PendingOutboundConnectionError < io ::Error > > for DialError {
fn from ( error : PendingOutboundConnectionError < io ::Error > ) -> Self {
match error {
PendingConnectionError ::ConnectionLimit ( limit ) = > DialError ::ConnectionLimit ( limit ) ,
PendingConnectionError ::Aborted = > DialError ::Aborted ,
2022-01-18 21:21:11 +01:00
PendingConnectionError ::WrongPeerId { obtained , endpoint } = > {
DialError ::WrongPeerId { obtained , endpoint }
}
2021-10-14 18:05:07 +02:00
PendingConnectionError ::IO ( e ) = > DialError ::ConnectionIo ( e ) ,
PendingConnectionError ::Transport ( e ) = > DialError ::Transport ( e ) ,
}
}
}
2020-05-12 13:10:18 +02:00
impl fmt ::Display for DialError {
fn fmt ( & self , f : & mut fmt ::Formatter < '_ > ) -> fmt ::Result {
match self {
DialError ::ConnectionLimit ( err ) = > write! ( f , " Dial error: {} " , err ) ,
2020-08-04 11:30:09 +02:00
DialError ::NoAddresses = > write! ( f , " Dial error: no addresses for peer. " ) ,
2021-08-31 17:00:51 +02:00
DialError ::LocalPeerId = > write! ( f , " Dial error: tried to dial local peer id. " ) ,
2020-08-04 11:30:09 +02:00
DialError ::Banned = > write! ( f , " Dial error: peer is banned. " ) ,
2021-08-31 17:00:51 +02:00
DialError ::DialPeerConditionFalse ( c ) = > {
write! (
f ,
" Dial error: condition {:?} for dialing peer was false. " ,
c
)
}
2021-10-14 18:05:07 +02:00
DialError ::Aborted = > write! (
f ,
" Dial error: Pending connection attempt has been aborted. "
) ,
2022-11-23 11:51:47 +11:00
DialError ::InvalidPeerId ( multihash ) = > {
write! ( f , " Dial error: multihash {:?} is not a PeerId " , multihash )
}
DialError ::WrongPeerId { obtained , endpoint } = > write! (
f ,
" Dial error: Unexpected peer ID {} at {:?}. " ,
obtained , endpoint
) ,
2021-10-14 18:05:07 +02:00
DialError ::ConnectionIo ( e ) = > write! (
f ,
2022-11-23 11:51:47 +11:00
" Dial error: An I/O error occurred on the connection: {:?}. " ,
e
2021-10-14 18:05:07 +02:00
) ,
2022-11-23 11:51:47 +11:00
DialError ::Transport ( errors ) = > {
write! ( f , " Failed to negotiate transport protocol(s): [ " ) ? ;
for ( addr , error ) in errors {
write! ( f , " ({addr} " ) ? ;
print_error_chain ( f , error ) ? ;
write! ( f , " ) " ) ? ;
}
write! ( f , " ] " ) ? ;
Ok ( ( ) )
}
2020-05-12 13:10:18 +02:00
}
}
}
2022-11-23 11:51:47 +11:00
fn print_error_chain ( f : & mut fmt ::Formatter < '_ > , e : & dyn error ::Error ) -> fmt ::Result {
write! ( f , " : {e} " ) ? ;
if let Some ( source ) = e . source ( ) {
print_error_chain ( f , source ) ? ;
}
Ok ( ( ) )
}
2020-05-12 13:10:18 +02:00
impl error ::Error for DialError {
fn source ( & self ) -> Option < & ( dyn error ::Error + 'static ) > {
match self {
DialError ::ConnectionLimit ( err ) = > Some ( err ) ,
2021-08-31 17:00:51 +02:00
DialError ::LocalPeerId = > None ,
2020-08-04 11:30:09 +02:00
DialError ::NoAddresses = > None ,
DialError ::Banned = > None ,
2021-08-31 17:00:51 +02:00
DialError ::DialPeerConditionFalse ( _ ) = > None ,
2021-10-14 18:05:07 +02:00
DialError ::Aborted = > None ,
2022-01-18 21:21:11 +01:00
DialError ::InvalidPeerId { .. } = > None ,
DialError ::WrongPeerId { .. } = > None ,
2021-10-14 18:05:07 +02:00
DialError ::ConnectionIo ( _ ) = > None ,
DialError ::Transport ( _ ) = > None ,
2020-05-12 13:10:18 +02:00
}
}
}
2022-02-13 21:57:38 +01:00
/// Information about the connections obtained by [`Swarm::network_info()`].
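///
/// For example (a sketch, assuming an already built `swarm`):
///
/// ```ignore
/// let info = swarm.network_info();
/// println!("connected peers: {}", info.num_peers());
/// println!("established connections: {}", info.connection_counters().num_established());
/// ```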
#[ derive(Clone, Debug) ]
pub struct NetworkInfo {
/// The total number of connected peers.
num_peers : usize ,
/// Counters of ongoing network connections.
connection_counters : ConnectionCounters ,
}
impl NetworkInfo {
/// The number of connected peers, i.e. peers with whom at least
/// one established connection exists.
pub fn num_peers ( & self ) -> usize {
self . num_peers
}
/// Gets counters for ongoing network connections.
pub fn connection_counters ( & self ) -> & ConnectionCounters {
& self . connection_counters
}
}
2022-04-06 20:23:16 +02:00
/// Ensures a given `Multiaddr` is a `/p2p/...` address for the given peer.
///
/// If the given address is already a `p2p` address for the given peer,
/// i.e. the last encapsulated protocol is `/p2p/<peer-id>`, this is a no-op.
///
/// If the given address is already a `p2p` address for a different peer
/// than the one given, the given `Multiaddr` is returned as an `Err`.
///
/// If the given address is not yet a `p2p` address for the given peer,
/// the `/p2p/<peer-id>` protocol is appended to the returned address.
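///
/// For example (a sketch; `p2p_addr` is a private helper, shown with
/// illustrative values):
///
/// ```ignore
/// let peer = PeerId::random();
/// let addr: Multiaddr = "/memory/1234".parse().unwrap();
/// // `addr` has no trailing `/p2p` protocol, so the peer id is appended.
/// assert_eq!(
///     p2p_addr(Some(peer), addr.clone()),
///     Ok(addr.with(Protocol::P2p(peer.into())))
/// );
/// ```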
fn p2p_addr ( peer : Option < PeerId > , addr : Multiaddr ) -> Result < Multiaddr , Multiaddr > {
let peer = match peer {
Some ( p ) = > p ,
None = > return Ok ( addr ) ,
} ;
if let Some ( Protocol ::P2p ( hash ) ) = addr . iter ( ) . last ( ) {
if & hash ! = peer . as_ref ( ) {
return Err ( addr ) ;
}
Ok ( addr )
} else {
Ok ( addr . with ( Protocol ::P2p ( peer . into ( ) ) ) )
}
}
2020-02-07 16:29:30 +01:00
#[ cfg(test) ]
mod tests {
2020-08-04 11:30:09 +02:00
use super ::* ;
use crate ::test ::{ CallTraceBehaviour , MockBehaviour } ;
2022-02-13 21:57:38 +01:00
use futures ::executor ::block_on ;
2022-11-15 15:26:03 +01:00
use futures ::executor ::ThreadPool ;
2022-02-13 21:57:38 +01:00
use futures ::future ::poll_fn ;
use futures ::future ::Either ;
use futures ::{ executor , future , ready } ;
2022-11-23 11:51:47 +11:00
use libp2p_core ::either ::EitherError ;
2022-02-13 21:57:38 +01:00
use libp2p_core ::multiaddr ::multiaddr ;
2022-11-23 11:51:47 +11:00
use libp2p_core ::transport ::memory ::MemoryTransportError ;
2022-07-04 04:16:57 +02:00
use libp2p_core ::transport ::TransportEvent ;
2022-11-13 10:59:14 +11:00
use libp2p_core ::{ identity , multiaddr , transport , upgrade } ;
2022-11-23 11:51:47 +11:00
use libp2p_core ::{ Endpoint , UpgradeError } ;
2022-11-13 10:59:14 +11:00
use libp2p_plaintext as plaintext ;
use libp2p_yamux as yamux ;
2022-09-22 12:48:32 +04:00
use quickcheck ::* ;
2022-11-23 11:51:47 +11:00
use void ::Void ;
2019-04-04 12:25:42 -03:00
2021-07-03 00:35:51 +07:00
// Test execution state.
// Connecting => Disconnecting => Connecting.
enum State {
Connecting ,
Disconnecting ,
}
2022-02-13 21:57:38 +01:00
fn new_test_swarm < T , O > (
handler_proto : T ,
) -> SwarmBuilder < CallTraceBehaviour < MockBehaviour < T , O > > >
2020-08-04 11:30:09 +02:00
where
2022-02-21 13:32:24 +01:00
T : ConnectionHandler + Clone ,
2020-08-04 11:30:09 +02:00
T ::OutEvent : Clone ,
O : Send + 'static ,
{
2020-09-07 12:13:10 +02:00
let id_keys = identity ::Keypair ::generate_ed25519 ( ) ;
2021-08-31 17:00:51 +02:00
let local_public_key = id_keys . public ( ) ;
2020-09-07 12:13:10 +02:00
let transport = transport ::MemoryTransport ::default ( )
2020-08-04 11:30:09 +02:00
. upgrade ( upgrade ::Version ::V1 )
2021-08-31 17:00:51 +02:00
. authenticate ( plaintext ::PlainText2Config {
local_public_key : local_public_key . clone ( ) ,
} )
. multiplex ( yamux ::YamuxConfig ::default ( ) )
2020-10-16 16:53:02 +02:00
. boxed ( ) ;
2020-09-07 12:13:10 +02:00
let behaviour = CallTraceBehaviour ::new ( MockBehaviour ::new ( handler_proto ) ) ;
2022-11-15 15:26:03 +01:00
match ThreadPool ::new ( ) . ok ( ) {
Some ( tp ) = > {
SwarmBuilder ::with_executor ( transport , behaviour , local_public_key . into ( ) , tp )
}
None = > SwarmBuilder ::without_executor ( transport , behaviour , local_public_key . into ( ) ) ,
}
2020-08-04 11:30:09 +02:00
}
2021-07-03 00:35:51 +07:00
fn swarms_connected < TBehaviour > (
swarm1 : & Swarm < CallTraceBehaviour < TBehaviour > > ,
swarm2 : & Swarm < CallTraceBehaviour < TBehaviour > > ,
num_connections : usize ,
) -> bool
where
TBehaviour : NetworkBehaviour ,
2022-02-21 13:32:24 +01:00
< < TBehaviour ::ConnectionHandler as IntoConnectionHandler > ::Handler as ConnectionHandler > ::OutEvent : Clone ,
2021-07-03 00:35:51 +07:00
{
2022-03-02 12:10:57 +01:00
swarm1
. behaviour ( )
. num_connections_to_peer ( * swarm2 . local_peer_id ( ) )
= = num_connections
& & swarm2
. behaviour ( )
. num_connections_to_peer ( * swarm1 . local_peer_id ( ) )
= = num_connections
& & swarm1 . is_connected ( swarm2 . local_peer_id ( ) )
& & swarm2 . is_connected ( swarm1 . local_peer_id ( ) )
2021-07-03 00:35:51 +07:00
}
fn swarms_disconnected < TBehaviour : NetworkBehaviour > (
swarm1 : & Swarm < CallTraceBehaviour < TBehaviour > > ,
swarm2 : & Swarm < CallTraceBehaviour < TBehaviour > > ,
) -> bool
where
TBehaviour : NetworkBehaviour ,
2022-02-21 13:32:24 +01:00
< < TBehaviour ::ConnectionHandler as IntoConnectionHandler > ::Handler as ConnectionHandler > ::OutEvent : Clone
2021-07-03 00:35:51 +07:00
{
2022-03-02 12:10:57 +01:00
swarm1
. behaviour ( )
. num_connections_to_peer ( * swarm2 . local_peer_id ( ) )
= = 0
& & swarm2
. behaviour ( )
. num_connections_to_peer ( * swarm1 . local_peer_id ( ) )
= = 0
& & ! swarm1 . is_connected ( swarm2 . local_peer_id ( ) )
& & ! swarm2 . is_connected ( swarm1 . local_peer_id ( ) )
2021-07-03 00:35:51 +07:00
}
/// Establishes multiple connections between two peers,
2020-08-04 11:30:09 +02:00
/// after which one peer bans the other.
///
/// The test expects both behaviours to be notified via pairs of
2022-08-10 12:50:31 +04:30
/// [`NetworkBehaviour::inject_connection_established`] / [`NetworkBehaviour::inject_connection_closed`]
/// calls while unbanned.
2021-11-26 10:48:12 -05:00
///
/// While the ban is in effect, further dials occur. For these connections no
2022-08-10 12:50:31 +04:30
/// [`NetworkBehaviour::inject_connection_established`], [`NetworkBehaviour::inject_connection_closed`]
/// calls should be registered.
2020-08-04 11:30:09 +02:00
#[ test ]
fn test_connect_disconnect_ban ( ) {
// Since the test does not try to open any substreams, we can
// use the keep-alive connection handler.
2022-10-06 03:50:11 +11:00
let handler_proto = keep_alive ::ConnectionHandler ;
2020-08-04 11:30:09 +02:00
2022-02-13 21:57:38 +01:00
let mut swarm1 = new_test_swarm ::< _ , ( ) > ( handler_proto . clone ( ) ) . build ( ) ;
let mut swarm2 = new_test_swarm ::< _ , ( ) > ( handler_proto ) . build ( ) ;
2020-08-04 11:30:09 +02:00
let addr1 : Multiaddr = multiaddr ::Protocol ::Memory ( rand ::random ::< u64 > ( ) ) . into ( ) ;
let addr2 : Multiaddr = multiaddr ::Protocol ::Memory ( rand ::random ::< u64 > ( ) ) . into ( ) ;
2022-10-04 18:24:38 +11:00
swarm1 . listen_on ( addr1 ) . unwrap ( ) ;
2021-11-26 10:48:12 -05:00
swarm2 . listen_on ( addr2 . clone ( ) ) . unwrap ( ) ;
2020-08-04 11:30:09 +02:00
2021-03-18 14:55:33 +01:00
let swarm1_id = * swarm1 . local_peer_id ( ) ;
2020-08-04 11:30:09 +02:00
2021-11-26 10:48:12 -05:00
enum Stage {
/// Waiting for the peers to connect. Banning has not occurred.
Connecting ,
/// Ban occurred.
Banned ,
/// Ban is in place and a dial is ongoing.
BannedDial ,
/// Mid-ban dial was registered and the peer was unbanned.
Unbanned ,
/// There are dial attempts ongoing for the no longer banned peers.
Reconnecting ,
}
2020-08-04 11:30:09 +02:00
let num_connections = 10 ;
2021-07-03 00:35:51 +07:00
for _ in 0 .. num_connections {
2021-11-15 14:17:23 +01:00
swarm1 . dial ( addr2 . clone ( ) ) . unwrap ( ) ;
2020-08-04 11:30:09 +02:00
}
2021-11-26 10:48:12 -05:00
let mut s1_expected_conns = num_connections ;
let mut s2_expected_conns = num_connections ;
let mut stage = Stage ::Connecting ;
executor ::block_on ( future ::poll_fn ( move | cx | loop {
let poll1 = Swarm ::poll_next_event ( Pin ::new ( & mut swarm1 ) , cx ) ;
let poll2 = Swarm ::poll_next_event ( Pin ::new ( & mut swarm2 ) , cx ) ;
match stage {
Stage ::Connecting = > {
if swarm1 . behaviour . assert_connected ( s1_expected_conns , 1 )
& & swarm2 . behaviour . assert_connected ( s2_expected_conns , 1 )
{
// Setup to test that already established connections are correctly closed
// and reported as such after the peer is banned.
swarm2 . ban_peer_id ( swarm1_id ) ;
stage = Stage ::Banned ;
2020-08-04 11:30:09 +02:00
}
2021-11-26 10:48:12 -05:00
}
Stage ::Banned = > {
if swarm1 . behaviour . assert_disconnected ( s1_expected_conns , 1 )
& & swarm2 . behaviour . assert_disconnected ( s2_expected_conns , 1 )
{
// Setup to test that new connections of banned peers are not reported.
swarm1 . dial ( addr2 . clone ( ) ) . unwrap ( ) ;
s1_expected_conns + = 1 ;
stage = Stage ::BannedDial ;
2021-07-03 00:35:51 +07:00
}
}
2021-11-26 10:48:12 -05:00
Stage ::BannedDial = > {
if swarm2 . network_info ( ) . num_peers ( ) = = 1 {
// The banned connection was established. Check that it was not reported to
// the behaviour of the banning swarm.
assert_eq! (
2022-11-17 09:28:40 +00:00
swarm2 . behaviour . on_connection_established . len ( ) , s2_expected_conns ,
2021-11-26 10:48:12 -05:00
" No additional closed connections should be reported for the banned peer "
) ;
2021-07-03 00:35:51 +07:00
2021-11-26 10:48:12 -05:00
// Setup to test that the banned connection is not reported upon closing
// even if the peer is unbanned.
swarm2 . unban_peer_id ( swarm1_id ) ;
stage = Stage ::Unbanned ;
}
2021-07-03 00:35:51 +07:00
}
2021-11-26 10:48:12 -05:00
Stage ::Unbanned = > {
if swarm2 . network_info ( ) . num_peers ( ) = = 0 {
// The banned connection has closed. Check that it was not reported.
assert_eq! (
2022-11-17 09:28:40 +00:00
swarm2 . behaviour . on_connection_closed . len ( ) , s2_expected_conns ,
2021-11-26 10:48:12 -05:00
" No additional closed connections should be reported for the banned peer "
) ;
assert! ( swarm2 . banned_peer_connections . is_empty ( ) ) ;
// Setup to test that a ban lifted does not affect future connections.
for _ in 0 .. num_connections {
swarm1 . dial ( addr2 . clone ( ) ) . unwrap ( ) ;
}
s1_expected_conns + = num_connections ;
s2_expected_conns + = num_connections ;
stage = Stage ::Reconnecting ;
}
}
Stage ::Reconnecting = > {
2022-11-17 09:28:40 +00:00
if swarm1 . behaviour . on_connection_established . len ( ) = = s1_expected_conns
2021-11-26 10:48:12 -05:00
& & swarm2 . behaviour . assert_connected ( s2_expected_conns , 2 )
{
return Poll ::Ready ( ( ) ) ;
}
}
}
if poll1 . is_pending ( ) & & poll2 . is_pending ( ) {
return Poll ::Pending ;
2021-07-03 00:35:51 +07:00
}
} ) )
}
/// Establishes multiple connections between two peers,
2021-08-09 15:29:58 +02:00
/// after which one peer disconnects the other using [`Swarm::disconnect_peer_id`].
2021-07-03 00:35:51 +07:00
///
/// The test expects both behaviours to be notified via pairs of
2022-08-10 12:50:31 +04:30
/// [`NetworkBehaviour::inject_connection_established`] / [`NetworkBehaviour::inject_connection_closed`] calls.
2021-07-03 00:35:51 +07:00
#[ test ]
fn test_swarm_disconnect ( ) {
// Since the test does not try to open any substreams, we can
// use the keep-alive connection handler.
2022-10-06 03:50:11 +11:00
let handler_proto = keep_alive ::ConnectionHandler ;
2021-07-03 00:35:51 +07:00
2022-02-13 21:57:38 +01:00
let mut swarm1 = new_test_swarm ::< _ , ( ) > ( handler_proto . clone ( ) ) . build ( ) ;
let mut swarm2 = new_test_swarm ::< _ , ( ) > ( handler_proto ) . build ( ) ;
2021-07-03 00:35:51 +07:00
let addr1 : Multiaddr = multiaddr ::Protocol ::Memory ( rand ::random ::< u64 > ( ) ) . into ( ) ;
let addr2 : Multiaddr = multiaddr ::Protocol ::Memory ( rand ::random ::< u64 > ( ) ) . into ( ) ;
2021-11-26 10:48:12 -05:00
swarm1 . listen_on ( addr1 . clone ( ) ) . unwrap ( ) ;
swarm2 . listen_on ( addr2 . clone ( ) ) . unwrap ( ) ;
2021-07-03 00:35:51 +07:00
let swarm1_id = * swarm1 . local_peer_id ( ) ;
let mut reconnected = false ;
let num_connections = 10 ;
for _ in 0 .. num_connections {
2021-11-15 14:17:23 +01:00
swarm1 . dial ( addr2 . clone ( ) ) . unwrap ( ) ;
2021-07-03 00:35:51 +07:00
}
let mut state = State ::Connecting ;
executor ::block_on ( future ::poll_fn ( move | cx | loop {
let poll1 = Swarm ::poll_next_event ( Pin ::new ( & mut swarm1 ) , cx ) ;
let poll2 = Swarm ::poll_next_event ( Pin ::new ( & mut swarm2 ) , cx ) ;
match state {
State ::Connecting = > {
if swarms_connected ( & swarm1 , & swarm2 , num_connections ) {
if reconnected {
return Poll ::Ready ( ( ) ) ;
}
2021-08-11 13:12:12 +02:00
swarm2
2021-11-26 10:48:12 -05:00
. disconnect_peer_id ( swarm1_id )
2021-07-03 00:35:51 +07:00
. expect ( " Error disconnecting " ) ;
state = State ::Disconnecting ;
}
2021-08-11 13:12:12 +02:00
}
2021-07-03 00:35:51 +07:00
State ::Disconnecting = > {
2022-03-02 12:10:57 +01:00
if swarms_disconnected ( & swarm1 , & swarm2 ) {
2021-07-03 00:35:51 +07:00
if reconnected {
return Poll ::Ready ( ( ) ) ;
}
reconnected = true ;
for _ in 0 .. num_connections {
2021-11-15 14:17:23 +01:00
swarm2 . dial ( addr1 . clone ( ) ) . unwrap ( ) ;
2020-08-04 11:30:09 +02:00
}
2021-07-03 00:35:51 +07:00
state = State ::Connecting ;
2020-08-04 11:30:09 +02:00
}
}
2021-08-11 13:12:12 +02:00
}
2020-08-04 11:30:09 +02:00
if poll1 . is_pending ( ) & & poll2 . is_pending ( ) {
return Poll ::Pending ;
}
} ) )
}
2021-07-03 00:35:51 +07:00
/// Establishes multiple connections between two peers,
/// after which one peer disconnects the other
/// using [`NetworkBehaviourAction::CloseConnection`] returned by a [`NetworkBehaviour`].
///
/// The test expects both behaviours to be notified via pairs of
2022-08-10 12:50:31 +04:30
/// [`NetworkBehaviour::inject_connection_established`] / [`NetworkBehaviour::inject_connection_closed`] calls.
2021-07-03 00:35:51 +07:00
#[ test ]
fn test_behaviour_disconnect_all ( ) {
// Since the test does not try to open any substreams, we can
// use the keep-alive connection handler.
2022-10-06 03:50:11 +11:00
let handler_proto = keep_alive ::ConnectionHandler ;
2021-07-03 00:35:51 +07:00
2022-02-13 21:57:38 +01:00
let mut swarm1 = new_test_swarm ::< _ , ( ) > ( handler_proto . clone ( ) ) . build ( ) ;
let mut swarm2 = new_test_swarm ::< _ , ( ) > ( handler_proto ) . build ( ) ;
2021-07-03 00:35:51 +07:00
let addr1 : Multiaddr = multiaddr ::Protocol ::Memory ( rand ::random ::< u64 > ( ) ) . into ( ) ;
let addr2 : Multiaddr = multiaddr ::Protocol ::Memory ( rand ::random ::< u64 > ( ) ) . into ( ) ;
2021-11-26 10:48:12 -05:00
swarm1 . listen_on ( addr1 . clone ( ) ) . unwrap ( ) ;
swarm2 . listen_on ( addr2 . clone ( ) ) . unwrap ( ) ;
2021-07-03 00:35:51 +07:00
let swarm1_id = * swarm1 . local_peer_id ( ) ;
let mut reconnected = false ;
let num_connections = 10 ;
for _ in 0 .. num_connections {
2021-11-15 14:17:23 +01:00
swarm1 . dial ( addr2 . clone ( ) ) . unwrap ( ) ;
2021-07-03 00:35:51 +07:00
}
let mut state = State ::Connecting ;
executor ::block_on ( future ::poll_fn ( move | cx | loop {
let poll1 = Swarm ::poll_next_event ( Pin ::new ( & mut swarm1 ) , cx ) ;
let poll2 = Swarm ::poll_next_event ( Pin ::new ( & mut swarm2 ) , cx ) ;
match state {
State ::Connecting = > {
if swarms_connected ( & swarm1 , & swarm2 , num_connections ) {
if reconnected {
return Poll ::Ready ( ( ) ) ;
}
swarm2 . behaviour . inner ( ) . next_action . replace (
NetworkBehaviourAction ::CloseConnection {
2021-11-26 10:48:12 -05:00
peer_id : swarm1_id ,
2021-07-03 00:35:51 +07:00
connection : CloseConnection ::All ,
2021-08-11 13:12:12 +02:00
} ,
) ;
2021-07-03 00:35:51 +07:00
state = State ::Disconnecting ;
2022-03-02 12:10:57 +01:00
continue ;
2021-07-03 00:35:51 +07:00
}
2021-08-11 13:12:12 +02:00
}
2021-07-03 00:35:51 +07:00
State ::Disconnecting = > {
2022-03-02 12:10:57 +01:00
if swarms_disconnected ( & swarm1 , & swarm2 ) {
2021-07-03 00:35:51 +07:00
reconnected = true ;
for _ in 0 .. num_connections {
2021-11-15 14:17:23 +01:00
swarm2 . dial ( addr1 . clone ( ) ) . unwrap ( ) ;
2021-08-11 13:12:12 +02:00
}
2021-07-03 00:35:51 +07:00
state = State ::Connecting ;
2022-03-02 12:10:57 +01:00
continue ;
2021-07-03 00:35:51 +07:00
}
}
2021-08-11 13:12:12 +02:00
}
2021-07-03 00:35:51 +07:00
if poll1 . is_pending ( ) & & poll2 . is_pending ( ) {
return Poll ::Pending ;
}
} ) )
}
/// Establishes multiple connections between two peers,
/// after which one peer closes a single connection
/// using [`NetworkBehaviourAction::CloseConnection`] returned by a [`NetworkBehaviour`].
///
/// The test expects both behaviours to be notified via pairs of
2022-08-10 12:50:31 +04:30
/// [`NetworkBehaviour::inject_connection_established`] / [`NetworkBehaviour::inject_connection_closed`] calls.
2021-07-03 00:35:51 +07:00
#[ test ]
fn test_behaviour_disconnect_one ( ) {
// Since the test does not try to open any substreams, we can
// use the keep-alive connection handler.
2022-10-06 03:50:11 +11:00
let handler_proto = keep_alive ::ConnectionHandler ;
2021-07-03 00:35:51 +07:00
2022-02-13 21:57:38 +01:00
let mut swarm1 = new_test_swarm ::< _ , ( ) > ( handler_proto . clone ( ) ) . build ( ) ;
let mut swarm2 = new_test_swarm ::< _ , ( ) > ( handler_proto ) . build ( ) ;
2021-07-03 00:35:51 +07:00
let addr1 : Multiaddr = multiaddr ::Protocol ::Memory ( rand ::random ::< u64 > ( ) ) . into ( ) ;
let addr2 : Multiaddr = multiaddr ::Protocol ::Memory ( rand ::random ::< u64 > ( ) ) . into ( ) ;
2022-10-04 18:24:38 +11:00
swarm1 . listen_on ( addr1 ) . unwrap ( ) ;
2021-11-26 10:48:12 -05:00
swarm2 . listen_on ( addr2 . clone ( ) ) . unwrap ( ) ;
2021-07-03 00:35:51 +07:00
let swarm1_id = * swarm1 . local_peer_id ( ) ;
let num_connections = 10 ;
for _ in 0 .. num_connections {
2021-11-15 14:17:23 +01:00
swarm1 . dial ( addr2 . clone ( ) ) . unwrap ( ) ;
2021-07-03 00:35:51 +07:00
}
let mut state = State ::Connecting ;
let mut disconnected_conn_id = None ;
executor ::block_on ( future ::poll_fn ( move | cx | loop {
let poll1 = Swarm ::poll_next_event ( Pin ::new ( & mut swarm1 ) , cx ) ;
let poll2 = Swarm ::poll_next_event ( Pin ::new ( & mut swarm2 ) , cx ) ;
match state {
State ::Connecting = > {
if swarms_connected ( & swarm1 , & swarm2 , num_connections ) {
disconnected_conn_id = {
2022-11-17 09:28:40 +00:00
let conn_id =
swarm2 . behaviour . on_connection_established [ num_connections / 2 ] . 1 ;
2021-07-03 00:35:51 +07:00
swarm2 . behaviour . inner ( ) . next_action . replace (
NetworkBehaviourAction ::CloseConnection {
2021-11-26 10:48:12 -05:00
peer_id : swarm1_id ,
2021-07-03 00:35:51 +07:00
connection : CloseConnection ::One ( conn_id ) ,
} ,
) ;
Some ( conn_id )
} ;
state = State ::Disconnecting ;
}
2021-08-11 13:12:12 +02:00
}
2021-07-03 00:35:51 +07:00
State ::Disconnecting = > {
for s in & [ & swarm1 , & swarm2 ] {
2022-02-09 10:08:28 -05:00
assert! ( s
. behaviour
2022-11-17 09:28:40 +00:00
. on_connection_closed
2022-02-09 10:08:28 -05:00
. iter ( )
. all ( | ( .. , remaining_conns ) | * remaining_conns > 0 ) ) ;
2022-11-17 09:28:40 +00:00
assert_eq! ( s . behaviour . on_connection_established . len ( ) , num_connections ) ;
2022-02-09 10:08:28 -05:00
s . behaviour . assert_connected ( num_connections , 1 ) ;
2021-07-03 00:35:51 +07:00
}
if [ & swarm1 , & swarm2 ]
. iter ( )
2022-11-17 09:28:40 +00:00
. all ( | s | s . behaviour . on_connection_closed . len ( ) = = 1 )
2021-07-03 00:35:51 +07:00
{
2022-11-17 09:28:40 +00:00
let conn_id = swarm2 . behaviour . on_connection_closed [ 0 ] . 1 ;
2021-07-03 00:35:51 +07:00
assert_eq! ( Some ( conn_id ) , disconnected_conn_id ) ;
return Poll ::Ready ( ( ) ) ;
}
}
2021-08-11 13:12:12 +02:00
}
2021-07-03 00:35:51 +07:00
if poll1 . is_pending ( ) & & poll2 . is_pending ( ) {
return Poll ::Pending ;
}
} ) )
}
2022-02-13 21:57:38 +01:00
#[ test ]
fn concurrent_dialing ( ) {
#[ derive(Clone, Debug) ]
struct DialConcurrencyFactor ( NonZeroU8 ) ;
impl Arbitrary for DialConcurrencyFactor {
2022-09-22 12:48:32 +04:00
fn arbitrary ( g : & mut Gen ) -> Self {
Self ( NonZeroU8 ::new ( g . gen_range ( 1 .. 11 ) ) . unwrap ( ) )
2022-02-13 21:57:38 +01:00
}
}
fn prop ( concurrency_factor : DialConcurrencyFactor ) {
block_on ( async {
2022-10-06 03:50:11 +11:00
let mut swarm = new_test_swarm ::< _ , ( ) > ( keep_alive ::ConnectionHandler )
. dial_concurrency_factor ( concurrency_factor . 0 )
. build ( ) ;
2022-02-13 21:57:38 +01:00
// Listen on `concurrency_factor + 2` addresses.
//
// `+ 2` to ensure a subset of addresses is dialed by network_2.
let num_listen_addrs = concurrency_factor . 0. get ( ) + 2 ;
let mut listen_addresses = Vec ::new ( ) ;
2022-07-04 04:16:57 +02:00
let mut transports = Vec ::new ( ) ;
2022-02-13 21:57:38 +01:00
for _ in 0 .. num_listen_addrs {
2022-07-04 04:16:57 +02:00
let mut transport = transport ::MemoryTransport ::default ( ) . boxed ( ) ;
transport . listen_on ( " /memory/0 " . parse ( ) . unwrap ( ) ) . unwrap ( ) ;
2022-02-13 21:57:38 +01:00
2022-07-04 04:16:57 +02:00
match transport . select_next_some ( ) . await {
TransportEvent ::NewAddress { listen_addr , .. } = > {
listen_addresses . push ( listen_addr ) ;
2022-02-13 21:57:38 +01:00
}
_ = > panic! ( " Expected `NewListenAddr` event. " ) ,
}
2022-07-04 04:16:57 +02:00
transports . push ( transport ) ;
2022-02-13 21:57:38 +01:00
}
// Have swarm dial each listener and wait for each listener to receive the incoming
// connections.
swarm
. dial (
DialOpts ::peer_id ( PeerId ::random ( ) )
2022-10-04 18:24:38 +11:00
. addresses ( listen_addresses )
2022-02-13 21:57:38 +01:00
. build ( ) ,
)
. unwrap ( ) ;
2022-07-04 04:16:57 +02:00
for mut transport in transports . into_iter ( ) {
2022-02-13 21:57:38 +01:00
loop {
2022-07-04 04:16:57 +02:00
match futures ::future ::select ( transport . select_next_some ( ) , swarm . next ( ) )
. await
{
Either ::Left ( ( TransportEvent ::Incoming { .. } , _ ) ) = > {
2022-02-13 21:57:38 +01:00
break ;
}
Either ::Left ( _ ) = > {
2022-07-04 04:16:57 +02:00
panic! ( " Unexpected transport event. " )
2022-02-13 21:57:38 +01:00
}
Either ::Right ( ( e , _ ) ) = > {
panic! ( " Expect swarm to not emit any event {:?} " , e )
}
}
}
}
match swarm . next ( ) . await . unwrap ( ) {
SwarmEvent ::OutgoingConnectionError { .. } = > { }
e = > panic! ( " Unexpected swarm event {:?} " , e ) ,
}
} )
}
QuickCheck ::new ( ) . tests ( 10 ) . quickcheck ( prop as fn ( _ ) -> _ ) ;
}
#[ test ]
fn max_outgoing ( ) {
use rand ::Rng ;
2022-09-22 12:48:32 +04:00
let outgoing_limit = rand ::thread_rng ( ) . gen_range ( 1 .. 10 ) ;
2022-02-13 21:57:38 +01:00
let limits = ConnectionLimits ::default ( ) . with_max_pending_outgoing ( Some ( outgoing_limit ) ) ;
2022-10-06 03:50:11 +11:00
let mut network = new_test_swarm ::< _ , ( ) > ( keep_alive ::ConnectionHandler )
. connection_limits ( limits )
. build ( ) ;
2022-02-13 21:57:38 +01:00
let addr : Multiaddr = " /memory/1234 " . parse ( ) . unwrap ( ) ;
let target = PeerId ::random ( ) ;
for _ in 0 .. outgoing_limit {
network
. dial (
DialOpts ::peer_id ( target )
. addresses ( vec! [ addr . clone ( ) ] )
. build ( ) ,
)
. expect ( " Unexpected connection limit. " ) ;
}
match network
2022-10-04 18:24:38 +11:00
. dial ( DialOpts ::peer_id ( target ) . addresses ( vec! [ addr ] ) . build ( ) )
2022-02-13 21:57:38 +01:00
. expect_err ( " Unexpected dialing success. " )
{
DialError ::ConnectionLimit ( limit ) = > {
assert_eq! ( limit . current , outgoing_limit ) ;
assert_eq! ( limit . limit , outgoing_limit ) ;
}
e = > panic! ( " Unexpected error: {:?} " , e ) ,
}
let info = network . network_info ( ) ;
assert_eq! ( info . num_peers ( ) , 0 ) ;
assert_eq! (
info . connection_counters ( ) . num_pending_outgoing ( ) ,
outgoing_limit
) ;
}
#[ test ]
fn max_established_incoming ( ) {
#[ derive(Debug, Clone) ]
struct Limit ( u32 ) ;
impl Arbitrary for Limit {
2022-09-22 12:48:32 +04:00
fn arbitrary ( g : & mut Gen ) -> Self {
Self ( g . gen_range ( 1 .. 10 ) )
2022-02-13 21:57:38 +01:00
}
}
fn limits ( limit : u32 ) -> ConnectionLimits {
ConnectionLimits ::default ( ) . with_max_established_incoming ( Some ( limit ) )
}
fn prop ( limit : Limit ) {
let limit = limit . 0 ;
2022-10-06 03:50:11 +11:00
let mut network1 = new_test_swarm ::< _ , ( ) > ( keep_alive ::ConnectionHandler )
. connection_limits ( limits ( limit ) )
. build ( ) ;
let mut network2 = new_test_swarm ::< _ , ( ) > ( keep_alive ::ConnectionHandler )
. connection_limits ( limits ( limit ) )
. build ( ) ;
2022-02-13 21:57:38 +01:00
let _ = network1 . listen_on ( multiaddr! [ Memory ( 0 u64 ) ] ) . unwrap ( ) ;
let listen_addr = async_std ::task ::block_on ( poll_fn ( | cx | {
match ready! ( network1 . poll_next_unpin ( cx ) ) . unwrap ( ) {
SwarmEvent ::NewListenAddr { address , .. } = > Poll ::Ready ( address ) ,
e = > panic! ( " Unexpected network event: {:?} " , e ) ,
}
} ) ) ;
// Spawn and block on the dialer.
async_std ::task ::block_on ( {
let mut n = 0 ;
2022-10-04 18:24:38 +11:00
network2 . dial ( listen_addr . clone ( ) ) . unwrap ( ) ;
2022-02-13 21:57:38 +01:00
let mut expected_closed = false ;
let mut network_1_established = false ;
let mut network_2_established = false ;
let mut network_1_limit_reached = false ;
let mut network_2_limit_reached = false ;
poll_fn ( move | cx | {
loop {
let mut network_1_pending = false ;
let mut network_2_pending = false ;
match network1 . poll_next_unpin ( cx ) {
Poll ::Ready ( Some ( SwarmEvent ::IncomingConnection { .. } ) ) = > { }
Poll ::Ready ( Some ( SwarmEvent ::ConnectionEstablished { .. } ) ) = > {
network_1_established = true ;
}
Poll ::Ready ( Some ( SwarmEvent ::IncomingConnectionError {
error : PendingConnectionError ::ConnectionLimit ( err ) ,
..
} ) ) = > {
assert_eq! ( err . limit , limit ) ;
assert_eq! ( err . limit , err . current ) ;
let info = network1 . network_info ( ) ;
let counters = info . connection_counters ( ) ;
assert_eq! ( counters . num_established_incoming ( ) , limit ) ;
assert_eq! ( counters . num_established ( ) , limit ) ;
network_1_limit_reached = true ;
}
Poll ::Pending = > {
network_1_pending = true ;
}
e = > panic! ( " Unexpected network event: {:?} " , e ) ,
}
match network2 . poll_next_unpin ( cx ) {
Poll ::Ready ( Some ( SwarmEvent ::ConnectionEstablished { .. } ) ) = > {
network_2_established = true ;
}
Poll ::Ready ( Some ( SwarmEvent ::ConnectionClosed { .. } ) ) = > {
assert! ( expected_closed ) ;
let info = network2 . network_info ( ) ;
let counters = info . connection_counters ( ) ;
assert_eq! ( counters . num_established_outgoing ( ) , limit ) ;
assert_eq! ( counters . num_established ( ) , limit ) ;
network_2_limit_reached = true ;
}
Poll ::Pending = > {
network_2_pending = true ;
}
e = > panic! ( " Unexpected network event: {:?} " , e ) ,
}
if network_1_pending & & network_2_pending {
return Poll ::Pending ;
}
if network_1_established & & network_2_established {
network_1_established = false ;
network_2_established = false ;
if n < = limit {
// Dial again until the limit is exceeded.
n + = 1 ;
network2 . dial ( listen_addr . clone ( ) ) . unwrap ( ) ;
if n = = limit {
// If the next dialing attempt exceeds the limit, this
// is the connection we expect to get closed.
expected_closed = true ;
}
} else {
panic! ( " Expect networks not to establish connections beyond the limit. " )
}
}
if network_1_limit_reached & & network_2_limit_reached {
return Poll ::Ready ( ( ) ) ;
}
}
} )
} ) ;
}
quickcheck ( prop as fn ( _ ) ) ;
}
#[ test ]
fn invalid_peer_id ( ) {
// Checks whether dialing an address containing the wrong peer id raises an error
// for the expected peer id instead of the obtained peer id.
2022-10-06 03:50:11 +11:00
let mut swarm1 = new_test_swarm ::< _ , ( ) > ( dummy ::ConnectionHandler ) . build ( ) ;
let mut swarm2 = new_test_swarm ::< _ , ( ) > ( dummy ::ConnectionHandler ) . build ( ) ;

        swarm1.listen_on("/memory/0".parse().unwrap()).unwrap();

        let address =
            futures::executor::block_on(future::poll_fn(|cx| match swarm1.poll_next_unpin(cx) {
                Poll::Ready(Some(SwarmEvent::NewListenAddr { address, .. })) => {
                    Poll::Ready(address)
                }
                Poll::Pending => Poll::Pending,
                _ => panic!("Was expecting the listen address to be reported"),
            }));
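
        // Tack a random (and therefore wrong) peer ID onto the listener's address.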
        let other_id = PeerId::random();
        let other_addr = address.with(Protocol::P2p(other_id.into()));

        swarm2.dial(other_addr.clone()).unwrap();

        let (peer_id, error) = futures::executor::block_on(future::poll_fn(|cx| {
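            // Keep polling swarm1 so that the incoming connection makes progress and
            // the handshake can reveal the listener's actual peer ID.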
            if let Poll::Ready(Some(SwarmEvent::IncomingConnection { .. })) =
                swarm1.poll_next_unpin(cx)
            {}

            match swarm2.poll_next_unpin(cx) {
                Poll::Ready(Some(SwarmEvent::OutgoingConnectionError { peer_id, error, .. })) => {
                    Poll::Ready((peer_id, error))
                }
                Poll::Ready(x) => panic!("unexpected {:?}", x),
                Poll::Pending => Poll::Pending,
            }
        }));
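
        // The event reports the peer ID we dialed, while the error carries the peer ID
        // actually obtained from the remote together with the dialed endpoint.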
        assert_eq!(peer_id.unwrap(), other_id);
        match error {
            DialError::WrongPeerId { obtained, endpoint } => {
                assert_eq!(obtained, *swarm1.local_peer_id());
                assert_eq!(
                    endpoint,
                    ConnectedPoint::Dialer {
                        address: other_addr,
                        role_override: Endpoint::Dialer,
                    }
                );
            }
            x => panic!("wrong error {:?}", x),
        }
    }

    #[test]
    fn dial_self() {
        // Check whether dialing ourselves correctly fails.
        //
        // Dialing the same address we're listening on should result in three events:
        //
        // - The incoming connection notification (before we know the incoming peer ID).
        // - The connection error for the dialing endpoint (once we've determined that it's our own ID).
        // - The connection error for the listening endpoint (once we've determined that it's our own ID).
        //
        // The last two can happen in any order.

        let mut swarm = new_test_swarm::<_, ()>(dummy::ConnectionHandler).build();

        swarm.listen_on("/memory/0".parse().unwrap()).unwrap();
        let local_address =
            futures::executor::block_on(future::poll_fn(|cx| match swarm.poll_next_unpin(cx) {
                Poll::Ready(Some(SwarmEvent::NewListenAddr { address, .. })) => {
                    Poll::Ready(address)
                }
                Poll::Pending => Poll::Pending,
                _ => panic!("Was expecting the listen address to be reported"),
            }));
        swarm.dial(local_address.clone()).unwrap();

        let mut got_dial_err = false;
        let mut got_inc_err = false;
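
        // Drive the swarm until both errors have been observed; they may arrive in
        // either order.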
        futures::executor::block_on(future::poll_fn(|cx| -> Poll<Result<(), io::Error>> {
            loop {
                match swarm.poll_next_unpin(cx) {
                    Poll::Ready(Some(SwarmEvent::OutgoingConnectionError {
                        peer_id,
                        error: DialError::WrongPeerId { .. },
                        ..
                    })) => {
                        assert_eq!(&peer_id.unwrap(), swarm.local_peer_id());
                        assert!(!got_dial_err);
                        got_dial_err = true;

                        if got_inc_err {
                            return Poll::Ready(Ok(()));
                        }
                    }
                    Poll::Ready(Some(SwarmEvent::IncomingConnectionError { local_addr, .. })) => {
                        assert!(!got_inc_err);
                        assert_eq!(local_addr, local_address);
                        got_inc_err = true;

                        if got_dial_err {
                            return Poll::Ready(Ok(()));
                        }
                    }
                    Poll::Ready(Some(SwarmEvent::IncomingConnection { local_addr, .. })) => {
                        assert_eq!(local_addr, local_address);
                    }
                    Poll::Ready(ev) => {
                        panic!("Unexpected event: {:?}", ev)
                    }
                    Poll::Pending => break Poll::Pending,
                }
            }
        }))
        .unwrap();
    }

    #[test]
    fn dial_self_by_id() {
        // Trying to dial self by passing the same `PeerId` shouldn't even be possible
        // in the first place.

        let swarm = new_test_swarm::<_, ()>(dummy::ConnectionHandler).build();

        let peer_id = *swarm.local_peer_id();
        assert!(!swarm.is_connected(&peer_id));
    }

    #[async_std::test]
    async fn multiple_addresses_err() {
        // Tries dialing multiple addresses, and makes sure there's one dialing error per address.
        let target = PeerId::random();

        let mut swarm = new_test_swarm::<_, ()>(dummy::ConnectionHandler).build();

        let addresses = HashSet::from([
            multiaddr![Ip4([0, 0, 0, 0]), Tcp(rand::random::<u16>())],
            multiaddr![Ip4([0, 0, 0, 0]), Tcp(rand::random::<u16>())],
            multiaddr![Ip4([0, 0, 0, 0]), Tcp(rand::random::<u16>())],
            multiaddr![Udp(rand::random::<u16>())],
            multiaddr![Udp(rand::random::<u16>())],
            multiaddr![Udp(rand::random::<u16>())],
            multiaddr![Udp(rand::random::<u16>())],
            multiaddr![Udp(rand::random::<u16>())],
        ]);
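
        // All addresses go into a single dial attempt; the attempt as a whole only
        // fails once every address has produced a transport error.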
        swarm
            .dial(
                DialOpts::peer_id(target)
                    .addresses(addresses.iter().cloned().collect())
                    .build(),
            )
            .unwrap();

        match swarm.next().await.unwrap() {
            SwarmEvent::OutgoingConnectionError {
                peer_id,
                error: DialError::Transport(errors),
            } => {
                assert_eq!(target, peer_id.unwrap());

                let failed_addresses = errors.into_iter().map(|(addr, _)| addr).collect::<Vec<_>>();
                let expected_addresses = addresses
                    .into_iter()
                    .map(|addr| addr.with(Protocol::P2p(target.into())))
                    .collect::<Vec<_>>();

                assert_eq!(expected_addresses, failed_addresses);
            }
            e => panic!("Unexpected event: {e:?}"),
        }
    }

    #[test]
    fn aborting_pending_connection_surfaces_error() {
        let _ = env_logger::try_init();

        let mut dialer = new_test_swarm::<_, ()>(dummy::ConnectionHandler).build();
        let mut listener = new_test_swarm::<_, ()>(dummy::ConnectionHandler).build();

        let listener_peer_id = *listener.local_peer_id();
        listener.listen_on(multiaddr![Memory(0u64)]).unwrap();
        let listener_address = match block_on(listener.next()).unwrap() {
            SwarmEvent::NewListenAddr { address, .. } => address,
            e => panic!("Unexpected network event: {:?}", e),
        };

        dialer
            .dial(
                DialOpts::peer_id(listener_peer_id)
                    .addresses(vec![listener_address])
                    .build(),
            )
            .unwrap();
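
        // Disconnecting while the dial is still pending aborts the attempt; the
        // abort must surface as `DialError::Aborted` instead of being swallowed.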
        dialer
            .disconnect_peer_id(listener_peer_id)
            .expect_err("Expect peer to not yet be connected.");

        match block_on(dialer.next()).unwrap() {
            SwarmEvent::OutgoingConnectionError {
                error: DialError::Aborted,
                ..
            } => {}
            e => panic!("Unexpected swarm event {:?}.", e),
        }
    }

    #[test]
    fn dial_error_prints_sources() {
        // This constitutes a fairly typical error for chained transports.
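        // The nested `EitherError`s below stand in for the error layering of a chained
        // transport stack, with the memory transport error as the root cause.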
        let error = DialError::Transport(vec![(
            "/ip4/127.0.0.1/tcp/80".parse().unwrap(),
            TransportError::Other(io::Error::new(
                io::ErrorKind::Other,
                EitherError::<_, Void>::A(EitherError::<Void, _>::B(UpgradeError::Apply(
                    MemoryTransportError::Unreachable,
                ))),
            )),
        )]);

        let string = format!("{error}");

        // Unfortunately, we have some "empty" errors that lead to multiple colons
        // without text but that is the best we can do.
        assert_eq!("Failed to negotiate transport protocol(s): [(/ip4/127.0.0.1/tcp/80: : Handshake failed: No listener on the given port.)]", string)
    }
}