// Copyright 2020 Sigma Prime Pty Ltd.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
use std::{
    cmp::{max, Ordering},
    collections::{BTreeSet, HashMap, HashSet, VecDeque},
    fmt,
    net::IpAddr,
    task::{Context, Poll},
    time::Duration,
};
use futures::StreamExt;
use log::{debug, error, trace, warn};
use prometheus_client::registry::Registry;
use prost::Message as _;
use rand::{seq::SliceRandom, thread_rng};

use libp2p_core::{
    identity::Keypair, multiaddr::Protocol::Ip4, multiaddr::Protocol::Ip6, Multiaddr, PeerId,
};
use libp2p_swarm::{
    behaviour::{AddressChange, ConnectionClosed, ConnectionEstablished, FromSwarm},
    dial_opts::DialOpts,
    ConnectionId, NetworkBehaviour, NetworkBehaviourAction, NotifyHandler, PollParameters,
    THandlerInEvent, THandlerOutEvent,
};
use wasm_timer::Instant;

use crate::backoff::BackoffStorage;
use crate::config::{Config, ValidationMode};
use crate::error::{PublishError, SubscriptionError, ValidationError};
use crate::gossip_promises::GossipPromises;
use crate::handler::{Handler, HandlerEvent, HandlerIn};
use crate::mcache::MessageCache;
use crate::metrics::{Churn, Config as MetricsConfig, Inclusion, Metrics, Penalty};
use crate::peer_score::{PeerScore, PeerScoreParams, PeerScoreThresholds, RejectReason};
use crate::protocol::{ProtocolConfig, SIGNING_PREFIX};
use crate::subscription_filter::{AllowAllSubscriptionFilter, TopicSubscriptionFilter};
use crate::time_cache::{DuplicateCache, TimeCache};
use crate::topic::{Hasher, Topic, TopicHash};
use crate::transform::{DataTransform, IdentityTransform};
use crate::types::{
    ControlAction, FastMessageId, Message, MessageAcceptance, MessageId, PeerInfo, RawMessage,
    Subscription, SubscriptionAction,
};
use crate::types::{PeerConnections, PeerKind, Rpc};
use crate::{rpc_proto, TopicScoreParams};
use std::{cmp::Ordering::Equal, fmt::Debug};
use wasm_timer::Interval;

#[cfg(test)]
mod tests;

/// Determines if published messages should be signed or not.
///
/// Without signing, a number of privacy-preserving modes can be selected.
///
/// NOTE: The default validation settings are to require signatures. The [`ValidationMode`]
/// should be updated in the [`Config`] to allow for unsigned messages.
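///
/// A minimal sketch of choosing an authenticity mode (assuming a locally generated
/// `Keypair`; adapt to your own key management):
///
/// ```ignore
/// use libp2p_core::identity::Keypair;
///
/// // Sign published messages with our identity key (the default, strictest mode).
/// let signed = MessageAuthenticity::Signed(Keypair::generate_ed25519());
/// // Or publish fully anonymous messages (requires relaxing the `ValidationMode` in `Config`).
/// let anonymous = MessageAuthenticity::Anonymous;
/// ```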
#[derive(Clone)]
pub enum MessageAuthenticity {
    /// Message signing is enabled. The author will be the owner of the key and the sequence number
    /// will be a random number.
    Signed(Keypair),
    /// Message signing is disabled.
    ///
    /// The specified [`PeerId`] will be used as the author of all published messages. The sequence
    /// number will be randomized.
    Author(PeerId),
    /// Message signing is disabled.
    ///
    /// A random [`PeerId`] will be used when publishing each message. The sequence number will be
    /// randomized.
    RandomAuthor,
    /// Message signing is disabled.
    ///
    /// The author of the message and the sequence numbers are excluded from the message.
    ///
    /// NOTE: Excluding these fields may make these messages invalid to other nodes that
    /// enforce validation of these fields. See [`ValidationMode`] in the [`Config`]
    /// for how to customise this for rust-libp2p gossipsub. A custom `message_id`
    /// function will need to be set to prevent all messages from a peer being filtered
    /// as duplicates.
    Anonymous,
}

impl MessageAuthenticity {
    /// Returns true if signing is enabled.
    pub fn is_signing(&self) -> bool {
        matches!(self, MessageAuthenticity::Signed(_))
    }

    /// Returns true if messages are published anonymously.
    pub fn is_anonymous(&self) -> bool {
        matches!(self, MessageAuthenticity::Anonymous)
    }
}

/// Event that can be emitted by the gossipsub behaviour.
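///
/// A rough sketch of consuming these events from a swarm loop (assuming a `Swarm<Behaviour>`
/// named `swarm`; other swarm events elided):
///
/// ```ignore
/// use futures::StreamExt;
/// use libp2p_swarm::SwarmEvent;
///
/// loop {
///     if let SwarmEvent::Behaviour(Event::Message { message_id, message, .. }) =
///         swarm.select_next_some().await
///     {
///         println!("got message {message_id}: {} bytes", message.data.len());
///     }
/// }
/// ```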
#[derive(Debug)]
pub enum Event {
    /// A message has been received.
    Message {
        /// The peer that forwarded us this message.
        propagation_source: PeerId,
        /// The [`MessageId`] of the message. This should be referenced by the application when
        /// validating a message (if required).
        message_id: MessageId,
        /// The decompressed message itself.
        message: Message,
    },
    /// A remote subscribed to a topic.
    Subscribed {
        /// Remote that has subscribed.
        peer_id: PeerId,
        /// The topic it has subscribed to.
        topic: TopicHash,
    },
    /// A remote unsubscribed from a topic.
    Unsubscribed {
        /// Remote that has unsubscribed.
        peer_id: PeerId,
        /// The topic it has unsubscribed from.
        topic: TopicHash,
    },
    /// A peer that does not support gossipsub has connected.
    GossipsubNotSupported { peer_id: PeerId },
}

/// A data structure for storing configuration for publishing messages. See [`MessageAuthenticity`]
/// for further details.
#[allow(clippy::large_enum_variant)]
#[derive(Clone)]
enum PublishConfig {
    Signing {
        keypair: Keypair,
        author: PeerId,
        inline_key: Option<Vec<u8>>,
    },
    Author(PeerId),
    RandomAuthor,
    Anonymous,
}

impl PublishConfig {
    pub fn get_own_id(&self) -> Option<&PeerId> {
        match self {
            Self::Signing { author, .. } => Some(author),
            Self::Author(author) => Some(author),
            _ => None,
        }
    }
}

impl From<MessageAuthenticity> for PublishConfig {
    fn from(authenticity: MessageAuthenticity) -> Self {
        match authenticity {
            MessageAuthenticity::Signed(keypair) => {
                let public_key = keypair.public();
                let key_enc = public_key.to_protobuf_encoding();
                let key = if key_enc.len() <= 42 {
                    // The public key can be inlined in [`rpc_proto::Message::from`], so we don't include it
                    // specifically in the [`rpc_proto::Message::key`] field.
                    None
                } else {
                    // Include the protobuf encoding of the public key in the message.
                    Some(key_enc)
                };

                PublishConfig::Signing {
                    keypair,
                    author: public_key.to_peer_id(),
                    inline_key: key,
                }
            }
            MessageAuthenticity::Author(peer_id) => PublishConfig::Author(peer_id),
            MessageAuthenticity::RandomAuthor => PublishConfig::RandomAuthor,
            MessageAuthenticity::Anonymous => PublishConfig::Anonymous,
        }
    }
}

/// Network behaviour that handles the gossipsub protocol.
///
/// NOTE: Initialisation requires a [`MessageAuthenticity`] and [`Config`] instance. If
/// message signing is disabled, the [`ValidationMode`] in the config should be adjusted to an
/// appropriate level to accept unsigned messages.
///
/// The [`DataTransform`] trait allows applications to optionally add extra encoding/decoding
/// functionality to the underlying messages. This is intended for custom compression algorithms.
///
/// The [`TopicSubscriptionFilter`] allows applications to implement specific filters on topics to
/// prevent unwanted messages from being propagated and evaluated.
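///
/// A rough usage sketch (assuming a local `keypair` and that the crate is consumed via the
/// `libp2p::gossipsub` re-export; error handling elided):
///
/// ```ignore
/// use libp2p::gossipsub::{Behaviour, Config, IdentTopic, MessageAuthenticity};
///
/// let mut gossipsub: Behaviour =
///     Behaviour::new(MessageAuthenticity::Signed(keypair), Config::default())
///         .expect("valid configuration");
/// // Subscribe before publishing so the local node joins the topic mesh.
/// let topic = IdentTopic::new("example-topic");
/// gossipsub.subscribe(&topic).expect("topic allowed by the subscription filter");
/// ```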
pub struct Behaviour<D = IdentityTransform, F = AllowAllSubscriptionFilter> {
    /// Configuration providing gossipsub performance parameters.
    config: Config,

    /// Events that need to be yielded to the outside when polling.
    events: VecDeque<NetworkBehaviourAction<Event, HandlerIn>>,

    /// Pools non-urgent control messages between heartbeats.
    control_pool: HashMap<PeerId, Vec<ControlAction>>,

    /// Information used for publishing messages.
    publish_config: PublishConfig,

    /// An LRU Time cache for storing seen messages (based on their ID). This cache prevents
    /// duplicates from being propagated to the application and on the network.
    duplicate_cache: DuplicateCache<MessageId>,

    /// A set of connected peers, indexed by their [`PeerId`] tracking both the [`PeerKind`] and
    /// the set of [`ConnectionId`]s.
    connected_peers: HashMap<PeerId, PeerConnections>,

    /// A map of topic hashes to the set of connected gossipsub peers subscribed to each topic.
    topic_peers: HashMap<TopicHash, BTreeSet<PeerId>>,

    /// A map of all connected peers to their subscribed topics.
    peer_topics: HashMap<PeerId, BTreeSet<TopicHash>>,

    /// A set of all explicit peers. These are peers that remain connected and we unconditionally
    /// forward messages to, outside of the scoring system.
    explicit_peers: HashSet<PeerId>,

    /// A list of peers that have been blacklisted by the user.
    /// Messages are not sent to and are rejected from these peers.
    blacklisted_peers: HashSet<PeerId>,

    /// Overlay network of connected peers - Maps topics to connected gossipsub peers.
    mesh: HashMap<TopicHash, BTreeSet<PeerId>>,

    /// Map of topics to list of peers that we publish to, but don't subscribe to.
    fanout: HashMap<TopicHash, BTreeSet<PeerId>>,

    /// The last publish time for fanout topics.
    fanout_last_pub: HashMap<TopicHash, Instant>,

    /// Storage for backoffs.
    backoffs: BackoffStorage,

    /// Message cache for the last few heartbeats.
    mcache: MessageCache,

    /// Heartbeat interval stream.
    heartbeat: Interval,

    /// Number of heartbeats since the beginning of time; this allows us to amortize some resource
    /// clean-up, e.g. backoff clean-up.
    heartbeat_ticks: u64,

    /// We remember all peers we found through peer exchange, since those peers are not considered
    /// as safe as randomly discovered outbound peers. This behaviour diverges from the go
    /// implementation to avoid possible love bombing attacks in PX. When a peer disconnects it is
    /// removed from this list, which may result in a true outbound rediscovery.
    px_peers: HashSet<PeerId>,

    /// Set of connected outbound peers (we only consider true outbound peers found through
    /// discovery and not by PX).
    outbound_peers: HashSet<PeerId>,

    /// Stores optional peer score data together with thresholds, decay interval and gossip
    /// promises.
    peer_score: Option<(PeerScore, PeerScoreThresholds, Interval, GossipPromises)>,

    /// Counts the number of `IHAVE` received from each peer since the last heartbeat.
    count_received_ihave: HashMap<PeerId, usize>,

    /// Counts the number of `IWANT` that we sent to each peer since the last heartbeat.
    count_sent_iwant: HashMap<PeerId, usize>,

    /// Keeps track of IWANT messages that are pending to be sent.
    /// This is used to prevent sending duplicate IWANT messages for the same message.
    pending_iwant_msgs: HashSet<MessageId>,

    /// Short term cache for published message ids. This is used for penalizing peers sending
    /// our own messages back if the messages are anonymous or use a random author.
    published_message_ids: DuplicateCache<MessageId>,

    /// Short term cache for fast message ids, mapping them to the real message ids.
    fast_message_id_cache: TimeCache<FastMessageId, MessageId>,

    /// The filter used to handle message subscriptions.
    subscription_filter: F,

    /// A general transformation function that can be applied to data received from the wire before
    /// calculating the message-id and sending to the application. This is designed to allow the
    /// user to implement arbitrary topic-based compression algorithms.
    data_transform: D,

    /// Keep track of a set of internal metrics relating to gossipsub.
    metrics: Option<Metrics>,
}

impl<D, F> Behaviour<D, F>
where
    D: DataTransform + Default,
    F: TopicSubscriptionFilter + Default,
{
    /// Creates a Gossipsub [`Behaviour`] struct given a set of parameters specified via a
    /// [`Config`]. This has no subscription filter and uses no compression.
    pub fn new(privacy: MessageAuthenticity, config: Config) -> Result<Self, &'static str> {
        Self::new_with_subscription_filter_and_transform(
            privacy,
            config,
            None,
            F::default(),
            D::default(),
        )
    }

    /// Creates a Gossipsub [`Behaviour`] struct given a set of parameters specified via a
    /// [`Config`]. This has no subscription filter and uses no compression.
    /// Metrics can be evaluated by passing a reference to a [`Registry`].
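    ///
    /// A small sketch (assuming a `Registry` that is also exposed via your metrics endpoint and
    /// that the default [`MetricsConfig`] is acceptable):
    ///
    /// ```ignore
    /// let mut registry = prometheus_client::registry::Registry::default();
    /// let gossipsub: Behaviour = Behaviour::new_with_metrics(
    ///     MessageAuthenticity::Signed(keypair),
    ///     Config::default(),
    ///     &mut registry,
    ///     MetricsConfig::default(),
    /// )?;
    /// ```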
    pub fn new_with_metrics(
        privacy: MessageAuthenticity,
        config: Config,
        metrics_registry: &mut Registry,
        metrics_config: MetricsConfig,
    ) -> Result<Self, &'static str> {
        Self::new_with_subscription_filter_and_transform(
            privacy,
            config,
            Some((metrics_registry, metrics_config)),
            F::default(),
            D::default(),
        )
    }
}

impl<D, F> Behaviour<D, F>
where
    D: DataTransform + Default,
    F: TopicSubscriptionFilter,
{
    /// Creates a Gossipsub [`Behaviour`] struct given a set of parameters specified via a
    /// [`Config`] and a custom subscription filter.
    pub fn new_with_subscription_filter(
        privacy: MessageAuthenticity,
        config: Config,
        metrics: Option<(&mut Registry, MetricsConfig)>,
        subscription_filter: F,
    ) -> Result<Self, &'static str> {
        Self::new_with_subscription_filter_and_transform(
            privacy,
            config,
            metrics,
            subscription_filter,
            D::default(),
        )
    }
}

impl<D, F> Behaviour<D, F>
where
    D: DataTransform,
    F: TopicSubscriptionFilter + Default,
{
    /// Creates a Gossipsub [`Behaviour`] struct given a set of parameters specified via a
    /// [`Config`] and a custom data transform.
    pub fn new_with_transform(
        privacy: MessageAuthenticity,
        config: Config,
        metrics: Option<(&mut Registry, MetricsConfig)>,
        data_transform: D,
    ) -> Result<Self, &'static str> {
        Self::new_with_subscription_filter_and_transform(
            privacy,
            config,
            metrics,
            F::default(),
            data_transform,
        )
    }
}

impl<D, F> Behaviour<D, F>
where
    D: DataTransform,
    F: TopicSubscriptionFilter,
{
    /// Creates a Gossipsub [`Behaviour`] struct given a set of parameters specified via a
    /// [`Config`] and a custom subscription filter and data transform.
    pub fn new_with_subscription_filter_and_transform(
        privacy: MessageAuthenticity,
        config: Config,
        metrics: Option<(&mut Registry, MetricsConfig)>,
        subscription_filter: F,
        data_transform: D,
    ) -> Result<Self, &'static str> {
        // Set up the router given the configuration settings.
        // We do not allow configurations where a published message would also be rejected if it
        // were received locally.
        validate_config(&privacy, config.validation_mode())?;

        Ok(Behaviour {
            metrics: metrics.map(|(registry, cfg)| Metrics::new(registry, cfg)),
            events: VecDeque::new(),
            control_pool: HashMap::new(),
            publish_config: privacy.into(),
            duplicate_cache: DuplicateCache::new(config.duplicate_cache_time()),
            fast_message_id_cache: TimeCache::new(config.duplicate_cache_time()),
            topic_peers: HashMap::new(),
            peer_topics: HashMap::new(),
            explicit_peers: HashSet::new(),
            blacklisted_peers: HashSet::new(),
            mesh: HashMap::new(),
            fanout: HashMap::new(),
            fanout_last_pub: HashMap::new(),
            backoffs: BackoffStorage::new(
                &config.prune_backoff(),
                config.heartbeat_interval(),
                config.backoff_slack(),
            ),
            mcache: MessageCache::new(config.history_gossip(), config.history_length()),
            heartbeat: Interval::new_at(
                Instant::now() + config.heartbeat_initial_delay(),
                config.heartbeat_interval(),
            ),
            heartbeat_ticks: 0,
            px_peers: HashSet::new(),
            outbound_peers: HashSet::new(),
            peer_score: None,
            count_received_ihave: HashMap::new(),
            count_sent_iwant: HashMap::new(),
            pending_iwant_msgs: HashSet::new(),
            connected_peers: HashMap::new(),
            published_message_ids: DuplicateCache::new(config.published_message_ids_cache_time()),
            config,
            subscription_filter,
            data_transform,
        })
    }
}

impl<D, F> Behaviour<D, F>
where
    D: DataTransform + Send + 'static,
    F: TopicSubscriptionFilter + Send + 'static,
{
    /// Lists the hashes of the topics we are currently subscribed to.
    pub fn topics(&self) -> impl Iterator<Item = &TopicHash> {
        self.mesh.keys()
    }

    /// Lists all mesh peers for a certain topic hash.
    pub fn mesh_peers(&self, topic_hash: &TopicHash) -> impl Iterator<Item = &PeerId> {
        self.mesh.get(topic_hash).into_iter().flat_map(|x| x.iter())
    }

    /// Lists all mesh peers across all topics.
    pub fn all_mesh_peers(&self) -> impl Iterator<Item = &PeerId> {
        let mut res = BTreeSet::new();
        for peers in self.mesh.values() {
            res.extend(peers);
        }
        res.into_iter()
    }

    /// Lists all known peers and their associated subscribed topics.
    pub fn all_peers(&self) -> impl Iterator<Item = (&PeerId, Vec<&TopicHash>)> {
        self.peer_topics
            .iter()
            .map(|(peer_id, topic_set)| (peer_id, topic_set.iter().collect()))
    }

    /// Lists all known peers and their associated protocol.
    pub fn peer_protocol(&self) -> impl Iterator<Item = (&PeerId, &PeerKind)> {
        self.connected_peers.iter().map(|(k, v)| (k, &v.kind))
    }

    /// Returns the gossipsub score for a given peer, if one exists.
    pub fn peer_score(&self, peer_id: &PeerId) -> Option<f64> {
        self.peer_score
            .as_ref()
            .map(|(score, ..)| score.score(peer_id))
    }

    /// Subscribe to a topic.
    ///
    /// Returns [`Ok(true)`] if the subscription worked. Returns [`Ok(false)`] if we were already
    /// subscribed.
    pub fn subscribe<H: Hasher>(&mut self, topic: &Topic<H>) -> Result<bool, SubscriptionError> {
        debug!("Subscribing to topic: {}", topic);
        let topic_hash = topic.hash();
        if !self.subscription_filter.can_subscribe(&topic_hash) {
            return Err(SubscriptionError::NotAllowed);
        }

        if self.mesh.get(&topic_hash).is_some() {
            debug!("Topic: {} is already in the mesh.", topic);
            return Ok(false);
        }

        // send subscription request to all peers
        let peer_list = self.peer_topics.keys().cloned().collect::<Vec<_>>();
        if !peer_list.is_empty() {
            let event = Rpc {
                messages: Vec::new(),
                subscriptions: vec![Subscription {
                    topic_hash: topic_hash.clone(),
                    action: SubscriptionAction::Subscribe,
                }],
                control_msgs: Vec::new(),
            }
            .into_protobuf();

            for peer in peer_list {
                debug!("Sending SUBSCRIBE to peer: {:?}", peer);
                self.send_message(peer, event.clone())
                    .map_err(SubscriptionError::PublishError)?;
            }
        }

        // call JOIN(topic)
        // this will add new peers to the mesh for the topic
        self.join(&topic_hash);
        debug!("Subscribed to topic: {}", topic);
        Ok(true)
    }

    /// Unsubscribes from a topic.
    ///
    /// Returns [`Ok(true)`] if we were subscribed to this topic.
    pub fn unsubscribe<H: Hasher>(&mut self, topic: &Topic<H>) -> Result<bool, PublishError> {
        debug!("Unsubscribing from topic: {}", topic);
        let topic_hash = topic.hash();

        if self.mesh.get(&topic_hash).is_none() {
            debug!("Already unsubscribed from topic: {:?}", topic_hash);
            // we are not subscribed
            return Ok(false);
        }

        // announce to all peers
        let peer_list = self.peer_topics.keys().cloned().collect::<Vec<_>>();
        if !peer_list.is_empty() {
            let event = Rpc {
                messages: Vec::new(),
                subscriptions: vec![Subscription {
                    topic_hash: topic_hash.clone(),
                    action: SubscriptionAction::Unsubscribe,
                }],
                control_msgs: Vec::new(),
            }
            .into_protobuf();

            for peer in peer_list {
                debug!("Sending UNSUBSCRIBE to peer: {}", peer.to_string());
                self.send_message(peer, event.clone())?;
            }
        }

        // call LEAVE(topic)
        // this will remove the topic from the mesh
        self.leave(&topic_hash);
        debug!("Unsubscribed from topic: {:?}", topic_hash);
        Ok(true)
    }

    /// Publishes a message to the network on the given topic.
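    ///
    /// A hedged sketch of publishing (assuming the behaviour was constructed as in the type-level
    /// example and `topic` is an `IdentTopic` we are subscribed to):
    ///
    /// ```ignore
    /// match gossipsub.publish(topic.clone(), b"hello world".to_vec()) {
    ///     Ok(message_id) => println!("published with id {message_id}"),
    ///     Err(e) => eprintln!("publish failed: {e:?}"),
    /// }
    /// ```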
    pub fn publish(
        &mut self,
        topic: impl Into<TopicHash>,
        data: impl Into<Vec<u8>>,
    ) -> Result<MessageId, PublishError> {
        let data = data.into();
        let topic = topic.into();

        // Transform the data before building a raw_message.
        let transformed_data = self
            .data_transform
            .outbound_transform(&topic, data.clone())?;

        let raw_message = self.build_raw_message(topic, transformed_data)?;

        // calculate the message id from the un-transformed data
        let msg_id = self.config.message_id(&Message {
            source: raw_message.source,
            data, // the uncompressed form
            sequence_number: raw_message.sequence_number,
            topic: raw_message.topic.clone(),
        });

        let event = Rpc {
            subscriptions: Vec::new(),
            messages: vec![raw_message.clone()],
            control_msgs: Vec::new(),
        }
        .into_protobuf();

        // check that the size doesn't exceed the max transmission size
        if event.encoded_len() > self.config.max_transmit_size() {
            return Err(PublishError::MessageTooLarge);
        }

        // Check if the message has been published before
        if self.duplicate_cache.contains(&msg_id) {
            // This message has already been seen. We don't re-publish messages that have already
            // been published on the network.
            warn!(
                "Not publishing a message that has already been published. Msg-id {}",
                msg_id
            );
            return Err(PublishError::Duplicate);
        }

        trace!("Publishing message: {:?}", msg_id);

        let topic_hash = raw_message.topic.clone();

        // If we are not flood publishing forward the message to mesh peers.
        let mesh_peers_sent = !self.config.flood_publish()
            && self.forward_msg(&msg_id, raw_message.clone(), None, HashSet::new())?;

        let mut recipient_peers = HashSet::new();
        if let Some(set) = self.topic_peers.get(&topic_hash) {
            if self.config.flood_publish() {
                // Forward to all peers above score and all explicit peers
                recipient_peers.extend(
                    set.iter()
                        .filter(|p| {
                            self.explicit_peers.contains(*p)
                                || !self.score_below_threshold(p, |ts| ts.publish_threshold).0
                        })
                        .cloned(),
                );
            } else {
                // Explicit peers
                for peer in &self.explicit_peers {
                    if set.contains(peer) {
                        recipient_peers.insert(*peer);
                    }
                }

                // Floodsub peers
                for (peer, connections) in &self.connected_peers {
                    if connections.kind == PeerKind::Floodsub
                        && !self
                            .score_below_threshold(peer, |ts| ts.publish_threshold)
                            .0
                    {
                        recipient_peers.insert(*peer);
                    }
                }

                // Gossipsub peers
                if self.mesh.get(&topic_hash).is_none() {
                    debug!("Topic: {:?} not in the mesh", topic_hash);
                    // If we have fanout peers add them to the map.
                    if self.fanout.contains_key(&topic_hash) {
                        for peer in self.fanout.get(&topic_hash).expect("Topic must exist") {
                            recipient_peers.insert(*peer);
                        }
                    } else {
                        // We have no fanout peers, select mesh_n of them and add them to the fanout
                        let mesh_n = self.config.mesh_n();
                        let new_peers = get_random_peers(
                            &self.topic_peers,
                            &self.connected_peers,
                            &topic_hash,
                            mesh_n,
                            {
                                |p| {
                                    !self.explicit_peers.contains(p)
                                        && !self
                                            .score_below_threshold(p, |pst| pst.publish_threshold)
                                            .0
                                }
                            },
                        );
                        // Add the new peers to the fanout and recipient peers
                        self.fanout.insert(topic_hash.clone(), new_peers.clone());
                        for peer in new_peers {
                            debug!("Peer added to fanout: {:?}", peer);
                            recipient_peers.insert(peer);
                        }
                    }
                    // We are publishing to fanout peers - update the time we published
                    self.fanout_last_pub
                        .insert(topic_hash.clone(), Instant::now());
                }
            }
        }

        if recipient_peers.is_empty() && !mesh_peers_sent {
            return Err(PublishError::InsufficientPeers);
        }

        // If the message isn't a duplicate and we have sent it to some peers add it to the
        // duplicate cache and memcache.
        self.duplicate_cache.insert(msg_id.clone());
        self.mcache.put(&msg_id, raw_message);

        // If the message is anonymous or has a random author add it to the published message ids
        // cache.
        if let PublishConfig::RandomAuthor | PublishConfig::Anonymous = self.publish_config {
            if !self.config.allow_self_origin() {
                self.published_message_ids.insert(msg_id.clone());
            }
        }

        // Send to peers we know are subscribed to the topic.
        let msg_bytes = event.encoded_len();
        for peer_id in recipient_peers.iter() {
            trace!("Sending message to peer: {:?}", peer_id);
            self.send_message(*peer_id, event.clone())?;

            if let Some(m) = self.metrics.as_mut() {
                m.msg_sent(&topic_hash, msg_bytes);
            }
        }

        debug!("Published message: {:?}", &msg_id);

        if let Some(metrics) = self.metrics.as_mut() {
            metrics.register_published_message(&topic_hash);
        }

        Ok(msg_id)
    }

    /// This function should be called when [`Config::validate_messages()`] is `true` after
    /// the message got validated by the caller. Messages are stored in the [`MessageCache`] and
    /// validation is expected to be fast enough that the messages should still exist in the cache.
    /// There are three possible validation outcomes and the outcome is given in acceptance.
    ///
    /// If acceptance = [`MessageAcceptance::Accept`] the message will get propagated to the
    /// network. The `propagation_source` parameter indicates who the message was received by and
    /// will not be forwarded back to that peer.
    ///
    /// If acceptance = [`MessageAcceptance::Reject`] the message will be deleted from the memcache
    /// and the P₄ penalty will be applied to the `propagation_source`.
    ///
    /// If acceptance = [`MessageAcceptance::Ignore`] the message will be deleted from the memcache
    /// but no P₄ penalty will be applied.
    ///
    /// This function will return true if the message was found in the cache and false if it was
    /// not in the cache anymore.
    ///
    /// This should only be called once per message.
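    ///
    /// A hedged sketch of reporting a validation result (the `message_id` and
    /// `propagation_source` come from a previously received message event):
    ///
    /// ```ignore
    /// let forwarded = gossipsub.report_message_validation_result(
    ///     &message_id,
    ///     &propagation_source,
    ///     MessageAcceptance::Accept,
    /// )?;
    /// // `forwarded` is false if the message had already expired from the message cache.
    /// ```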
    pub fn report_message_validation_result(
        &mut self,
        msg_id: &MessageId,
        propagation_source: &PeerId,
        acceptance: MessageAcceptance,
    ) -> Result<bool, PublishError> {
        let reject_reason = match acceptance {
            MessageAcceptance::Accept => {
                let (raw_message, originating_peers) = match self.mcache.validate(msg_id) {
                    Some((raw_message, originating_peers)) => {
                        (raw_message.clone(), originating_peers)
                    }
                    None => {
                        warn!(
                            "Message not in cache. Ignoring forwarding. Message Id: {}",
                            msg_id
                        );
                        if let Some(metrics) = self.metrics.as_mut() {
                            metrics.memcache_miss();
                        }
                        return Ok(false);
                    }
                };

                if let Some(metrics) = self.metrics.as_mut() {
                    metrics.register_msg_validation(&raw_message.topic, &acceptance);
                }

                self.forward_msg(
                    msg_id,
                    raw_message,
                    Some(propagation_source),
                    originating_peers,
                )?;
                return Ok(true);
            }
            MessageAcceptance::Reject => RejectReason::ValidationFailed,
            MessageAcceptance::Ignore => RejectReason::ValidationIgnored,
        };

        if let Some((raw_message, originating_peers)) = self.mcache.remove(msg_id) {
            if let Some(metrics) = self.metrics.as_mut() {
                metrics.register_msg_validation(&raw_message.topic, &acceptance);
            }

            // Tell peer_score about reject
            // Reject the original source, and any duplicates we've seen from other peers.
            if let Some((peer_score, ..)) = &mut self.peer_score {
                peer_score.reject_message(
                    propagation_source,
                    msg_id,
                    &raw_message.topic,
                    reject_reason,
                );
                for peer in originating_peers.iter() {
                    peer_score.reject_message(peer, msg_id, &raw_message.topic, reject_reason);
                }
            }
            Ok(true)
        } else {
            warn!("Rejected message not in cache. Message Id: {}", msg_id);
            Ok(false)
        }
    }

    /// Adds a new peer to the list of explicitly connected peers.
    pub fn add_explicit_peer(&mut self, peer_id: &PeerId) {
        debug!("Adding explicit peer {}", peer_id);

        self.explicit_peers.insert(*peer_id);

        self.check_explicit_peer_connection(peer_id);
    }

    /// This removes the peer from explicitly connected peers, note that this does not disconnect
    /// the peer.
    pub fn remove_explicit_peer(&mut self, peer_id: &PeerId) {
        debug!("Removing explicit peer {}", peer_id);
        self.explicit_peers.remove(peer_id);
    }

    /// Blacklists a peer. All messages from this peer will be rejected and any message that was
    /// created by this peer will be rejected.
    pub fn blacklist_peer(&mut self, peer_id: &PeerId) {
        if self.blacklisted_peers.insert(*peer_id) {
            debug!("Peer has been blacklisted: {}", peer_id);
        }
    }

    /// Removes a peer from the blacklist if it has previously been blacklisted.
    pub fn remove_blacklisted_peer(&mut self, peer_id: &PeerId) {
        if self.blacklisted_peers.remove(peer_id) {
            debug!("Peer has been removed from the blacklist: {}", peer_id);
        }
    }

    /// Activates the peer scoring system with the given parameters. This will reset all scores
    /// if there was already another peer scoring system activated. Returns an error if the
    /// params are not valid or if they were already set.
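    ///
    /// A small sketch of enabling scoring with default parameters (assuming the defaults are
    /// acceptable for your network; both structs can be tuned field by field):
    ///
    /// ```ignore
    /// gossipsub
    ///     .with_peer_score(PeerScoreParams::default(), PeerScoreThresholds::default())
    ///     .expect("valid score parameters");
    /// ```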
pub fn with_peer_score (
& mut self ,
params : PeerScoreParams ,
threshold : PeerScoreThresholds ,
) -> Result < ( ) , String > {
self . with_peer_score_and_message_delivery_time_callback ( params , threshold , None )
}
/// Activates the peer scoring system with the given parameters and a message delivery time
/// callback. Returns an error if the parameters are invalid or if peer scoring has already
/// been activated.
pub fn with_peer_score_and_message_delivery_time_callback (
& mut self ,
params : PeerScoreParams ,
threshold : PeerScoreThresholds ,
callback : Option < fn ( & PeerId , & TopicHash , f64 ) > ,
) -> Result < ( ) , String > {
params . validate ( ) ? ;
threshold . validate ( ) ? ;
if self . peer_score . is_some ( ) {
return Err ( " Peer score set twice " . into ( ) ) ;
}
let interval = Interval ::new ( params . decay_interval ) ;
let peer_score = PeerScore ::new_with_message_delivery_time_callback ( params , callback ) ;
self . peer_score = Some ( ( peer_score , threshold , interval , GossipPromises ::default ( ) ) ) ;
Ok ( ( ) )
}
/// Sets scoring parameters for a topic.
///
/// [`Self::with_peer_score()`] must be called first to initialise peer scoring.
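///
/// # Example (illustrative sketch)
///
/// `gossipsub` is an assumed, already constructed behaviour with peer scoring activated;
/// the topic name is arbitrary.
/// ```ignore
/// let topic = IdentTopic::new("example-topic");
/// gossipsub.set_topic_params(topic, TopicScoreParams::default())?;
/// ```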
pub fn set_topic_params < H : Hasher > (
& mut self ,
topic : Topic < H > ,
params : TopicScoreParams ,
) -> Result < ( ) , & 'static str > {
if let Some ( ( peer_score , .. ) ) = & mut self . peer_score {
peer_score . set_topic_params ( topic . hash ( ) , params ) ;
Ok ( ( ) )
} else {
Err ( " Peer score must be initialised with `with_peer_score()` " )
}
}
/// Sets the application-specific score for a peer. Returns `true` if scoring is active and
/// the peer is connected (or its score has not yet expired), and `false` otherwise.
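///
/// # Example (illustrative sketch; `gossipsub` and `peer_id` are assumed bindings)
/// ```ignore
/// let updated = gossipsub.set_application_score(&peer_id, 10.0);
/// ```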
pub fn set_application_score ( & mut self , peer_id : & PeerId , new_score : f64 ) -> bool {
if let Some ( ( peer_score , .. ) ) = & mut self . peer_score {
peer_score . set_application_score ( peer_id , new_score )
} else {
false
}
}
/// Gossipsub JOIN(topic) - adds topic peers to mesh and sends them GRAFT messages.
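///
/// Peers are drawn first from the existing fanout set for the topic and then, if fewer than
/// `mesh_n` were found, at random from the remaining known topic peers; every added peer is
/// sent a GRAFT and registered with the peer score and the handler.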
fn join ( & mut self , topic_hash : & TopicHash ) {
debug! ( " Running JOIN for topic: {:?} " , topic_hash ) ;
// if we are already in the mesh, return
if self . mesh . contains_key ( topic_hash ) {
debug! ( " JOIN: The topic is already in the mesh, ignoring JOIN " ) ;
return ;
}
let mut added_peers = HashSet ::new ( ) ;
if let Some ( m ) = self . metrics . as_mut ( ) {
m . joined ( topic_hash )
}
// check if we have mesh_n peers in fanout[topic] and add them to the mesh if we do,
// removing the fanout entry.
if let Some ( ( _ , mut peers ) ) = self . fanout . remove_entry ( topic_hash ) {
debug! (
" JOIN: Removing peers from the fanout for topic: {:?} " ,
topic_hash
) ;
// remove explicit peers, peers with negative scores, and backoffed peers
peers . retain ( | p | {
! self . explicit_peers . contains ( p )
& & ! self . score_below_threshold ( p , | _ | 0.0 ) . 0
& & ! self . backoffs . is_backoff_with_slack ( topic_hash , p )
} ) ;
// Add up to mesh_n of them to the mesh
// NOTE: These aren't randomly added, currently FIFO
let add_peers = std ::cmp ::min ( peers . len ( ) , self . config . mesh_n ( ) ) ;
debug! (
" JOIN: Adding {:?} peers from the fanout for topic: {:?} " ,
add_peers , topic_hash
) ;
added_peers . extend ( peers . iter ( ) . cloned ( ) . take ( add_peers ) ) ;
self . mesh . insert (
topic_hash . clone ( ) ,
peers . into_iter ( ) . take ( add_peers ) . collect ( ) ,
) ;
// remove the last published time
self . fanout_last_pub . remove ( topic_hash ) ;
}
let fanout_added = added_peers . len ( ) ;
if let Some ( m ) = self . metrics . as_mut ( ) {
m . peers_included ( topic_hash , Inclusion ::Fanout , fanout_added )
}
// check if we need to get more peers, which we randomly select
if added_peers . len ( ) < self . config . mesh_n ( ) {
// get the peers
let new_peers = get_random_peers (
& self . topic_peers ,
& self . connected_peers ,
topic_hash ,
self . config . mesh_n ( ) - added_peers . len ( ) ,
| peer | {
! added_peers . contains ( peer )
& & ! self . explicit_peers . contains ( peer )
& & ! self . score_below_threshold ( peer , | _ | 0.0 ) . 0
& & ! self . backoffs . is_backoff_with_slack ( topic_hash , peer )
} ,
) ;
added_peers . extend ( new_peers . clone ( ) ) ;
// add them to the mesh
debug! (
" JOIN: Inserting {:?} random peers into the mesh " ,
new_peers . len ( )
) ;
let mesh_peers = self
. mesh
. entry ( topic_hash . clone ( ) )
. or_insert_with ( Default ::default ) ;
mesh_peers . extend ( new_peers ) ;
}
let random_added = added_peers . len ( ) - fanout_added ;
if let Some ( m ) = self . metrics . as_mut ( ) {
m . peers_included ( topic_hash , Inclusion ::Random , random_added )
}
for peer_id in added_peers {
// Send a GRAFT control message
debug! ( " JOIN: Sending Graft message to peer: {:?} " , peer_id ) ;
if let Some ( ( peer_score , .. ) ) = & mut self . peer_score {
peer_score . graft ( & peer_id , topic_hash . clone ( ) ) ;
}
Self ::control_pool_add (
& mut self . control_pool ,
peer_id ,
ControlAction ::Graft {
topic_hash : topic_hash . clone ( ) ,
} ,
) ;
// If the peer did not previously exist in any mesh, inform the handler
peer_added_to_mesh (
peer_id ,
vec! [ topic_hash ] ,
& self . mesh ,
self . peer_topics . get ( & peer_id ) ,
& mut self . events ,
& self . connected_peers ,
) ;
}
let mesh_peers = self . mesh_peers ( topic_hash ) . count ( ) ;
if let Some ( m ) = self . metrics . as_mut ( ) {
m . set_mesh_peers ( topic_hash , mesh_peers )
}
debug! ( " Completed JOIN for topic: {:?} " , topic_hash ) ;
}
/// Creates a PRUNE gossipsub action.
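///
/// For Gossipsub v1.1 peers the PRUNE carries an optional peer-exchange list and a backoff
/// duration (the unsubscribe backoff when leaving a topic, the regular prune backoff
/// otherwise); v1.0 peers receive a plain PRUNE without these fields.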
fn make_prune (
& mut self ,
topic_hash : & TopicHash ,
peer : & PeerId ,
do_px : bool ,
on_unsubscribe : bool ,
) -> ControlAction {
if let Some ( ( peer_score , .. ) ) = & mut self . peer_score {
peer_score . prune ( peer , topic_hash . clone ( ) ) ;
}
match self . connected_peers . get ( peer ) . map ( | v | & v . kind ) {
Some ( PeerKind ::Floodsub ) = > {
error! ( " Attempted to prune a Floodsub peer " ) ;
}
Some ( PeerKind ::Gossipsub ) = > {
// GossipSub v1.0 -- no peer exchange, the peer won't be able to parse it anyway
return ControlAction ::Prune {
topic_hash : topic_hash . clone ( ) ,
peers : Vec ::new ( ) ,
backoff : None ,
} ;
}
None = > {
error! ( " Attempted to Prune an unknown peer " ) ;
}
_ = > { } // Gossipsub v1.1 peers can handle the `Prune`
}
// Select peers for peer exchange
let peers = if do_px {
get_random_peers (
& self . topic_peers ,
& self . connected_peers ,
topic_hash ,
self . config . prune_peers ( ) ,
| p | p ! = peer & & ! self . score_below_threshold ( p , | _ | 0.0 ) . 0 ,
)
. into_iter ( )
. map ( | p | PeerInfo { peer_id : Some ( p ) } )
. collect ( )
} else {
Vec ::new ( )
} ;
let backoff = if on_unsubscribe {
self . config . unsubscribe_backoff ( )
} else {
self . config . prune_backoff ( )
} ;
// update backoff
self . backoffs . update_backoff ( topic_hash , peer , backoff ) ;
ControlAction ::Prune {
topic_hash : topic_hash . clone ( ) ,
peers ,
backoff : Some ( backoff . as_secs ( ) ) ,
}
}
/// Gossipsub LEAVE(topic) - Notifies mesh\[topic\] peers with PRUNE messages.
fn leave ( & mut self , topic_hash : & TopicHash ) {
debug! ( " Running LEAVE for topic {:?} " , topic_hash ) ;
// If our mesh contains the topic, send prune to peers and delete it from the mesh
if let Some ( ( _ , peers ) ) = self . mesh . remove_entry ( topic_hash ) {
if let Some ( m ) = self . metrics . as_mut ( ) {
m . left ( topic_hash )
}
for peer in peers {
// Send a PRUNE control message
debug! ( " LEAVE: Sending PRUNE to peer: {:?} " , peer ) ;
let on_unsubscribe = true ;
let control =
self . make_prune ( topic_hash , & peer , self . config . do_px ( ) , on_unsubscribe ) ;
Self ::control_pool_add ( & mut self . control_pool , peer , control ) ;
// If the peer is no longer in any mesh, inform the handler
peer_removed_from_mesh (
peer ,
topic_hash ,
& self . mesh ,
self . peer_topics . get ( & peer ) ,
& mut self . events ,
& self . connected_peers ,
) ;
}
}
debug! ( " Completed LEAVE for topic: {:?} " , topic_hash ) ;
}
/// Checks if the given peer is still connected and if not dials the peer again.
fn check_explicit_peer_connection ( & mut self , peer_id : & PeerId ) {
if ! self . peer_topics . contains_key ( peer_id ) {
// Connect to peer
debug! ( " Connecting to explicit peer {:?} " , peer_id ) ;
self . events . push_back ( NetworkBehaviourAction ::Dial {
opts : DialOpts ::peer_id ( * peer_id ) . build ( ) ,
} ) ;
}
}
/// Determines if a peer's score is below a given `PeerScoreThreshold` chosen via the
/// `threshold` parameter.
fn score_below_threshold (
& self ,
peer_id : & PeerId ,
threshold : impl Fn ( & PeerScoreThresholds ) -> f64 ,
) -> ( bool , f64 ) {
Self ::score_below_threshold_from_scores ( & self . peer_score , peer_id , threshold )
}
fn score_below_threshold_from_scores (
peer_score : & Option < ( PeerScore , PeerScoreThresholds , Interval , GossipPromises ) > ,
peer_id : & PeerId ,
threshold : impl Fn ( & PeerScoreThresholds ) -> f64 ,
) -> ( bool , f64 ) {
if let Some ( ( peer_score , thresholds , .. ) ) = peer_score {
let score = peer_score . score ( peer_id ) ;
if score < threshold ( thresholds ) {
return ( true , score ) ;
}
( false , score )
} else {
( false , 0.0 )
}
}
/// Handles an IHAVE control message. Checks our cache of messages. If the message is unknown,
/// requests it with an IWANT control message.
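///
/// IHAVEs from peers scoring below the gossip threshold are ignored, and per-heartbeat
/// limits bound both how many IHAVE messages a peer may send us and how many message ids we
/// are willing to request back from it.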
fn handle_ihave ( & mut self , peer_id : & PeerId , ihave_msgs : Vec < ( TopicHash , Vec < MessageId > ) > ) {
// We ignore IHAVE gossip from any peer whose score is below the gossip threshold
if let ( true , score ) = self . score_below_threshold ( peer_id , | pst | pst . gossip_threshold ) {
debug! (
" IHAVE: ignoring peer {:?} with score below threshold [score = {}] " ,
peer_id , score
) ;
return ;
}
// IHAVE flood protection
let peer_have = self . count_received_ihave . entry ( * peer_id ) . or_insert ( 0 ) ;
* peer_have + = 1 ;
if * peer_have > self . config . max_ihave_messages ( ) {
debug! (
" IHAVE: peer {} has advertised too many times ({}) within this heartbeat \
interval ; ignoring " ,
peer_id , * peer_have
) ;
return ;
}
if let Some ( iasked ) = self . count_sent_iwant . get ( peer_id ) {
if * iasked > = self . config . max_ihave_length ( ) {
debug! (
" IHAVE: peer {} has already advertised too many messages ({}); ignoring " ,
peer_id , * iasked
) ;
return ;
}
}
trace! ( " Handling IHAVE for peer: {:?} " , peer_id ) ;
let mut iwant_ids = HashSet ::new ( ) ;
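// Only request a message id if we have not already received the message and are not
// already asking for it, either as a pending IWANT or as an outstanding gossip promise.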
let want_message = | id : & MessageId | {
if self . duplicate_cache . contains ( id ) {
return false ;
}
if self . pending_iwant_msgs . contains ( id ) {
return false ;
}
self . peer_score
. as_ref ( )
. map ( | ( _ , _ , _ , promises ) | ! promises . contains ( id ) )
. unwrap_or ( true )
} ;
for ( topic , ids ) in ihave_msgs {
// only process the message if we are subscribed
if ! self . mesh . contains_key ( & topic ) {
debug! (
" IHAVE: Ignoring IHAVE - Not subscribed to topic: {:?} " ,
topic
) ;
continue ;
}
for id in ids . into_iter ( ) . filter ( want_message ) {
// have not seen this message and are not currently requesting it
if iwant_ids . insert ( id ) {
// Register the IWANT metric
if let Some ( metrics ) = self . metrics . as_mut ( ) {
metrics . register_iwant ( & topic ) ;
}
}
}
}
if ! iwant_ids . is_empty ( ) {
let iasked = self . count_sent_iwant . entry ( * peer_id ) . or_insert ( 0 ) ;
let mut iask = iwant_ids . len ( ) ;
if * iasked + iask > self . config . max_ihave_length ( ) {
iask = self . config . max_ihave_length ( ) . saturating_sub ( * iasked ) ;
}
// Send the list of IWANT control messages
debug! (
" IHAVE: Asking for {} out of {} messages from {} " ,
iask ,
iwant_ids . len ( ) ,
peer_id
) ;
// Ask in random order
let mut iwant_ids_vec : Vec < _ > = iwant_ids . into_iter ( ) . collect ( ) ;
let mut rng = thread_rng ( ) ;
iwant_ids_vec . partial_shuffle ( & mut rng , iask ) ;
iwant_ids_vec . truncate ( iask ) ;
* iasked + = iask ;
for message_id in & iwant_ids_vec {
// Add all messages to the pending list
self . pending_iwant_msgs . insert ( message_id . clone ( ) ) ;
}
if let Some ( ( _ , _ , _ , gossip_promises ) ) = & mut self . peer_score {
gossip_promises . add_promise (
* peer_id ,
& iwant_ids_vec ,
Instant ::now ( ) + self . config . iwant_followup_time ( ) ,
) ;
}
trace! (
" IHAVE: Asking for the following messages from {}: {:?} " ,
peer_id ,
iwant_ids_vec
) ;
Self ::control_pool_add (
& mut self . control_pool ,
* peer_id ,
ControlAction ::IWant {
message_ids : iwant_ids_vec ,
} ,
) ;
}
trace! ( " Completed IHAVE handling for peer: {:?} " , peer_id ) ;
}
/// Handles an IWANT control message. Checks our cache of messages. If the message exists it is
/// forwarded to the requesting peer.
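///
/// Requests are served from the message cache; a message that a peer has already requested
/// more than the configured retransmission limit is not sent again.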
fn handle_iwant ( & mut self , peer_id : & PeerId , iwant_msgs : Vec < MessageId > ) {
// We ignore IWANT gossip from any peer whose score is below the gossip threshold
if let ( true , score ) = self . score_below_threshold ( peer_id , | pst | pst . gossip_threshold ) {
debug! (
" IWANT: ignoring peer {:?} with score below threshold [score = {}] " ,
peer_id , score
) ;
return ;
}
debug! ( " Handling IWANT for peer: {:?} " , peer_id ) ;
// build a hashmap of available messages
let mut cached_messages = HashMap ::new ( ) ;
for id in iwant_msgs {
// If we have it and the IHAVE count is not above the threshold, add it to the
// cached_messages mapping
if let Some ( ( msg , count ) ) = self . mcache . get_with_iwant_counts ( & id , peer_id ) {
if count > self . config . gossip_retransimission ( ) {
debug! (
" IWANT: Peer {} has asked for message {} too many times; ignoring \
request " ,
peer_id , & id
) ;
} else {
cached_messages . insert ( id . clone ( ) , msg . clone ( ) ) ;
}
}
}
if ! cached_messages . is_empty ( ) {
debug! ( " IWANT: Sending cached messages to peer: {:?} " , peer_id ) ;
// Send the messages to the peer
let message_list : Vec < _ > = cached_messages . into_iter ( ) . map ( | entry | entry . 1 ) . collect ( ) ;
let topics = message_list
. iter ( )
. map ( | message | message . topic . clone ( ) )
. collect ::< HashSet < TopicHash > > ( ) ;
let message = Rpc {
subscriptions : Vec ::new ( ) ,
messages : message_list ,
control_msgs : Vec ::new ( ) ,
}
. into_protobuf ( ) ;
let msg_bytes = message . encoded_len ( ) ;
if self . send_message ( * peer_id , message ) . is_err ( ) {
error! ( " Failed to send cached messages. Messages too large " ) ;
} else if let Some ( m ) = self . metrics . as_mut ( ) {
// Sending of messages succeeded, register them on the internal metrics.
for topic in topics . iter ( ) {
m . msg_sent ( topic , msg_bytes ) ;
}
}
}
debug! ( " Completed IWANT handling for peer: {} " , peer_id ) ;
}
/// Handles GRAFT control messages. If subscribed to the topic, adds the peer to mesh, if not,
/// responds with PRUNE messages.
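///
/// Grafts are refused, and answered with a PRUNE, for explicit peers, peers that are still
/// within a prune backoff period, peers with negative score, and topics whose mesh is
/// already at `mesh_n_high` (unless the peer is outbound).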
fn handle_graft ( & mut self , peer_id : & PeerId , topics : Vec < TopicHash > ) {
debug! ( " Handling GRAFT message for peer: {} " , peer_id ) ;
let mut to_prune_topics = HashSet ::new ( ) ;
let mut do_px = self . config . do_px ( ) ;
// For each topic, if a peer has grafted us, then we necessarily must be in their mesh
// and they must be subscribed to the topic. Ensure we have recorded the mapping.
for topic in & topics {
self . peer_topics
. entry ( * peer_id )
. or_default ( )
. insert ( topic . clone ( ) ) ;
self . topic_peers
. entry ( topic . clone ( ) )
. or_default ( )
. insert ( * peer_id ) ;
}
// we don't GRAFT to/from explicit peers; complain loudly if this happens
if self . explicit_peers . contains ( peer_id ) {
warn! ( " GRAFT: ignoring request from direct peer {} " , peer_id ) ;
// this is possibly a bug from non-reciprocal configuration; send a PRUNE for all topics
to_prune_topics = topics . into_iter ( ) . collect ( ) ;
// but don't PX
do_px = false
} else {
let ( below_zero , score ) = self . score_below_threshold ( peer_id , | _ | 0.0 ) ;
let now = Instant ::now ( ) ;
for topic_hash in topics {
if let Some ( peers ) = self . mesh . get_mut ( & topic_hash ) {
// if the peer is already in the mesh ignore the graft
if peers . contains ( peer_id ) {
debug! (
" GRAFT: Received graft for peer {:?} that is already in topic {:?} " ,
peer_id , & topic_hash
) ;
continue ;
}
// make sure we are not backing off that peer
if let Some ( backoff_time ) = self . backoffs . get_backoff_time ( & topic_hash , peer_id )
{
if backoff_time > now {
warn! (
" [Penalty] Peer attempted graft within backoff time, penalizing {} " ,
peer_id
) ;
// add behavioural penalty
if let Some ( ( peer_score , .. ) ) = & mut self . peer_score {
if let Some ( metrics ) = self . metrics . as_mut ( ) {
metrics . register_score_penalty ( Penalty ::GraftBackoff ) ;
}
peer_score . add_penalty ( peer_id , 1 ) ;
// check the flood cutoff
// See: https://github.com/rust-lang/rust-clippy/issues/10061
#[ allow(unknown_lints, clippy::unchecked_duration_subtraction) ]
let flood_cutoff = ( backoff_time
+ self . config . graft_flood_threshold ( ) )
- self . config . prune_backoff ( ) ;
if flood_cutoff > now {
//extra penalty
peer_score . add_penalty ( peer_id , 1 ) ;
}
}
// no PX
do_px = false ;
to_prune_topics . insert ( topic_hash . clone ( ) ) ;
continue ;
}
}
// check the score
if below_zero {
// we don't GRAFT peers with negative score
debug! (
" GRAFT: ignoring peer {:?} with negative score [score = {}, \
topic = { } ] " ,
peer_id , score , topic_hash
) ;
// we do send them PRUNE however, because it's a matter of protocol correctness
to_prune_topics . insert ( topic_hash . clone ( ) ) ;
// but we won't PX to them
do_px = false ;
continue ;
}
// check mesh upper bound and only allow graft if the upper bound is not reached or
// if it is an outbound peer
if peers . len ( ) > = self . config . mesh_n_high ( )
& & ! self . outbound_peers . contains ( peer_id )
{
to_prune_topics . insert ( topic_hash . clone ( ) ) ;
continue ;
}
// add peer to the mesh
debug! (
" GRAFT: Mesh link added for peer: {:?} in topic: {:?} " ,
peer_id , & topic_hash
) ;
if peers . insert ( * peer_id ) {
if let Some ( m ) = self . metrics . as_mut ( ) {
m . peers_included ( & topic_hash , Inclusion ::Subscribed , 1 )
}
}
// If the peer did not previously exist in any mesh, inform the handler
peer_added_to_mesh (
* peer_id ,
vec! [ & topic_hash ] ,
& self . mesh ,
self . peer_topics . get ( peer_id ) ,
& mut self . events ,
& self . connected_peers ,
) ;
if let Some ( ( peer_score , .. ) ) = & mut self . peer_score {
peer_score . graft ( peer_id , topic_hash ) ;
}
} else {
// don't do PX when there is an unknown topic to avoid leaking our peers
do_px = false ;
debug! (
" GRAFT: Received graft for unknown topic {:?} from peer {:?} " ,
& topic_hash , peer_id
) ;
// spam hardening: ignore GRAFTs for unknown topics
continue ;
}
}
}
if ! to_prune_topics . is_empty ( ) {
// build the prune messages to send
let on_unsubscribe = false ;
let prune_messages = to_prune_topics
. iter ( )
. map ( | t | self . make_prune ( t , peer_id , do_px , on_unsubscribe ) )
. collect ( ) ;
// Send the prune messages to the peer
debug! (
" GRAFT: Not subscribed to topics - Sending PRUNE to peer: {} " ,
peer_id
) ;
if let Err ( e ) = self . send_message (
* peer_id ,
Rpc {
subscriptions : Vec ::new ( ) ,
messages : Vec ::new ( ) ,
control_msgs : prune_messages ,
}
. into_protobuf ( ) ,
) {
error! ( " Failed to send PRUNE: {:?} " , e ) ;
}
}
debug! ( " Completed GRAFT handling for peer: {} " , peer_id ) ;
}
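/// Removes a peer from the mesh of a single topic, updating metrics, the peer score and the
/// handler, and, where required, records the supplied (or default) backoff so the peer
/// cannot immediately be re-grafted.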
fn remove_peer_from_mesh (
& mut self ,
peer_id : & PeerId ,
topic_hash : & TopicHash ,
backoff : Option < u64 > ,
always_update_backoff : bool ,
reason : Churn ,
) {
let mut update_backoff = always_update_backoff ;
if let Some ( peers ) = self . mesh . get_mut ( topic_hash ) {
// remove the peer if it exists in the mesh
if peers . remove ( peer_id ) {
debug! (
" PRUNE: Removing peer: {} from the mesh for topic: {} " ,
peer_id . to_string ( ) ,
topic_hash
) ;
if let Some ( m ) = self . metrics . as_mut ( ) {
m . peers_removed ( topic_hash , reason , 1 )
}
if let Some ( ( peer_score , .. ) ) = & mut self . peer_score {
peer_score . prune ( peer_id , topic_hash . clone ( ) ) ;
}
update_backoff = true ;
// inform the handler
peer_removed_from_mesh (
* peer_id ,
topic_hash ,
& self . mesh ,
self . peer_topics . get ( peer_id ) ,
& mut self . events ,
& self . connected_peers ,
) ;
}
}
if update_backoff {
let time = if let Some ( backoff ) = backoff {
Duration ::from_secs ( backoff )
} else {
self . config . prune_backoff ( )
} ;
// is there a backoff specified by the peer? if so obey it.
self . backoffs . update_backoff ( topic_hash , peer_id , time ) ;
}
}
/// Handles PRUNE control messages. Removes peer from the mesh.
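///
/// Peer-exchange information attached to the PRUNE is only acted upon if the sending peer's
/// score is above the `accept_px_threshold` and `prune_peers` is configured to be non-zero.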
fn handle_prune (
& mut self ,
peer_id : & PeerId ,
prune_data : Vec < ( TopicHash , Vec < PeerInfo > , Option < u64 > ) > ,
) {
debug! ( " Handling PRUNE message for peer: {} " , peer_id ) ;
let ( below_threshold , score ) =
self . score_below_threshold ( peer_id , | pst | pst . accept_px_threshold ) ;
for ( topic_hash , px , backoff ) in prune_data {
self . remove_peer_from_mesh ( peer_id , & topic_hash , backoff , true , Churn ::Prune ) ;
if self . mesh . contains_key ( & topic_hash ) {
//connect to px peers
if ! px . is_empty ( ) {
// we ignore PX from peers with insufficient score
if below_threshold {
debug! (
" PRUNE: ignoring PX from peer {:?} with insufficient score \
[ score = { } topic = { } ] " ,
peer_id , score , topic_hash
) ;
continue ;
}
// NOTE: We cannot dial any peers from PX currently as we typically will not
// know their multiaddr. Until SignedRecords are spec'd this
// remains a stub. By default `config.prune_peers()` is set to zero and
// this is skipped. If the user modifies this, this will only be able to
// dial already known peers (from an external discovery mechanism for
// example).
if self . config . prune_peers ( ) > 0 {
self . px_connect ( px ) ;
}
}
}
}
debug! ( " Completed PRUNE handling for peer: {} " , peer_id . to_string ( ) ) ;
}
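/// Dials up to `prune_peers` of the peers learned through peer exchange. Entries without a
/// peer id are ignored, and the remainder are sampled at random.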
fn px_connect ( & mut self , mut px : Vec < PeerInfo > ) {
let n = self . config . prune_peers ( ) ;
// Ignore peerInfo with no ID
//
//TODO: Once signed records are spec'd: Can we use peerInfo without any IDs if they have a
// signed peer record?
px . retain ( | p | p . peer_id . is_some ( ) ) ;
if px . len ( ) > n {
// only use at most prune_peers many random peers
let mut rng = thread_rng ( ) ;
px . partial_shuffle ( & mut rng , n ) ;
px = px . into_iter ( ) . take ( n ) . collect ( ) ;
}
for p in px {
// TODO: Once signed records are spec'd: extract signed peer record if given and handle
// it, see https://github.com/libp2p/specs/pull/217
if let Some ( peer_id ) = p . peer_id {
// mark as px peer
self . px_peers . insert ( peer_id ) ;
// dial peer
self . events . push_back ( NetworkBehaviourAction ::Dial {
opts : DialOpts ::peer_id ( peer_id ) . build ( ) ,
} ) ;
}
}
}
/// Applies some basic checks to whether this message is valid. Does not apply user validation
/// checks.
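///
/// Messages received from blacklisted peers, messages originating from blacklisted peers
/// and messages that falsely claim to have been published by us are rejected and reported
/// to the peer score.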
fn message_is_valid (
& mut self ,
msg_id : & MessageId ,
raw_message : & mut RawMessage ,
propagation_source : & PeerId ,
) -> bool {
debug! (
" Handling message: {:?} from peer: {} " ,
msg_id ,
propagation_source . to_string ( )
) ;
// Reject any message from a blacklisted peer
if self . blacklisted_peers . contains ( propagation_source ) {
debug! (
" Rejecting message from blacklisted peer: {} " ,
propagation_source
) ;
if let Some ( ( peer_score , .. , gossip_promises ) ) = & mut self . peer_score {
peer_score . reject_message (
propagation_source ,
msg_id ,
& raw_message . topic ,
RejectReason ::BlackListedPeer ,
) ;
gossip_promises . reject_message ( msg_id , & RejectReason ::BlackListedPeer ) ;
}
return false ;
}
// Also reject any message that originated from a blacklisted peer
if let Some ( source ) = raw_message . source . as_ref ( ) {
if self . blacklisted_peers . contains ( source ) {
debug! (
" Rejecting message from peer {} because of blacklisted source: {} " ,
propagation_source , source
) ;
self . handle_invalid_message (
propagation_source ,
raw_message ,
RejectReason ::BlackListedSource ,
) ;
return false ;
}
}
// If we are not validating messages, assume this message is validated
// This will allow the message to be gossiped without explicitly calling
// `validate_message`.
if ! self . config . validate_messages ( ) {
raw_message . validated = true ;
}
// reject messages claiming to be from ourselves but not locally published
let self_published = ! self . config . allow_self_origin ( )
& & if let Some ( own_id ) = self . publish_config . get_own_id ( ) {
own_id ! = propagation_source
& & raw_message . source . as_ref ( ) . map_or ( false , | s | s = = own_id )
} else {
self . published_message_ids . contains ( msg_id )
} ;
if self_published {
debug! (
" Dropping message {} claiming to be from self but forwarded from {} " ,
msg_id , propagation_source
) ;
self . handle_invalid_message ( propagation_source , raw_message , RejectReason ::SelfOrigin ) ;
return false ;
}
true
}
/// Handles a newly received [`RawMessage`].
///
/// Forwards the message to all peers in the mesh.
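///
/// Duplicates are detected through the fast message id cache and the duplicate cache and
/// are only reported to the peer score; the first occurrence of a message is transformed,
/// validated, dispatched to the application and, unless validation is delegated to the
/// application, forwarded to the mesh.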
fn handle_received_message (
& mut self ,
mut raw_message : RawMessage ,
propagation_source : & PeerId ,
) {
// Record the received metric
if let Some ( metrics ) = self . metrics . as_mut ( ) {
metrics . msg_recvd_unfiltered ( & raw_message . topic , raw_message . raw_protobuf_len ( ) ) ;
}
let fast_message_id = self . config . fast_message_id ( & raw_message ) ;
if let Some ( fast_message_id ) = fast_message_id . as_ref ( ) {
if let Some ( msg_id ) = self . fast_message_id_cache . get ( fast_message_id ) {
let msg_id = msg_id . clone ( ) ;
// Report the duplicate
if self . message_is_valid ( & msg_id , & mut raw_message , propagation_source ) {
if let Some ( ( peer_score , .. ) ) = & mut self . peer_score {
peer_score . duplicated_message (
propagation_source ,
& msg_id ,
& raw_message . topic ,
) ;
}
// Update the cache, informing that we have received a duplicate from another peer.
// The peers in this cache are used to prevent us forwarding redundant messages onto
// these peers.
self . mcache . observe_duplicate ( & msg_id , propagation_source ) ;
}
// This message has been seen previously. Ignore it
return ;
}
}
// Try and perform the data transform to the message. If it fails, consider it invalid.
let message = match self . data_transform . inbound_transform ( raw_message . clone ( ) ) {
Ok ( message ) = > message ,
Err ( e ) = > {
debug! ( " Invalid message. Transform error: {:?} " , e ) ;
// Reject the message and return
self . handle_invalid_message (
propagation_source ,
& raw_message ,
RejectReason ::ValidationError ( ValidationError ::TransformFailed ) ,
) ;
return ;
}
} ;
// Calculate the message id on the transformed data.
let msg_id = self . config . message_id ( & message ) ;
// Check the validity of the message
// Peers get penalized if this message is invalid. We don't add it to the duplicate cache
// and instead continually penalize peers that repeatedly send this message.
if ! self . message_is_valid ( & msg_id , & mut raw_message , propagation_source ) {
return ;
}
// Add the message to the duplicate caches
if let Some ( fast_message_id ) = fast_message_id {
// add id to cache
self . fast_message_id_cache
. entry ( fast_message_id )
. or_insert_with ( | | msg_id . clone ( ) ) ;
}
if ! self . duplicate_cache . insert ( msg_id . clone ( ) ) {
debug! ( " Message already received, ignoring. Message: {} " , msg_id ) ;
if let Some ( ( peer_score , .. ) ) = & mut self . peer_score {
peer_score . duplicated_message ( propagation_source , & msg_id , & message . topic ) ;
}
self . mcache . observe_duplicate ( & msg_id , propagation_source ) ;
return ;
}
debug! (
" Put message {:?} in duplicate_cache and resolve promises " ,
msg_id
) ;
// Record the received message with the metrics
if let Some ( metrics ) = self . metrics . as_mut ( ) {
metrics . msg_recvd ( & message . topic ) ;
}
// Tells score that message arrived (but is maybe not fully validated yet).
// Consider the message as delivered for gossip promises.
if let Some ( ( peer_score , .. , gossip_promises ) ) = & mut self . peer_score {
peer_score . validate_message ( propagation_source , & msg_id , & message . topic ) ;
gossip_promises . message_delivered ( & msg_id ) ;
}
// Add the message to our memcache
self . mcache . put ( & msg_id , raw_message . clone ( ) ) ;
// Dispatch the message to the user if we are subscribed to any of the topics
if self . mesh . contains_key ( & message . topic ) {
debug! ( " Sending received message to user " ) ;
self . events
. push_back ( NetworkBehaviourAction ::GenerateEvent ( Event ::Message {
propagation_source : * propagation_source ,
message_id : msg_id . clone ( ) ,
message ,
} ) ) ;
} else {
debug! (
" Received message on a topic we are not subscribed to: {:?} " ,
message . topic
) ;
return ;
}
// forward the message to mesh peers, if no validation is required
if ! self . config . validate_messages ( ) {
if self
. forward_msg (
& msg_id ,
raw_message ,
Some ( propagation_source ) ,
HashSet ::new ( ) ,
)
. is_err ( )
{
error! ( " Failed to forward message. Too large " ) ;
}
debug! ( " Completed message handling for message: {:?} " , msg_id ) ;
}
}
// Handles invalid messages received.
fn handle_invalid_message (
& mut self ,
propagation_source : & PeerId ,
raw_message : & RawMessage ,
reject_reason : RejectReason ,
) {
if let Some ( ( peer_score , .. , gossip_promises ) ) = & mut self . peer_score {
if let Some ( metrics ) = self . metrics . as_mut ( ) {
metrics . register_invalid_message ( & raw_message . topic ) ;
}
let fast_message_id_cache = & self . fast_message_id_cache ;
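// If a fast message id can be computed for the raw message and it maps to a message id we
// have already seen, reject that specific message id so any gossip promises for it are
// settled as well; otherwise fall back to a generic invalid-message penalty below.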
if let Some ( msg_id ) = self
. config
. fast_message_id ( raw_message )
. and_then ( | id | fast_message_id_cache . get ( & id ) )
{
peer_score . reject_message (
propagation_source ,
msg_id ,
& raw_message . topic ,
reject_reason ,
) ;
gossip_promises . reject_message ( msg_id , & reject_reason ) ;
} else {
// The message is invalid, we reject it ignoring any gossip promises. If a peer is
// advertising this message via an IHAVE and it's invalid it will be double
// penalized: once for sending us an invalid message and again for breaking a promise.
peer_score . reject_invalid_message ( propagation_source , & raw_message . topic ) ;
}
}
}
/// Handles received subscriptions.
fn handle_received_subscriptions (
& mut self ,
subscriptions : & [ Subscription ] ,
propagation_source : & PeerId ,
) {
debug! (
" Handling subscriptions: {:?}, from source: {} " ,
subscriptions ,
propagation_source . to_string ( )
) ;
let mut unsubscribed_peers = Vec ::new ( ) ;
let subscribed_topics = match self . peer_topics . get_mut ( propagation_source ) {
Some ( topics ) = > topics ,
None = > {
error! (
" Subscription by unknown peer: {} " ,
propagation_source . to_string ( )
) ;
return ;
}
} ;
// Collect potential graft topics for the peer.
let mut topics_to_graft = Vec ::new ( ) ;
// Notify the application about the subscription, after the grafts are sent.
let mut application_event = Vec ::new ( ) ;
let filtered_topics = match self
. subscription_filter
. filter_incoming_subscriptions ( subscriptions , subscribed_topics )
{
Ok ( topics ) = > topics ,
Err ( s ) = > {
error! (
" Subscription filter error: {}; ignoring RPC from peer {} " ,
s ,
propagation_source . to_string ( )
) ;
return ;
}
} ;
for subscription in filtered_topics {
// get the peers from the mapping, or insert empty lists if the topic doesn't exist
let topic_hash = & subscription . topic_hash ;
let peer_list = self
. topic_peers
. entry ( topic_hash . clone ( ) )
. or_insert_with ( Default ::default ) ;
match subscription . action {
SubscriptionAction ::Subscribe = > {
if peer_list . insert ( * propagation_source ) {
debug! (
" SUBSCRIPTION: Adding gossip peer: {} to topic: {:?} " ,
propagation_source . to_string ( ) ,
topic_hash
) ;
}
// add to the peer_topics mapping
subscribed_topics . insert ( topic_hash . clone ( ) ) ;
// if the mesh needs peers add the peer to the mesh
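// Only consider grafting the subscribing peer if it is not an explicit peer, speaks a
// gossipsub protocol (rather than plain floodsub), does not have a negative score and is
// not currently backed off for this topic; the graft below also requires the mesh to be
// under mesh_n_low.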
if ! self . explicit_peers . contains ( propagation_source )
& & matches! (
self . connected_peers
. get ( propagation_source )
. map ( | v | & v . kind ) ,
Some ( PeerKind ::Gossipsubv1_1 ) | Some ( PeerKind ::Gossipsub )
)
& & ! Self ::score_below_threshold_from_scores (
& self . peer_score ,
propagation_source ,
| _ | 0.0 ,
)
. 0
& & ! self
. backoffs
. is_backoff_with_slack ( topic_hash , propagation_source )
{
if let Some ( peers ) = self . mesh . get_mut ( topic_hash ) {
if peers . len ( ) < self . config . mesh_n_low ( )
& & peers . insert ( * propagation_source )
{
debug! (
" SUBSCRIPTION: Adding peer {} to the mesh for topic {:?} " ,
propagation_source . to_string ( ) ,
topic_hash
) ;
if let Some ( m ) = self . metrics . as_mut ( ) {
m . peers_included ( topic_hash , Inclusion ::Subscribed , 1 )
}
// send graft to the peer
debug! (
" Sending GRAFT to peer {} for topic {:?} " ,
propagation_source . to_string ( ) ,
topic_hash
) ;
if let Some ( ( peer_score , .. ) ) = & mut self . peer_score {
peer_score . graft ( propagation_source , topic_hash . clone ( ) ) ;
}
topics_to_graft . push ( topic_hash . clone ( ) ) ;
}
}
}
// generates a subscription event to be polled
application_event . push ( NetworkBehaviourAction ::GenerateEvent (
Event ::Subscribed {
peer_id : * propagation_source ,
topic : topic_hash . clone ( ) ,
} ,
) ) ;
}
SubscriptionAction ::Unsubscribe = > {
if peer_list . remove ( propagation_source ) {
debug! (
" SUBSCRIPTION: Removing gossip peer: {} from topic: {:?} " ,
propagation_source . to_string ( ) ,
topic_hash
) ;
}
// remove topic from the peer_topics mapping
subscribed_topics . remove ( topic_hash ) ;
unsubscribed_peers . push ( ( * propagation_source , topic_hash . clone ( ) ) ) ;
// generate an unsubscribe event to be polled
application_event . push ( NetworkBehaviourAction ::GenerateEvent (
Event ::Unsubscribed {
peer_id : * propagation_source ,
topic : topic_hash . clone ( ) ,
} ,
) ) ;
}
}
if let Some ( m ) = self . metrics . as_mut ( ) {
m . set_topic_peers ( topic_hash , peer_list . len ( ) ) ;
}
}
// remove unsubscribed peers from the relevant mesh, if one exists
for ( peer_id , topic_hash ) in unsubscribed_peers {
self . remove_peer_from_mesh ( & peer_id , & topic_hash , None , false , Churn ::Unsub ) ;
}
// Potentially inform the handler if we have added this peer to a mesh for the first time.
let topics_joined = topics_to_graft . iter ( ) . collect ::< Vec < _ > > ( ) ;
if ! topics_joined . is_empty ( ) {
peer_added_to_mesh (
* propagation_source ,
topics_joined ,
& self . mesh ,
self . peer_topics . get ( propagation_source ) ,
& mut self . events ,
& self . connected_peers ,
) ;
}
// If we need to send grafts to the peer, do so immediately rather than waiting for the
// heartbeat.
if ! topics_to_graft . is_empty ( )
& & self
. send_message (
* propagation_source ,
Rpc {
subscriptions : Vec ::new ( ) ,
messages : Vec ::new ( ) ,
control_msgs : topics_to_graft
. into_iter ( )
. map ( | topic_hash | ControlAction ::Graft { topic_hash } )
. collect ( ) ,
}
. into_protobuf ( ) ,
)
. is_err ( )
{
error! ( " Failed sending grafts. Message too large " ) ;
}
// Notify the application of the subscriptions
for event in application_event {
self . events . push_back ( event ) ;
}
trace! (
" Completed handling subscriptions from source: {:?} " ,
propagation_source
) ;
}
/// Applies penalties to peers that did not respond to our IWANT requests.
fn apply_iwant_penalties ( & mut self ) {
if let Some ( ( peer_score , .. , gossip_promises ) ) = & mut self . peer_score {
for ( peer , count ) in gossip_promises . get_broken_promises ( ) {
peer_score . add_penalty ( & peer , count ) ;
if let Some ( metrics ) = self . metrics . as_mut ( ) {
metrics . register_score_penalty ( Penalty ::BrokenPromise ) ;
}
}
}
}
/// Heartbeat function which shifts the memcache and updates the mesh.
fn heartbeat ( & mut self ) {
debug! ( " Starting heartbeat " ) ;
let start = Instant ::now ( ) ;
self . heartbeat_ticks + = 1 ;
let mut to_graft = HashMap ::new ( ) ;
let mut to_prune = HashMap ::new ( ) ;
let mut no_px = HashSet ::new ( ) ;
// clean up expired backoffs
self . backoffs . heartbeat ( ) ;
// clean up ihave counters
self . count_sent_iwant . clear ( ) ;
self . count_received_ihave . clear ( ) ;
// apply iwant penalties
self . apply_iwant_penalties ( ) ;
// check connections to explicit peers
if self . heartbeat_ticks % self . config . check_explicit_peers_ticks ( ) = = 0 {
for p in self . explicit_peers . clone ( ) {
self . check_explicit_peer_connection ( & p ) ;
}
}
// Cache the scores of all connected peers, and record metrics for current penalties.
let mut scores = HashMap ::with_capacity ( self . connected_peers . len ( ) ) ;
if let Some ( ( peer_score , .. ) ) = & self . peer_score {
for peer_id in self . connected_peers . keys ( ) {
scores
. entry ( peer_id )
. or_insert_with ( | | peer_score . metric_score ( peer_id , self . metrics . as_mut ( ) ) ) ;
}
}
// maintain the mesh for each topic
for ( topic_hash , peers ) in self . mesh . iter_mut ( ) {
let explicit_peers = & self . explicit_peers ;
let backoffs = & self . backoffs ;
let topic_peers = & self . topic_peers ;
let outbound_peers = & self . outbound_peers ;
// drop all peers with negative score, without PX
// if there is at some point a stable retain method for BTreeSet the following can be
// written more efficiently with retain.
let mut to_remove_peers = Vec ::new ( ) ;
for peer_id in peers . iter ( ) {
let peer_score = * scores . get ( peer_id ) . unwrap_or ( & 0.0 ) ;
// Record the score per mesh
if let Some ( metrics ) = self . metrics . as_mut ( ) {
metrics . observe_mesh_peers_score ( topic_hash , peer_score ) ;
}
if peer_score < 0.0 {
debug! (
" HEARTBEAT: Prune peer {:?} with negative score [score = {}, topic = \
{ } ] " ,
peer_id , peer_score , topic_hash
) ;
let current_topic = to_prune . entry ( * peer_id ) . or_insert_with ( Vec ::new ) ;
current_topic . push ( topic_hash . clone ( ) ) ;
no_px . insert ( * peer_id ) ;
to_remove_peers . push ( * peer_id ) ;
}
}
if let Some ( m ) = self . metrics . as_mut ( ) {
m . peers_removed ( topic_hash , Churn ::BadScore , to_remove_peers . len ( ) )
}
for peer_id in to_remove_peers {
peers . remove ( & peer_id ) ;
}
// too few peers - add some
if peers . len ( ) < self . config . mesh_n_low ( ) {
debug! (
" HEARTBEAT: Mesh low. Topic: {} Contains: {} needs: {} " ,
topic_hash ,
peers . len ( ) ,
self . config . mesh_n_low ( )
) ;
// not enough peers - get mesh_n - current_length more
let desired_peers = self . config . mesh_n ( ) - peers . len ( ) ;
let peer_list = get_random_peers (
topic_peers ,
& self . connected_peers ,
topic_hash ,
desired_peers ,
| peer | {
! peers . contains ( peer )
& & ! explicit_peers . contains ( peer )
& & ! backoffs . is_backoff_with_slack ( topic_hash , peer )
& & * scores . get ( peer ) . unwrap_or ( & 0.0 ) > = 0.0
} ,
) ;
for peer in & peer_list {
let current_topic = to_graft . entry ( * peer ) . or_insert_with ( Vec ::new ) ;
current_topic . push ( topic_hash . clone ( ) ) ;
}
// update the mesh
debug! ( " Updating mesh, new mesh: {:?} " , peer_list ) ;
if let Some ( m ) = self . metrics . as_mut ( ) {
m . peers_included ( topic_hash , Inclusion ::Random , peer_list . len ( ) )
}
peers . extend ( peer_list ) ;
}
// too many peers - remove some
if peers . len ( ) > self . config . mesh_n_high ( ) {
debug! (
" HEARTBEAT: Mesh high. Topic: {} Contains: {} needs: {} " ,
topic_hash ,
peers . len ( ) ,
self . config . mesh_n_high ( )
) ;
let excess_peer_no = peers . len ( ) - self . config . mesh_n ( ) ;
// shuffle the peers and then sort by score ascending beginning with the worst
let mut rng = thread_rng ( ) ;
let mut shuffled = peers . iter ( ) . cloned ( ) . collect ::< Vec < _ > > ( ) ;
shuffled . shuffle ( & mut rng ) ;
shuffled . sort_by ( | p1 , p2 | {
let score_p1 = * scores . get ( p1 ) . unwrap_or ( & 0.0 ) ;
let score_p2 = * scores . get ( p2 ) . unwrap_or ( & 0.0 ) ;
score_p1 . partial_cmp ( & score_p2 ) . unwrap_or ( Ordering ::Equal )
} ) ;
// shuffle everything except the last retain_scores many peers (the best ones)
shuffled [ .. peers . len ( ) - self . config . retain_scores ( ) ] . shuffle ( & mut rng ) ;
// count total number of outbound peers
let mut outbound = {
let outbound_peers = & self . outbound_peers ;
shuffled
. iter ( )
. filter ( | p | outbound_peers . contains ( * p ) )
. count ( )
} ;
// remove the first excess_peer_no peers allowed by the outbound restrictions, adding
// them to to_prune
let mut removed = 0 ;
for peer in shuffled {
if removed = = excess_peer_no {
break ;
}
if self . outbound_peers . contains ( & peer ) {
if outbound < = self . config . mesh_outbound_min ( ) {
// do not remove any more outbound peers
continue ;
} else {
// an outbound peer gets removed
outbound - = 1 ;
}
}
// remove the peer
peers . remove ( & peer ) ;
let current_topic = to_prune . entry ( peer ) . or_insert_with ( Vec ::new ) ;
current_topic . push ( topic_hash . clone ( ) ) ;
removed + = 1 ;
}
if let Some ( m ) = self . metrics . as_mut ( ) {
m . peers_removed ( topic_hash , Churn ::Excess , removed )
}
}
// do we have enough outbound peers?
if peers . len ( ) > = self . config . mesh_n_low ( ) {
// count number of outbound peers we have
let outbound = { peers . iter ( ) . filter ( | p | outbound_peers . contains ( * p ) ) . count ( ) } ;
// if we don't have enough outbound peers, graft to some new outbound peers
if outbound < self . config . mesh_outbound_min ( ) {
let needed = self . config . mesh_outbound_min ( ) - outbound ;
let peer_list = get_random_peers (
topic_peers ,
& self . connected_peers ,
topic_hash ,
needed ,
| peer | {
! peers . contains ( peer )
& & ! explicit_peers . contains ( peer )
& & ! backoffs . is_backoff_with_slack ( topic_hash , peer )
& & * scores . get ( peer ) . unwrap_or ( & 0.0 ) > = 0.0
& & outbound_peers . contains ( peer )
} ,
) ;
for peer in & peer_list {
let current_topic = to_graft . entry ( * peer ) . or_insert_with ( Vec ::new ) ;
current_topic . push ( topic_hash . clone ( ) ) ;
}
// update the mesh
debug! ( " Updating mesh, new mesh: {:?} " , peer_list ) ;
if let Some ( m ) = self . metrics . as_mut ( ) {
m . peers_included ( topic_hash , Inclusion ::Outbound , peer_list . len ( ) )
}
peers . extend ( peer_list ) ;
}
}
// should we try to improve the mesh with opportunistic grafting?
if self . heartbeat_ticks % self . config . opportunistic_graft_ticks ( ) = = 0
& & peers . len ( ) > 1
& & self . peer_score . is_some ( )
{
if let Some ( ( _ , thresholds , _ , _ ) ) = & self . peer_score {
// Opportunistic grafting works as follows: we check the median score of peers
// in the mesh; if this score is below the opportunisticGraftThreshold, we
// select a few peers at random with score over the median.
// The intention is to (slowly) improve an underperforming mesh by introducing
// good scoring peers that may have been gossiping at us. This allows us to
// get out of sticky situations where we are stuck with poor peers and also
// recover from churn of good peers.
// now compute the median peer score in the mesh
let mut peers_by_score : Vec < _ > = peers . iter ( ) . collect ( ) ;
peers_by_score . sort_by ( | p1 , p2 | {
let p1_score = * scores . get ( p1 ) . unwrap_or ( & 0.0 ) ;
let p2_score = * scores . get ( p2 ) . unwrap_or ( & 0.0 ) ;
p1_score . partial_cmp ( & p2_score ) . unwrap_or ( Ordering ::Equal )
} ) ;
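// With the peers sorted by ascending score, the median is the middle score for an odd
// number of peers, or the average of the two middle scores for an even number.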
let middle = peers_by_score . len ( ) / 2 ;
let median = if peers_by_score . len ( ) % 2 = = 0 {
let sub_middle_peer = * peers_by_score
. get ( middle - 1 )
. expect ( " middle < vector length and middle > 0 since peers.len() > 0 " ) ;
let sub_middle_score = * scores . get ( sub_middle_peer ) . unwrap_or ( & 0.0 ) ;
let middle_peer =
* peers_by_score . get ( middle ) . expect ( " middle < vector length " ) ;
let middle_score = * scores . get ( middle_peer ) . unwrap_or ( & 0.0 ) ;
( sub_middle_score + middle_score ) * 0.5
} else {
* scores
. get ( * peers_by_score . get ( middle ) . expect ( " middle < vector length " ) )
. unwrap_or ( & 0.0 )
} ;
// if the median score is below the threshold, select a better peer (if any) and
// GRAFT
if median < thresholds . opportunistic_graft_threshold {
let peer_list = get_random_peers (
topic_peers ,
& self . connected_peers ,
topic_hash ,
self . config . opportunistic_graft_peers ( ) ,
| peer_id | {
! peers . contains ( peer_id )
& & ! explicit_peers . contains ( peer_id )
& & ! backoffs . is_backoff_with_slack ( topic_hash , peer_id )
& & * scores . get ( peer_id ) . unwrap_or ( & 0.0 ) > median
} ,
) ;
for peer in & peer_list {
let current_topic = to_graft . entry ( * peer ) . or_insert_with ( Vec ::new ) ;
current_topic . push ( topic_hash . clone ( ) ) ;
}
// update the mesh
debug! (
" Opportunistically graft in topic {} with peers {:?} " ,
topic_hash , peer_list
) ;
if let Some ( m ) = self . metrics . as_mut ( ) {
m . peers_included ( topic_hash , Inclusion ::Random , peer_list . len ( ) )
}
peers . extend ( peer_list ) ;
}
}
}
// Register the final count of peers in the mesh
if let Some ( m ) = self . metrics . as_mut ( ) {
m . set_mesh_peers ( topic_hash , peers . len ( ) )
}
}
// remove expired fanout topics
{
let fanout = & mut self . fanout ; // help the borrow checker
let fanout_ttl = self . config . fanout_ttl ( ) ;
self . fanout_last_pub . retain ( | topic_hash , last_pub_time | {
if * last_pub_time + fanout_ttl < Instant ::now ( ) {
debug! (
" HEARTBEAT: Fanout topic removed due to timeout. Topic: {:?} " ,
topic_hash
) ;
fanout . remove ( topic_hash ) ;
return false ;
}
true
} ) ;
}
// maintain fanout
// check if our peers are still a part of the topic
for ( topic_hash , peers ) in self . fanout . iter_mut ( ) {
let mut to_remove_peers = Vec ::new ( ) ;
let publish_threshold = match & self . peer_score {
Some ( ( _ , thresholds , _ , _ ) ) = > thresholds . publish_threshold ,
_ = > 0.0 ,
} ;
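// Remove fanout peers that are no longer subscribed to the topic, have disconnected, or
// whose score has dropped below the publish threshold.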
for peer in peers . iter ( ) {
// is the peer still subscribed to the topic?
let peer_score = * scores . get ( peer ) . unwrap_or ( & 0.0 ) ;
match self . peer_topics . get ( peer ) {
Some ( topics ) = > {
if ! topics . contains ( topic_hash ) | | peer_score < publish_threshold {
debug! (
" HEARTBEAT: Peer removed from fanout for topic: {:?} " ,
topic_hash
) ;
to_remove_peers . push ( * peer ) ;
}
}
None = > {
// remove if the peer has disconnected
to_remove_peers . push ( * peer ) ;
}
}
}
for to_remove in to_remove_peers {
peers . remove ( & to_remove ) ;
}
// not enough peers
if peers . len ( ) < self . config . mesh_n ( ) {
debug! (
" HEARTBEAT: Fanout low. Contains: {:?} needs: {:?} " ,
peers . len ( ) ,
self . config . mesh_n ( )
) ;
let needed_peers = self . config . mesh_n ( ) - peers . len ( ) ;
let explicit_peers = & self . explicit_peers ;
let new_peers = get_random_peers (
& self . topic_peers ,
& self . connected_peers ,
topic_hash ,
needed_peers ,
| peer_id | {
! peers . contains ( peer_id )
& & ! explicit_peers . contains ( peer_id )
& & * scores . get ( peer_id ) . unwrap_or ( & 0.0 ) < publish_threshold
} ,
) ;
peers . extend ( new_peers ) ;
}
}
if self . peer_score . is_some ( ) {
trace! ( " Mesh message deliveries: {:?} " , {
self . mesh
. iter ( )
. map ( | ( t , peers ) | {
(
t . clone ( ) ,
peers
. iter ( )
. map ( | p | {
(
* p ,
self . peer_score
. as_ref ( )
. expect ( " peer_score.is_some() " )
. 0
. mesh_message_deliveries ( p , t )
. unwrap_or ( 0.0 ) ,
)
} )
. collect ::< HashMap < PeerId , f64 > > ( ) ,
)
} )
. collect ::< HashMap < TopicHash , HashMap < PeerId , f64 > > > ( )
} )
}
self . emit_gossip ( ) ;
// send graft/prunes
if ! to_graft . is_empty ( ) | ! to_prune . is_empty ( ) {
self . send_graft_prune ( to_graft , to_prune , no_px ) ;
}
// piggyback pooled control messages
self . flush_control_pool ( ) ;
// shift the memcache
self . mcache . shift ( ) ;
debug! ( " Completed Heartbeat " ) ;
if let Some ( metrics ) = self . metrics . as_mut ( ) {
let duration = u64 ::try_from ( start . elapsed ( ) . as_millis ( ) ) . unwrap_or ( u64 ::MAX ) ;
metrics . observe_heartbeat_duration ( duration ) ;
}
}
/// Emits gossip - Send IHAVE messages to a random set of gossip peers. This is applied to mesh
/// and fanout peers
fn emit_gossip ( & mut self ) {
let mut rng = thread_rng ( ) ;
for ( topic_hash , peers ) in self . mesh . iter ( ) . chain ( self . fanout . iter ( ) ) {
let mut message_ids = self . mcache . get_gossip_message_ids ( topic_hash ) ;
if message_ids . is_empty ( ) {
continue ;
}
// if we are emitting more than GossipSubMaxIHaveLength message_ids, truncate the list
if message_ids . len ( ) > self . config . max_ihave_length ( ) {
// we do the truncation (with shuffling) per peer below
debug! (
" too many messages for gossip; will truncate IHAVE list ({} messages) " ,
message_ids . len ( )
) ;
} else {
// shuffle to emit in random order
message_ids . shuffle ( & mut rng ) ;
}
// dynamic number of peers to gossip based on `gossip_factor` with minimum `gossip_lazy`
let n_map = | m | {
max (
self . config . gossip_lazy ( ) ,
( self . config . gossip_factor ( ) * m as f64 ) as usize ,
)
} ;
// get gossip_lazy random peers
let to_msg_peers = get_random_peers_dynamic (
& self . topic_peers ,
& self . connected_peers ,
topic_hash ,
n_map ,
| peer | {
! peers . contains ( peer )
& & ! self . explicit_peers . contains ( peer )
& & ! self . score_below_threshold ( peer , | ts | ts . gossip_threshold ) . 0
} ,
2020-01-25 02:16:02 +11:00
) ;
2020-08-03 18:13:43 +10:00
debug! ( " Gossiping IHAVE to {} peers. " , to_msg_peers . len ( ) ) ;
for peer in to_msg_peers {
let mut peer_message_ids = message_ids . clone ( ) ;
if peer_message_ids . len ( ) > self . config . max_ihave_length ( ) {
// We do this per peer so that we emit a different set for each peer.
// we have enough redundancy in the system that this will significantly increase
// the message coverage when we do truncate.
peer_message_ids . partial_shuffle ( & mut rng , self . config . max_ihave_length ( ) ) ;
peer_message_ids . truncate ( self . config . max_ihave_length ( ) ) ;
}
// send an IHAVE message
Self ::control_pool_add (
& mut self . control_pool ,
peer ,
ControlAction ::IHave {
topic_hash : topic_hash . clone ( ) ,
message_ids : peer_message_ids ,
} ,
) ;
}
}
}
/// Handles multiple GRAFT/PRUNE messages and coalesces them into chunked gossip control
/// messages.
fn send_graft_prune (
& mut self ,
to_graft : HashMap < PeerId , Vec < TopicHash > > ,
mut to_prune : HashMap < PeerId , Vec < TopicHash > > ,
no_px : HashSet < PeerId > ,
) {
// handle the grafts and overlapping prunes per peer
for ( peer , topics ) in to_graft . into_iter ( ) {
for topic in & topics {
// inform scoring of graft
if let Some ( ( peer_score , .. ) ) = & mut self . peer_score {
peer_score . graft ( & peer , topic . clone ( ) ) ;
}
// inform the handler of the peer being added to the mesh
// If the peer did not previously exist in any mesh, inform the handler
peer_added_to_mesh (
peer ,
vec! [ topic ] ,
& self . mesh ,
self . peer_topics . get ( & peer ) ,
& mut self . events ,
& self . connected_peers ,
) ;
}
let mut control_msgs : Vec < ControlAction > = topics
. iter ( )
. map ( | topic_hash | ControlAction ::Graft {
topic_hash : topic_hash . clone ( ) ,
} )
. collect ( ) ;
// If there are prunes associated with the same peer add them.
// NOTE: In this case a peer has been added to a topic mesh, and removed from another.
// It therefore must be in at least one mesh and we do not need to inform the handler
// of its removal from another.
// The following prunes are not due to unsubscribing.
let on_unsubscribe = false ;
if let Some ( topics ) = to_prune . remove ( & peer ) {
let mut prunes = topics
. iter ( )
. map ( | topic_hash | {
self . make_prune (
topic_hash ,
& peer ,
self . config . do_px ( ) & & ! no_px . contains ( & peer ) ,
on_unsubscribe ,
)
} )
. collect ::< Vec < _ > > ( ) ;
control_msgs . append ( & mut prunes ) ;
}
// send the control messages
if self
. send_message (
peer ,
Rpc {
subscriptions : Vec ::new ( ) ,
messages : Vec ::new ( ) ,
control_msgs ,
}
. into_protobuf ( ) ,
)
. is_err ( )
{
error! ( " Failed to send control messages. Message too large " ) ;
}
}
// handle the remaining prunes
// The following prunes are not due to unsubscribing.
let on_unsubscribe = false ;
for ( peer , topics ) in to_prune . iter ( ) {
let mut remaining_prunes = Vec ::new ( ) ;
for topic_hash in topics {
let prune = self . make_prune (
topic_hash ,
peer ,
self . config . do_px ( ) & & ! no_px . contains ( peer ) ,
on_unsubscribe ,
) ;
remaining_prunes . push ( prune ) ;
// inform the handler
peer_removed_from_mesh (
* peer ,
topic_hash ,
& self . mesh ,
self . peer_topics . get ( peer ) ,
& mut self . events ,
& self . connected_peers ,
) ;
}
if self
. send_message (
* peer ,
Rpc {
subscriptions : Vec ::new ( ) ,
messages : Vec ::new ( ) ,
control_msgs : remaining_prunes ,
}
. into_protobuf ( ) ,
)
. is_err ( )
{
error! ( " Failed to send prune messages. Message too large " ) ;
}
}
}
/// Helper function which forwards a message to mesh\[topic\] peers.
///
/// Returns true if at least one peer was messaged.
fn forward_msg (
& mut self ,
msg_id : & MessageId ,
message : RawMessage ,
propagation_source : Option < & PeerId > ,
originating_peers : HashSet < PeerId > ,
) -> Result < bool , PublishError > {
// message is fully validated inform peer_score
if let Some ( ( peer_score , .. ) ) = & mut self . peer_score {
if let Some ( peer ) = propagation_source {
peer_score . deliver_message ( peer , msg_id , & message . topic ) ;
}
}
2020-01-25 02:16:02 +11:00
debug! ( " Forwarding message: {:?} " , msg_id ) ;
let mut recipient_peers = HashSet ::new ( ) ;
{
// Populate the recipient peers mapping
// Add explicit peers
for peer_id in & self . explicit_peers {
if let Some ( topics ) = self . peer_topics . get ( peer_id ) {
if Some ( peer_id ) ! = propagation_source
& & ! originating_peers . contains ( peer_id )
& & Some ( peer_id ) ! = message . source . as_ref ( )
& & topics . contains ( & message . topic )
{
recipient_peers . insert ( * peer_id ) ;
}
}
}
// add mesh peers
let topic = & message . topic ;
// mesh
if let Some ( mesh_peers ) = self . mesh . get ( topic ) {
for peer_id in mesh_peers {
if Some ( peer_id ) ! = propagation_source
& & ! originating_peers . contains ( peer_id )
& & Some ( peer_id ) ! = message . source . as_ref ( )
{
recipient_peers . insert ( * peer_id ) ;
}
}
}
}
// forward the message to peers
if ! recipient_peers . is_empty ( ) {
let event = Rpc {
subscriptions : Vec ::new ( ) ,
messages : vec ! [ message . clone ( ) ] ,
control_msgs : Vec ::new ( ) ,
}
. into_protobuf ( ) ;
let msg_bytes = event . encoded_len ( ) ;
for peer in recipient_peers . iter ( ) {
debug! ( " Sending message: {:?} to peer {:?} " , msg_id , peer ) ;
self . send_message ( * peer , event . clone ( ) ) ? ;
if let Some ( m ) = self . metrics . as_mut ( ) {
m . msg_sent ( & message . topic , msg_bytes ) ;
}
}
debug! ( " Completed forwarding message " ) ;
Ok ( true )
} else {
Ok ( false )
}
}
/// Constructs a [`RawMessage`] performing message signing if required.
pub ( crate ) fn build_raw_message (
& self ,
topic : TopicHash ,
data : Vec < u8 > ,
) -> Result < RawMessage , PublishError > {
match & self . publish_config {
PublishConfig ::Signing {
ref keypair ,
author ,
inline_key ,
} = > {
// Build and sign the message
let sequence_number : u64 = rand ::random ( ) ;
let signature = {
let message = rpc_proto ::Message {
from : Some ( author . clone ( ) . to_bytes ( ) ) ,
data : Some ( data . clone ( ) ) ,
seqno : Some ( sequence_number . to_be_bytes ( ) . to_vec ( ) ) ,
topic : topic . clone ( ) . into_string ( ) ,
signature : None ,
key : None ,
} ;
let mut buf = Vec ::with_capacity ( message . encoded_len ( ) ) ;
message
. encode ( & mut buf )
. expect ( " Buffer has sufficient capacity " ) ;
// the signature is over the bytes "libp2p-pubsub:<protobuf-message>"
let mut signature_bytes = SIGNING_PREFIX . to_vec ( ) ;
signature_bytes . extend_from_slice ( & buf ) ;
Some ( keypair . sign ( & signature_bytes ) ? )
} ;
Ok ( RawMessage {
source : Some ( * author ) ,
data ,
// To be interoperable with the go-implementation this is treated as a 64-bit
// big-endian uint.
sequence_number : Some ( sequence_number ) ,
topic ,
signature ,
key : inline_key . clone ( ) ,
validated : true , // all published messages are valid
} )
}
PublishConfig ::Author ( peer_id ) = > {
Ok ( RawMessage {
source : Some ( * peer_id ) ,
data ,
// To be interoperable with the go-implementation this is treated as a 64-bit
// big-endian uint.
sequence_number : Some ( rand ::random ( ) ) ,
topic ,
signature : None ,
key : None ,
validated : true , // all published messages are valid
} )
}
PublishConfig ::RandomAuthor = > {
Ok ( RawMessage {
source : Some ( PeerId ::random ( ) ) ,
data ,
// To be interoperable with the go-implementation this is treated as a 64-bit
// big-endian uint.
sequence_number : Some ( rand ::random ( ) ) ,
topic ,
signature : None ,
key : None ,
validated : true , // all published messages are valid
} )
}
PublishConfig ::Anonymous = > {
Ok ( RawMessage {
source : None ,
data ,
// To be interoperable with the go-implementation this is treated as a 64-bit
// big-endian uint.
sequence_number : None ,
topic ,
signature : None ,
key : None ,
validated : true , // all published messages are valid
} )
}
}
}
// adds a control action to control_pool
fn control_pool_add (
control_pool : & mut HashMap < PeerId , Vec < ControlAction > > ,
peer : PeerId ,
control : ControlAction ,
) {
control_pool
. entry ( peer )
. or_insert_with ( Vec ::new )
. push ( control ) ;
}
/// Takes each control action mapping and turns it into a message
fn flush_control_pool ( & mut self ) {
for ( peer , controls ) in self . control_pool . drain ( ) . collect ::< Vec < _ > > ( ) {
if self
. send_message (
peer ,
Rpc {
subscriptions : Vec ::new ( ) ,
messages : Vec ::new ( ) ,
control_msgs : controls ,
}
. into_protobuf ( ) ,
)
. is_err ( )
{
error! ( " Failed to flush control pool. Message too large " ) ;
}
}
// This clears all pending IWANT messages
self . pending_iwant_msgs . clear ( ) ;
}
/// Sends an RPC message to a peer. If the message exceeds the configured maximum transmit
/// size, it is fragmented into several smaller RPC messages before being sent.
fn send_message (
& mut self ,
peer_id : PeerId ,
message : rpc_proto ::Rpc ,
) -> Result < ( ) , PublishError > {
// If the message is oversized, try and fragment it. If it cannot be fragmented, log an
// error and drop the message (all individual messages should be small enough to fit in the
// max_transmit_size)
let messages = self . fragment_message ( message ) ? ;
for message in messages {
self . events
. push_back ( NetworkBehaviourAction ::NotifyHandler {
peer_id ,
event : HandlerIn ::Message ( message ) ,
handler : NotifyHandler ::Any ,
} )
}
Ok ( ( ) )
}
// If a message is too large to be sent as-is, this attempts to fragment it into smaller RPC
// messages to be sent.
fn fragment_message ( & self , rpc : rpc_proto ::Rpc ) -> Result < Vec < rpc_proto ::Rpc > , PublishError > {
if rpc . encoded_len ( ) < self . config . max_transmit_size ( ) {
return Ok ( vec! [ rpc ] ) ;
}
let new_rpc = rpc_proto ::Rpc {
subscriptions : Vec ::new ( ) ,
publish : Vec ::new ( ) ,
control : None ,
} ;
let mut rpc_list = vec! [ new_rpc . clone ( ) ] ;
// Gets an RPC if the object size will fit, otherwise create a new RPC. The last element
// will be the RPC to add an object.
macro_rules ! create_or_add_rpc {
( $object_size : ident ) = > {
let list_index = rpc_list . len ( ) - 1 ; // the list is never empty
// create a new RPC if the new object plus 5% of its size (for length prefix
// buffers) exceeds the max transmit size.
if rpc_list [ list_index ] . encoded_len ( ) + ( ( $object_size as f64 ) * 1.05 ) as usize
> self . config . max_transmit_size ( )
& & rpc_list [ list_index ] ! = new_rpc
{
// create a new rpc and use this as the current
rpc_list . push ( new_rpc . clone ( ) ) ;
}
} ;
}
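// Adds a publish message or subscription to the current RPC, starting a new RPC first if
// the item would not fit; items that individually exceed the maximum transmit size are
// rejected as too large.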
macro_rules ! add_item {
( $object : ident , $type : ident ) = > {
let object_size = $object . encoded_len ( ) ;
if object_size + 2 > self . config . max_transmit_size ( ) {
// This should not be possible. All received and published messages have already
// been vetted to fit within the size.
error! ( " Individual message too large to fragment " ) ;
return Err ( PublishError ::MessageTooLarge ) ;
}
create_or_add_rpc! ( object_size ) ;
rpc_list
. last_mut ( )
. expect ( " Must have at least one element " )
. $type
. push ( $object . clone ( ) ) ;
} ;
}
// Add messages until the limit
for message in & rpc . publish {
add_item! ( message , publish ) ;
}
for subscription in & rpc . subscriptions {
add_item! ( subscription , subscriptions ) ;
}
// Handle the control messages. If they all fit within the max_transmit_size, send them
// without fragmenting; otherwise, fragment the control messages.
let empty_control = rpc_proto ::ControlMessage ::default ( ) ;
if let Some ( control ) = rpc . control . as_ref ( ) {
if control . encoded_len ( ) + 2 > self . config . max_transmit_size ( ) {
// fragment the RPC
for ihave in & control . ihave {
let len = ihave . encoded_len ( ) ;
create_or_add_rpc! ( len ) ;
rpc_list
. last_mut ( )
. expect ( " Always an element " )
. control
. get_or_insert_with ( | | empty_control . clone ( ) )
. ihave
. push ( ihave . clone ( ) ) ;
}
for iwant in & control . iwant {
let len = iwant . encoded_len ( ) ;
create_or_add_rpc! ( len ) ;
rpc_list
. last_mut ( )
. expect ( " Always an element " )
. control
. get_or_insert_with ( | | empty_control . clone ( ) )
. iwant
. push ( iwant . clone ( ) ) ;
}
for graft in & control . graft {
let len = graft . encoded_len ( ) ;
create_or_add_rpc! ( len ) ;
rpc_list
. last_mut ( )
. expect ( " Always an element " )
. control
. get_or_insert_with ( | | empty_control . clone ( ) )
. graft
. push ( graft . clone ( ) ) ;
}
for prune in & control . prune {
let len = prune . encoded_len ( ) ;
create_or_add_rpc! ( len ) ;
rpc_list
. last_mut ( )
. expect ( " Always an element " )
. control
. get_or_insert_with ( | | empty_control . clone ( ) )
. prune
. push ( prune . clone ( ) ) ;
}
} else {
let len = control . encoded_len ( ) ;
create_or_add_rpc! ( len ) ;
rpc_list . last_mut ( ) . expect ( " Always an element " ) . control = Some ( control . clone ( ) ) ;
}
}
2021-05-14 17:16:50 +10:00
Ok ( rpc_list )
2020-08-03 18:13:43 +10:00
}

    fn on_connection_established(
        &mut self,
        ConnectionEstablished {
            peer_id,
            connection_id,
            endpoint,
            other_established,
            ..
        }: ConnectionEstablished,
    ) {
        // Diverging from the go implementation, we only consider a peer to be an outbound peer
        // if its first connection is outbound.
        if endpoint.is_dialer() && other_established == 0 && !self.px_peers.contains(&peer_id) {
            // The first connection is outbound and the peer is not from peer exchange => mark
            // it as an outbound peer
            self.outbound_peers.insert(peer_id);
        }
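        // Tracking outbound peers lets mesh maintenance (the heartbeat) keep a minimum number
        // of outbound peers in each mesh (`mesh_outbound_min` in the config), making it harder
        // for inbound-only peers to dominate a mesh.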
        // Add the IP to the peer scoring system
        if let Some((peer_score, ..)) = &mut self.peer_score {
            if let Some(ip) = get_ip_addr(endpoint.get_remote_address()) {
                peer_score.add_ip(&peer_id, ip);
            } else {
                trace!(
                    "Couldn't extract ip from endpoint of peer {} with endpoint {:?}",
                    peer_id,
                    endpoint
                )
            }
        }
        // By default we assume a peer is only a floodsub peer.
        //
        // The protocol negotiation occurs once a message is sent/received. Once this happens we
        // update the peer's type so we know which kind of routing to use.
        self.connected_peers
            .entry(peer_id)
            .or_insert(PeerConnections {
                kind: PeerKind::Floodsub,
                connections: vec![],
            })
            .connections
            .push(connection_id);
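        // Only the first established connection triggers the subscription exchange and the
        // peer-score registration below; additional connections to the same peer reuse the
        // existing state.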
        if other_established == 0 {
            // Ignore connections from blacklisted peers.
            if self.blacklisted_peers.contains(&peer_id) {
                debug!("Ignoring connection from blacklisted peer: {}", peer_id);
            } else {
                debug!("New peer connected: {}", peer_id);
                // We need to send our subscriptions to the newly-connected node.
                let mut subscriptions = vec![];
                for topic_hash in self.mesh.keys() {
                    subscriptions.push(Subscription {
                        topic_hash: topic_hash.clone(),
                        action: SubscriptionAction::Subscribe,
                    });
                }

                if !subscriptions.is_empty() {
                    // send our subscriptions to the peer
                    if self
                        .send_message(
                            peer_id,
                            Rpc {
                                messages: Vec::new(),
                                subscriptions,
                                control_msgs: Vec::new(),
                            }
                            .into_protobuf(),
                        )
                        .is_err()
                    {
                        error!("Failed to send subscriptions, message too large");
                    }
                }
            }

            // Insert an empty set of topics for this peer until they are known.
            self.peer_topics.insert(peer_id, Default::default());

            if let Some((peer_score, ..)) = &mut self.peer_score {
                peer_score.add_peer(peer_id);
            }
        }
    }

    fn on_connection_closed(
        &mut self,
        ConnectionClosed {
            peer_id,
            connection_id,
            endpoint,
            remaining_established,
            ..
        }: ConnectionClosed<<Self as NetworkBehaviour>::ConnectionHandler>,
    ) {
        // Remove IP from peer scoring system
        if let Some((peer_score, ..)) = &mut self.peer_score {
            if let Some(ip) = get_ip_addr(endpoint.get_remote_address()) {
                peer_score.remove_ip(&peer_id, &ip);
            } else {
                trace!(
                    "Couldn't extract ip from endpoint of peer {} with endpoint {:?}",
                    peer_id,
                    endpoint
                )
            }
        }
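        // If other connections to this peer remain, only the closed connection is dropped;
        // otherwise all state held for the peer is torn down below.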
        if remaining_established != 0 {
            // Remove the connection from the list
            if let Some(connections) = self.connected_peers.get_mut(&peer_id) {
                let index = connections
                    .connections
                    .iter()
                    .position(|v| v == &connection_id)
                    .expect("Previously established connection to peer must be present");
                connections.connections.remove(index);

                // If there are more connections and this peer is in a mesh, inform the first
                // connection handler.
                if !connections.connections.is_empty() {
                    if let Some(topics) = self.peer_topics.get(&peer_id) {
                        for topic in topics {
                            if let Some(mesh_peers) = self.mesh.get(topic) {
                                if mesh_peers.contains(&peer_id) {
                                    self.events
                                        .push_back(NetworkBehaviourAction::NotifyHandler {
                                            peer_id,
                                            event: HandlerIn::JoinedMesh,
                                            handler: NotifyHandler::One(connections.connections[0]),
                                        });
                                    break;
                                }
                            }
                        }
                    }
                }
            }
        } else {
            // remove from mesh, topic_peers, peer_topic and the fanout
            debug!("Peer disconnected: {}", peer_id);
            {
                let topics = match self.peer_topics.get(&peer_id) {
                    Some(topics) => topics,
                    None => {
                        debug_assert!(
                            self.blacklisted_peers.contains(&peer_id),
                            "Disconnected node not in connected list"
                        );
                        return;
                    }
                };

                // remove peer from all mappings
                for topic in topics {
                    // check the mesh for the topic
                    if let Some(mesh_peers) = self.mesh.get_mut(topic) {
                        // check if the peer is in the mesh and remove it
                        if mesh_peers.remove(&peer_id) {
                            if let Some(m) = self.metrics.as_mut() {
                                m.peers_removed(topic, Churn::Dc, 1);
                                m.set_mesh_peers(topic, mesh_peers.len());
                            }
                        };
                    }

                    // remove from topic_peers
                    if let Some(peer_list) = self.topic_peers.get_mut(topic) {
                        if !peer_list.remove(&peer_id) {
                            // debugging purposes
                            warn!(
                                "Disconnected node: {} not in topic_peers peer list",
                                peer_id
                            );
                        }
                        if let Some(m) = self.metrics.as_mut() {
                            m.set_topic_peers(topic, peer_list.len())
                        }
                    } else {
                        warn!(
                            "Disconnected node: {} with topic: {:?} not in topic_peers",
                            &peer_id, &topic
                        );
                    }

                    // remove from fanout
                    self.fanout
                        .get_mut(topic)
                        .map(|peers| peers.remove(&peer_id));
                }
            }
            // Forget px and outbound status for this peer
            self.px_peers.remove(&peer_id);
            self.outbound_peers.remove(&peer_id);

            // Remove peer from peer_topics and connected_peers
            // NOTE: It is possible the peer has already been removed from all mappings if it
            // does not support the protocol.
            self.peer_topics.remove(&peer_id);

            // If metrics are enabled, register the disconnection of a peer based on its protocol.
            if let Some(metrics) = self.metrics.as_mut() {
                let peer_kind = &self
                    .connected_peers
                    .get(&peer_id)
                    .expect("Connected peer must be registered")
                    .kind;
                metrics.peer_protocol_disconnected(peer_kind.clone());
            }

            self.connected_peers.remove(&peer_id);

            if let Some((peer_score, ..)) = &mut self.peer_score {
                peer_score.remove_peer(&peer_id);
            }
        }
    }

    fn on_address_change(
        &mut self,
        AddressChange {
            peer_id,
            old: endpoint_old,
            new: endpoint_new,
            ..
        }: AddressChange,
    ) {
        // Exchange IP in peer scoring system
        if let Some((peer_score, ..)) = &mut self.peer_score {
            if let Some(ip) = get_ip_addr(endpoint_old.get_remote_address()) {
                peer_score.remove_ip(&peer_id, &ip);
            } else {
                trace!(
                    "Couldn't extract ip from endpoint of peer {} with endpoint {:?}",
                    &peer_id,
                    endpoint_old
                )
            }
            if let Some(ip) = get_ip_addr(endpoint_new.get_remote_address()) {
                peer_score.add_ip(&peer_id, ip);
            } else {
                trace!(
                    "Couldn't extract ip from endpoint of peer {} with endpoint {:?}",
                    peer_id,
                    endpoint_new
                )
            }
        }
    }
}
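
/// Extracts the first IP address (v4 or v6) found in a [`Multiaddr`], e.g. the `10.0.0.1` in
/// `/ip4/10.0.0.1/tcp/4001`. Returns `None` if the address has no IP component (for example a
/// `/dns4/...` address).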
fn get_ip_addr(addr: &Multiaddr) -> Option<IpAddr> {
    addr.iter().find_map(|p| match p {
        Ip4(addr) => Some(IpAddr::V4(addr)),
        Ip6(addr) => Some(IpAddr::V6(addr)),
        _ => None,
    })
}

impl<C, F> NetworkBehaviour for Behaviour<C, F>
where
    C: Send + 'static + DataTransform,
    F: Send + 'static + TopicSubscriptionFilter,
{
    type ConnectionHandler = Handler;
    type OutEvent = Event;

    fn new_handler(&mut self) -> Self::ConnectionHandler {
        Handler::new(
            ProtocolConfig::new(&self.config),
            self.config.idle_timeout(),
        )
    }

    fn on_connection_handler_event(
        &mut self,
        propagation_source: PeerId,
        _connection_id: ConnectionId,
        handler_event: THandlerOutEvent<Self>,
    ) {
        match handler_event {
            HandlerEvent::PeerKind(kind) => {
                // We have identified the protocol this peer is using
                if let Some(metrics) = self.metrics.as_mut() {
                    metrics.peer_protocol_connected(kind.clone());
                }

                if let PeerKind::NotSupported = kind {
                    debug!(
                        "Peer does not support gossipsub protocols. {}",
                        propagation_source
                    );
                    self.events.push_back(NetworkBehaviourAction::GenerateEvent(
                        Event::GossipsubNotSupported {
                            peer_id: propagation_source,
                        },
                    ));
                } else if let Some(conn) = self.connected_peers.get_mut(&propagation_source) {
                    // Only change the value if the old value is Floodsub (the default set in
                    // `NetworkBehaviour::on_event` with `FromSwarm::ConnectionEstablished`).
                    // All other PeerKind changes are ignored.
                    debug!(
                        "New peer type found: {} for peer: {}",
                        kind, propagation_source
                    );
                    if let PeerKind::Floodsub = conn.kind {
                        conn.kind = kind;
                    }
                }
            }
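            // A full RPC received from the peer: subscriptions are handled first, then message
            // validation and delivery, and finally any control messages.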
            HandlerEvent::Message {
                rpc,
                invalid_messages,
            } => {
                // Handle the gossipsub RPC

                // Handle subscriptions
                // Update connected peers topics
                if !rpc.subscriptions.is_empty() {
                    self.handle_received_subscriptions(&rpc.subscriptions, &propagation_source);
                }

                // Check if peer is graylisted in which case we ignore the event
                if let (true, _) =
                    self.score_below_threshold(&propagation_source, |pst| pst.graylist_threshold)
                {
                    debug!("RPC Dropped from greylisted peer {}", propagation_source);
                    return;
                }

                // Handle any invalid messages from this peer
                if self.peer_score.is_some() {
                    for (raw_message, validation_error) in invalid_messages {
                        self.handle_invalid_message(
                            &propagation_source,
                            &raw_message,
                            RejectReason::ValidationError(validation_error),
                        )
                    }
                } else {
                    // log the invalid messages
                    for (message, validation_error) in invalid_messages {
                        warn!(
                            "Invalid message. Reason: {:?} propagation_peer {} source {:?}",
                            validation_error,
                            propagation_source.to_string(),
                            message.source
                        );
                    }
                }

                // Handle messages
                for (count, raw_message) in rpc.messages.into_iter().enumerate() {
                    // Only process the number of messages the configuration allows.
                    if self.config.max_messages_per_rpc().is_some()
                        && Some(count) >= self.config.max_messages_per_rpc()
                    {
                        warn!("Received more messages than permitted. Ignoring further messages. Processed: {}", count);
                        break;
                    }
                    self.handle_received_message(raw_message, &propagation_source);
                }
                // Handle control messages
                // Group control messages of the same kind; this minimises the number of send
                // events (the code below is still simplified to handle each kind at a time).
                let mut ihave_msgs = vec![];
                let mut graft_msgs = vec![];
                let mut prune_msgs = vec![];
                for control_msg in rpc.control_msgs {
                    match control_msg {
                        ControlAction::IHave {
                            topic_hash,
                            message_ids,
                        } => {
                            ihave_msgs.push((topic_hash, message_ids));
                        }
                        ControlAction::IWant { message_ids } => {
                            self.handle_iwant(&propagation_source, message_ids)
                        }
                        ControlAction::Graft { topic_hash } => graft_msgs.push(topic_hash),
                        ControlAction::Prune {
                            topic_hash,
                            peers,
                            backoff,
                        } => prune_msgs.push((topic_hash, peers, backoff)),
                    }
                }
                if !ihave_msgs.is_empty() {
                    self.handle_ihave(&propagation_source, ihave_msgs);
                }
                if !graft_msgs.is_empty() {
                    self.handle_graft(&propagation_source, graft_msgs);
                }
                if !prune_msgs.is_empty() {
                    self.handle_prune(&propagation_source, prune_msgs);
                }
            }
        }
    }

    fn poll(
        &mut self,
        cx: &mut Context<'_>,
        _: &mut impl PollParameters,
    ) -> Poll<NetworkBehaviourAction<Self::OutEvent, THandlerInEvent<Self>>> {
        if let Some(event) = self.events.pop_front() {
            return Poll::Ready(event);
        }

        // update scores
        if let Some((peer_score, _, interval, _)) = &mut self.peer_score {
            while let Poll::Ready(Some(())) = interval.poll_next_unpin(cx) {
                peer_score.refresh_scores();
            }
        }
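        // Run the periodic heartbeat whenever its interval fires; this performs mesh
        // maintenance and queues any gossip that is due.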
        while let Poll::Ready(Some(())) = self.heartbeat.poll_next_unpin(cx) {
            self.heartbeat();
        }

        Poll::Pending
    }

    fn on_swarm_event(&mut self, event: FromSwarm<Self::ConnectionHandler>) {
        match event {
            FromSwarm::ConnectionEstablished(connection_established) => {
                self.on_connection_established(connection_established)
            }
            FromSwarm::ConnectionClosed(connection_closed) => {
                self.on_connection_closed(connection_closed)
            }
            FromSwarm::AddressChange(address_change) => self.on_address_change(address_change),
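            // The remaining swarm events carry no information gossipsub needs and are ignored.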
            FromSwarm::DialFailure(_)
            | FromSwarm::ListenFailure(_)
            | FromSwarm::NewListener(_)
            | FromSwarm::NewListenAddr(_)
            | FromSwarm::ExpiredListenAddr(_)
            | FromSwarm::ListenerError(_)
            | FromSwarm::ListenerClosed(_)
            | FromSwarm::NewExternalAddr(_)
            | FromSwarm::ExpiredExternalAddr(_) => {}
        }
    }
}

/// This is called when peers are added to any mesh. It checks if the peer existed
/// in any other mesh. If this is the first mesh they have joined, it queues a message to notify
/// the appropriate connection handler to maintain a connection.
fn peer_added_to_mesh(
    peer_id: PeerId,
    new_topics: Vec<&TopicHash>,
    mesh: &HashMap<TopicHash, BTreeSet<PeerId>>,
    known_topics: Option<&BTreeSet<TopicHash>>,
    events: &mut VecDeque<NetworkBehaviourAction<Event, HandlerIn>>,
    connections: &HashMap<PeerId, PeerConnections>,
) {
    // Ensure there is an active connection
    let connection_id = {
        let conn = connections.get(&peer_id).expect("To be connected to peer.");
        assert!(
            !conn.connections.is_empty(),
            "Must have at least one connection"
        );
        conn.connections[0]
    };
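    // If the peer is already in a mesh for a topic other than the ones just added, the handler
    // has already been told to keep the connection alive and there is nothing more to do.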
    if let Some(topics) = known_topics {
        for topic in topics {
            if !new_topics.contains(&topic) {
                if let Some(mesh_peers) = mesh.get(topic) {
                    if mesh_peers.contains(&peer_id) {
                        // the peer is already in a mesh for another topic
                        return;
                    }
                }
            }
        }
    }
    // This is the first mesh the peer has joined, inform the handler
    events.push_back(NetworkBehaviourAction::NotifyHandler {
        peer_id,
        event: HandlerIn::JoinedMesh,
        handler: NotifyHandler::One(connection_id),
    });
}

/// This is called when peers are removed from a mesh. It checks if the peer exists
/// in any other mesh. If this was the last mesh they were part of, it queues a message to notify
/// the appropriate connection handler to no longer maintain a connection.
fn peer_removed_from_mesh(
    peer_id: PeerId,
    old_topic: &TopicHash,
    mesh: &HashMap<TopicHash, BTreeSet<PeerId>>,
    known_topics: Option<&BTreeSet<TopicHash>>,
    events: &mut VecDeque<NetworkBehaviourAction<Event, HandlerIn>>,
    connections: &HashMap<PeerId, PeerConnections>,
) {
    // Ensure there is an active connection
    let connection_id = connections
        .get(&peer_id)
        .expect("To be connected to peer.")
        .connections
        .get(0)
        .expect("There should be at least one connection to a peer.");

    if let Some(topics) = known_topics {
        for topic in topics {
            if topic != old_topic {
                if let Some(mesh_peers) = mesh.get(topic) {
                    if mesh_peers.contains(&peer_id) {
                        // the peer exists in another mesh still
                        return;
                    }
                }
            }
        }
    }
    // The peer is not in any other mesh, inform the handler
    events.push_back(NetworkBehaviourAction::NotifyHandler {
        peer_id,
        event: HandlerIn::LeftMesh,
        handler: NotifyHandler::One(*connection_id),
    });
}

/// Helper function to get a subset of random gossipsub peers for a `topic_hash`
/// filtered by the function `f`. The number of peers returned equals the output of `n_map`,
/// which receives the number of filtered peers as its input.
fn get_random_peers_dynamic(
    topic_peers: &HashMap<TopicHash, BTreeSet<PeerId>>,
    connected_peers: &HashMap<PeerId, PeerConnections>,
    topic_hash: &TopicHash,
    // maps the number of total peers to the number of selected peers
    n_map: impl Fn(usize) -> usize,
    mut f: impl FnMut(&PeerId) -> bool,
) -> BTreeSet<PeerId> {
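    // Only peers known to speak a gossipsub protocol are eligible; plain floodsub peers never
    // take part in the mesh or receive gossip.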
    let mut gossip_peers = match topic_peers.get(topic_hash) {
        // if they exist, filter the peers by `f`
        Some(peer_list) => peer_list
            .iter()
            .cloned()
            .filter(|p| {
                f(p) && match connected_peers.get(p) {
                    Some(connections) if connections.kind == PeerKind::Gossipsub => true,
                    Some(connections) if connections.kind == PeerKind::Gossipsubv1_1 => true,
                    _ => false,
                }
            })
            .collect(),
        None => Vec::new(),
    };

    // if we have less than needed, return them
    let n = n_map(gossip_peers.len());
    if gossip_peers.len() <= n {
        debug!("RANDOM PEERS: Got {:?} peers", gossip_peers.len());
        return gossip_peers.into_iter().collect();
    }

    // we have more peers than needed, shuffle them and return n of them
    let mut rng = thread_rng();
    gossip_peers.partial_shuffle(&mut rng, n);

    debug!("RANDOM PEERS: Got {:?} peers", n);

    gossip_peers.into_iter().take(n).collect()
}

/// Helper function to get a set of `n` random gossipsub peers for a `topic_hash`
/// filtered by the function `f`.
fn get_random_peers(
    topic_peers: &HashMap<TopicHash, BTreeSet<PeerId>>,
    connected_peers: &HashMap<PeerId, PeerConnections>,
    topic_hash: &TopicHash,
    n: usize,
    f: impl FnMut(&PeerId) -> bool,
) -> BTreeSet<PeerId> {
    get_random_peers_dynamic(topic_peers, connected_peers, topic_hash, |_| n, f)
}

/// Validates the combination of signing, privacy and message validation to ensure the
/// configuration will not reject published messages.
fn validate_config(
    authenticity: &MessageAuthenticity,
    validation_mode: &ValidationMode,
) -> Result<(), &'static str> {
    match validation_mode {
        ValidationMode::Anonymous => {
            if authenticity.is_signing() {
                return Err("Cannot enable message signing with an Anonymous validation mode. Consider changing either the ValidationMode or MessageAuthenticity");
            }

            if !authenticity.is_anonymous() {
                return Err("Published messages contain an author but incoming messages with an author will be rejected. Consider adjusting the validation or privacy settings in the config");
            }
        }
        ValidationMode::Strict => {
            if !authenticity.is_signing() {
                return Err("Messages will be published unsigned and incoming unsigned messages will be rejected. Consider adjusting the validation or privacy settings in the config");
            }
        }
        _ => {}
    }
    Ok(())
}

impl<C: DataTransform, F: TopicSubscriptionFilter> fmt::Debug for Behaviour<C, F> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Behaviour")
            .field("config", &self.config)
            .field("events", &self.events.len())
            .field("control_pool", &self.control_pool)
            .field("publish_config", &self.publish_config)
            .field("topic_peers", &self.topic_peers)
            .field("peer_topics", &self.peer_topics)
            .field("mesh", &self.mesh)
            .field("fanout", &self.fanout)
            .field("fanout_last_pub", &self.fanout_last_pub)
            .field("mcache", &self.mcache)
            .field("heartbeat", &self.heartbeat)
            .finish()
    }
}

impl fmt::Debug for PublishConfig {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            PublishConfig::Signing { author, .. } => {
                f.write_fmt(format_args!("PublishConfig::Signing({author})"))
            }
            PublishConfig::Author(author) => {
                f.write_fmt(format_args!("PublishConfig::Author({author})"))
            }
            PublishConfig::RandomAuthor => f.write_fmt(format_args!("PublishConfig::RandomAuthor")),
            PublishConfig::Anonymous => f.write_fmt(format_args!("PublishConfig::Anonymous")),
        }
    }
}

#[cfg(test)]
mod local_test {
    use super::*;
    use crate::IdentTopic;
    use asynchronous_codec::Encoder;
    use quickcheck::*;

    fn empty_rpc() -> Rpc {
        Rpc {
            subscriptions: Vec::new(),
            messages: Vec::new(),
            control_msgs: Vec::new(),
        }
    }

    fn test_message() -> RawMessage {
        RawMessage {
            source: Some(PeerId::random()),
            data: vec![0; 100],
            sequence_number: None,
            topic: TopicHash::from_raw("test_topic"),
            signature: None,
            key: None,
            validated: false,
        }
    }

    fn test_subscription() -> Subscription {
        Subscription {
            action: SubscriptionAction::Subscribe,
            topic_hash: IdentTopic::new("TestTopic").hash(),
        }
    }

    fn test_control() -> ControlAction {
        ControlAction::IHave {
            topic_hash: IdentTopic::new("TestTopic").hash(),
            message_ids: vec![MessageId(vec![12u8]); 5],
        }
    }
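
    // Builds arbitrary RPCs (up to ten of each item kind) so quickcheck can exercise
    // `fragment_message` with a variety of shapes and sizes.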
    impl Arbitrary for Rpc {
        fn arbitrary(g: &mut Gen) -> Self {
            let mut rpc = empty_rpc();

            for _ in 0..g.gen_range(0..10u8) {
                rpc.subscriptions.push(test_subscription());
            }
            for _ in 0..g.gen_range(0..10u8) {
                rpc.messages.push(test_message());
            }
            for _ in 0..g.gen_range(0..10u8) {
                rpc.control_msgs.push(test_control());
            }
            rpc
        }
    }

    /// Tests RPC message fragmentation
    #[test]
    fn test_message_fragmentation_deterministic() {
        let max_transmit_size = 500;
        let config = crate::config::ConfigBuilder::default()
            .max_transmit_size(max_transmit_size)
            .validation_mode(ValidationMode::Permissive)
            .build()
            .unwrap();
        let gs: Behaviour = Behaviour::new(MessageAuthenticity::RandomAuthor, config).unwrap();

        // Message under the limit should be fine.
        let mut rpc = empty_rpc();
        rpc.messages.push(test_message());

        let mut rpc_proto = rpc.clone().into_protobuf();
        let fragmented_messages = gs.fragment_message(rpc_proto.clone()).unwrap();
        assert_eq!(
            fragmented_messages,
            vec![rpc_proto.clone()],
            "Messages under the limit shouldn't be fragmented"
        );

        // Messages over the limit should be split
        while rpc_proto.encoded_len() < max_transmit_size {
            rpc.messages.push(test_message());
            rpc_proto = rpc.clone().into_protobuf();
        }

        let fragmented_messages = gs
            .fragment_message(rpc_proto)
            .expect("Should be able to fragment the messages");

        assert!(
            fragmented_messages.len() > 1,
            "the message should be fragmented"
        );

        // all fragmented messages should be under the limit
        for message in fragmented_messages {
            assert!(
                message.encoded_len() < max_transmit_size,
                "all messages should be less than the transmission size"
            );
        }
    }

    #[test]
    fn test_message_fragmentation() {
        fn prop(rpc: Rpc) {
            let max_transmit_size = 500;
            let config = crate::config::ConfigBuilder::default()
                .max_transmit_size(max_transmit_size)
                .validation_mode(ValidationMode::Permissive)
                .build()
                .unwrap();
            let gs: Behaviour = Behaviour::new(MessageAuthenticity::RandomAuthor, config).unwrap();

            let mut length_codec = unsigned_varint::codec::UviBytes::default();
            length_codec.set_max_len(max_transmit_size);
            let mut codec =
                crate::protocol::GossipsubCodec::new(length_codec, ValidationMode::Permissive);

            let rpc_proto = rpc.into_protobuf();
            let fragmented_messages = gs
                .fragment_message(rpc_proto.clone())
                .expect("Messages must be valid");

            if rpc_proto.encoded_len() < max_transmit_size {
                assert_eq!(
                    fragmented_messages.len(),
                    1,
                    "the message should not be fragmented"
                );
            } else {
                assert!(
                    fragmented_messages.len() > 1,
                    "the message should be fragmented"
                );
            }

            // all fragmented messages should be under the limit
            for message in fragmented_messages {
                assert!(
                    message.encoded_len() < max_transmit_size,
                    "all messages should be less than the transmission size: list size {} max size {}",
                    message.encoded_len(),
                    max_transmit_size
                );

                // ensure they can all be encoded
                let mut buf = bytes::BytesMut::with_capacity(message.encoded_len());
                codec.encode(message, &mut buf).unwrap()
            }
        }
        QuickCheck::new()
            .max_tests(100)
            .quickcheck(prop as fn(_) -> _)
    }
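
    // Illustrative sanity check of what `get_ip_addr` extracts from a multiaddr: the first
    // `/ip4` or `/ip6` component, or `None` when the address carries no IP at all. The
    // addresses used here are arbitrary example values.
    #[test]
    fn test_get_ip_addr_extraction() {
        let with_ip: Multiaddr = "/ip4/10.0.0.1/tcp/4001".parse().unwrap();
        assert_eq!(
            get_ip_addr(&with_ip),
            Some("10.0.0.1".parse::<IpAddr>().unwrap())
        );

        let without_ip: Multiaddr = "/dns4/example.com/tcp/443".parse().unwrap();
        assert_eq!(get_ip_addr(&without_ip), None);
    }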
}